diff --git a/github.com/coreos/etcd/.travis.yml b/github.com/coreos/etcd/.travis.yml index b82cebb20d..fb8e9b8328 100644 --- a/github.com/coreos/etcd/.travis.yml +++ b/github.com/coreos/etcd/.travis.yml @@ -4,7 +4,7 @@ go_import_path: github.com/coreos/etcd sudo: false go: - - 1.8.1 + - 1.8.3 - tip notifications: @@ -14,6 +14,8 @@ notifications: env: matrix: - TARGET=amd64 + - TARGET=darwin-amd64 + - TARGET=windows-amd64 - TARGET=arm64 - TARGET=arm - TARGET=386 @@ -24,6 +26,10 @@ matrix: allow_failures: - go: tip exclude: + - go: tip + env: TARGET=darwin-amd64 + - go: tip + env: TARGET=windows-amd64 - go: tip env: TARGET=arm - go: tip @@ -35,16 +41,21 @@ matrix: addons: apt: + sources: + - debian-sid packages: - libpcap-dev - libaspell-dev - libhunspell-dev + - shellcheck before_install: - go get -v -u github.com/chzchzchz/goword + - go get -v -u github.com/coreos/license-bill-of-materials - go get -v -u honnef.co/go/tools/cmd/gosimple - go get -v -u honnef.co/go/tools/cmd/unused - go get -v -u honnef.co/go/tools/cmd/staticcheck + - ./scripts/install-marker.sh amd64 # disable godep restore override install: @@ -56,6 +67,12 @@ script: amd64) GOARCH=amd64 ./test ;; + darwin-amd64) + GO_BUILD_FLAGS="-a -v" GOPATH="" GOOS=darwin GOARCH=amd64 ./build + ;; + windows-amd64) + GO_BUILD_FLAGS="-a -v" GOPATH="" GOOS=windows GOARCH=amd64 ./build + ;; 386) GOARCH=386 PASSES="build unit" ./test ;; diff --git a/github.com/coreos/etcd/CODE_OF_CONDUCT.md b/github.com/coreos/etcd/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..c0c20dd815 --- /dev/null +++ b/github.com/coreos/etcd/CODE_OF_CONDUCT.md @@ -0,0 +1,63 @@ +## CoreOS Community Code of Conduct + +### Contributor Code of Conduct + +As contributors and maintainers of this project, and in the interest of +fostering an open and welcoming community, we pledge to respect all people who +contribute through reporting issues, posting feature requests, updating +documentation, submitting pull requests or patches, and other activities. + +We are committed to making participation in this project a harassment-free +experience for everyone, regardless of level of experience, gender, gender +identity and expression, sexual orientation, disability, personal appearance, +body size, race, ethnicity, age, religion, or nationality. + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery +* Personal attacks +* Trolling or insulting/derogatory comments +* Public or private harassment +* Publishing others' private information, such as physical or electronic addresses, without explicit permission +* Other unethical or unprofessional conduct. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct. By adopting this Code of Conduct, +project maintainers commit themselves to fairly and consistently applying these +principles to every aspect of managing this project. Project maintainers who do +not follow or enforce the Code of Conduct may be permanently removed from the +project team. + +This code of conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting a project maintainer, Brandon Philips +, and/or Meghan Schofield +. 
+ +This Code of Conduct is adapted from the Contributor Covenant +(http://contributor-covenant.org), version 1.2.0, available at +http://contributor-covenant.org/version/1/2/0/ + +### CoreOS Events Code of Conduct + +CoreOS events are working conferences intended for professional networking and +collaboration in the CoreOS community. Attendees are expected to behave +according to professional standards and in accordance with their employer’s +policies on appropriate workplace behavior. + +While at CoreOS events or related social networking opportunities, attendees +should not engage in discriminatory or offensive speech or actions including +but not limited to gender, sexuality, race, age, disability, or religion. +Speakers should be especially aware of these concerns. + +CoreOS does not condone any statements by speakers contrary to these standards. +CoreOS reserves the right to deny entrance and/or eject from an event (without +refund) any individual found to be engaging in discriminatory or offensive +speech or actions. + +Please bring any concerns to the immediate attention of designated on-site +staff, Brandon Philips , and/or Meghan Schofield +. diff --git a/github.com/coreos/etcd/CONTRIBUTING.md b/github.com/coreos/etcd/CONTRIBUTING.md index 635f73a305..31cef1fa0d 100644 --- a/github.com/coreos/etcd/CONTRIBUTING.md +++ b/github.com/coreos/etcd/CONTRIBUTING.md @@ -5,7 +5,7 @@ etcd is Apache 2.0 licensed and accepts contributions via GitHub pull requests. # Email and chat - Email: [etcd-dev](https://groups.google.com/forum/?hl=en#!forum/etcd-dev) -- IRC: #[coreos](irc://irc.freenode.org:6667/#coreos) IRC channel on freenode.org +- IRC: #[etcd](irc://irc.freenode.org:6667/#etcd) IRC channel on freenode.org ## Getting started diff --git a/github.com/coreos/etcd/Dockerfile-release.arm64 b/github.com/coreos/etcd/Dockerfile-release.arm64 new file mode 100644 index 0000000000..d8816e58d2 --- /dev/null +++ b/github.com/coreos/etcd/Dockerfile-release.arm64 @@ -0,0 +1,11 @@ +FROM aarch64/ubuntu:16.04 + +ADD etcd /usr/local/bin/ +ADD etcdctl /usr/local/bin/ +ADD var/etcd /var/etcd +ADD var/lib/etcd /var/lib/etcd + +EXPOSE 2379 2380 + +# Define default command. +CMD ["/usr/local/bin/etcd"] diff --git a/github.com/coreos/etcd/Dockerfile-release.ppc64le b/github.com/coreos/etcd/Dockerfile-release.ppc64le index 06365c8bc9..2fb02c412c 100644 --- a/github.com/coreos/etcd/Dockerfile-release.ppc64le +++ b/github.com/coreos/etcd/Dockerfile-release.ppc64le @@ -2,8 +2,8 @@ FROM ppc64le/ubuntu:16.04 ADD etcd /usr/local/bin/ ADD etcdctl /usr/local/bin/ -RUN mkdir -p /var/etcd/ -RUN mkdir -p /var/lib/etcd/ +ADD var/etcd /var/etcd +ADD var/lib/etcd /var/lib/etcd EXPOSE 2379 2380 diff --git a/github.com/coreos/etcd/Documentation/dev-guide/api_concurrency_reference_v3.md b/github.com/coreos/etcd/Documentation/dev-guide/api_concurrency_reference_v3.md index cf534f4b69..9ae1dab757 100644 --- a/github.com/coreos/etcd/Documentation/dev-guide/api_concurrency_reference_v3.md +++ b/github.com/coreos/etcd/Documentation/dev-guide/api_concurrency_reference_v3.md @@ -6,7 +6,7 @@ This is a generated documentation. Please read the proto files for more. ##### service `Lock` (etcdserver/api/v3lock/v3lockpb/v3lock.proto) -for grpc-gateway The lock service exposes client-side locking facilities as a gRPC interface. +The lock service exposes client-side locking facilities as a gRPC interface. 
| Method | Request Type | Response Type | Description | | ------ | ------------ | ------------- | ----------- | @@ -51,7 +51,7 @@ for grpc-gateway The lock service exposes client-side locking facilities as a gR ##### service `Election` (etcdserver/api/v3election/v3electionpb/v3election.proto) -for grpc-gateway The election service exposes client-side election facilities as a gRPC interface. +The election service exposes client-side election facilities as a gRPC interface. | Method | Request Type | Response Type | Description | | ------ | ------------ | ------------- | ----------- | diff --git a/github.com/coreos/etcd/Documentation/dev-guide/api_grpc_gateway.md b/github.com/coreos/etcd/Documentation/dev-guide/api_grpc_gateway.md index 7169a7a019..a61acaf9ad 100644 --- a/github.com/coreos/etcd/Documentation/dev-guide/api_grpc_gateway.md +++ b/github.com/coreos/etcd/Documentation/dev-guide/api_grpc_gateway.md @@ -6,9 +6,11 @@ etcd v3 uses [gRPC][grpc] for its messaging protocol. The etcd project includes ## Using grpc-gateway -The gateway accepts a [JSON mapping][json-mapping] for etcd's [protocol buffer][api-ref] message definitions. Note that `key` and `value` fields are defined as byte arrays and therefore must be base64 encoded in JSON. +The gateway accepts a [JSON mapping][json-mapping] for etcd's [protocol buffer][api-ref] message definitions. Note that `key` and `value` fields are defined as byte arrays and therefore must be base64 encoded in JSON. The following examples use `curl`, but any HTTP/JSON client should work all the same. -Use `curl` to put and get a key: +### Put and get keys + +Use the `v3alpha/kv/range` and `v3alpha/kv/put` services to read and write keys: ```bash <= key. If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"), then the range request gets all keys prefixed with key. If both key and range_end are '\0', then the range request returns all keys. | bytes | | limit | limit is a limit on the number of keys returned for the request. When limit is set to 0, it is treated as no limit. | int64 | | revision | revision is the point-in-time of the key-value store to use for the range. If revision is less or equal to zero, the range is over the newest key-value store. If the revision has been compacted, ErrCompacted is returned as a response. | int64 | @@ -672,6 +686,7 @@ Empty field. | request_range | | RangeRequest | | request_put | | PutRequest | | request_delete_range | | DeleteRangeRequest | +| request_txn | | TxnRequest | @@ -694,6 +709,7 @@ Empty field. | response_range | | RangeResponse | | response_put | | PutResponse | | response_delete_range | | DeleteRangeResponse | +| response_txn | | TxnResponse | @@ -770,7 +786,7 @@ From google paxosdb paper: Our implementation hinges around a powerful primitive | range_end | range_end is the end of the range [key, range_end) to watch. If range_end is not given, only the key argument is watched. If range_end is equal to '\0', all keys greater than or equal to the key argument are watched. If the range_end is one bit larger than the given key, then all keys with the prefix (the given key) will be watched. | bytes | | start_revision | start_revision is an optional revision to watch from (inclusive). No start_revision is "now". | int64 | | progress_notify | progress_notify is set so that the etcd server will periodically send a WatchResponse with no events to the new watcher if there are no recent events. It is useful when clients wish to recover a disconnected watcher starting from a recent known revision. 
The etcd server may decide how often it will send notifications based on current load. | bool | -| filters | filter out put event. filter out delete event. filters filter the events at server side before it sends back to the watcher. | (slice of) FilterType | +| filters | filters filter the events at server side before it sends back to the watcher. | (slice of) FilterType | | prev_kv | If prev_kv is set, created watcher gets the previous KV before the event happens. If the previous KV is already compacted, nothing will be returned. | bool | @@ -794,6 +810,7 @@ From google paxosdb paper: Our implementation hinges around a powerful primitive | created | created is set to true if the response is for a create watch request. The client should record the watch_id and expect to receive events for the created watcher from the same stream. All events sent to the created watcher will attach with the same watch_id. | bool | | canceled | canceled is set to true if the response is for a cancel watch request. No further events will be sent to the canceled watcher. | bool | | compact_revision | compact_revision is set to the minimum index if a watcher tries to watch at a compacted index. This happens when creating a watcher at a compacted revision or the watcher cannot catch up with the progress of the key-value store. The client should treat the watcher as canceled and should not try to create any watcher with the same start_revision again. | int64 | +| cancel_reason | cancel_reason indicates the reason for canceling the watcher. | string | | events | | (slice of) mvccpb.Event | diff --git a/github.com/coreos/etcd/Documentation/dev-guide/apispec/swagger/rpc.swagger.json b/github.com/coreos/etcd/Documentation/dev-guide/apispec/swagger/rpc.swagger.json index 785db91ec9..28e20e0dd6 100644 --- a/github.com/coreos/etcd/Documentation/dev-guide/apispec/swagger/rpc.swagger.json +++ b/github.com/coreos/etcd/Documentation/dev-guide/apispec/swagger/rpc.swagger.json @@ -1,32 +1,27 @@ { - "swagger": "2.0", - "info": { - "title": "etcdserver/etcdserverpb/rpc.proto", - "version": "version not set" - }, - "schemes": [ - "http", - "https" - ], "consumes": [ "application/json" ], "produces": [ "application/json" ], + "schemes": [ + "http", + "https" + ], + "swagger": "2.0", + "info": { + "title": "etcdserver/etcdserverpb/rpc.proto", + "version": "version not set" + }, "paths": { "/v3alpha/auth/authenticate": { "post": { + "tags": [ + "Auth" + ], "summary": "Authenticate processes an authenticate request.", "operationId": "Authenticate", - "responses": { - "200": { - "description": "", - "schema": { - "$ref": "#/definitions/etcdserverpbAuthenticateResponse" - } - } - }, "parameters": [ { "name": "body", @@ -37,23 +32,23 @@ } } ], - "tags": [ - "Auth" - ] + "responses": { + "200": { + "description": "(empty)", + "schema": { + "$ref": "#/definitions/etcdserverpbAuthenticateResponse" + } + } + } } }, "/v3alpha/auth/disable": { "post": { + "tags": [ + "Auth" + ], "summary": "AuthDisable disables authentication.", "operationId": "AuthDisable", - "responses": { - "200": { - "description": "", - "schema": { - "$ref": "#/definitions/etcdserverpbAuthDisableResponse" - } - } - }, "parameters": [ { "name": "body", @@ -64,23 +59,23 @@ } } ], - "tags": [ - "Auth" - ] + "responses": { + "200": { + "description": "(empty)", + "schema": { + "$ref": "#/definitions/etcdserverpbAuthDisableResponse" + } + } + } } }, "/v3alpha/auth/enable": { "post": { + "tags": [ + "Auth" + ], "summary": "AuthEnable enables authentication.", "operationId": 
"AuthEnable", - "responses": { - "200": { - "description": "", - "schema": { - "$ref": "#/definitions/etcdserverpbAuthEnableResponse" - } - } - }, "parameters": [ { "name": "body", @@ -91,23 +86,23 @@ } } ], - "tags": [ - "Auth" - ] + "responses": { + "200": { + "description": "(empty)", + "schema": { + "$ref": "#/definitions/etcdserverpbAuthEnableResponse" + } + } + } } }, "/v3alpha/auth/role/add": { "post": { + "tags": [ + "Auth" + ], "summary": "RoleAdd adds a new role.", "operationId": "RoleAdd", - "responses": { - "200": { - "description": "", - "schema": { - "$ref": "#/definitions/etcdserverpbAuthRoleAddResponse" - } - } - }, "parameters": [ { "name": "body", @@ -118,23 +113,23 @@ } } ], - "tags": [ - "Auth" - ] + "responses": { + "200": { + "description": "(empty)", + "schema": { + "$ref": "#/definitions/etcdserverpbAuthRoleAddResponse" + } + } + } } }, "/v3alpha/auth/role/delete": { "post": { + "tags": [ + "Auth" + ], "summary": "RoleDelete deletes a specified role.", "operationId": "RoleDelete", - "responses": { - "200": { - "description": "", - "schema": { - "$ref": "#/definitions/etcdserverpbAuthRoleDeleteResponse" - } - } - }, "parameters": [ { "name": "body", @@ -145,23 +140,23 @@ } } ], - "tags": [ - "Auth" - ] + "responses": { + "200": { + "description": "(empty)", + "schema": { + "$ref": "#/definitions/etcdserverpbAuthRoleDeleteResponse" + } + } + } } }, "/v3alpha/auth/role/get": { "post": { + "tags": [ + "Auth" + ], "summary": "RoleGet gets detailed role information.", "operationId": "RoleGet", - "responses": { - "200": { - "description": "", - "schema": { - "$ref": "#/definitions/etcdserverpbAuthRoleGetResponse" - } - } - }, "parameters": [ { "name": "body", @@ -172,23 +167,23 @@ } } ], - "tags": [ - "Auth" - ] + "responses": { + "200": { + "description": "(empty)", + "schema": { + "$ref": "#/definitions/etcdserverpbAuthRoleGetResponse" + } + } + } } }, "/v3alpha/auth/role/grant": { "post": { + "tags": [ + "Auth" + ], "summary": "RoleGrantPermission grants a permission of a specified key or range to a specified role.", "operationId": "RoleGrantPermission", - "responses": { - "200": { - "description": "", - "schema": { - "$ref": "#/definitions/etcdserverpbAuthRoleGrantPermissionResponse" - } - } - }, "parameters": [ { "name": "body", @@ -199,23 +194,23 @@ } } ], - "tags": [ - "Auth" - ] + "responses": { + "200": { + "description": "(empty)", + "schema": { + "$ref": "#/definitions/etcdserverpbAuthRoleGrantPermissionResponse" + } + } + } } }, "/v3alpha/auth/role/list": { "post": { + "tags": [ + "Auth" + ], "summary": "RoleList gets lists of all roles.", "operationId": "RoleList", - "responses": { - "200": { - "description": "", - "schema": { - "$ref": "#/definitions/etcdserverpbAuthRoleListResponse" - } - } - }, "parameters": [ { "name": "body", @@ -226,23 +221,23 @@ } } ], - "tags": [ - "Auth" - ] + "responses": { + "200": { + "description": "(empty)", + "schema": { + "$ref": "#/definitions/etcdserverpbAuthRoleListResponse" + } + } + } } }, "/v3alpha/auth/role/revoke": { "post": { + "tags": [ + "Auth" + ], "summary": "RoleRevokePermission revokes a key or range permission of a specified role.", "operationId": "RoleRevokePermission", - "responses": { - "200": { - "description": "", - "schema": { - "$ref": "#/definitions/etcdserverpbAuthRoleRevokePermissionResponse" - } - } - }, "parameters": [ { "name": "body", @@ -253,23 +248,23 @@ } } ], - "tags": [ - "Auth" - ] + "responses": { + "200": { + "description": "(empty)", + "schema": { + "$ref": 
"#/definitions/etcdserverpbAuthRoleRevokePermissionResponse" + } + } + } } }, "/v3alpha/auth/user/add": { "post": { + "tags": [ + "Auth" + ], "summary": "UserAdd adds a new user.", "operationId": "UserAdd", - "responses": { - "200": { - "description": "", - "schema": { - "$ref": "#/definitions/etcdserverpbAuthUserAddResponse" - } - } - }, "parameters": [ { "name": "body", @@ -280,23 +275,23 @@ } } ], - "tags": [ - "Auth" - ] + "responses": { + "200": { + "description": "(empty)", + "schema": { + "$ref": "#/definitions/etcdserverpbAuthUserAddResponse" + } + } + } } }, "/v3alpha/auth/user/changepw": { "post": { + "tags": [ + "Auth" + ], "summary": "UserChangePassword changes the password of a specified user.", "operationId": "UserChangePassword", - "responses": { - "200": { - "description": "", - "schema": { - "$ref": "#/definitions/etcdserverpbAuthUserChangePasswordResponse" - } - } - }, "parameters": [ { "name": "body", @@ -307,23 +302,23 @@ } } ], - "tags": [ - "Auth" - ] + "responses": { + "200": { + "description": "(empty)", + "schema": { + "$ref": "#/definitions/etcdserverpbAuthUserChangePasswordResponse" + } + } + } } }, "/v3alpha/auth/user/delete": { "post": { + "tags": [ + "Auth" + ], "summary": "UserDelete deletes a specified user.", "operationId": "UserDelete", - "responses": { - "200": { - "description": "", - "schema": { - "$ref": "#/definitions/etcdserverpbAuthUserDeleteResponse" - } - } - }, "parameters": [ { "name": "body", @@ -334,23 +329,23 @@ } } ], - "tags": [ - "Auth" - ] + "responses": { + "200": { + "description": "(empty)", + "schema": { + "$ref": "#/definitions/etcdserverpbAuthUserDeleteResponse" + } + } + } } }, "/v3alpha/auth/user/get": { "post": { + "tags": [ + "Auth" + ], "summary": "UserGet gets detailed user information.", "operationId": "UserGet", - "responses": { - "200": { - "description": "", - "schema": { - "$ref": "#/definitions/etcdserverpbAuthUserGetResponse" - } - } - }, "parameters": [ { "name": "body", @@ -361,23 +356,23 @@ } } ], - "tags": [ - "Auth" - ] + "responses": { + "200": { + "description": "(empty)", + "schema": { + "$ref": "#/definitions/etcdserverpbAuthUserGetResponse" + } + } + } } }, "/v3alpha/auth/user/grant": { "post": { + "tags": [ + "Auth" + ], "summary": "UserGrant grants a role to a specified user.", "operationId": "UserGrantRole", - "responses": { - "200": { - "description": "", - "schema": { - "$ref": "#/definitions/etcdserverpbAuthUserGrantRoleResponse" - } - } - }, "parameters": [ { "name": "body", @@ -388,23 +383,23 @@ } } ], - "tags": [ - "Auth" - ] + "responses": { + "200": { + "description": "(empty)", + "schema": { + "$ref": "#/definitions/etcdserverpbAuthUserGrantRoleResponse" + } + } + } } }, "/v3alpha/auth/user/list": { "post": { + "tags": [ + "Auth" + ], "summary": "UserList gets a list of all users.", "operationId": "UserList", - "responses": { - "200": { - "description": "", - "schema": { - "$ref": "#/definitions/etcdserverpbAuthUserListResponse" - } - } - }, "parameters": [ { "name": "body", @@ -415,23 +410,23 @@ } } ], - "tags": [ - "Auth" - ] + "responses": { + "200": { + "description": "(empty)", + "schema": { + "$ref": "#/definitions/etcdserverpbAuthUserListResponse" + } + } + } } }, "/v3alpha/auth/user/revoke": { "post": { + "tags": [ + "Auth" + ], "summary": "UserRevokeRole revokes a role of specified user.", "operationId": "UserRevokeRole", - "responses": { - "200": { - "description": "", - "schema": { - "$ref": "#/definitions/etcdserverpbAuthUserRevokeRoleResponse" - } - } - }, "parameters": [ { "name": 
"body", @@ -442,23 +437,23 @@ } } ], - "tags": [ - "Auth" - ] + "responses": { + "200": { + "description": "(empty)", + "schema": { + "$ref": "#/definitions/etcdserverpbAuthUserRevokeRoleResponse" + } + } + } } }, "/v3alpha/cluster/member/add": { "post": { + "tags": [ + "Cluster" + ], "summary": "MemberAdd adds a member into the cluster.", "operationId": "MemberAdd", - "responses": { - "200": { - "description": "", - "schema": { - "$ref": "#/definitions/etcdserverpbMemberAddResponse" - } - } - }, "parameters": [ { "name": "body", @@ -469,23 +464,23 @@ } } ], - "tags": [ - "Cluster" - ] + "responses": { + "200": { + "description": "(empty)", + "schema": { + "$ref": "#/definitions/etcdserverpbMemberAddResponse" + } + } + } } }, "/v3alpha/cluster/member/list": { "post": { + "tags": [ + "Cluster" + ], "summary": "MemberList lists all the members in the cluster.", "operationId": "MemberList", - "responses": { - "200": { - "description": "", - "schema": { - "$ref": "#/definitions/etcdserverpbMemberListResponse" - } - } - }, "parameters": [ { "name": "body", @@ -496,23 +491,23 @@ } } ], - "tags": [ - "Cluster" - ] + "responses": { + "200": { + "description": "(empty)", + "schema": { + "$ref": "#/definitions/etcdserverpbMemberListResponse" + } + } + } } }, "/v3alpha/cluster/member/remove": { "post": { + "tags": [ + "Cluster" + ], "summary": "MemberRemove removes an existing member from the cluster.", "operationId": "MemberRemove", - "responses": { - "200": { - "description": "", - "schema": { - "$ref": "#/definitions/etcdserverpbMemberRemoveResponse" - } - } - }, "parameters": [ { "name": "body", @@ -523,23 +518,23 @@ } } ], - "tags": [ - "Cluster" - ] + "responses": { + "200": { + "description": "(empty)", + "schema": { + "$ref": "#/definitions/etcdserverpbMemberRemoveResponse" + } + } + } } }, "/v3alpha/cluster/member/update": { "post": { + "tags": [ + "Cluster" + ], "summary": "MemberUpdate updates the member configuration.", "operationId": "MemberUpdate", - "responses": { - "200": { - "description": "", - "schema": { - "$ref": "#/definitions/etcdserverpbMemberUpdateResponse" - } - } - }, "parameters": [ { "name": "body", @@ -550,23 +545,23 @@ } } ], - "tags": [ - "Cluster" - ] + "responses": { + "200": { + "description": "(empty)", + "schema": { + "$ref": "#/definitions/etcdserverpbMemberUpdateResponse" + } + } + } } }, "/v3alpha/kv/compaction": { "post": { + "tags": [ + "KV" + ], "summary": "Compact compacts the event history in the etcd key-value store. 
The key-value\nstore should be periodically compacted or the event history will continue to grow\nindefinitely.", "operationId": "Compact", - "responses": { - "200": { - "description": "", - "schema": { - "$ref": "#/definitions/etcdserverpbCompactionResponse" - } - } - }, "parameters": [ { "name": "body", @@ -577,23 +572,23 @@ } } ], - "tags": [ - "KV" - ] + "responses": { + "200": { + "description": "(empty)", + "schema": { + "$ref": "#/definitions/etcdserverpbCompactionResponse" + } + } + } } }, "/v3alpha/kv/deleterange": { "post": { + "tags": [ + "KV" + ], "summary": "DeleteRange deletes the given range from the key-value store.\nA delete request increments the revision of the key-value store\nand generates a delete event in the event history for every deleted key.", "operationId": "DeleteRange", - "responses": { - "200": { - "description": "", - "schema": { - "$ref": "#/definitions/etcdserverpbDeleteRangeResponse" - } - } - }, "parameters": [ { "name": "body", @@ -604,23 +599,23 @@ } } ], - "tags": [ - "KV" - ] + "responses": { + "200": { + "description": "(empty)", + "schema": { + "$ref": "#/definitions/etcdserverpbDeleteRangeResponse" + } + } + } } }, "/v3alpha/kv/lease/revoke": { "post": { + "tags": [ + "Lease" + ], "summary": "LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted.", "operationId": "LeaseRevoke", - "responses": { - "200": { - "description": "", - "schema": { - "$ref": "#/definitions/etcdserverpbLeaseRevokeResponse" - } - } - }, "parameters": [ { "name": "body", @@ -631,23 +626,23 @@ } } ], - "tags": [ - "Lease" - ] + "responses": { + "200": { + "description": "(empty)", + "schema": { + "$ref": "#/definitions/etcdserverpbLeaseRevokeResponse" + } + } + } } }, "/v3alpha/kv/lease/timetolive": { "post": { + "tags": [ + "Lease" + ], "summary": "LeaseTimeToLive retrieves lease information.", "operationId": "LeaseTimeToLive", - "responses": { - "200": { - "description": "", - "schema": { - "$ref": "#/definitions/etcdserverpbLeaseTimeToLiveResponse" - } - } - }, "parameters": [ { "name": "body", @@ -658,23 +653,23 @@ } } ], - "tags": [ - "Lease" - ] + "responses": { + "200": { + "description": "(empty)", + "schema": { + "$ref": "#/definitions/etcdserverpbLeaseTimeToLiveResponse" + } + } + } } }, "/v3alpha/kv/put": { "post": { + "tags": [ + "KV" + ], "summary": "Put puts the given key into the key-value store.\nA put request increments the revision of the key-value store\nand generates one event in the event history.", "operationId": "Put", - "responses": { - "200": { - "description": "", - "schema": { - "$ref": "#/definitions/etcdserverpbPutResponse" - } - } - }, "parameters": [ { "name": "body", @@ -685,23 +680,23 @@ } } ], - "tags": [ - "KV" - ] + "responses": { + "200": { + "description": "(empty)", + "schema": { + "$ref": "#/definitions/etcdserverpbPutResponse" + } + } + } } }, "/v3alpha/kv/range": { "post": { + "tags": [ + "KV" + ], "summary": "Range gets the keys in the range from the key-value store.", "operationId": "Range", - "responses": { - "200": { - "description": "", - "schema": { - "$ref": "#/definitions/etcdserverpbRangeResponse" - } - } - }, "parameters": [ { "name": "body", @@ -712,23 +707,23 @@ } } ], - "tags": [ - "KV" - ] + "responses": { + "200": { + "description": "(empty)", + "schema": { + "$ref": "#/definitions/etcdserverpbRangeResponse" + } + } + } } }, "/v3alpha/kv/txn": { "post": { + "tags": [ + "KV" + ], "summary": "Txn processes multiple requests in a single transaction.\nA txn request increments the revision of 
the key-value store\nand generates events with the same revision for every completed request.\nIt is not allowed to modify the same key several times within one txn.", "operationId": "Txn", - "responses": { - "200": { - "description": "", - "schema": { - "$ref": "#/definitions/etcdserverpbTxnResponse" - } - } - }, "parameters": [ { "name": "body", @@ -739,23 +734,23 @@ } } ], - "tags": [ - "KV" - ] + "responses": { + "200": { + "description": "(empty)", + "schema": { + "$ref": "#/definitions/etcdserverpbTxnResponse" + } + } + } } }, "/v3alpha/lease/grant": { "post": { + "tags": [ + "Lease" + ], "summary": "LeaseGrant creates a lease which expires if the server does not receive a keepAlive\nwithin a given time to live period. All keys attached to the lease will be expired and\ndeleted if the lease expires. Each expired key generates a delete event in the event history.", "operationId": "LeaseGrant", - "responses": { - "200": { - "description": "", - "schema": { - "$ref": "#/definitions/etcdserverpbLeaseGrantResponse" - } - } - }, "parameters": [ { "name": "body", @@ -766,27 +761,27 @@ } } ], - "tags": [ - "Lease" - ] + "responses": { + "200": { + "description": "(empty)", + "schema": { + "$ref": "#/definitions/etcdserverpbLeaseGrantResponse" + } + } + } } }, "/v3alpha/lease/keepalive": { "post": { + "tags": [ + "Lease" + ], "summary": "LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client\nto the server and streaming keep alive responses from the server to the client.", "operationId": "LeaseKeepAlive", - "responses": { - "200": { - "description": "(streaming responses)", - "schema": { - "$ref": "#/definitions/etcdserverpbLeaseKeepAliveResponse" - } - } - }, "parameters": [ { - "name": "body", "description": "(streaming inputs)", + "name": "body", "in": "body", "required": true, "schema": { @@ -794,23 +789,23 @@ } } ], - "tags": [ - "Lease" - ] + "responses": { + "200": { + "description": "(streaming responses)", + "schema": { + "$ref": "#/definitions/etcdserverpbLeaseKeepAliveResponse" + } + } + } } }, "/v3alpha/maintenance/alarm": { "post": { + "tags": [ + "Maintenance" + ], "summary": "Alarm activates, deactivates, and queries alarms regarding cluster health.", "operationId": "Alarm", - "responses": { - "200": { - "description": "", - "schema": { - "$ref": "#/definitions/etcdserverpbAlarmResponse" - } - } - }, "parameters": [ { "name": "body", @@ -820,24 +815,24 @@ "$ref": "#/definitions/etcdserverpbAlarmRequest" } } - ], - "tags": [ - "Maintenance" - ] + ], + "responses": { + "200": { + "description": "(empty)", + "schema": { + "$ref": "#/definitions/etcdserverpbAlarmResponse" + } + } + } } }, "/v3alpha/maintenance/defragment": { "post": { + "tags": [ + "Maintenance" + ], "summary": "Defragment defragments a member's backend database to recover storage space.", "operationId": "Defragment", - "responses": { - "200": { - "description": "", - "schema": { - "$ref": "#/definitions/etcdserverpbDefragmentResponse" - } - } - }, "parameters": [ { "name": "body", @@ -848,23 +843,23 @@ } } ], - "tags": [ - "Maintenance" - ] + "responses": { + "200": { + "description": "(empty)", + "schema": { + "$ref": "#/definitions/etcdserverpbDefragmentResponse" + } + } + } } }, "/v3alpha/maintenance/hash": { "post": { + "tags": [ + "Maintenance" + ], "summary": "Hash returns the hash of the local KV state for consistency checking purpose.\nThis is designed for testing; do not use this in production when there\nare ongoing transactions.", "operationId": "Hash", - "responses": { - 
"200": { - "description": "", - "schema": { - "$ref": "#/definitions/etcdserverpbHashResponse" - } - } - }, "parameters": [ { "name": "body", @@ -875,23 +870,23 @@ } } ], - "tags": [ - "Maintenance" - ] + "responses": { + "200": { + "description": "(empty)", + "schema": { + "$ref": "#/definitions/etcdserverpbHashResponse" + } + } + } } }, "/v3alpha/maintenance/snapshot": { "post": { + "tags": [ + "Maintenance" + ], "summary": "Snapshot sends a snapshot of the entire backend from a member over a stream to a client.", "operationId": "Snapshot", - "responses": { - "200": { - "description": "(streaming responses)", - "schema": { - "$ref": "#/definitions/etcdserverpbSnapshotResponse" - } - } - }, "parameters": [ { "name": "body", @@ -902,54 +897,81 @@ } } ], - "tags": [ - "Maintenance" - ] + "responses": { + "200": { + "description": "(streaming responses)", + "schema": { + "$ref": "#/definitions/etcdserverpbSnapshotResponse" + } + } + } } }, "/v3alpha/maintenance/status": { "post": { + "tags": [ + "Maintenance" + ], "summary": "Status gets the status of the member.", "operationId": "Status", + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/etcdserverpbStatusRequest" + } + } + ], "responses": { "200": { - "description": "", + "description": "(empty)", "schema": { "$ref": "#/definitions/etcdserverpbStatusResponse" } } - }, + } + } + }, + "/v3alpha/maintenance/transfer-leadership": { + "post": { + "tags": [ + "Maintenance" + ], + "summary": "MoveLeader requests current leader node to transfer its leadership to transferee.", + "operationId": "MoveLeader", "parameters": [ { "name": "body", "in": "body", "required": true, "schema": { - "$ref": "#/definitions/etcdserverpbStatusRequest" + "$ref": "#/definitions/etcdserverpbMoveLeaderRequest" } } ], - "tags": [ - "Maintenance" - ] + "responses": { + "200": { + "description": "(empty)", + "schema": { + "$ref": "#/definitions/etcdserverpbMoveLeaderResponse" + } + } + } } }, "/v3alpha/watch": { "post": { + "tags": [ + "Watch" + ], "summary": "Watch watches for events happening or that have happened. Both input and output\nare streams; the input stream is for creating and canceling watchers and the output\nstream sends events. One watch RPC can watch on multiple key ranges, streaming events\nfor several watches at once. 
The entire event history can be watched starting from the\nlast compaction revision.", "operationId": "Watch", - "responses": { - "200": { - "description": "(streaming responses)", - "schema": { - "$ref": "#/definitions/etcdserverpbWatchResponse" - } - } - }, "parameters": [ { - "name": "body", "description": "(streaming inputs)", + "name": "body", "in": "body", "required": true, "schema": { @@ -957,116 +979,121 @@ } } ], - "tags": [ - "Watch" - ] + "responses": { + "200": { + "description": "(streaming responses)", + "schema": { + "$ref": "#/definitions/etcdserverpbWatchResponse" + } + } + } } } }, "definitions": { "AlarmRequestAlarmAction": { "type": "string", + "default": "GET", "enum": [ "GET", "ACTIVATE", "DEACTIVATE" - ], - "default": "GET" + ] }, "CompareCompareResult": { "type": "string", + "default": "EQUAL", "enum": [ "EQUAL", "GREATER", "LESS", "NOT_EQUAL" - ], - "default": "EQUAL" + ] }, "CompareCompareTarget": { "type": "string", + "default": "VERSION", "enum": [ "VERSION", "CREATE", "MOD", "VALUE" - ], - "default": "VERSION" + ] }, "EventEventType": { "type": "string", + "default": "PUT", "enum": [ "PUT", "DELETE" - ], - "default": "PUT" + ] }, "RangeRequestSortOrder": { "type": "string", + "default": "NONE", "enum": [ "NONE", "ASCEND", "DESCEND" - ], - "default": "NONE" + ] }, "RangeRequestSortTarget": { "type": "string", + "default": "KEY", "enum": [ "KEY", "VERSION", "CREATE", "MOD", "VALUE" - ], - "default": "KEY" + ] }, "WatchCreateRequestFilterType": { + "description": " - NOPUT: filter out put event.\n - NODELETE: filter out delete event.", "type": "string", + "default": "NOPUT", "enum": [ "NOPUT", "NODELETE" - ], - "default": "NOPUT", - "description": " - NOPUT: filter out put event.\n - NODELETE: filter out delete event." + ] }, "authpbPermission": { "type": "object", + "title": "Permission is a single entity", "properties": { - "permType": { - "$ref": "#/definitions/authpbPermissionType" - }, "key": { "type": "string", "format": "byte" }, + "permType": { + "$ref": "#/definitions/authpbPermissionType" + }, "range_end": { "type": "string", "format": "byte" } - }, - "title": "Permission is a single entity" + } }, "authpbPermissionType": { "type": "string", + "default": "READ", "enum": [ "READ", "WRITE", "READWRITE" - ], - "default": "READ" + ] }, "etcdserverpbAlarmMember": { "type": "object", "properties": { + "alarm": { + "description": "alarm is the type of alarm which has been raised.", + "$ref": "#/definitions/etcdserverpbAlarmType" + }, "memberID": { + "description": "memberID is the ID of the member associated with the raised alarm.", "type": "string", - "format": "uint64", - "description": "memberID is the ID of the member associated with the raised alarm." - }, - "alarm": { - "$ref": "#/definitions/etcdserverpbAlarmType", - "description": "alarm is the type of alarm which has been raised." + "format": "uint64" } } }, @@ -1074,42 +1101,42 @@ "type": "object", "properties": { "action": { - "$ref": "#/definitions/AlarmRequestAlarmAction", - "description": "action is the kind of alarm request to issue. The action\nmay GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a\nraised alarm." + "description": "action is the kind of alarm request to issue. 
The action\nmay GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a\nraised alarm.", + "$ref": "#/definitions/AlarmRequestAlarmAction" + }, + "alarm": { + "description": "alarm is the type of alarm to consider for this request.", + "$ref": "#/definitions/etcdserverpbAlarmType" }, "memberID": { + "description": "memberID is the ID of the member associated with the alarm. If memberID is 0, the\nalarm request covers all members.", "type": "string", - "format": "uint64", - "description": "memberID is the ID of the member associated with the alarm. If memberID is 0, the\nalarm request covers all members." - }, - "alarm": { - "$ref": "#/definitions/etcdserverpbAlarmType", - "description": "alarm is the type of alarm to consider for this request." + "format": "uint64" } } }, "etcdserverpbAlarmResponse": { "type": "object", "properties": { - "header": { - "$ref": "#/definitions/etcdserverpbResponseHeader" - }, "alarms": { + "description": "alarms is a list of alarms associated with the alarm request.", "type": "array", "items": { "$ref": "#/definitions/etcdserverpbAlarmMember" - }, - "description": "alarms is a list of alarms associated with the alarm request." + } + }, + "header": { + "$ref": "#/definitions/etcdserverpbResponseHeader" } } }, "etcdserverpbAlarmType": { "type": "string", + "default": "NONE", "enum": [ "NONE", "NOSPACE" - ], - "default": "NONE" + ] }, "etcdserverpbAuthDisableRequest": { "type": "object" @@ -1137,8 +1164,8 @@ "type": "object", "properties": { "name": { - "type": "string", - "description": "name is the name of the role to add to the authentication system." + "description": "name is the name of the role to add to the authentication system.", + "type": "string" } } }, @@ -1192,12 +1219,12 @@ "type": "object", "properties": { "name": { - "type": "string", - "description": "name is the name of the role which will be granted the permission." + "description": "name is the name of the role which will be granted the permission.", + "type": "string" }, "perm": { - "$ref": "#/definitions/authpbPermission", - "description": "perm is the permission to grant to the role." + "description": "perm is the permission to grant to the role.", + "$ref": "#/definitions/authpbPermission" } } }, @@ -1229,14 +1256,14 @@ "etcdserverpbAuthRoleRevokePermissionRequest": { "type": "object", "properties": { - "role": { - "type": "string" - }, "key": { "type": "string" }, "range_end": { "type": "string" + }, + "role": { + "type": "string" } } }, @@ -1271,12 +1298,12 @@ "type": "object", "properties": { "name": { - "type": "string", - "description": "name is the name of the user whose password is being changed." + "description": "name is the name of the user whose password is being changed.", + "type": "string" }, "password": { - "type": "string", - "description": "password is the new password for the user." + "description": "password is the new password for the user.", + "type": "string" } } }, @@ -1292,8 +1319,8 @@ "type": "object", "properties": { "name": { - "type": "string", - "description": "name is the name of the user to delete." + "description": "name is the name of the user to delete.", + "type": "string" } } }, @@ -1330,13 +1357,13 @@ "etcdserverpbAuthUserGrantRoleRequest": { "type": "object", "properties": { - "user": { - "type": "string", - "description": "user is the name of the user which should be granted a given role." - }, "role": { - "type": "string", - "description": "role is the name of the role to grant to the user." 
+ "description": "role is the name of the role to grant to the user.", + "type": "string" + }, + "user": { + "description": "user is the name of the user which should be granted a given role.", + "type": "string" } } }, @@ -1408,20 +1435,20 @@ } }, "etcdserverpbCompactionRequest": { + "description": "CompactionRequest compacts the key-value store up to a given revision. All superseded keys\nwith a revision less than the compaction revision will be removed.", "type": "object", "properties": { - "revision": { - "type": "string", - "format": "int64", - "description": "revision is the key-value store revision for the compaction operation." - }, "physical": { + "description": "physical is set so the RPC will wait until the compaction is physically\napplied to the local database such that compacted entries are totally\nremoved from the backend database.", "type": "boolean", - "format": "boolean", - "description": "physical is set so the RPC will wait until the compaction is physically\napplied to the local database such that compacted entries are totally\nremoved from the backend database." + "format": "boolean" + }, + "revision": { + "description": "revision is the key-value store revision for the compaction operation.", + "type": "string", + "format": "int64" } - }, - "description": "CompactionRequest compacts the key-value store up to a given revision. All superseded keys\nwith a revision less than the compaction revision will be removed." + } }, "etcdserverpbCompactionResponse": { "type": "object", @@ -1434,38 +1461,43 @@ "etcdserverpbCompare": { "type": "object", "properties": { - "result": { - "$ref": "#/definitions/CompareCompareResult", - "description": "result is logical comparison operation for this comparison." - }, - "target": { - "$ref": "#/definitions/CompareCompareTarget", - "description": "target is the key-value field to inspect for the comparison." + "create_revision": { + "type": "string", + "format": "int64", + "title": "create_revision is the creation revision of the given key" }, "key": { + "description": "key is the subject key for the comparison operation.", "type": "string", - "format": "byte", - "description": "key is the subject key for the comparison operation." + "format": "byte" }, - "version": { + "mod_revision": { + "description": "mod_revision is the last modified revision of the given key.", "type": "string", - "format": "int64", - "title": "version is the version of the given key" + "format": "int64" }, - "create_revision": { + "range_end": { + "description": "range_end compares the given target to all keys in the range [key, range_end).\nSee RangeRequest for more details on key ranges.", "type": "string", - "format": "int64", - "title": "create_revision is the creation revision of the given key" + "format": "byte" }, - "mod_revision": { - "type": "string", - "format": "int64", - "description": "mod_revision is the last modified revision of the given key." + "result": { + "description": "result is logical comparison operation for this comparison.", + "$ref": "#/definitions/CompareCompareResult" + }, + "target": { + "description": "target is the key-value field to inspect for the comparison.", + "$ref": "#/definitions/CompareCompareTarget" }, "value": { + "description": "value is the value of the given key, in bytes.", + "type": "string", + "format": "byte" + }, + "version": { "type": "string", - "format": "byte", - "description": "value is the value of the given key, in bytes." 
+ "format": "int64", + "title": "version is the version of the given key" } } }, @@ -1484,39 +1516,39 @@ "type": "object", "properties": { "key": { + "description": "key is the first key to delete in the range.", "type": "string", - "format": "byte", - "description": "key is the first key to delete in the range." - }, - "range_end": { - "type": "string", - "format": "byte", - "description": "range_end is the key following the last key to delete for the range [key, range_end).\nIf range_end is not given, the range is defined to contain only the key argument.\nIf range_end is one bit larger than the given key, then the range is all the keys\nwith the prefix (the given key).\nIf range_end is '\\0', the range is all keys greater than or equal to the key argument." + "format": "byte" }, "prev_kv": { + "description": "If prev_kv is set, etcd gets the previous key-value pairs before deleting it.\nThe previous key-value pairs will be returned in the delete response.", "type": "boolean", - "format": "boolean", - "description": "If prev_kv is set, etcd gets the previous key-value pairs before deleting it.\nThe previous key-value pairs will be returned in the delete response." + "format": "boolean" + }, + "range_end": { + "description": "range_end is the key following the last key to delete for the range [key, range_end).\nIf range_end is not given, the range is defined to contain only the key argument.\nIf range_end is one bit larger than the given key, then the range is all the keys\nwith the prefix (the given key).\nIf range_end is '\\0', the range is all keys greater than or equal to the key argument.", + "type": "string", + "format": "byte" } } }, "etcdserverpbDeleteRangeResponse": { "type": "object", "properties": { - "header": { - "$ref": "#/definitions/etcdserverpbResponseHeader" - }, "deleted": { + "description": "deleted is the number of keys deleted by the delete range request.", "type": "string", - "format": "int64", - "description": "deleted is the number of keys deleted by the delete range request." + "format": "int64" + }, + "header": { + "$ref": "#/definitions/etcdserverpbResponseHeader" }, "prev_kvs": { + "description": "if prev_kv is set in the request, the previous key-value pairs will be returned.", "type": "array", "items": { "$ref": "#/definitions/mvccpbKeyValue" - }, - "description": "if prev_kv is set in the request, the previous key-value pairs will be returned." + } } } }, @@ -1526,49 +1558,49 @@ "etcdserverpbHashResponse": { "type": "object", "properties": { - "header": { - "$ref": "#/definitions/etcdserverpbResponseHeader" - }, "hash": { + "description": "hash is the hash value computed from the responding member's key-value store.", "type": "integer", - "format": "int64", - "description": "hash is the hash value computed from the responding member's key-value store." + "format": "int64" + }, + "header": { + "$ref": "#/definitions/etcdserverpbResponseHeader" } } }, "etcdserverpbLeaseGrantRequest": { "type": "object", "properties": { - "TTL": { + "ID": { + "description": "ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID.", "type": "string", - "format": "int64", - "description": "TTL is the advisory time-to-live in seconds." + "format": "int64" }, - "ID": { + "TTL": { + "description": "TTL is the advisory time-to-live in seconds.", "type": "string", - "format": "int64", - "description": "ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID." 
+ "format": "int64" } } }, "etcdserverpbLeaseGrantResponse": { "type": "object", "properties": { - "header": { - "$ref": "#/definitions/etcdserverpbResponseHeader" - }, "ID": { + "description": "ID is the lease ID for the granted lease.", "type": "string", - "format": "int64", - "description": "ID is the lease ID for the granted lease." + "format": "int64" }, "TTL": { + "description": "TTL is the server chosen lease time-to-live in seconds.", "type": "string", - "format": "int64", - "description": "TTL is the server chosen lease time-to-live in seconds." + "format": "int64" }, "error": { "type": "string" + }, + "header": { + "$ref": "#/definitions/etcdserverpbResponseHeader" } } }, @@ -1576,27 +1608,27 @@ "type": "object", "properties": { "ID": { + "description": "ID is the lease ID for the lease to keep alive.", "type": "string", - "format": "int64", - "description": "ID is the lease ID for the lease to keep alive." + "format": "int64" } } }, "etcdserverpbLeaseKeepAliveResponse": { "type": "object", "properties": { - "header": { - "$ref": "#/definitions/etcdserverpbResponseHeader" - }, "ID": { + "description": "ID is the lease ID from the keep alive request.", "type": "string", - "format": "int64", - "description": "ID is the lease ID from the keep alive request." + "format": "int64" }, "TTL": { + "description": "TTL is the new time-to-live for the lease.", "type": "string", - "format": "int64", - "description": "TTL is the new time-to-live for the lease." + "format": "int64" + }, + "header": { + "$ref": "#/definitions/etcdserverpbResponseHeader" } } }, @@ -1604,9 +1636,9 @@ "type": "object", "properties": { "ID": { + "description": "ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted.", "type": "string", - "format": "int64", - "description": "ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted." + "format": "int64" } } }, @@ -1622,45 +1654,45 @@ "type": "object", "properties": { "ID": { + "description": "ID is the lease ID for the lease.", "type": "string", - "format": "int64", - "description": "ID is the lease ID for the lease." + "format": "int64" }, "keys": { + "description": "keys is true to query all the keys attached to this lease.", "type": "boolean", - "format": "boolean", - "description": "keys is true to query all the keys attached to this lease." + "format": "boolean" } } }, "etcdserverpbLeaseTimeToLiveResponse": { - "type": "object", - "properties": { - "header": { - "$ref": "#/definitions/etcdserverpbResponseHeader" - }, + "type": "object", + "properties": { "ID": { + "description": "ID is the lease ID from the keep alive request.", "type": "string", - "format": "int64", - "description": "ID is the lease ID from the keep alive request." + "format": "int64" }, "TTL": { + "description": "TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds.", "type": "string", - "format": "int64", - "description": "TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds." + "format": "int64" }, "grantedTTL": { + "description": "GrantedTTL is the initial granted time in seconds upon lease creation/renewal.", "type": "string", - "format": "int64", - "description": "GrantedTTL is the initial granted time in seconds upon lease creation/renewal." 
+ "format": "int64" + }, + "header": { + "$ref": "#/definitions/etcdserverpbResponseHeader" }, "keys": { + "description": "Keys is the list of keys attached to this lease.", "type": "array", "items": { "type": "string", "format": "byte" - }, - "description": "Keys is the list of keys attached to this lease." + } } } }, @@ -1668,27 +1700,27 @@ "type": "object", "properties": { "ID": { + "description": "ID is the member ID for this member.", "type": "string", - "format": "uint64", - "description": "ID is the member ID for this member." - }, - "name": { - "type": "string", - "description": "name is the human-readable name of the member. If the member is not started, the name will be an empty string." + "format": "uint64" }, - "peerURLs": { + "clientURLs": { + "description": "clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty.", "type": "array", "items": { "type": "string" - }, - "description": "peerURLs is the list of URLs the member exposes to the cluster for communication." + } }, - "clientURLs": { + "name": { + "description": "name is the human-readable name of the member. If the member is not started, the name will be an empty string.", + "type": "string" + }, + "peerURLs": { + "description": "peerURLs is the list of URLs the member exposes to the cluster for communication.", "type": "array", "items": { "type": "string" - }, - "description": "clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty." + } } } }, @@ -1696,11 +1728,11 @@ "type": "object", "properties": { "peerURLs": { + "description": "peerURLs is the list of URLs the added member will use to communicate with the cluster.", "type": "array", "items": { "type": "string" - }, - "description": "peerURLs is the list of URLs the added member will use to communicate with the cluster." + } } } }, @@ -1711,15 +1743,15 @@ "$ref": "#/definitions/etcdserverpbResponseHeader" }, "member": { - "$ref": "#/definitions/etcdserverpbMember", - "description": "member is the member information for the added member." + "description": "member is the member information for the added member.", + "$ref": "#/definitions/etcdserverpbMember" }, "members": { + "description": "members is a list of all members after adding the new member.", "type": "array", "items": { "$ref": "#/definitions/etcdserverpbMember" - }, - "description": "members is a list of all members after adding the new member." + } } } }, @@ -1733,11 +1765,11 @@ "$ref": "#/definitions/etcdserverpbResponseHeader" }, "members": { + "description": "members is a list of all members associated with the cluster.", "type": "array", "items": { "$ref": "#/definitions/etcdserverpbMember" - }, - "description": "members is a list of all members associated with the cluster." + } } } }, @@ -1745,9 +1777,9 @@ "type": "object", "properties": { "ID": { + "description": "ID is the member ID of the member to remove.", "type": "string", - "format": "uint64", - "description": "ID is the member ID of the member to remove." + "format": "uint64" } } }, @@ -1758,11 +1790,11 @@ "$ref": "#/definitions/etcdserverpbResponseHeader" }, "members": { + "description": "members is a list of all members after removing the member.", "type": "array", "items": { "$ref": "#/definitions/etcdserverpbMember" - }, - "description": "members is a list of all members after removing the member." 
+ } } } }, @@ -1770,16 +1802,16 @@ "type": "object", "properties": { "ID": { + "description": "ID is the member ID of the member to update.", "type": "string", - "format": "uint64", - "description": "ID is the member ID of the member to update." + "format": "uint64" }, "peerURLs": { + "description": "peerURLs is the new list of URLs the member will use to communicate with the cluster.", "type": "array", "items": { "type": "string" - }, - "description": "peerURLs is the new list of URLs the member will use to communicate with the cluster." + } } } }, @@ -1790,46 +1822,64 @@ "$ref": "#/definitions/etcdserverpbResponseHeader" }, "members": { + "description": "members is a list of all members after updating the member.", "type": "array", "items": { "$ref": "#/definitions/etcdserverpbMember" - }, - "description": "members is a list of all members after updating the member." + } } } }, - "etcdserverpbPutRequest": { + "etcdserverpbMoveLeaderRequest": { "type": "object", "properties": { - "key": { + "targetID": { + "description": "targetID is the node ID for the new leader.", "type": "string", - "format": "byte", - "description": "key is the key, in bytes, to put into the key-value store." + "format": "uint64" + } + } + }, + "etcdserverpbMoveLeaderResponse": { + "type": "object", + "properties": { + "header": { + "$ref": "#/definitions/etcdserverpbResponseHeader" + } + } + }, + "etcdserverpbPutRequest": { + "type": "object", + "properties": { + "ignore_lease": { + "description": "If ignore_lease is set, etcd updates the key using its current lease.\nReturns an error if the key does not exist.", + "type": "boolean", + "format": "boolean" }, - "value": { + "ignore_value": { + "description": "If ignore_value is set, etcd updates the key using its current value.\nReturns an error if the key does not exist.", + "type": "boolean", + "format": "boolean" + }, + "key": { + "description": "key is the key, in bytes, to put into the key-value store.", "type": "string", - "format": "byte", - "description": "value is the value, in bytes, to associate with the key in the key-value store." + "format": "byte" }, "lease": { + "description": "lease is the lease ID to associate with the key in the key-value store. A lease\nvalue of 0 indicates no lease.", "type": "string", - "format": "int64", - "description": "lease is the lease ID to associate with the key in the key-value store. A lease\nvalue of 0 indicates no lease." + "format": "int64" }, "prev_kv": { + "description": "If prev_kv is set, etcd gets the previous key-value pair before changing it.\nThe previous key-value pair will be returned in the put response.", "type": "boolean", - "format": "boolean", - "description": "If prev_kv is set, etcd gets the previous key-value pair before changing it.\nThe previous key-value pair will be returned in the put response." - }, - "ignore_value": { - "type": "boolean", - "format": "boolean", - "description": "If ignore_value is set, etcd updates the key using its current value.\nReturns an error if the key does not exist." + "format": "boolean" }, - "ignore_lease": { - "type": "boolean", - "format": "boolean", - "description": "If ignore_lease is set, etcd updates the key using its current lease.\nReturns an error if the key does not exist." 
+ "value": { + "description": "value is the value, in bytes, to associate with the key in the key-value store.", + "type": "string", + "format": "byte" } } }, @@ -1840,115 +1890,118 @@ "$ref": "#/definitions/etcdserverpbResponseHeader" }, "prev_kv": { - "$ref": "#/definitions/mvccpbKeyValue", - "description": "if prev_kv is set in the request, the previous key-value pair will be returned." + "description": "if prev_kv is set in the request, the previous key-value pair will be returned.", + "$ref": "#/definitions/mvccpbKeyValue" } } }, "etcdserverpbRangeRequest": { "type": "object", "properties": { + "count_only": { + "description": "count_only when set returns only the count of the keys in the range.", + "type": "boolean", + "format": "boolean" + }, "key": { + "description": "key is the first key for the range. If range_end is not given, the request only looks up key.", "type": "string", - "format": "byte", - "description": "key is the first key for the range. If range_end is not given, the request only looks up key." + "format": "byte" }, - "range_end": { - "type": "string", - "format": "byte", - "description": "range_end is the upper bound on the requested range [key, range_end).\nIf range_end is '\\0', the range is all keys \u003e= key.\nIf range_end is key plus one (e.g., \"aa\"+1 == \"ab\", \"a\\xff\"+1 == \"b\"),\nthen the range request gets all keys prefixed with key.\nIf both key and range_end are '\\0', then the range request returns all keys." + "keys_only": { + "description": "keys_only when set returns only the keys and not the values.", + "type": "boolean", + "format": "boolean" }, "limit": { + "description": "limit is a limit on the number of keys returned for the request. When limit is set to 0,\nit is treated as no limit.", "type": "string", - "format": "int64", - "description": "limit is a limit on the number of keys returned for the request. When limit is set to 0,\nit is treated as no limit." + "format": "int64" }, - "revision": { + "max_create_revision": { + "description": "max_create_revision is the upper bound for returned key create revisions; all keys with\ngreater create revisions will be filtered away.", "type": "string", - "format": "int64", - "description": "revision is the point-in-time of the key-value store to use for the range.\nIf revision is less or equal to zero, the range is over the newest key-value store.\nIf the revision has been compacted, ErrCompacted is returned as a response." - }, - "sort_order": { - "$ref": "#/definitions/RangeRequestSortOrder", - "description": "sort_order is the order for returned sorted results." - }, - "sort_target": { - "$ref": "#/definitions/RangeRequestSortTarget", - "description": "sort_target is the key-value field to use for sorting." - }, - "serializable": { - "type": "boolean", - "format": "boolean", - "description": "serializable sets the range request to use serializable member-local reads.\nRange requests are linearizable by default; linearizable requests have higher\nlatency and lower throughput than serializable requests but reflect the current\nconsensus of the cluster. For better performance, in exchange for possible stale reads,\na serializable range request is served locally without needing to reach consensus\nwith other nodes in the cluster." + "format": "int64" }, - "keys_only": { - "type": "boolean", - "format": "boolean", - "description": "keys_only when set returns only the keys and not the values." 
+ "max_mod_revision": { + "description": "max_mod_revision is the upper bound for returned key mod revisions; all keys with\ngreater mod revisions will be filtered away.", + "type": "string", + "format": "int64" }, - "count_only": { - "type": "boolean", - "format": "boolean", - "description": "count_only when set returns only the count of the keys in the range." + "min_create_revision": { + "description": "min_create_revision is the lower bound for returned key create revisions; all keys with\nlesser create trevisions will be filtered away.", + "type": "string", + "format": "int64" }, "min_mod_revision": { + "description": "min_mod_revision is the lower bound for returned key mod revisions; all keys with\nlesser mod revisions will be filtered away.", "type": "string", - "format": "int64", - "description": "min_mod_revision is the lower bound for returned key mod revisions; all keys with\nlesser mod revisions will be filtered away." + "format": "int64" }, - "max_mod_revision": { + "range_end": { + "description": "range_end is the upper bound on the requested range [key, range_end).\nIf range_end is '\\0', the range is all keys \u003e= key.\nIf range_end is key plus one (e.g., \"aa\"+1 == \"ab\", \"a\\xff\"+1 == \"b\"),\nthen the range request gets all keys prefixed with key.\nIf both key and range_end are '\\0', then the range request returns all keys.", "type": "string", - "format": "int64", - "description": "max_mod_revision is the upper bound for returned key mod revisions; all keys with\ngreater mod revisions will be filtered away." + "format": "byte" }, - "min_create_revision": { + "revision": { + "description": "revision is the point-in-time of the key-value store to use for the range.\nIf revision is less or equal to zero, the range is over the newest key-value store.\nIf the revision has been compacted, ErrCompacted is returned as a response.", "type": "string", - "format": "int64", - "description": "min_create_revision is the lower bound for returned key create revisions; all keys with\nlesser create trevisions will be filtered away." + "format": "int64" }, - "max_create_revision": { - "type": "string", - "format": "int64", - "description": "max_create_revision is the upper bound for returned key create revisions; all keys with\ngreater create revisions will be filtered away." + "serializable": { + "description": "serializable sets the range request to use serializable member-local reads.\nRange requests are linearizable by default; linearizable requests have higher\nlatency and lower throughput than serializable requests but reflect the current\nconsensus of the cluster. 
For better performance, in exchange for possible stale reads,\na serializable range request is served locally without needing to reach consensus\nwith other nodes in the cluster.", + "type": "boolean", + "format": "boolean" + }, + "sort_order": { + "description": "sort_order is the order for returned sorted results.", + "$ref": "#/definitions/RangeRequestSortOrder" + }, + "sort_target": { + "description": "sort_target is the key-value field to use for sorting.", + "$ref": "#/definitions/RangeRequestSortTarget" } } }, "etcdserverpbRangeResponse": { "type": "object", "properties": { + "count": { + "description": "count is set to the number of keys within the range when requested.", + "type": "string", + "format": "int64" + }, "header": { "$ref": "#/definitions/etcdserverpbResponseHeader" }, "kvs": { + "description": "kvs is the list of key-value pairs matched by the range request.\nkvs is empty when count is requested.", "type": "array", "items": { "$ref": "#/definitions/mvccpbKeyValue" - }, - "description": "kvs is the list of key-value pairs matched by the range request.\nkvs is empty when count is requested." + } }, "more": { + "description": "more indicates if there are more keys to return in the requested range.", "type": "boolean", - "format": "boolean", - "description": "more indicates if there are more keys to return in the requested range." - }, - "count": { - "type": "string", - "format": "int64", - "description": "count is set to the number of keys within the range when requested." + "format": "boolean" } } }, "etcdserverpbRequestOp": { "type": "object", "properties": { - "request_range": { - "$ref": "#/definitions/etcdserverpbRangeRequest" + "request_delete_range": { + "$ref": "#/definitions/etcdserverpbDeleteRangeRequest" }, "request_put": { "$ref": "#/definitions/etcdserverpbPutRequest" }, - "request_delete_range": { - "$ref": "#/definitions/etcdserverpbDeleteRangeRequest" + "request_range": { + "$ref": "#/definitions/etcdserverpbRangeRequest" + }, + "request_txn": { + "$ref": "#/definitions/etcdserverpbTxnRequest" } } }, @@ -1956,38 +2009,41 @@ "type": "object", "properties": { "cluster_id": { + "description": "cluster_id is the ID of the cluster which sent the response.", "type": "string", - "format": "uint64", - "description": "cluster_id is the ID of the cluster which sent the response." + "format": "uint64" }, "member_id": { + "description": "member_id is the ID of the member which sent the response.", "type": "string", - "format": "uint64", - "description": "member_id is the ID of the member which sent the response." + "format": "uint64" }, - "revision": { + "raft_term": { + "description": "raft_term is the raft term when the request was applied.", "type": "string", - "format": "int64", - "description": "revision is the key-value store revision when the request was applied." + "format": "uint64" }, - "raft_term": { + "revision": { + "description": "revision is the key-value store revision when the request was applied.", "type": "string", - "format": "uint64", - "description": "raft_term is the raft term when the request was applied." 
+ "format": "int64" } } }, "etcdserverpbResponseOp": { "type": "object", "properties": { - "response_range": { - "$ref": "#/definitions/etcdserverpbRangeResponse" + "response_delete_range": { + "$ref": "#/definitions/etcdserverpbDeleteRangeResponse" }, "response_put": { "$ref": "#/definitions/etcdserverpbPutResponse" }, - "response_delete_range": { - "$ref": "#/definitions/etcdserverpbDeleteRangeResponse" + "response_range": { + "$ref": "#/definitions/etcdserverpbRangeResponse" + }, + "response_txn": { + "$ref": "#/definitions/etcdserverpbTxnResponse" } } }, @@ -1997,19 +2053,19 @@ "etcdserverpbSnapshotResponse": { "type": "object", "properties": { + "blob": { + "description": "blob contains the next chunk of the snapshot in the snapshot stream.", + "type": "string", + "format": "byte" + }, "header": { - "$ref": "#/definitions/etcdserverpbResponseHeader", - "description": "header has the current key-value store information. The first header in the snapshot\nstream indicates the point in time of the snapshot." + "description": "header has the current key-value store information. The first header in the snapshot\nstream indicates the point in time of the snapshot.", + "$ref": "#/definitions/etcdserverpbResponseHeader" }, "remaining_bytes": { "type": "string", "format": "uint64", "title": "remaining_bytes is the number of blob bytes to be sent after this message" - }, - "blob": { - "type": "string", - "format": "byte", - "description": "blob contains the next chunk of the snapshot in the snapshot stream." } } }, @@ -2019,61 +2075,61 @@ "etcdserverpbStatusResponse": { "type": "object", "properties": { - "header": { - "$ref": "#/definitions/etcdserverpbResponseHeader" - }, - "version": { - "type": "string", - "description": "version is the cluster protocol version used by the responding member." - }, "dbSize": { + "description": "dbSize is the size of the backend database, in bytes, of the responding member.", "type": "string", - "format": "int64", - "description": "dbSize is the size of the backend database, in bytes, of the responding member." + "format": "int64" + }, + "header": { + "$ref": "#/definitions/etcdserverpbResponseHeader" }, "leader": { + "description": "leader is the member ID which the responding member believes is the current leader.", "type": "string", - "format": "uint64", - "description": "leader is the member ID which the responding member believes is the current leader." + "format": "uint64" }, "raftIndex": { + "description": "raftIndex is the current raft index of the responding member.", "type": "string", - "format": "uint64", - "description": "raftIndex is the current raft index of the responding member." + "format": "uint64" }, "raftTerm": { + "description": "raftTerm is the current raft term of the responding member.", "type": "string", - "format": "uint64", - "description": "raftTerm is the current raft term of the responding member." + "format": "uint64" + }, + "version": { + "description": "version is the cluster protocol version used by the responding member.", + "type": "string" } } }, "etcdserverpbTxnRequest": { + "description": "From google paxosdb paper:\nOur implementation hinges around a powerful primitive which we call MultiOp. All other database\noperations except for iteration are implemented as a single call to MultiOp. A MultiOp is applied atomically\nand consists of three components:\n1. A list of tests called guard. Each test in guard checks a single entry in the database. 
It may check\nfor the absence or presence of a value, or compare with a given value. Two different tests in the guard\nmay apply to the same or different entries in the database. All tests in the guard are applied and\nMultiOp returns the results. If all tests are true, MultiOp executes t op (see item 2 below), otherwise\nit executes f op (see item 3 below).\n2. A list of database operations called t op. Each operation in the list is either an insert, delete, or\nlookup operation, and applies to a single database entry. Two different operations in the list may apply\nto the same or different entries in the database. These operations are executed\nif guard evaluates to\ntrue.\n3. A list of database operations called f op. Like t op, but executed if guard evaluates to false.", "type": "object", "properties": { "compare": { + "description": "compare is a list of predicates representing a conjunction of terms.\nIf the comparisons succeed, then the success requests will be processed in order,\nand the response will contain their respective responses in order.\nIf the comparisons fail, then the failure requests will be processed in order,\nand the response will contain their respective responses in order.", "type": "array", "items": { "$ref": "#/definitions/etcdserverpbCompare" - }, - "description": "compare is a list of predicates representing a conjunction of terms.\nIf the comparisons succeed, then the success requests will be processed in order,\nand the response will contain their respective responses in order.\nIf the comparisons fail, then the failure requests will be processed in order,\nand the response will contain their respective responses in order." + } }, - "success": { + "failure": { + "description": "failure is a list of requests which will be applied when compare evaluates to false.", "type": "array", "items": { "$ref": "#/definitions/etcdserverpbRequestOp" - }, - "description": "success is a list of requests which will be applied when compare evaluates to true." + } }, - "failure": { + "success": { + "description": "success is a list of requests which will be applied when compare evaluates to true.", "type": "array", "items": { "$ref": "#/definitions/etcdserverpbRequestOp" - }, - "description": "failure is a list of requests which will be applied when compare evaluates to false." + } } - }, - "description": "From google paxosdb paper:\nOur implementation hinges around a powerful primitive which we call MultiOp. All other database\noperations except for iteration are implemented as a single call to MultiOp. A MultiOp is applied atomically\nand consists of three components:\n1. A list of tests called guard. Each test in guard checks a single entry in the database. It may check\nfor the absence or presence of a value, or compare with a given value. Two different tests in the guard\nmay apply to the same or different entries in the database. All tests in the guard are applied and\nMultiOp returns the results. If all tests are true, MultiOp executes t op (see item 2 below), otherwise\nit executes f op (see item 3 below).\n2. A list of database operations called t op. Each operation in the list is either an insert, delete, or\nlookup operation, and applies to a single database entry. Two different operations in the list may apply\nto the same or different entries in the database. These operations are executed\nif guard evaluates to\ntrue.\n3. A list of database operations called f op. Like t op, but executed if guard evaluates to false." 
+ } }, "etcdserverpbTxnResponse": { "type": "object", @@ -2081,17 +2137,17 @@ "header": { "$ref": "#/definitions/etcdserverpbResponseHeader" }, - "succeeded": { - "type": "boolean", - "format": "boolean", - "description": "succeeded is set to true if the compare evaluated to true or false otherwise." - }, "responses": { + "description": "responses is a list of responses corresponding to the results from applying\nsuccess if succeeded is true or failure if succeeded is false.", "type": "array", "items": { "$ref": "#/definitions/etcdserverpbResponseOp" - }, - "description": "responses is a list of responses corresponding to the results from applying\nsuccess if succeeded is true or failure if succeeded is false." + } + }, + "succeeded": { + "description": "succeeded is set to true if the compare evaluated to true or false otherwise.", + "type": "boolean", + "format": "boolean" } } }, @@ -2099,145 +2155,161 @@ "type": "object", "properties": { "watch_id": { + "description": "watch_id is the watcher id to cancel so that no more events are transmitted.", "type": "string", - "format": "int64", - "description": "watch_id is the watcher id to cancel so that no more events are transmitted." + "format": "int64" } } }, "etcdserverpbWatchCreateRequest": { "type": "object", "properties": { - "key": { - "type": "string", - "format": "byte", - "description": "key is the key to register for watching." + "filters": { + "description": "filters filter the events at server side before it sends back to the watcher.", + "type": "array", + "items": { + "$ref": "#/definitions/WatchCreateRequestFilterType" + } }, - "range_end": { + "key": { + "description": "key is the key to register for watching.", "type": "string", - "format": "byte", - "description": "range_end is the end of the range [key, range_end) to watch. If range_end is not given,\nonly the key argument is watched. If range_end is equal to '\\0', all keys greater than\nor equal to the key argument are watched.\nIf the range_end is one bit larger than the given key,\nthen all keys with the prefix (the given key) will be watched." + "format": "byte" }, - "start_revision": { - "type": "string", - "format": "int64", - "description": "start_revision is an optional revision to watch from (inclusive). No start_revision is \"now\"." + "prev_kv": { + "description": "If prev_kv is set, created watcher gets the previous KV before the event happens.\nIf the previous KV is already compacted, nothing will be returned.", + "type": "boolean", + "format": "boolean" }, "progress_notify": { + "description": "progress_notify is set so that the etcd server will periodically send a WatchResponse with\nno events to the new watcher if there are no recent events. It is useful when clients\nwish to recover a disconnected watcher starting from a recent known revision.\nThe etcd server may decide how often it will send notifications based on current load.", "type": "boolean", - "format": "boolean", - "description": "progress_notify is set so that the etcd server will periodically send a WatchResponse with\nno events to the new watcher if there are no recent events. It is useful when clients\nwish to recover a disconnected watcher starting from a recent known revision.\nThe etcd server may decide how often it will send notifications based on current load." + "format": "boolean" }, - "filters": { - "type": "array", - "items": { - "$ref": "#/definitions/WatchCreateRequestFilterType" - }, - "description": "filters filter the events at server side before it sends back to the watcher." 
+ "range_end": { + "description": "range_end is the end of the range [key, range_end) to watch. If range_end is not given,\nonly the key argument is watched. If range_end is equal to '\\0', all keys greater than\nor equal to the key argument are watched.\nIf the range_end is one bit larger than the given key,\nthen all keys with the prefix (the given key) will be watched.", + "type": "string", + "format": "byte" }, - "prev_kv": { - "type": "boolean", - "format": "boolean", - "description": "If prev_kv is set, created watcher gets the previous KV before the event happens.\nIf the previous KV is already compacted, nothing will be returned." + "start_revision": { + "description": "start_revision is an optional revision to watch from (inclusive). No start_revision is \"now\".", + "type": "string", + "format": "int64" } } }, "etcdserverpbWatchRequest": { "type": "object", "properties": { - "create_request": { - "$ref": "#/definitions/etcdserverpbWatchCreateRequest" - }, "cancel_request": { "$ref": "#/definitions/etcdserverpbWatchCancelRequest" + }, + "create_request": { + "$ref": "#/definitions/etcdserverpbWatchCreateRequest" } } }, "etcdserverpbWatchResponse": { "type": "object", "properties": { - "header": { - "$ref": "#/definitions/etcdserverpbResponseHeader" - }, - "watch_id": { - "type": "string", - "format": "int64", - "description": "watch_id is the ID of the watcher that corresponds to the response." - }, - "created": { - "type": "boolean", - "format": "boolean", - "description": "created is set to true if the response is for a create watch request.\nThe client should record the watch_id and expect to receive events for\nthe created watcher from the same stream.\nAll events sent to the created watcher will attach with the same watch_id." + "cancel_reason": { + "description": "cancel_reason indicates the reason for canceling the watcher.", + "type": "string" }, "canceled": { + "description": "canceled is set to true if the response is for a cancel watch request.\nNo further events will be sent to the canceled watcher.", "type": "boolean", - "format": "boolean", - "description": "canceled is set to true if the response is for a cancel watch request.\nNo further events will be sent to the canceled watcher." + "format": "boolean" }, "compact_revision": { + "description": "compact_revision is set to the minimum index if a watcher tries to watch\nat a compacted index.\n\nThis happens when creating a watcher at a compacted revision or the watcher cannot\ncatch up with the progress of the key-value store.\n\nThe client should treat the watcher as canceled and should not try to create any\nwatcher with the same start_revision again.", "type": "string", - "format": "int64", - "description": "compact_revision is set to the minimum index if a watcher tries to watch\nat a compacted index.\n\nThis happens when creating a watcher at a compacted revision or the watcher cannot\ncatch up with the progress of the key-value store. \n\nThe client should treat the watcher as canceled and should not try to create any\nwatcher with the same start_revision again." 
+ "format": "int64" + }, + "created": { + "description": "created is set to true if the response is for a create watch request.\nThe client should record the watch_id and expect to receive events for\nthe created watcher from the same stream.\nAll events sent to the created watcher will attach with the same watch_id.", + "type": "boolean", + "format": "boolean" }, "events": { "type": "array", "items": { "$ref": "#/definitions/mvccpbEvent" } + }, + "header": { + "$ref": "#/definitions/etcdserverpbResponseHeader" + }, + "watch_id": { + "description": "watch_id is the ID of the watcher that corresponds to the response.", + "type": "string", + "format": "int64" } } }, "mvccpbEvent": { "type": "object", "properties": { - "type": { - "$ref": "#/definitions/EventEventType", - "description": "type is the kind of event. If type is a PUT, it indicates\nnew data has been stored to the key. If type is a DELETE,\nit indicates the key was deleted." - }, "kv": { - "$ref": "#/definitions/mvccpbKeyValue", - "description": "kv holds the KeyValue for the event.\nA PUT event contains current kv pair.\nA PUT event with kv.Version=1 indicates the creation of a key.\nA DELETE/EXPIRE event contains the deleted key with\nits modification revision set to the revision of deletion." + "description": "kv holds the KeyValue for the event.\nA PUT event contains current kv pair.\nA PUT event with kv.Version=1 indicates the creation of a key.\nA DELETE/EXPIRE event contains the deleted key with\nits modification revision set to the revision of deletion.", + "$ref": "#/definitions/mvccpbKeyValue" }, "prev_kv": { - "$ref": "#/definitions/mvccpbKeyValue", - "description": "prev_kv holds the key-value pair before the event happens." + "description": "prev_kv holds the key-value pair before the event happens.", + "$ref": "#/definitions/mvccpbKeyValue" + }, + "type": { + "description": "type is the kind of event. If type is a PUT, it indicates\nnew data has been stored to the key. If type is a DELETE,\nit indicates the key was deleted.", + "$ref": "#/definitions/EventEventType" } } }, "mvccpbKeyValue": { "type": "object", "properties": { - "key": { + "create_revision": { + "description": "create_revision is the revision of last creation on this key.", "type": "string", - "format": "byte", - "description": "key is the key in bytes. An empty key is not allowed." + "format": "int64" }, - "create_revision": { + "key": { + "description": "key is the key in bytes. An empty key is not allowed.", "type": "string", - "format": "int64", - "description": "create_revision is the revision of last creation on this key." + "format": "byte" }, - "mod_revision": { + "lease": { + "description": "lease is the ID of the lease that attached to key.\nWhen the attached lease expires, the key will be deleted.\nIf lease is 0, then no lease is attached to the key.", "type": "string", - "format": "int64", - "description": "mod_revision is the revision of last modification on this key." + "format": "int64" }, - "version": { + "mod_revision": { + "description": "mod_revision is the revision of last modification on this key.", "type": "string", - "format": "int64", - "description": "version is the version of the key. A deletion resets\nthe version to zero and any modification of the key\nincreases its version." + "format": "int64" }, "value": { + "description": "value is the value held by the key, in bytes.", "type": "string", - "format": "byte", - "description": "value is the value held by the key, in bytes." 
+ "format": "byte" }, - "lease": { + "version": { + "description": "version is the version of the key. A deletion resets\nthe version to zero and any modification of the key\nincreases its version.", "type": "string", - "format": "int64", - "description": "lease is the ID of the lease that attached to key.\nWhen the attached lease expires, the key will be deleted.\nIf lease is 0, then no lease is attached to the key." + "format": "int64" } } } - } -} + }, + "securityDefinitions": { + "ApiKey": { + "type": "apiKey", + "name": "Authorization", + "in": "header" + } + }, + "security": [ + { + "ApiKey": [] + } + ] +} \ No newline at end of file diff --git a/github.com/coreos/etcd/Documentation/dev-guide/experimental_apis.md b/github.com/coreos/etcd/Documentation/dev-guide/experimental_apis.md index c792e13fcf..aefaeebceb 100644 --- a/github.com/coreos/etcd/Documentation/dev-guide/experimental_apis.md +++ b/github.com/coreos/etcd/Documentation/dev-guide/experimental_apis.md @@ -4,8 +4,4 @@ For the most part, the etcd project is stable, but we are still moving fast! We ## The current experimental API/features are: -- [gateway][gateway]: beta, to be stable in 3.2 release -- [gRPC proxy][grpc-proxy]: alpha, to be stable in 3.2 release - -[gateway]: ../op-guide/gateway.md -[grpc-proxy]: ../op-guide/grpc_proxy.md +(none currently) diff --git a/github.com/coreos/etcd/Documentation/dev-guide/interacting_v3.md b/github.com/coreos/etcd/Documentation/dev-guide/interacting_v3.md index dd79421313..aa58b216ba 100644 --- a/github.com/coreos/etcd/Documentation/dev-guide/interacting_v3.md +++ b/github.com/coreos/etcd/Documentation/dev-guide/interacting_v3.md @@ -215,7 +215,7 @@ $ etcdctl del foo foo9 Here is the command to delete key `zoo` with the deleted key value pair returned: ```bash -$ etcdctl del --prev-kv zoo +$ etcdctl del --prev-kv zoo 1 # one key is deleted zoo # deleted key val # the value of the deleted key @@ -224,7 +224,7 @@ val # the value of the deleted key Here is the command to delete keys having prefix as `zoo`: ```bash -$ etcdctl del --prefix zoo +$ etcdctl del --prefix zoo 2 # two keys are deleted ``` @@ -290,7 +290,7 @@ barz1 Here is the command to watch on multiple keys `foo` and `zoo`: ```bash -$ etcdctl watch -i +$ etcdctl watch -i $ watch foo $ watch zoo # in another terminal: etcdctl put foo bar @@ -430,9 +430,9 @@ Here is the command to keep the same lease alive: ```bash $ etcdctl lease keep-alive 32695410dcc0ca06 -lease 32695410dcc0ca06 keepalived with TTL(100) -lease 32695410dcc0ca06 keepalived with TTL(100) -lease 32695410dcc0ca06 keepalived with TTL(100) +lease 32695410dcc0ca06 keepalived with TTL(10) +lease 32695410dcc0ca06 keepalived with TTL(10) +lease 32695410dcc0ca06 keepalived with TTL(10) ... ``` @@ -472,4 +472,3 @@ lease 694d5765fc71500b granted with TTL(500s), remaining(132s), attached keys([z # if the lease has expired or does not exist it will give the below response: Error: etcdserver: requested lease not found ``` - diff --git a/github.com/coreos/etcd/Documentation/dev-guide/local_cluster.md b/github.com/coreos/etcd/Documentation/dev-guide/local_cluster.md index cbd1f070a2..92ed1bc220 100644 --- a/github.com/coreos/etcd/Documentation/dev-guide/local_cluster.md +++ b/github.com/coreos/etcd/Documentation/dev-guide/local_cluster.md @@ -1,90 +1,163 @@ -# Setup a local cluster +# Set up a local cluster -For testing and development deployments, the quickest and easiest way is to set up a local cluster. 
For a production deployment, refer to the [clustering][clustering] section. +For testing and development deployments, the quickest and easiest way is to configure a local cluster. For a production deployment, refer to the [clustering][clustering] section. ## Local standalone cluster -Deploying an etcd cluster as a standalone cluster is straightforward. Start it with just one command: +### Starting a cluster + +Run the following to deploy an etcd cluster as a standalone cluster: ``` $ ./etcd ... ``` -The started etcd member listens on `localhost:2379` for client requests. +If the `etcd` binary is not present in the current working directory, it might be located either at `$GOPATH/bin/etcd` or at `/usr/local/bin/etcd`. Run the command appropriately. -To interact with the started cluster by using etcdctl: +The running etcd member listens on `localhost:2379` for client requests. -``` -# use API version 3 -$ export ETCDCTL_API=3 +### Interacting with the cluster -$ ./etcdctl put foo bar -OK +Use `etcdctl` to interact with the running cluster: -$ ./etcdctl get foo -bar -``` +1. Configure the environment to have `ETCDCTL_API=3` so `etcdctl` uses the etcd API version 3 instead of defaulting to version 2. + + ``` + # use API version 3 + $ export ETCDCTL_API=3 + ``` + +2. Store an example key-value pair in the cluster: + + ``` + $ ./etcdctl put foo bar + OK + ``` + + If OK is printed, storing key-value pair is successful. + +3. Retrieve the value of `foo`: + + ``` + $ ./etcdctl get foo + bar + ``` + + If `bar` is returned, interaction with the etcd cluster is working as expected. ## Local multi-member cluster -A `Procfile` at the base of this git repo is provided to easily set up a local multi-member cluster. To start a multi-member cluster go to the root of an etcd source tree and run: +### Starting a cluster -``` -# install goreman program to control Profile-based applications. -$ go get github.com/mattn/goreman -$ goreman -f Procfile start -... -``` +A `Procfile` at the base of the etcd git repository is provided to easily configure a local multi-member cluster. To start a multi-member cluster, navigate to the root of the etcd source tree and perform the following: -The started members listen on `localhost:2379`, `localhost:22379`, and `localhost:32379` for client requests respectively. +1. Install `goreman` to control Procfile-based applications: -To interact with the started cluster by using etcdctl: + ``` + $ go get github.com/mattn/goreman + ``` -``` -# use API version 3 -$ export ETCDCTL_API=3 - -$ etcdctl --write-out=table --endpoints=localhost:2379 member list -+------------------+---------+--------+------------------------+------------------------+ -| ID | STATUS | NAME | PEER ADDRS | CLIENT ADDRS | -+------------------+---------+--------+------------------------+------------------------+ -| 8211f1d0f64f3269 | started | infra1 | http://127.0.0.1:2380 | http://127.0.0.1:2379 | -| 91bc3c398fb3c146 | started | infra2 | http://127.0.0.1:22380 | http://127.0.0.1:22379 | -| fd422379fda50e48 | started | infra3 | http://127.0.0.1:32380 | http://127.0.0.1:32379 | -+------------------+---------+--------+------------------------+------------------------+ - -$ etcdctl put foo bar -OK -``` +2. Start a cluster with `goreman` using etcd's stock Procfile: -To exercise etcd's fault tolerance, kill a member: + ``` + $ goreman -f Procfile start + ``` -``` -# kill etcd2 -$ goreman run stop etcd2 + The members start running. 
They listen on `localhost:2379`, `localhost:22379`, and `localhost:32379` respectively for client requests. -$ etcdctl put key hello -OK +### Interacting with the cluster -$ etcdctl get key -hello +Use `etcdctl` to interact with the running cluster: -# try to get key from the killed member -$ etcdctl --endpoints=localhost:22379 get key -2016/04/18 23:07:35 grpc: Conn.resetTransport failed to create client transport: connection error: desc = "transport: dial tcp 127.0.0.1:22379: getsockopt: connection refused"; Reconnecting to "localhost:22379" -Error: grpc: timed out trying to connect +1. Configure the environment to have `ETCDCTL_API=3` so `etcdctl` uses the etcd API version 3 instead of defaulting to version 2. -# restart the killed member -$ goreman run restart etcd2 + ``` + # use API version 3 + $ export ETCDCTL_API=3 + ``` -# get the key from restarted member -$ etcdctl --endpoints=localhost:22379 get key -hello -``` +2. Print the list of members: + + ``` + $ etcdctl --write-out=table --endpoints=localhost:2379 member list + ``` + The list of etcd members are displayed as follows: + + ``` + +------------------+---------+--------+------------------------+------------------------+ + | ID | STATUS | NAME | PEER ADDRS | CLIENT ADDRS | + +------------------+---------+--------+------------------------+------------------------+ + | 8211f1d0f64f3269 | started | infra1 | http://127.0.0.1:2380 | http://127.0.0.1:2379 | + | 91bc3c398fb3c146 | started | infra2 | http://127.0.0.1:22380 | http://127.0.0.1:22379 | + | fd422379fda50e48 | started | infra3 | http://127.0.0.1:32380 | http://127.0.0.1:32379 | + +------------------+---------+--------+------------------------+------------------------+ + ``` + +3. Store an example key-value pair in the cluster: -To learn more about interacting with etcd, read [interacting with etcd section][interacting]. + ``` + $ etcdctl put foo bar + OK + ``` + + If OK is printed, storing key-value pair is successful. + +### Testing fault tolerance + +To exercise etcd's fault tolerance, kill a member and attempt to retrieve the key. + +1. Identify the process name of the member to be stopped. + + The `Procfile` lists the properties of the multi-member cluster. For example, consider the member with the process name, `etcd2`. + +2. Stop the member: + + ``` + # kill etcd2 + $ goreman run stop etcd2 + ``` + +3. Store a key: + + ``` + $ etcdctl put key hello + OK + ``` + +4. Retrieve the key that is stored in the previous step: + + ``` + $ etcdctl get key + hello + ``` + +5. Retrieve a key from the stopped member: + + ``` + $ etcdctl --endpoints=localhost:22379 get key + ``` + + The command should display an error caused by connection failure: + + ``` + 2017/06/18 23:07:35 grpc: Conn.resetTransport failed to create client transport: connection error: desc = "transport: dial tcp 127.0.0.1:22379: getsockopt: connection refused"; Reconnecting to "localhost:22379" + Error: grpc: timed out trying to connect + ``` +6. Restart the stopped member: + + ``` + $ goreman run restart etcd2 + ``` + +7. Get the key from the restarted member: + + ``` + $ etcdctl --endpoints=localhost:22379 get key + hello + ``` + + Restarting the member re-establish the connection. `etcdctl` will now be able to retrieve the key successfully. To learn more about interacting with etcd, read [interacting with etcd section][interacting]. 
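+Since this walkthrough stops and restarts individual members, it can help to confirm from a single command that the whole cluster is serving again. The following is a minimal sketch, assuming the default client ports from the stock `Procfile` (`2379`, `22379`, and `32379`):
+
+```bash
+# check that every member answers health probes
+$ etcdctl --endpoints=localhost:2379,localhost:22379,localhost:32379 endpoint health
+
+# show per-member status (leader, raft term, db size) as a table
+$ etcdctl --write-out=table --endpoints=localhost:2379,localhost:22379,localhost:32379 endpoint status
+```
+
+If an endpoint is reported unhealthy, restart it with `goreman run restart <member name>` as in the steps above.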
[interacting]: ./interacting_v3.md [clustering]: ../op-guide/clustering.md - diff --git a/github.com/coreos/etcd/Documentation/dev-internal/logging.md b/github.com/coreos/etcd/Documentation/dev-internal/logging.md index 2eca00dd5f..aa97dc2b5a 100644 --- a/github.com/coreos/etcd/Documentation/dev-internal/logging.md +++ b/github.com/coreos/etcd/Documentation/dev-internal/logging.md @@ -3,7 +3,7 @@ etcd uses the [capnslog][capnslog] library for logging application output categorized into *levels*. A log message's level is determined according to these conventions: * Error: Data has been lost, a request has failed for a bad reason, or a required resource has been lost - * Examples: + * Examples: * A failure to allocate disk space for WAL * Warning: (Hopefully) Temporary conditions that may cause errors, but may work fine. A replica disappearing (that may reconnect) is a warning. @@ -26,4 +26,4 @@ etcd uses the [capnslog][capnslog] library for logging application output catego * Send a normal message to a remote peer * Write a log entry to disk -[capnslog]: [https://github.com/coreos/pkg/tree/master/capnslog] +[capnslog]: https://github.com/coreos/pkg/tree/master/capnslog diff --git a/github.com/coreos/etcd/Documentation/dev-internal/release.md b/github.com/coreos/etcd/Documentation/dev-internal/release.md index 19153a0c38..890c32dafa 100644 --- a/github.com/coreos/etcd/Documentation/dev-internal/release.md +++ b/github.com/coreos/etcd/Documentation/dev-internal/release.md @@ -66,8 +66,8 @@ The following commands are used for public release sign: ``` cd release -for i in etcd-*{.zip,.tar.gz}; do gpg2 --default-key $SUBKEYID --armor --output ${i}.asc --detach-sign ${i}; done -for i in etcd-*{.zip,.tar.gz}; do gpg2 --verify ${i}.asc ${i}; done +for i in etcd-*{.zip,.tar.gz,.aci}; do gpg2 --default-key $SUBKEYID --armor --output ${i}.asc --detach-sign ${i}; done +for i in etcd-*{.zip,.tar.gz,.aci}; do gpg2 --verify ${i}.asc ${i}; done # sign zipped source code files wget https://github.com/coreos/etcd/archive/${VERSION}.zip diff --git a/github.com/coreos/etcd/Documentation/dl_build.md b/github.com/coreos/etcd/Documentation/dl_build.md index 500f0d3d6b..af6ddf25e1 100644 --- a/github.com/coreos/etcd/Documentation/dl_build.md +++ b/github.com/coreos/etcd/Documentation/dl_build.md @@ -2,7 +2,7 @@ ## System requirements -The etcd performance benchmarks run etcd on 8 vCPU, 16GB RAM, 50GB SSD GCE instances, but any relatively modern machine with low latency storage and a few gigabytes of memory should suffice for most use cases. Applications with large v2 data stores will require more memory than a large v3 data store since data is kept in anonymous memory instead of memory mapped from a file. than For running etcd on a cloud provider, we suggest at least a medium instance on AWS or a standard-1 instance on GCE. +The etcd performance benchmarks run etcd on 8 vCPU, 16GB RAM, 50GB SSD GCE instances, but any relatively modern machine with low latency storage and a few gigabytes of memory should suffice for most use cases. Applications with large v2 data stores will require more memory than a large v3 data store since data is kept in anonymous memory instead of memory mapped from a file. For running etcd on a cloud provider, see the [Example hardware configuration][example-hardware-configurations] documentation. 
## Download the pre-built binary @@ -10,16 +10,7 @@ The easiest way to get etcd is to use one of the pre-built release binaries whic ## Build the latest version -For those wanting to try the very latest version, build etcd from the `master` branch. [Go](https://golang.org/) version 1.7+ is required to build the latest version of etcd. To ensure etcd is built against well-tested libraries, etcd vendors its dependencies for official release binaries. However, etcd's vendoring is also optional to avoid potential import conflicts when embedding the etcd server or using the etcd client. - -First, confirm go 1.7+ is installed: - -```sh -# go is required -$ go version -go version go1.7.3 darwin/amd64 - -``` +For those wanting to try the very latest version, build etcd from the `master` branch. [Go](https://golang.org/) version 1.8+ is required to build the latest version of etcd. To ensure etcd is built against well-tested libraries, etcd vendors its dependencies for official release binaries. However, etcd's vendoring is also optional to avoid potential import conflicts when embedding the etcd server or using the etcd client. To build `etcd` from the `master` branch without a `GOPATH` using the official `build` script: @@ -27,7 +18,6 @@ To build `etcd` from the `master` branch without a `GOPATH` using the official ` $ git clone https://github.com/coreos/etcd.git $ cd etcd $ ./build -$ ./bin/etcd ``` To build a vendored `etcd` from the `master` branch via `go get`: @@ -37,7 +27,6 @@ To build a vendored `etcd` from the `master` branch via `go get`: $ echo $GOPATH /Users/example/go $ go get github.com/coreos/etcd/cmd/etcd -$ $GOPATH/bin/etcd ``` To build `etcd` from the `master` branch without vendoring (may not build due to upstream conflicts): @@ -47,20 +36,28 @@ To build `etcd` from the `master` branch without vendoring (may not build due to $ echo $GOPATH /Users/example/go $ go get github.com/coreos/etcd -$ $GOPATH/bin/etcd ``` ## Test the installation Check the etcd binary is built correctly by starting etcd and setting a key. -Start etcd: +### Starting etcd + +If etcd is built without using GOPATH, run the following: ``` $ ./bin/etcd ``` +If etcd is built using GOPATH, run the following: -Set a key: +``` +$ $GOPATH/bin/etcd +``` + +### Setting a key + +Run the following: ``` $ ETCDCTL_API=3 ./bin/etcdctl put foo bar @@ -73,4 +70,4 @@ If OK is printed, then etcd is working! [go]: https://golang.org/doc/install [build-script]: ../build [cmd-directory]: ../cmd - +[example-hardware-configurations]: op-guide/hardware.md#example-hardware-configurations diff --git a/github.com/coreos/etcd/Documentation/docs.md b/github.com/coreos/etcd/Documentation/docs.md index f632781d8a..29550f3cff 100644 --- a/github.com/coreos/etcd/Documentation/docs.md +++ b/github.com/coreos/etcd/Documentation/docs.md @@ -22,35 +22,50 @@ The easiest way to get started using etcd as a distributed key-value store is to ## Operating etcd clusters -Administrators who need to create reliable and scalable key-value stores for the developers they support should begin with a [cluster on multiple machines][clustering]. +Administrators who need a fault-tolerant etcd cluster for either development or production should begin with a [cluster on multiple machines][clustering]. 
- - [Setting up etcd clusters][clustering] - - [Setting up etcd gateways][gateway] - - [Setting up etcd gRPC proxy][grpc_proxy] +### Setting up etcd + + - [Configuration flags][conf] + - [Multi-member cluster][clustering] + - [gRPC proxy][grpc_proxy] + - [L4 gateway][gateway] + +### System configuration + + - [Supported systems][supported_platforms] - [Hardware recommendations][hardware] - - [Configuration][conf] - - [Security][security] - - [Authentication][authentication] - - [Monitoring][monitoring] - - [Maintenance][maintenance] - - [Understand failures][failures] - - [Disaster recovery][recovery] - - [Performance][performance] - - [Versioning][versioning] + - [Performance benchmarking][performance] + - [Tuning][tuning] ### Platform guides - - [Supported systems][supported_platforms] - - [Docker container][container_docker] - - [rkt container][container_rkt] - [Amazon Web Services][aws_platform] + - [Container Linux, systemd][container_linux_platform] - [FreeBSD][freebsd_platform] + - [Docker container][container_docker] + - [rkt container][container_rkt] + +### Security + + - [TLS][security] + - [Role-based access control][authentication] + +### Maintenance and troubleshooting + + - [Frequently asked questions][faq] + - [Monitoring][monitoring] + - [Maintenance][maintenance] + - [Failure modes][failures] + - [Disaster recovery][recovery] ### Upgrading and compatibility + - [Version numbers][versioning] - [Migrate applications from using API v2 to API v3][v2_migration] - [Upgrading a v2.3 cluster to v3.0][v3_upgrade] - [Upgrading a v3.0 cluster to v3.1][v31_upgrade] + - [Upgrading a v3.1 cluster to v3.2][v32_upgrade] ## Learning @@ -63,17 +78,13 @@ To learn more about the concepts and internals behind etcd, read the following p - Internals - [Auth subsystem][auth_design] -## Frequently Asked Questions (FAQ) - -Answers to [common questions] about etcd. - [api_ref]: dev-guide/api_reference_v3.md [api_concurrency_ref]: dev-guide/api_concurrency_reference_v3.md [api_grpc_gateway]: dev-guide/api_grpc_gateway.md [clustering]: op-guide/clustering.md [conf]: op-guide/configuration.md [system-limit]: dev-guide/limit.md -[common questions]: faq.md +[faq]: faq.md [why]: learning/why.md [data_model]: learning/data_model.md [demo]: demo.md @@ -100,10 +111,13 @@ Answers to [common questions] about etcd. [understand_apis]: learning/api.md [versioning]: op-guide/versioning.md [supported_platforms]: op-guide/supported-platform.md +[container_linux_platform]: platforms/container-linux-systemd.md [freebsd_platform]: platforms/freebsd.md [aws_platform]: platforms/aws.md [experimental]: dev-guide/experimental_apis.md [v3_upgrade]: upgrades/upgrade_3_0.md [v31_upgrade]: upgrades/upgrade_3_1.md +[v32_upgrade]: upgrades/upgrade_3_2.md [authentication]: op-guide/authentication.md [auth_design]: learning/auth_design.md +[tuning]: tuning.md diff --git a/github.com/coreos/etcd/Documentation/faq.md b/github.com/coreos/etcd/Documentation/faq.md index fe7bea60ad..ef2ce2d2d1 100644 --- a/github.com/coreos/etcd/Documentation/faq.md +++ b/github.com/coreos/etcd/Documentation/faq.md @@ -1,36 +1,40 @@ -## Frequently Asked Questions (FAQ) +# Frequently Asked Questions (FAQ) -### etcd, general +## etcd, general -#### Do clients have to send requests to the etcd leader? +### Do clients have to send requests to the etcd leader? [Raft][raft] is leader-based; the leader handles all client requests which need cluster consensus. However, the client does not need to know which node is the leader. 
Any request that requires consensus sent to a follower is automatically forwarded to the leader. Requests that do not require consensus (e.g., serialized reads) can be processed by any cluster member. -### Configuration +## Configuration -#### What is the difference between advertise-urls and listen-urls? +### What is the difference between listen--urls, advertise-client-urls or initial-advertise-peer-urls? -`listen-urls` specifies the local addresses etcd server binds to for accepting incoming connections. To listen on a port for all interfaces, specify `0.0.0.0` as the listen IP address. +`listen-client-urls` and `listen-peer-urls` specify the local addresses etcd server binds to for accepting incoming connections. To listen on a port for all interfaces, specify `0.0.0.0` as the listen IP address. -`advertise-urls` specifies the addresses etcd clients or other etcd members should use to contact the etcd server. The advertise addresses must be reachable from the remote machines. Do not advertise addresses like `localhost` or `0.0.0.0` for a production setup since these addresses are unreachable from remote machines. +`advertise-client-urls` and `initial-advertise-peer-urls` specify the addresses etcd clients or other etcd members should use to contact the etcd server. The advertise addresses must be reachable from the remote machines. Do not advertise addresses like `localhost` or `0.0.0.0` for a production setup since these addresses are unreachable from remote machines. -### Deployment +### Why doesn't changing `--listen-peer-urls` or `--initial-advertise-peer-urls` update the advertised peer URLs in `etcdctl member list`? -#### System requirements +A member's advertised peer URLs come from `--initial-advertise-peer-urls` on initial cluster boot. Changing the listen peer URLs or the initial advertise peers after booting the member won't affect the exported advertise peer URLs since changes must go through quorum to avoid membership configuration split brain. Use `etcdctl member update` to update a member's peer URLs. + +## Deployment + +### System requirements Since etcd writes data to disk, SSD is highly recommended. To prevent performance degradation or unintentionally overloading the key-value store, etcd enforces a 2GB default storage size quota, configurable up to 8GB. To avoid swapping or running out of memory, the machine should have at least as much RAM to cover the quota. At CoreOS, an etcd cluster is usually deployed on dedicated CoreOS Container Linux machines with dual-core processors, 2GB of RAM, and 80GB of SSD *at the very least*. **Note that performance is intrinsically workload dependent; please test before production deployment**. See [hardware][hardware-setup] for more recommendations. Most stable production environment is Linux operating system with amd64 architecture; see [supported platform][supported-platform] for more. -#### Why an odd number of cluster members? +### Why an odd number of cluster members? An etcd cluster needs a majority of nodes, a quorum, to agree on updates to the cluster state. For a cluster with n members, quorum is (n/2)+1. For any odd-sized cluster, adding one node will always increase the number of nodes necessary for quorum. Although adding a node to an odd-sized cluster appears better since there are more machines, the fault tolerance is worse since exactly the same number of nodes may fail without losing quorum but there are more nodes that can fail. 
If the cluster is in a state where it can't tolerate any more failures, adding a node before removing nodes is dangerous because if the new node fails to register with the cluster (e.g., the address is misconfigured), quorum will be permanently lost. -#### What is maximum cluster size? +### What is maximum cluster size? Theoretically, there is no hard limit. However, an etcd cluster probably should have no more than seven nodes. [Google Chubby lock service][chubby], similar to etcd and widely deployed within Google for many years, suggests running five nodes. A 5-member etcd cluster can tolerate two member failures, which is enough in most cases. Although larger clusters provide better fault tolerance, the write performance suffers because data must be replicated across more machines. -#### What is failure tolerance? +### What is failure tolerance? An etcd cluster operates so long as a member quorum can be established. If quorum is lost through transient network failures (e.g., partitions), etcd automatically and safely resumes once the network recovers and restores quorum; Raft enforces cluster consistency. For power loss, etcd persists the Raft log to disk; etcd replays the log to the point of failure and resumes cluster participation. For permanent hardware failure, the node may be removed from the cluster through [runtime reconfiguration][runtime reconfiguration]. @@ -50,19 +54,19 @@ It is recommended to have an odd number of members in a cluster. An odd-size clu Adding a member to bring the size of cluster up to an even number doesn't buy additional fault tolerance. Likewise, during a network partition, an odd number of members guarantees that there will always be a majority partition that can continue to operate and be the source of truth when the partition ends. -#### Does etcd work in cross-region or cross data center deployments? +### Does etcd work in cross-region or cross data center deployments? Deploying etcd across regions improves etcd's fault tolerance since members are in separate failure domains. The cost is higher consensus request latency from crossing data center boundaries. Since etcd relies on a member quorum for consensus, the latency from crossing data centers will be somewhat pronounced because at least a majority of cluster members must respond to consensus requests. Additionally, cluster data must be replicated across all peers, so there will be bandwidth cost as well. With longer latencies, the default etcd configuration may cause frequent elections or heartbeat timeouts. See [tuning] for adjusting timeouts for high latency deployments. -### Operation +## Operation -#### How to backup a etcd cluster? +### How to backup a etcd cluster? etcdctl provides a `snapshot` command to create backups. See [backup][backup] for more details. -#### Should I add a member before removing an unhealthy member? +### Should I add a member before removing an unhealthy member? When replacing an etcd node, it's important to remove the member first and then add its replacement. @@ -74,7 +78,7 @@ Additionally, that new member is risky because it may turn out to be misconfigur On the other hand, if the downed member is removed from cluster membership first, the number of members becomes 2 and the quorum remains at 2. Following that removal by adding a new member will also keep the quorum steady at 2. So, even if the new node can't be brought up, it's still possible to remove the new member through quorum on the remaining live members. -#### Why won't etcd accept my membership changes? 
+### Why won't etcd accept my membership changes? etcd sets `strict-reconfig-check` in order to reject reconfiguration requests that would cause quorum loss. Abandoning quorum is really risky (especially when the cluster is already unhealthy). Although it may be tempting to disable quorum checking if there's quorum loss to add a new member, this could lead to full fledged cluster inconsistency. For many applications, this will make the problem even worse ("disk geometry corruption" being a candidate for most terrifying). @@ -82,16 +86,32 @@ etcd sets `strict-reconfig-check` in order to reject reconfiguration requests th This is intentional; disk latency is part of leader liveness. Suppose the cluster leader takes a minute to fsync a raft log update to disk, but the etcd cluster has a one second election timeout. Even though the leader can process network messages within the election interval (e.g., send heartbeats), it's effectively unavailable because it can't commit any new proposals; it's waiting on the slow disk. If the cluster frequently loses its leader due to disk latencies, try [tuning][tuning] the disk settings or etcd time parameters. -### Performance +### What does the etcd warning "request ignored (cluster ID mismatch)" mean? + +Every new etcd cluster generates a new cluster ID based on the initial cluster configuration and a user-provided unique `initial-cluster-token` value. By having unique cluster ID's, etcd is protected from cross-cluster interaction which could corrupt the cluster. + +Usually this warning happens after tearing down an old cluster, then reusing some of the peer addresses for the new cluster. If any etcd process from the old cluster is still running it will try to contact the new cluster. The new cluster will recognize a cluster ID mismatch, then ignore the request and emit this warning. This warning is often cleared by ensuring peer addresses among distinct clusters are disjoint. + +### What does "mvcc: database space exceeded" mean and how do I fix it? -#### How should I benchmark etcd? +The [multi-version concurrency control][api-mvcc] data model in etcd keeps an exact history of the keyspace. Without periodically compacting this history (e.g., by setting `--auto-compaction`), etcd will eventually exhaust its storage space. If etcd runs low on storage space, it raises a space quota alarm to protect the cluster from further writes. So long as the alarm is raised, etcd responds to write requests with the error `mvcc: database space exceeded`. + +To recover from the low space quota alarm: + +1. [Compact][maintenance-compact] etcd's history. +2. [Defragment][maintenance-defragment] every etcd endpoint. +3. [Disarm][maintenance-disarm] the alarm. + +## Performance + +### How should I benchmark etcd? Try the [benchmark] tool. Current [benchmark results][benchmark-result] are available for comparison. -#### What does the etcd warning "apply entries took too long" mean? +### What does the etcd warning "apply entries took too long" mean? After a majority of etcd members agree to commit a request, each etcd server applies the request to its data store and persists the result to disk. Even with a slow mechanical disk or a virtualized network disk, such as Amazon’s EBS or Google’s PD, applying a request should normally take fewer than 50 milliseconds. If the average apply duration exceeds 100 milliseconds, etcd will warn that entries are taking too long to apply. - + Usually this issue is caused by a slow disk. 
The disk could be experiencing contention among etcd and other applications, or the disk is too simply slow (e.g., a shared virtualized disk). To rule out a slow disk from causing this warning, monitor [backend_commit_duration_seconds][backend_commit_metrics] (p99 duration should be less than 25ms) to confirm the disk is reasonably fast. If the disk is too slow, assigning a dedicated disk to etcd or using faster disk will typically solve the problem. The second most common cause is CPU starvation. If monitoring of the machine’s CPU usage shows heavy utilization, there may not be enough compute capacity for etcd. Moving etcd to dedicated machine, increasing process resource isolation cgroups, or renicing the etcd server process into a higher priority can usually solve the problem. @@ -100,7 +120,7 @@ Expensive user requests which access too many keys (e.g., fetching the entire ke If none of the above suggestions clear the warnings, please [open an issue][new_issue] with detailed logging, monitoring, metrics and optionally workload information. -#### What does the etcd warning "failed to send out heartbeat on time" mean? +### What does the etcd warning "failed to send out heartbeat on time" mean? etcd uses a leader-based consensus protocol for consistent data replication and log execution. Cluster members elect a single leader, all other members become followers. The elected leader must periodically send heartbeats to its followers to maintain its leadership. Followers infer leader failure if no heartbeats are received within an election interval and trigger an election. If a leader doesn’t send its heartbeats in time but is still running, the election is spurious and likely caused by insufficient resources. To catch these soft failures, if the leader skips two heartbeat intervals, etcd will warn it failed to send a heartbeat on time. @@ -112,11 +132,10 @@ A slow network can also cause this issue. If network metrics among the etcd mach If none of the above suggestions clear the warnings, please [open an issue][new_issue] with detailed logging, monitoring, metrics and optionally workload information. -#### What does the etcd warning "request ignored (cluster ID mismatch)" mean? +### What does the etcd warning "snapshotting is taking more than x seconds to finish ..." mean? -Every new etcd cluster generates a new cluster ID based on the initial cluster configuration and a user-provided unique `initial-cluster-token` value. By having unique cluster ID's, etcd is protected from cross-cluster interaction which could corrupt the cluster. +etcd sends a snapshot of its complete key-value store to refresh slow followers and for [backups][backup]. Slow snapshot transfer times increase MTTR; if the cluster is ingesting data with high throughput, slow followers may livelock by needing a new snapshot before finishing receiving a snapshot. To catch slow snapshot performance, etcd warns when sending a snapshot takes more than thirty seconds and exceeds the expected transfer time for a 1Gbps connection. -Usually this warning happens after tearing down an old cluster, then reusing some of the peer addresses for the new cluster. If any etcd process from the old cluster is still running it will try to contact the new cluster. The new cluster will recognize a cluster ID mismatch, then ignore the request and emit this warning. This warning is often cleared by ensuring peer addresses among distinct clusters are disjoint. 
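+To make the recovery steps for the `mvcc: database space exceeded` alarm above concrete, here is a minimal `etcdctl` (API version 3) sketch of the compact, defragment, disarm sequence; the revision number used for compaction is only an example and should be taken from the `endpoint status` output:
+
+```bash
+# find the current revision reported by the endpoint
+$ etcdctl endpoint status --write-out="json"
+
+# compact away the keyspace history before that revision (5 is an example value)
+$ etcdctl compaction 5
+
+# defragment to reclaim the freed space (run against each endpoint)
+$ etcdctl defrag
+
+# once space is reclaimed, clear the space quota alarm and verify
+$ etcdctl alarm disarm
+$ etcdctl alarm list
+```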
[hardware-setup]: ./op-guide/hardware.md [supported-platform]: ./op-guide/supported-platform.md @@ -130,3 +149,7 @@ Usually this warning happens after tearing down an old cluster, then reusing som [runtime reconfiguration]: https://github.com/coreos/etcd/blob/master/Documentation/op-guide/runtime-configuration.md [benchmark]: https://github.com/coreos/etcd/tree/master/tools/benchmark [benchmark-result]: https://github.com/coreos/etcd/blob/master/Documentation/op-guide/performance.md +[api-mvcc]: learning/api.md#revisions +[maintenance-compact]: op-guide/maintenance.md#history-compaction +[maintenance-defragment]: op-guide/maintenance.md#defragmentation +[maintenance-disarm]: ../etcdctl/README.md#alarm-disarm diff --git a/github.com/coreos/etcd/Documentation/integrations.md b/github.com/coreos/etcd/Documentation/integrations.md index 7c9bb210f1..2466e96ee4 100644 --- a/github.com/coreos/etcd/Documentation/integrations.md +++ b/github.com/coreos/etcd/Documentation/integrations.md @@ -40,7 +40,7 @@ **Python libraries** -- [kragniz/python-etcd3](https://github.com/kragniz/python-etcd3) - Work in progress client for v3 +- [kragniz/python-etcd3](https://github.com/kragniz/python-etcd3) - Client for v3 - [jplana/python-etcd](https://github.com/jplana/python-etcd) - Supports v2 - [russellhaering/txetcd](https://github.com/russellhaering/txetcd) - a Twisted Python library - [cholcombe973/autodock](https://github.com/cholcombe973/autodock) - A docker deployment automation tool @@ -50,6 +50,7 @@ **Node libraries** +- [mixer/etcd3](https://github.com/mixer/etcd3) - Supports v3 - [stianeikeland/node-etcd](https://github.com/stianeikeland/node-etcd) - Supports v2 (w Coffeescript) - [lavagetto/nodejs-etcd](https://github.com/lavagetto/nodejs-etcd) - Supports v2 - [deedubs/node-etcd-config](https://github.com/deedubs/node-etcd-config) - Supports v2 @@ -131,7 +132,7 @@ - [cloudfoundry/cf-release](https://github.com/cloudfoundry/cf-release/tree/master/jobs/etcd) **Projects using etcd** - +- [etcd Raft users](../raft/README.md#notable-users) - projects using etcd's raft library implementation. - [apache/celix](https://github.com/apache/celix) - an implementation of the OSGi specification adapted to C and C++ - [binocarlos/yoda](https://github.com/binocarlos/yoda) - etcd + ZeroMQ - [blox/blox](https://github.com/blox/blox) - a collection of open source projects for container management and orchestration with AWS ECS @@ -156,3 +157,5 @@ - [ryandoyle/nss-etcd](https://github.com/ryandoyle/nss-etcd) - A GNU libc NSS module for resolving names from etcd. - [Gru](https://github.com/dnaeon/gru) - Orchestration made easy with Go - [Vitess](http://vitess.io/) - Vitess is a database clustering system for horizontal scaling of MySQL. +- [lclarkmichalek/etcdhcp](https://github.com/lclarkmichalek/etcdhcp) - DHCP server that uses etcd for persistence and coordination. 
+- [openstack/networking-vpp](https://github.com/openstack/networking-vpp) - A networking driver that programs the [FD.io VPP dataplane](https://wiki.fd.io/view/VPP) to provide [OpenStack](https://www.openstack.org/) cloud virtual networking diff --git a/github.com/coreos/etcd/Documentation/learning/api.md b/github.com/coreos/etcd/Documentation/learning/api.md index 50053267d1..1ccca9a07d 100644 --- a/github.com/coreos/etcd/Documentation/learning/api.md +++ b/github.com/coreos/etcd/Documentation/learning/api.md @@ -47,7 +47,7 @@ message ResponseHeader { An application may read the Cluster_ID (Member_ID) field to ensure it is communicating with the intended cluster (member). -Applications can use the `Revision` to know the latest revision of the key-value store. This is especially useful when applications specify a historical revision to make time `travel query` and wishes to know the latest revision at the time of the request. +Applications can use the `Revision` to know the latest revision of the key-value store. This is especially useful when applications specify a historical revision to make time `travel query` and wish to know the latest revision at the time of the request. Applications can use `Raft_Term` to detect when the cluster completes a new leader election. @@ -348,7 +348,7 @@ message Event { Watches are long-running requests and use gRPC streams to stream event data. A watch stream is bi-directional; the client writes to the stream to establish watches and reads to receive watch event. A single watch stream can multiplex many distinct watches by tagging events with per-watch identifiers. This multiplexing helps reducing the memory footprint and connection overhead on the core etcd cluster. Watches make three guarantees about events: -* Ordered - events are ordered by revision; an event will never appear on a watch if it precedes an event in time that has already already been posted. +* Ordered - events are ordered by revision; an event will never appear on a watch if it precedes an event in time that has already been posted. * Reliable - a sequence of events will never drop any subsequence of events; if there are events ordered in time as a < b < c, then if the watch receives events a and c, it is guaranteed to receive b. * Atomic - a list of events is guaranteed to encompass complete revisions; updates in the same revision over multiple keys will not be split over several lists of events. @@ -449,7 +449,7 @@ message LeaseRevokeRequest { ### Keep alives -Leases are refreshed using a bi-directional stream created with the `LeaseKeepAlive` API call. When the client wishes to refresh a lease, it sends a `LeaseGrantRequest` over the stream: +Leases are refreshed using a bi-directional stream created with the `LeaseKeepAlive` API call. When the client wishes to refresh a lease, it sends a `LeaseKeepAliveRequest` over the stream: ```protobuf message LeaseKeepAliveRequest { diff --git a/github.com/coreos/etcd/Documentation/learning/auth_design.md b/github.com/coreos/etcd/Documentation/learning/auth_design.md index 192f4b2177..52c979731b 100644 --- a/github.com/coreos/etcd/Documentation/learning/auth_design.md +++ b/github.com/coreos/etcd/Documentation/learning/auth_design.md @@ -60,7 +60,7 @@ For avoiding such a situation, the API layer performs *version number validation After authenticating with `Authenticate()`, a client can create a gRPC connection as it would without auth. 
In addition to the existing initialization process, the client must associate the token with the newly created connection. `grpc.WithPerRPCCredentials()` provides the functionality for this purpose. -Every authenticated request from the client has a token. The token can be obtained with `grpc.metadata.FromContext()` in the server side. The server can obtain who is issuing the request and when the user was authorized. The information will be filled by the API layer in the header (`etcdserverpb.RequestHeader.Username` and `etcdserverpb.RequestHeader.AuthRevision`) of a raft log entry (`etcdserverpb.InternalRaftRequest`). +Every authenticated request from the client has a token. The token can be obtained with `grpc.metadata.FromIncomingContext()` in the server side. The server can obtain who is issuing the request and when the user was authorized. The information will be filled by the API layer in the header (`etcdserverpb.RequestHeader.Username` and `etcdserverpb.RequestHeader.AuthRevision`) of a raft log entry (`etcdserverpb.InternalRaftRequest`). ### Checking permission in the state machine diff --git a/github.com/coreos/etcd/Documentation/learning/data_model.md b/github.com/coreos/etcd/Documentation/learning/data_model.md index c2986a6f9c..ab5935dc1a 100644 --- a/github.com/coreos/etcd/Documentation/learning/data_model.md +++ b/github.com/coreos/etcd/Documentation/learning/data_model.md @@ -2,19 +2,19 @@ etcd is designed to reliably store infrequently updated data and provide reliable watch queries. etcd exposes previous versions of key-value pairs to support inexpensive snapshots and watch history events (“time travel queries”). A persistent, multi-version, concurrency-control data model is a good fit for these use cases. -etcd stores data in a multiversion [persistent][persistent-ds] key-value store. The persistent key-value store preserves the previous version of a key-value pair when its value is superseded with new data. The key-value store is effectively immutable; its operations do not update the structure in-place, but instead always generates a new updated structure. All past versions of keys are still accessible and watchable after modification. To prevent the data store from growing indefinitely over time from maintaining old versions, the store may be compacted to shed the oldest versions of superseded data. +etcd stores data in a multiversion [persistent][persistent-ds] key-value store. The persistent key-value store preserves the previous version of a key-value pair when its value is superseded with new data. The key-value store is effectively immutable; its operations do not update the structure in-place, but instead always generate a new updated structure. All past versions of keys are still accessible and watchable after modification. To prevent the data store from growing indefinitely over time and from maintaining old versions, the store may be compacted to shed the oldest versions of superseded data. ### Logical view The store’s logical view is a flat binary key space. The key space has a lexically sorted index on byte string keys so range queries are inexpensive. -The key space maintains multiple revisions. Each atomic mutative operation (e.g., a transaction operation may contain multiple operations) creates a new revision on the key space. All data held by previous revisions remains unchanged. Old versions of key can still be accessed through previous revisions. Likewise, revisions are indexed as well; ranging over revisions with watchers is efficient. 
If the store is compacted to recover space, revisions before the compact revision will be removed. +The key space maintains multiple revisions. Each atomic mutative operation (e.g., a transaction operation may contain multiple operations) creates a new revision on the key space. All data held by previous revisions remains unchanged. Old versions of key can still be accessed through previous revisions. Likewise, revisions are indexed as well; ranging over revisions with watchers is efficient. If the store is compacted to save space, revisions before the compact revision will be removed. -A key’s lifetime spans a generation. Each key may have one or multiple generations. Creating a key increments the generation of that key, starting at 1 if the key never existed. Deleting a key generates a key tombstone, concluding the key’s current generation. Each modification of a key creates a new version of the key. Once a compaction happens, any generation ended before the given revision will be removed and values set before the compaction revision except the latest one will be removed. +A key’s lifetime spans a generation, denoted by its version. Each key may have one or multiple generations. Creating a key increments the version of that key, starting at 1 if the key never existed. Deleting a key generates a key tombstone, concluding the key’s current generation by resetting its version. Each modification of a key creates a new generation of the key and increases its version. Once a compaction happens, any version ended before the given revision will be removed and values set before the compaction revision except the latest one will be removed. ### Physical view -etcd stores the physical data as key-value pairs in a persistent [b+tree][b+tree]. Each revision of the store’s state only contains the delta from its previous revision to be efficient. A single revision may correspond to multiple keys in the tree. +etcd stores the physical data as key-value pairs in a persistent [b+tree][b+tree]. Each revision of the store’s state only contains the delta from its previous revision to be efficient. A single revision may correspond to multiple keys in the tree. The key of key-value pair is a 3-tuple (major, sub, type). Major is the store revision holding the key. Sub differentiates among keys within the same revision. Type is an optional suffix for special value (e.g., `t` if the value contains a tombstone). The value of the key-value pair contains the modification from previous revision, thus one delta from previous revision. The b+tree is ordered by key in lexical byte-order. Ranged lookups over revision deltas are fast; this enables quickly finding modifications from one specific revision to another. Compaction removes out-of-date keys-value pairs. diff --git a/github.com/coreos/etcd/Documentation/learning/why.md b/github.com/coreos/etcd/Documentation/learning/why.md index ac61a58385..29a86b86c7 100644 --- a/github.com/coreos/etcd/Documentation/learning/why.md +++ b/github.com/coreos/etcd/Documentation/learning/why.md @@ -1,17 +1,17 @@ -# Why etcd +# etcd versus other key-value stores The name "etcd" originated from two ideas, the unix "/etc" folder and "d"istibuted systems. The "/etc" folder is a place to store configuration data for a single system whereas etcd stores configuration information for large scale distributed systems. Hence, a "d"istributed "/etc" is "etcd". -etcd stores metadata in a consistent and fault-tolerant way. 
Distributed systems use etcd as a consistent key-value store for configuration management, service discovery, and coordinating distributed work. Common distributed patterns using etcd include [leader election][etcd-etcdctl-elect], [distributed locks][etcd-etcdctl-lock], and monitoring machine liveness. +etcd is designed as a general substrate for large scale distributed systems. These are systems that will never tolerate split-brain operation and are willing to sacrifice availability to achieve this end. etcd stores metadata in a consistent and fault-tolerant way. An etcd cluster is meant to provide key-value storage with best of class stability, reliability, scalability and performance. + +Distributed systems use etcd as a consistent key-value store for configuration management, service discovery, and coordinating distributed work. Many [organizations][production-users] use etcd to implement production systems such as container schedulers, service discovery services, and distributed data storage. Common distributed patterns using etcd include [leader election][etcd-etcdctl-elect], [distributed locks][etcd-etcdctl-lock], and monitoring machine liveness. ## Use cases -- Container Linux by CoreOS: Application running on [Container Linux][container-linux] gets automatic, zero-downtime Linux kernel updates. Container Linux uses [locksmith] to coordinate updates. locksmith implements a distributed semaphore over etcd to ensure only a subset of a cluster is rebooting at any given time. +- Container Linux by CoreOS: Applications running on [Container Linux][container-linux] get automatic, zero-downtime Linux kernel updates. Container Linux uses [locksmith] to coordinate updates. Locksmith implements a distributed semaphore over etcd to ensure only a subset of a cluster is rebooting at any given time. - [Kubernetes][kubernetes] stores configuration data into etcd for service discovery and cluster management; etcd's consistency is crucial for correctly scheduling and operating services. The Kubernetes API server persists cluster state into etcd. It uses etcd's watch API to monitor the cluster and roll out critical configuration changes. -## etcd versus other key-value stores - -When deciding whether to use etcd as a key-value store, it’s worth keeping in mind etcd’s main goal. Namely, etcd is designed as a general substrate for large scale distributed systems. These are systems that will never tolerate split-brain operation and are willing to sacrifice availability to achieve this end. An etcd cluster is meant to provide consistent key-value storage with best of class stability, reliability, scalability and performance. The upshot of this focus is many [organizations][production-users] already use etcd to implement production systems such as container schedulers, service discovery services, distributed data storage, and more. +## Comparison chart Perhaps etcd already seems like a good fit, but as with all technological decisions, proceed with caution. Please note this documentation is written by the etcd team. Although the ideal is a disinterested comparison of technology and features, the authors’ expertise and biases obviously favor etcd. Use only as directed. 
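As a quick illustration of the coordination patterns mentioned above, the following is a minimal sketch using the `etcdctl` v3 `lock` and `elect` commands; the lock name, election name, and proposal value are hypothetical.

```sh
# Acquire the distributed lock "demo-lock", run a command while holding it,
# and release the lock when the command exits.
ETCDCTL_API=3 etcdctl lock demo-lock echo "holding the lock"

# Campaign in the election "demo-election" with the proposal "node-1";
# the command blocks until this campaigner becomes the leader.
ETCDCTL_API=3 etcdctl elect demo-election node-1
```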
@@ -84,7 +84,7 @@ For distributed coordination, choosing etcd can help prevent operational headach [tidb]: https://github.com/pingcap/tidb [etcd-v3lock]: https://godoc.org/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb [etcd-v3election]: https://godoc.org/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb -[etcd-etcdctl-lock]: ../../etcdctl/README.md#lock-lockname +[etcd-etcdctl-lock]: ../../etcdctl/README.md#lock-lockname-command-arg1-arg2- [etcd-etcdctl-elect]: ../../etcdctl/README.md#elect-options-election-name-proposal [etcd-mvcc]: data_model.md [etcd-recipe]: https://godoc.org/github.com/coreos/etcd/contrib/recipes @@ -107,10 +107,9 @@ For distributed coordination, choosing etcd can help prevent operational headach [etcd-rbac]: ../op-guide/authentication.md#working-with-roles [zk-acl]: https://zookeeper.apache.org/doc/r3.1.2/zookeeperProgrammers.html#sc_ZooKeeperAccessControl [consul-acl]: https://www.consul.io/docs/internals/acl.html -[cockroach-grant]: https://www.cockroachlabs.com/docs/grant.html +[cockroach-grant]: https://www.cockroachlabs.com/docs/stable/grant.html [spanner-roles]: https://cloud.google.com/spanner/docs/iam#roles [zk-bindings]: https://zookeeper.apache.org/doc/r3.1.2/zookeeperProgrammers.html#ch_bindings [container-linux]: https://coreos.com/why [locksmith]: https://github.com/coreos/locksmith [kubernetes]: http://kubernetes.io/docs/whatisk8s - diff --git a/github.com/coreos/etcd/Documentation/op-guide/authentication.md b/github.com/coreos/etcd/Documentation/op-guide/authentication.md index 9fd8f0f975..b8ab33ff77 100644 --- a/github.com/coreos/etcd/Documentation/op-guide/authentication.md +++ b/github.com/coreos/etcd/Documentation/op-guide/authentication.md @@ -1,8 +1,8 @@ -# Authentication Guide +# Role-based access control ## Overview -Authentication was added in etcd 2.1. The etcd v3 API slightly modified the authentication feature's API and user interface to better fit the new data model. This guide is intended to help users set up basic authentication in etcd v3. +Authentication was added in etcd 2.1. The etcd v3 API slightly modified the authentication feature's API and user interface to better fit the new data model. This guide is intended to help users set up basic authentication and role-based access control in etcd v3. ## Special users and roles diff --git a/github.com/coreos/etcd/Documentation/op-guide/clustering.md b/github.com/coreos/etcd/Documentation/op-guide/clustering.md index e058b79796..efdd3aa262 100644 --- a/github.com/coreos/etcd/Documentation/op-guide/clustering.md +++ b/github.com/coreos/etcd/Documentation/op-guide/clustering.md @@ -281,7 +281,7 @@ ETCD_DISCOVERY=https://discovery.etcd.io/3e86b59982e49066c5d813af1c2e2579cbf573d --discovery https://discovery.etcd.io/3e86b59982e49066c5d813af1c2e2579cbf573de ``` -**Each member must have a different name flag specified or else discovery will fail due to duplicated names. `Hostname` or `machine-id` can be a good choice. ** +**Each member must have a different name flag specified or else discovery will fail due to duplicated names. 
`Hostname` or `machine-id` can be a good choice.** Now we start etcd with those relevant flags for each member: diff --git a/github.com/coreos/etcd/Documentation/op-guide/configuration.md b/github.com/coreos/etcd/Documentation/op-guide/configuration.md index 1f7064ea6f..8dadd31791 100644 --- a/github.com/coreos/etcd/Documentation/op-guide/configuration.md +++ b/github.com/coreos/etcd/Documentation/op-guide/configuration.md @@ -185,7 +185,10 @@ To start etcd automatically using custom settings at startup in Linux, using a [ The security flags help to [build a secure etcd cluster][security]. -### --ca-file [DEPRECATED] +### --ca-file + +**DEPRECATED** + + Path to the client server TLS CA file. `--ca-file ca.crt` could be replaced by `--trusted-ca-file ca.crt --client-cert-auth` and etcd will perform the same. + default: none + env variable: ETCD_CA_FILE @@ -215,7 +218,10 @@ The security flags help to [build a secure etcd cluster][security]. + default: false + env variable: ETCD_AUTO_TLS -### --peer-ca-file [DEPRECATED] +### --peer-ca-file + +**DEPRECATED** + + Path to the peer server TLS CA file. `--peer-ca-file ca.crt` could be replaced by `--peer-trusted-ca-file ca.crt --peer-client-cert-auth` and etcd will perform the same. + default: none + env variable: ETCD_PEER_CA_FILE @@ -299,7 +305,7 @@ Follow the instructions when using these flags. [build-cluster]: clustering.md#static [reconfig]: runtime-configuration.md [discovery]: clustering.md#discovery -[iana-ports]: https://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=etcd +[iana-ports]: http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.txt [proxy]: ../v2/proxy.md [restore]: ../v2/admin_guide.md#restoring-a-backup [security]: security.md diff --git a/github.com/coreos/etcd/Documentation/op-guide/container.md b/github.com/coreos/etcd/Documentation/op-guide/container.md index 6840eec72d..bed9c08f28 100644 --- a/github.com/coreos/etcd/Documentation/op-guide/container.md +++ b/github.com/coreos/etcd/Documentation/op-guide/container.md @@ -68,6 +68,37 @@ Production clusters which refer to peers by DNS name known to the local resolver In order to expose the etcd API to clients outside of Docker host, use the host IP address of the container. Please see [`docker inspect`](https://docs.docker.com/engine/reference/commandline/inspect) for more detail on how to get the IP address. Alternatively, specify `--net=host` flag to `docker run` command to skip placing the container inside of a separate network stack. 
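For example, a minimal sketch of looking up a container's address with `docker inspect` (assuming the container is named `etcd` and is attached to the default bridge network) might look like:

```sh
# Print the container's IP address on the default bridge network.
docker inspect --format '{{ .NetworkSettings.IPAddress }}' etcd

# Health-check the etcd client port at that address from the Docker host.
ETCDCTL_API=3 etcdctl \
  --endpoints=http://$(docker inspect --format '{{ .NetworkSettings.IPAddress }}' etcd):2379 \
  endpoint health
```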
+### Running a single node etcd + +Use the host IP address when configuring etcd: + +``` +export NODE1=192.168.1.21 +``` + +Run the latest version of etcd: + +``` +docker run \ + -p 2379:2379 \ + -p 2380:2380 \ + --volume=${DATA_DIR}:/etcd-data \ + --name etcd quay.io/coreos/etcd:latest \ + /usr/local/bin/etcd \ + --data-dir=/etcd-data --name node1 \ + --initial-advertise-peer-urls http://${NODE1}:2380 --listen-peer-urls http://${NODE1}:2380 \ + --advertise-client-urls http://${NODE1}:2379 --listen-client-urls http://${NODE1}:2379 \ + --initial-cluster node1=http://${NODE1}:2380 +``` + +List the cluster member: + +``` +etcdctl --endpoints=http://${NODE1}:2379 member list +``` + +### Running a 3 node etcd cluster + ``` # For each machine ETCD_VERSION=latest @@ -85,41 +116,47 @@ DATA_DIR=/var/lib/etcd # For node 1 THIS_NAME=${NAME_1} THIS_IP=${HOST_1} -sudo docker run --net=host \ - --volume=${DATA_DIR}:/etcd-data \ - --name etcd quay.io/coreos/etcd:${ETCD_VERSION} \ - /usr/local/bin/etcd \ - --data-dir=/etcd-data --name ${THIS_NAME} \ - --initial-advertise-peer-urls http://${THIS_IP}:2380 --listen-peer-urls http://${THIS_IP}:2380 \ - --advertise-client-urls http://${THIS_IP}:2379 --listen-client-urls http://${THIS_IP}:2379 \ - --initial-cluster ${CLUSTER} \ - --initial-cluster-state ${CLUSTER_STATE} --initial-cluster-token ${TOKEN} +docker run \ + -p 2379:2379 \ + -p 2380:2380 \ + --volume=${DATA_DIR}:/etcd-data \ + --name etcd quay.io/coreos/etcd:${ETCD_VERSION} \ + /usr/local/bin/etcd \ + --data-dir=/etcd-data --name ${THIS_NAME} \ + --initial-advertise-peer-urls http://${THIS_IP}:2380 --listen-peer-urls http://${THIS_IP}:2380 \ + --advertise-client-urls http://${THIS_IP}:2379 --listen-client-urls http://${THIS_IP}:2379 \ + --initial-cluster ${CLUSTER} \ + --initial-cluster-state ${CLUSTER_STATE} --initial-cluster-token ${TOKEN} # For node 2 THIS_NAME=${NAME_2} THIS_IP=${HOST_2} -sudo docker run --net=host \ - --volume=${DATA_DIR}:/etcd-data \ - --name etcd quay.io/coreos/etcd:${ETCD_VERSION} \ - /usr/local/bin/etcd \ - --data-dir=/etcd-data --name ${THIS_NAME} \ - --initial-advertise-peer-urls http://${THIS_IP}:2380 --listen-peer-urls http://${THIS_IP}:2380 \ - --advertise-client-urls http://${THIS_IP}:2379 --listen-client-urls http://${THIS_IP}:2379 \ - --initial-cluster ${CLUSTER} \ - --initial-cluster-state ${CLUSTER_STATE} --initial-cluster-token ${TOKEN} +docker run \ + -p 2379:2379 \ + -p 2380:2380 \ + --volume=${DATA_DIR}:/etcd-data \ + --name etcd quay.io/coreos/etcd:${ETCD_VERSION} \ + /usr/local/bin/etcd \ + --data-dir=/etcd-data --name ${THIS_NAME} \ + --initial-advertise-peer-urls http://${THIS_IP}:2380 --listen-peer-urls http://${THIS_IP}:2380 \ + --advertise-client-urls http://${THIS_IP}:2379 --listen-client-urls http://${THIS_IP}:2379 \ + --initial-cluster ${CLUSTER} \ + --initial-cluster-state ${CLUSTER_STATE} --initial-cluster-token ${TOKEN} # For node 3 THIS_NAME=${NAME_3} THIS_IP=${HOST_3} -sudo docker run --net=host \ - --volume=${DATA_DIR}:/etcd-data \ - --name etcd quay.io/coreos/etcd:${ETCD_VERSION} \ - /usr/local/bin/etcd \ - --data-dir=/etcd-data --name ${THIS_NAME} \ - --initial-advertise-peer-urls http://${THIS_IP}:2380 --listen-peer-urls http://${THIS_IP}:2380 \ - --advertise-client-urls http://${THIS_IP}:2379 --listen-client-urls http://${THIS_IP}:2379 \ - --initial-cluster ${CLUSTER} \ - --initial-cluster-state ${CLUSTER_STATE} --initial-cluster-token ${TOKEN} +docker run \ + -p 2379:2379 \ + -p 2380:2380 \ + --volume=${DATA_DIR}:/etcd-data \ + --name etcd 
quay.io/coreos/etcd:${ETCD_VERSION} \ + /usr/local/bin/etcd \ + --data-dir=/etcd-data --name ${THIS_NAME} \ + --initial-advertise-peer-urls http://${THIS_IP}:2380 --listen-peer-urls http://${THIS_IP}:2380 \ + --advertise-client-urls http://${THIS_IP}:2379 --listen-client-urls http://${THIS_IP}:2379 \ + --initial-cluster ${CLUSTER} \ + --initial-cluster-state ${CLUSTER_STATE} --initial-cluster-token ${TOKEN} ``` To run `etcdctl` using API version 3: @@ -141,17 +178,19 @@ rkt run \ --volume etcd-ssl-certs-bundle,kind=host,source=/etc/ssl/certs/ca-certificates.crt \ --mount volume=etcd-ssl-certs-bundle,target=/etc/ssl/certs/ca-certificates.crt \ quay.io/coreos/etcd:latest -- --name my-name \ - --initial-advertise-peer-urls http://localhost:2380 --listen-peer-urls http://localhost:2380 \ - --advertise-client-urls http://localhost:2379 --listen-client-urls http://localhost:2379 \ - --discovery https://discovery.etcd.io/c11fbcdc16972e45253491a24fcf45e1 + --initial-advertise-peer-urls http://localhost:2380 --listen-peer-urls http://localhost:2380 \ + --advertise-client-urls http://localhost:2379 --listen-client-urls http://localhost:2379 \ + --discovery https://discovery.etcd.io/c11fbcdc16972e45253491a24fcf45e1 ``` ``` docker run \ - --volume=/etc/ssl/certs/ca-certificates.crt:/etc/ssl/certs/ca-certificates.crt \ - quay.io/coreos/etcd:latest \ - /usr/local/bin/etcd --name my-name \ - --initial-advertise-peer-urls http://localhost:2380 --listen-peer-urls http://localhost:2380 \ - --advertise-client-urls http://localhost:2379 --listen-client-urls http://localhost:2379 \ - --discovery https://discovery.etcd.io/86a9ff6c8cb8b4c4544c1a2f88f8b801 + -p 2379:2379 \ + -p 2380:2380 \ + --volume=/etc/ssl/certs/ca-certificates.crt:/etc/ssl/certs/ca-certificates.crt \ + quay.io/coreos/etcd:latest \ + /usr/local/bin/etcd --name my-name \ + --initial-advertise-peer-urls http://localhost:2380 --listen-peer-urls http://localhost:2380 \ + --advertise-client-urls http://localhost:2379 --listen-client-urls http://localhost:2379 \ + --discovery https://discovery.etcd.io/86a9ff6c8cb8b4c4544c1a2f88f8b801 ``` diff --git a/github.com/coreos/etcd/Documentation/op-guide/etcd3_alert.rules b/github.com/coreos/etcd/Documentation/op-guide/etcd3_alert.rules index 90c3770e80..e25cbc4c2b 100644 --- a/github.com/coreos/etcd/Documentation/op-guide/etcd3_alert.rules +++ b/github.com/coreos/etcd/Documentation/op-guide/etcd3_alert.rules @@ -76,7 +76,7 @@ LABELS { } ANNOTATIONS { summary = "slow gRPC requests", - description = "on etcd instance {{ $labels.instance }} gRPC requests to {{ $label.grpc_method }} are slow", + description = "on etcd instance {{ $labels.instance }} gRPC requests to {{ $labels.grpc_method }} are slow", } # HTTP requests alerts @@ -117,7 +117,7 @@ LABELS { } ANNOTATIONS { summary = "slow HTTP requests", - description = "on etcd instance {{ $labels.instance }} HTTP requests to {{ $label.method }} are slow", + description = "on etcd instance {{ $labels.instance }} HTTP requests to {{ $labels.method }} are slow", } # file descriptor alerts @@ -161,7 +161,7 @@ LABELS { } ANNOTATIONS { summary = "etcd member communication is slow", - description = "etcd instance {{ $labels.instance }} member communication with {{ $label.To }} is slow", + description = "etcd instance {{ $labels.instance }} member communication with {{ $labels.To }} is slow", } # etcd proposal alerts diff --git a/github.com/coreos/etcd/Documentation/op-guide/failures.md b/github.com/coreos/etcd/Documentation/op-guide/failures.md index 
ce2bd6ca64..6139e05fc3 100644 --- a/github.com/coreos/etcd/Documentation/op-guide/failures.md +++ b/github.com/coreos/etcd/Documentation/op-guide/failures.md @@ -1,4 +1,4 @@ -# Understand failures +# Failure modes Failures are common in a large deployment of machines. A machine fails when its hardware or software malfunctions. Multiple machines fail together when there are power failures or network issues. Multiple kinds of failures can also happen at once; it is almost impossible to enumerate all possible failure cases. diff --git a/github.com/coreos/etcd/Documentation/op-guide/gateway.md b/github.com/coreos/etcd/Documentation/op-guide/gateway.md index a7cd1be6f6..10fe5d0c3b 100644 --- a/github.com/coreos/etcd/Documentation/op-guide/gateway.md +++ b/github.com/coreos/etcd/Documentation/op-guide/gateway.md @@ -10,8 +10,7 @@ The gateway supports multiple etcd server endpoints and works on a simple round- Every application that accesses etcd must first have the address of an etcd cluster client endpoint. If multiple applications on the same server access the same etcd cluster, every application still needs to know the advertised client endpoints of the etcd cluster. If the etcd cluster is reconfigured to have different endpoints, every application may also need to update its endpoint list. This wide-scale reconfiguration is both tedious and error prone. -etcd gateway solves this problem by serving as a stable local endpoint. A typical etcd gateway configuration has -each machine running a gateway listening on a local address and every etcd application connecting to its local gateway. The upshot is only the gateway needs to update its endpoints instead of updating each and every application. +etcd gateway solves this problem by serving as a stable local endpoint. A typical etcd gateway configuration has each machine running a gateway listening on a local address and every etcd application connecting to its local gateway. The upshot is only the gateway needs to update its endpoints instead of updating each and every application. In summary, to automatically propagate cluster endpoint changes, the etcd gateway runs on every machine serving multiple applications accessing the same etcd cluster. @@ -64,3 +63,43 @@ Start the etcd gateway to fetch the endpoints from the DNS SRV entries with the $ etcd gateway --discovery-srv=example.com 2016-08-16 11:21:18.867350 I | tcpproxy: ready to proxy client requests to [...] ``` + +## Configuration flags + +### etcd cluster + +#### --endpoints + + * Comma-separated list of etcd server targets for forwarding client connections. + * Default: `127.0.0.1:2379` + * Invalid example: `https://127.0.0.1:2379` (gateway does not terminate TLS) + +#### --discovery-srv + + * DNS domain used to bootstrap cluster endpoints through SRV records. + * Default: (not set) + +### Network + +#### --listen-addr + + * Interface and port to bind for accepting client requests. + * Default: `127.0.0.1:23790` + +#### --retry-delay + + * Duration of delay before retrying to connect to failed endpoints. + * Default: 1m0s + * Invalid example: "123" (a duration requires a time unit, e.g. "30s" or "1m") + +### Security + +#### --insecure-discovery + + * Accept SRV records that are insecure or susceptible to man-in-the-middle attacks. + * Default: `false` + +#### --trusted-ca-file + + * Path to the client TLS CA file for the etcd cluster. Used to authenticate endpoints.
+ * Default: (not set) diff --git a/github.com/coreos/etcd/Documentation/op-guide/grafana.json b/github.com/coreos/etcd/Documentation/op-guide/grafana.json index f6d6b521ac..c549325365 100644 --- a/github.com/coreos/etcd/Documentation/op-guide/grafana.json +++ b/github.com/coreos/etcd/Documentation/op-guide/grafana.json @@ -114,18 +114,21 @@ "span": 5, "stack": false, "steppedLine": false, - "targets": [{ - "expr": "sum(rate({grpc_type=\"unary\",grpc_code!=\"OK\"} [1m]))", + "targets": [ + { + "expr": "sum(rate(grpc_server_started_total{grpc_type=\"unary\"}[5m]))", + "format": "time_series", "intervalFactor": 2, - "legendFormat": "{{instance}} RPC Rate", + "legendFormat": "RPC Rate", "metric": "grpc_server_started_total", "refId": "A", "step": 2 }, { - "expr": "sum(rate(grpc_server_started_total{grpc_type=\"unary\",grpc_code!=\"OK\"} [1m])) - sum(rate(grpc_server_handled_total{grpc_type=\"unary\"} [1m]))", + "expr": "sum(rate(grpc_server_handled_total{grpc_type=\"unary\",grpc_code!=\"OK\"}[5m]))", + "format": "time_series", "intervalFactor": 2, - "legendFormat": "{{instance}} RPC Failed Rate", + "legendFormat": "RPC Failed Rate", "metric": "grpc_server_handled_total", "refId": "B", "step": 2 @@ -197,7 +200,7 @@ "stack": true, "steppedLine": false, "targets": [{ - "expr": "sum(grpc_server_started_total {grpc_service=\"etcdserverpb.Watch\",grpc_type=\"bidi_stream\",grpc_code!=\"OK\"}) - sum(grpc_server_handled_total {grpc_service=\"etcdserverpb.Watch\",grpc_type=\"bidi_stream\"})", + "expr": "sum(grpc_server_started_total{grpc_service=\"etcdserverpb.Watch\",grpc_type=\"bidi_stream\"}) - sum(grpc_server_handled_total{grpc_service=\"etcdserverpb.Watch\",grpc_type=\"bidi_stream\"})", "intervalFactor": 2, "legendFormat": "Watch Streams", "metric": "grpc_server_handled_total", @@ -205,7 +208,7 @@ "step": 4 }, { - "expr": "sum(grpc_server_started_total {grpc_service=\"etcdserverpb.Lease\",grpc_type=\"bidi_stream\"}) - sum(grpc_server_handled_total {grpc_service=\"etcdserverpb.Lease\",grpc_type=\"bidi_stream\"})", + "expr": "sum(grpc_server_started_total{grpc_service=\"etcdserverpb.Lease\",grpc_type=\"bidi_stream\"}) - sum(grpc_server_handled_total{grpc_service=\"etcdserverpb.Lease\",grpc_type=\"bidi_stream\"})", "intervalFactor": 2, "legendFormat": "Lease Streams", "metric": "grpc_server_handled_total", @@ -361,7 +364,7 @@ "stack": false, "steppedLine": true, "targets": [{ - "expr": "histogram_quantile(0.99, sum(rate(etcd_disk_wal_fsync_duration_seconds_bucket [5m])) by (instance, le))", + "expr": "histogram_quantile(0.99, sum(rate(etcd_disk_wal_fsync_duration_seconds_bucket[5m])) by (instance, le))", "hide": false, "intervalFactor": 2, "legendFormat": "{{instance}} WAL fsync", @@ -370,7 +373,7 @@ "step": 4 }, { - "expr": "histogram_quantile(0.99, sum(rate(etcd_disk_backend_commit_duration_seconds_bucket [5m])) by (instance, le))", + "expr": "histogram_quantile(0.99, sum(rate(etcd_disk_backend_commit_duration_seconds_bucket[5m])) by (instance, le))", "intervalFactor": 2, "legendFormat": "{{instance}} DB fsync", "metric": "etcd_disk_backend_commit_duration_seconds_bucket", @@ -522,7 +525,7 @@ "stack": true, "steppedLine": false, "targets": [{ - "expr": "rate(etcd_network_client_grpc_received_bytes_total [1m])", + "expr": "rate(etcd_network_client_grpc_received_bytes_total[5m])", "intervalFactor": 2, "legendFormat": "{{instance}} Client Traffic In", "metric": "etcd_network_client_grpc_received_bytes_total", @@ -595,7 +598,7 @@ "stack": true, "steppedLine": false, "targets": [{ - "expr": 
"rate(etcd_network_client_grpc_sent_bytes_total [1m])", + "expr": "rate(etcd_network_client_grpc_sent_bytes_total[5m])", "intervalFactor": 2, "legendFormat": "{{instance}} Client Traffic Out", "metric": "etcd_network_client_grpc_sent_bytes_total", @@ -668,7 +671,7 @@ "stack": false, "steppedLine": false, "targets": [{ - "expr": "sum(rate(etcd_network_peer_received_bytes_total [1m])) by (instance)", + "expr": "sum(rate(etcd_network_peer_received_bytes_total[5m])) by (instance)", "intervalFactor": 2, "legendFormat": "{{instance}} Peer Traffic In", "metric": "etcd_network_peer_received_bytes_total", @@ -742,7 +745,7 @@ "stack": false, "steppedLine": false, "targets": [{ - "expr": "sum(rate(etcd_network_peer_sent_bytes_total [1m])) by (instance)", + "expr": "sum(rate(etcd_network_peer_sent_bytes_total[5m])) by (instance)", "hide": false, "interval": "", "intervalFactor": 2, @@ -822,7 +825,7 @@ "stack": false, "steppedLine": false, "targets": [{ - "expr": "sum(rate(etcd_server_proposals_failed_total [1m]))", + "expr": "sum(rate(etcd_server_proposals_failed_total[5m]))", "intervalFactor": 2, "legendFormat": "Proposal Failure Rate", "metric": "etcd_server_proposals_failed_total", @@ -838,7 +841,7 @@ "step": 2 }, { - "expr": "sum(rate(etcd_server_proposals_committed_total [1m]))", + "expr": "sum(rate(etcd_server_proposals_committed_total[5m]))", "intervalFactor": 2, "legendFormat": "Proposal Commit Rate", "metric": "etcd_server_proposals_committed_total", @@ -846,7 +849,7 @@ "step": 2 }, { - "expr": "sum(rate(etcd_server_proposals_applied_total [1m]))", + "expr": "sum(rate(etcd_server_proposals_applied_total[5m]))", "intervalFactor": 2, "legendFormat": "Proposal Apply Rate", "refId": "D", @@ -922,9 +925,9 @@ "stack": false, "steppedLine": false, "targets": [{ - "expr": "etcd_server_leader_changes_seen_total", + "expr": "changes(etcd_server_leader_changes_seen_total[1d])", "intervalFactor": 2, - "legendFormat": "{{instance}} Leader Change Seen", + "legendFormat": "{{instance}} Total Leader Elections Per Day", "metric": "etcd_server_leader_changes_seen_total", "refId": "A", "step": 2 @@ -932,7 +935,7 @@ "thresholds": [], "timeFrom": null, "timeShift": null, - "title": "Rate Leader Elections", + "title": "Total Leader Elections Per Day", "tooltip": { "msResolution": false, "shared": true, @@ -1009,4 +1012,4 @@ "version": 215, "links": [], "gnetId": null -} \ No newline at end of file +} diff --git a/github.com/coreos/etcd/Documentation/op-guide/grpc_proxy.md b/github.com/coreos/etcd/Documentation/op-guide/grpc_proxy.md index 670aa68a19..4a2a6ca4bc 100644 --- a/github.com/coreos/etcd/Documentation/op-guide/grpc_proxy.md +++ b/github.com/coreos/etcd/Documentation/op-guide/grpc_proxy.md @@ -1,7 +1,5 @@ # gRPC proxy -*This is an alpha feature, we are looking for early feedback.* - The gRPC proxy is a stateless etcd reverse proxy operating at the gRPC layer (L7). The proxy is designed to reduce the total processing load on the core etcd cluster. For horizontal scalability, it coalesces watch and lease API requests. To protect the cluster against abusive clients, it caches key range requests. The gRPC proxy supports multiple etcd server endpoints. When the proxy starts, it randomly picks one etcd server endpoint to use. This endpoint serves all requests until the proxy detects an endpoint failure. If the gRPC proxy detects an endpoint failure, it switches to a different endpoint, if available, to hide failures from its clients. 
Other retry policies, such as weighted round-robin, may be supported in the future. @@ -101,7 +99,7 @@ bar ## Client endpoint synchronization and name resolution -The proxy supports registering its endpoints for discovery by writing to a user-defined endpoint. This serves two purposes. First, it allows clients to synchronize their endpoints against a set of proxy endpoints for high availability. Second, it is an endpoint provider for etcd [gRPC naming][dev-guide/grpc_naming.md]. +The proxy supports registering its endpoints for discovery by writing to a user-defined endpoint. This serves two purposes. First, it allows clients to synchronize their endpoints against a set of proxy endpoints for high availability. Second, it is an endpoint provider for etcd [gRPC naming](../dev-guide/grpc_naming.md). Register proxy(s) by providing a user-defined prefix: diff --git a/github.com/coreos/etcd/Documentation/op-guide/monitoring.md b/github.com/coreos/etcd/Documentation/op-guide/monitoring.md index 7a9812706e..cbd4356d47 100644 --- a/github.com/coreos/etcd/Documentation/op-guide/monitoring.md +++ b/github.com/coreos/etcd/Documentation/op-guide/monitoring.md @@ -5,14 +5,14 @@ Each etcd server exports metrics under the `/metrics` path on its client port. The metrics can be fetched with `curl`: ```sh -$ curl -L http://localhost:2379/metrics - -# HELP etcd_debugging_mvcc_keys_total Total number of keys. -# TYPE etcd_debugging_mvcc_keys_total gauge -etcd_debugging_mvcc_keys_total 0 -# HELP etcd_debugging_mvcc_pending_events_total Total number of pending events to be sent. -# TYPE etcd_debugging_mvcc_pending_events_total gauge -etcd_debugging_mvcc_pending_events_total 0 +$ curl -L http://localhost:2379/metrics | grep -v debugging # ignore unstable debugging metrics + +# HELP etcd_disk_backend_commit_duration_seconds The latency distributions of commit called by backend. +# TYPE etcd_disk_backend_commit_duration_seconds histogram +etcd_disk_backend_commit_duration_seconds_bucket{le="0.002"} 72756 +etcd_disk_backend_commit_duration_seconds_bucket{le="0.004"} 401587 +etcd_disk_backend_commit_duration_seconds_bucket{le="0.008"} 405979 +etcd_disk_backend_commit_duration_seconds_bucket{le="0.016"} 406464 ... 
``` @@ -24,7 +24,7 @@ Running a [Prometheus][prometheus] monitoring service is the easiest way to inge First, install Prometheus: ```sh -PROMETHEUS_VERSION="1.3.1" +PROMETHEUS_VERSION="2.0.0" wget https://github.com/prometheus/prometheus/releases/download/v$PROMETHEUS_VERSION/prometheus-$PROMETHEUS_VERSION.linux-amd64.tar.gz -O /tmp/prometheus-$PROMETHEUS_VERSION.linux-amd64.tar.gz tar -xvzf /tmp/prometheus-$PROMETHEUS_VERSION.linux-amd64.tar.gz --directory /tmp/ --strip-components=1 /tmp/prometheus -version diff --git a/github.com/coreos/etcd/Documentation/op-guide/performance.md b/github.com/coreos/etcd/Documentation/op-guide/performance.md index 4a6a70e6db..926caf327e 100644 --- a/github.com/coreos/etcd/Documentation/op-guide/performance.md +++ b/github.com/coreos/etcd/Documentation/op-guide/performance.md @@ -17,58 +17,54 @@ For some baseline performance numbers, we consider a three member etcd cluster w - Google Cloud Compute Engine - 3 machines of 8 vCPUs + 16GB Memory + 50GB SSD - 1 machine(client) of 16 vCPUs + 30GB Memory + 50GB SSD -- Ubuntu 15.10 -- etcd v3 master branch (commit SHA d8f325d), Go 1.6.2 +- Ubuntu 17.04 +- etcd 3.2.0, go 1.8.3 With this configuration, etcd can approximately write: -| Number of keys | Key size in bytes | Value size in bytes | Number of connections | Number of clients | Target etcd server | Average write QPS | Average latency per request | Memory | -|----------------|-------------------|---------------------|-----------------------|-------------------|--------------------|-------------------|-----------------------------|--------| -| 10,000 | 8 | 256 | 1 | 1 | leader only | 525 | 2ms | 35 MB | -| 100,000 | 8 | 256 | 100 | 1000 | leader only | 25,000 | 30ms | 35 MB | -| 100,000 | 8 | 256 | 100 | 1000 | all members | 33,000 | 25ms | 35 MB | +| Number of keys | Key size in bytes | Value size in bytes | Number of connections | Number of clients | Target etcd server | Average write QPS | Average latency per request | Average server RSS | +|---------------:|------------------:|--------------------:|----------------------:|------------------:|--------------------|------------------:|----------------------------:|-------------------:| +| 10,000 | 8 | 256 | 1 | 1 | leader only | 583 | 1.6ms | 48 MB | +| 100,000 | 8 | 256 | 100 | 1000 | leader only | 44,341 | 22ms | 124MB | +| 100,000 | 8 | 256 | 100 | 1000 | all members | 50,104 | 20ms | 126MB | Sample commands are: -``` -# assuming IP_1 is leader, write requests to the leader -benchmark --endpoints={IP_1} --conns=1 --clients=1 \ +```sh +# write to leader +benchmark --endpoints=${HOST_1} --target-leader --conns=1 --clients=1 \ put --key-size=8 --sequential-keys --total=10000 --val-size=256 -benchmark --endpoints={IP_1} --conns=100 --clients=1000 \ +benchmark --endpoints=${HOST_1} --target-leader --conns=100 --clients=1000 \ put --key-size=8 --sequential-keys --total=100000 --val-size=256 # write to all members -benchmark --endpoints={IP_1},{IP_2},{IP_3} --conns=100 --clients=1000 \ +benchmark --endpoints=${HOST_1},${HOST_2},${HOST_3} --conns=100 --clients=1000 \ put --key-size=8 --sequential-keys --total=100000 --val-size=256 ``` Linearizable read requests go through a quorum of cluster members for consensus to fetch the most recent data. Serializable read requests are cheaper than linearizable reads since they are served by any single etcd member, instead of a quorum of members, in exchange for possibly serving stale data. 
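To see the difference between the two consistency modes outside of a benchmark, a minimal sketch with `etcdctl` (assuming a key named `foo` and an endpoint in `$HOST_1`) is:

```sh
# Linearizable read: goes through quorum and returns the latest committed value.
ETCDCTL_API=3 etcdctl --endpoints=$HOST_1 get foo --consistency=l

# Serializable read: answered locally by the contacted member; cheaper, possibly stale.
ETCDCTL_API=3 etcdctl --endpoints=$HOST_1 get foo --consistency=s
```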
etcd can read: -| Number of requests | Key size in bytes | Value size in bytes | Number of connections | Number of clients | Consistency | Average latency per request | Average read QPS | -|--------------------|-------------------|---------------------|-----------------------|-------------------|-------------|-----------------------------|------------------| -| 10,000 | 8 | 256 | 1 | 1 | Linearizable | 2ms | 560 | -| 10,000 | 8 | 256 | 1 | 1 | Serializable | 0.4ms | 7,500 | -| 100,000 | 8 | 256 | 100 | 1000 | Linearizable | 15ms | 43,000 | -| 100,000 | 8 | 256 | 100 | 1000 | Serializable | 9ms | 93,000 | +| Number of requests | Key size in bytes | Value size in bytes | Number of connections | Number of clients | Consistency | Average read QPS | Average latency per request | +|-------------------:|------------------:|--------------------:|----------------------:|------------------:|-------------|-----------------:|----------------------------:| +| 10,000 | 8 | 256 | 1 | 1 | Linearizable | 1,353 | 0.7ms | +| 10,000 | 8 | 256 | 1 | 1 | Serializable | 2,909 | 0.3ms | +| 100,000 | 8 | 256 | 100 | 1000 | Linearizable | 141,578 | 5.5ms | +| 100,000 | 8 | 256 | 100 | 1000 | Serializable | 185,758 | 2.2ms | Sample commands are: -``` -# Linearizable read requests -benchmark --endpoints={IP_1},{IP_2},{IP_3} --conns=1 --clients=1 \ +```sh +# Single connection read requests +benchmark --endpoints=${HOST_1},${HOST_2},${HOST_3} --conns=1 --clients=1 \ range YOUR_KEY --consistency=l --total=10000 -benchmark --endpoints={IP_1},{IP_2},{IP_3} --conns=100 --clients=1000 \ - range YOUR_KEY --consistency=l --total=100000 +benchmark --endpoints=${HOST_1},${HOST_2},${HOST_3} --conns=1 --clients=1 \ + range YOUR_KEY --consistency=s --total=10000 -# Serializable read requests for each member and sum up the numbers -for endpoint in {IP_1} {IP_2} {IP_3}; do - benchmark --endpoints=$endpoint --conns=1 --clients=1 \ - range YOUR_KEY --consistency=s --total=10000 -done -for endpoint in {IP_1} {IP_2} {IP_3}; do - benchmark --endpoints=$endpoint --conns=100 --clients=1000 \ - range YOUR_KEY --consistency=s --total=100000 -done +# Many concurrent read requests +benchmark --endpoints=${HOST_1},${HOST_2},${HOST_3} --conns=100 --clients=1000 \ + range YOUR_KEY --consistency=l --total=100000 +benchmark --endpoints=${HOST_1},${HOST_2},${HOST_3} --conns=100 --clients=1000 \ + range YOUR_KEY --consistency=s --total=100000 ``` -We encourage running the benchmark test when setting up an etcd cluster for the first time in a new environment to ensure the cluster achieves adequate performance; cluster latency and throughput can be sensitive to minor environment differences. \ No newline at end of file +We encourage running the benchmark test when setting up an etcd cluster for the first time in a new environment to ensure the cluster achieves adequate performance; cluster latency and throughput can be sensitive to minor environment differences. diff --git a/github.com/coreos/etcd/Documentation/op-guide/recovery.md b/github.com/coreos/etcd/Documentation/op-guide/recovery.md index b667818967..bb4a9a4ae4 100644 --- a/github.com/coreos/etcd/Documentation/op-guide/recovery.md +++ b/github.com/coreos/etcd/Documentation/op-guide/recovery.md @@ -1,4 +1,4 @@ -## Disaster recovery +# Disaster recovery etcd is designed to withstand machine failures. An etcd cluster automatically recovers from temporary failures (e.g., machine reboots) and tolerates up to *(N-1)/2* permanent failures for a cluster of N members. 
When a member permanently fails, whether due to hardware failure or disk corruption, it loses access to the cluster. If the cluster permanently loses more than *(N-1)/2* members then it disastrously fails, irrevocably losing quorum. Once quorum is lost, the cluster cannot reach consensus and therefore cannot continue accepting updates. @@ -6,7 +6,7 @@ To recover from disastrous failure, etcd v3 provides snapshot and restore facili [v2_recover]: ../v2/admin_guide.md#disaster-recovery -### Snapshotting the keyspace +## Snapshotting the keyspace Recovering a cluster first needs a snapshot of the keyspace from an etcd member. A snapshot may either be taken from a live member with the `etcdctl snapshot save` command or by copying the `member/snap/db` file from an etcd data directory. For example, the following command snapshots the keyspace served by `$ENDPOINT` to the file `snapshot.db`: @@ -14,7 +14,7 @@ Recovering a cluster first needs a snapshot of the keyspace from an etcd member. $ ETCDCTL_API=3 etcdctl --endpoints $ENDPOINT snapshot save snapshot.db ``` -### Restoring a cluster +## Restoring a cluster To restore a cluster, all that is needed is a single snapshot "db" file. A cluster restore with `etcdctl snapshot restore` creates new etcd data directories; all members should restore using the same snapshot. Restoring overwrites some snapshot metadata (specifically, the member ID and cluster ID); the member loses its former identity. This metadata overwrite prevents the new member from inadvertently joining an existing cluster. Therefore in order to start a cluster from a snapshot, the restore must start a new logical cluster. diff --git a/github.com/coreos/etcd/Documentation/op-guide/runtime-configuration.md b/github.com/coreos/etcd/Documentation/op-guide/runtime-configuration.md index 89e660a441..678e40d1ec 100644 --- a/github.com/coreos/etcd/Documentation/op-guide/runtime-configuration.md +++ b/github.com/coreos/etcd/Documentation/op-guide/runtime-configuration.md @@ -2,7 +2,7 @@ etcd comes with support for incremental runtime reconfiguration, which allows users to update the membership of the cluster at run time. -Reconfiguration requests can only be processed when a majority of cluster members are functioning. It is **highly recommended** to always have a cluster size greater than two in production. It is unsafe to remove a member from a two member cluster. The majority of a two member cluster is also two. If there is a failure during the removal process, the cluster might not able to make progress and need to [restart from majority failure][majority failure]. +Reconfiguration requests can only be processed when a majority of cluster members are functioning. It is **highly recommended** to always have a cluster size greater than two in production. It is unsafe to remove a member from a two member cluster. The majority of a two member cluster is also two. If there is a failure during the removal process, the cluster might not be able to make progress and need to [restart from majority failure][majority failure]. To better understand the design behind runtime reconfiguration, please read [the runtime reconfiguration document][runtime-reconf]. 
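Before the detailed operations below, here is a minimal sketch of the incremental workflow with `etcdctl` v3; the member name, peer URL, and member ID shown are hypothetical.

```sh
# Inspect the current membership and note the hexadecimal member IDs.
ETCDCTL_API=3 etcdctl member list

# Add a new member, declaring the peer URL it will advertise.
ETCDCTL_API=3 etcdctl member add node4 --peer-urls=http://10.0.1.13:2380

# Remove a member by its ID once it has been replaced or is no longer needed.
ETCDCTL_API=3 etcdctl member remove 8e9e05c52164694d
```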
@@ -41,7 +41,7 @@ Before making any change, a simple majority (quorum) of etcd members must be ava All changes to the cluster must be done sequentially: * To update a single member peerURLs, issue an update operation -* To replace a healthy single member, add a new member then remove the old member +* To replace a healthy single member, remove the old member then add a new member * To increase from 3 to 5 members, issue two add operations * To decrease from 5 to 3, issue two remove operations @@ -55,9 +55,9 @@ To update the advertise client URLs of a member, simply restart that member with #### Update advertise peer URLs -To update the advertise peer URLs of a member, first update it explicitly via member command and then restart the member. The additional action is required since updating peer URLs changes the cluster wide configuration and can affect the health of the etcd cluster. +To update the advertise peer URLs of a member, first update it explicitly via member command and then restart the member. The additional action is required since updating peer URLs changes the cluster wide configuration and can affect the health of the etcd cluster. -To update the peer URLs, first find the target member's ID. To list all members with `etcdctl`: +To update the advertise peer URLs, first find the target member's ID. To list all members with `etcdctl`: ```sh $ etcdctl member list @@ -69,7 +69,7 @@ a8266ecf031671f3: name=node1 peerURLs=http://localhost:23801 clientURLs=http://1 This example will `update` a8266ecf031671f3 member ID and change its peerURLs value to `http://10.0.1.10:2380`: ```sh -$ etcdctl member update a8266ecf031671f3 http://10.0.1.10:2380 +$ etcdctl member update a8266ecf031671f3 --peer-urls=http://10.0.1.10:2380 Updated member with ID a8266ecf031671f3 in cluster ``` diff --git a/github.com/coreos/etcd/Documentation/op-guide/runtime-reconf-design.md b/github.com/coreos/etcd/Documentation/op-guide/runtime-reconf-design.md index 80b0117412..3632301c4c 100644 --- a/github.com/coreos/etcd/Documentation/op-guide/runtime-reconf-design.md +++ b/github.com/coreos/etcd/Documentation/op-guide/runtime-reconf-design.md @@ -10,13 +10,13 @@ In etcd, every runtime reconfiguration has to go through [two phases][add-member Phase 1 - Inform cluster of new configuration -To add a member into etcd cluster, make an API call to request a new member to be added to the cluster. This is only way to add a new member into an existing cluster. The API call returns when the cluster agrees on the configuration change. +To add a member into etcd cluster, make an API call to request a new member to be added to the cluster. This is the only way to add a new member into an existing cluster. The API call returns when the cluster agrees on the configuration change. Phase 2 - Start new member -To join the etcd member into the existing cluster, specify the correct `initial-cluster` and set `initial-cluster-state` to `existing`. When the member starts, it will contact the existing cluster first and verify the current cluster configuration matches the expected one specified in `initial-cluster`. When the new member successfully starts, the cluster has reached the expected configuration. +To join the new etcd member into the existing cluster, specify the correct `initial-cluster` and set `initial-cluster-state` to `existing`. When the member starts, it will contact the existing cluster first and verify the current cluster configuration matches the expected one specified in `initial-cluster`. 
When the new member successfully starts, the cluster has reached the expected configuration. -By splitting the process into two discrete phases users are forced to be explicit regarding cluster membership changes. This actually gives users more flexibility and makes things easier to reason about. For example, if there is an attempt to add a new member with the same ID as an existing member in an etcd cluster, the action will fail immediately during phase one without impacting the running cluster. Similar protection is provided to prevent adding new members by mistake. If a new etcd member attempts to join the cluster before the cluster has accepted the configuration change,, it will not be accepted by the cluster. +By splitting the process into two discrete phases users are forced to be explicit regarding cluster membership changes. This actually gives users more flexibility and makes things easier to reason about. For example, if there is an attempt to add a new member with the same ID as an existing member in an etcd cluster, the action will fail immediately during phase one without impacting the running cluster. Similar protection is provided to prevent adding new members by mistake. If a new etcd member attempts to join the cluster before the cluster has accepted the configuration change, it will not be accepted by the cluster. Without the explicit workflow around cluster membership etcd would be vulnerable to unexpected cluster membership changes. For example, if etcd is running under an init system such as systemd, etcd would be restarted after being removed via the membership API, and attempt to rejoin the cluster on startup. This cycle would continue every time a member is removed via the API and systemd is set to restart etcd after failing, which is unexpected. @@ -26,21 +26,21 @@ We expect runtime reconfiguration to be an infrequent operation. We decided to k If a cluster permanently loses a majority of its members, a new cluster will need to be started from an old data directory to recover the previous state. -It is entirely possible to force removing the failed members from the existing cluster to recover. However, we decided not to support this method since it bypasses the normal consensus committing phase, which is unsafe. If the member to remove is not actually dead or force removed through different members in the same cluster, etcd will end up with a diverged cluster with same clusterID. This is very dangerous and hard to debug/fix afterwards. +It is entirely possible to force removing the failed members from the existing cluster to recover. However, we decided not to support this method since it bypasses the normal consensus committing phase, which is unsafe. If the member to remove is not actually dead or force removed through different members in the same cluster, etcd will end up with a diverged cluster with same clusterID. This is very dangerous and hard to debug/fix afterwards. -With a correct deployment, the possibility of permanent majority lose is very low. But it is a severe enough problem that worth special care. We strongly suggest reading the [disaster recovery documentation][disaster-recovery] and prepare for permanent majority lose before putting etcd into production. +With a correct deployment, the possibility of permanent majority lose is very low. But it is a severe enough problem that worth special care. 
We strongly suggest reading the [disaster recovery documentation][disaster-recovery] and preparing for permanent majority lose before putting etcd into production. ## Do not use public discovery service for runtime reconfiguration -The public discovery service should only be used for bootstrapping a cluster. To join member into an existing cluster, use runtime reconfiguration API. +The public discovery service should only be used for bootstrapping a cluster. To join member into an existing cluster, use runtime reconfiguration API. Discovery service is designed for bootstrapping an etcd cluster in the cloud environment, when the IP addresses of all the members are not known beforehand. After successfully bootstrapping a cluster, the IP addresses of all the members are known. Technically, the discovery service should no longer be needed. -It seems that using public discovery service is a convenient way to do runtime reconfiguration, after all discovery service already has all the cluster configuration information. However relying on public discovery service brings troubles: +It seems that using public discovery service is a convenient way to do runtime reconfiguration, after all discovery service already has all the cluster configuration information. However relying on public discovery service brings troubles: 1. it introduces external dependencies for the entire life-cycle of the cluster, not just bootstrap time. If there is a network issue between the cluster and public discovery service, the cluster will suffer from it. - -2. public discovery service must reflect correct runtime configuration of the cluster during it life-cycle. It has to provide security mechanism to avoid bad actions, and it is hard. + +2. public discovery service must reflect correct runtime configuration of the cluster during it life-cycle. It has to provide security mechanism to avoid bad actions, and it is hard. 3. public discovery service has to keep tens of thousands of cluster configurations. Our public discovery service backend is not ready for that workload. diff --git a/github.com/coreos/etcd/Documentation/op-guide/security.md b/github.com/coreos/etcd/Documentation/op-guide/security.md index 755ccee14b..7c0aedc0c2 100644 --- a/github.com/coreos/etcd/Documentation/op-guide/security.md +++ b/github.com/coreos/etcd/Documentation/op-guide/security.md @@ -1,4 +1,4 @@ -# Security model +# Transport security model etcd supports automatic TLS as well as authentication through client certificates for both clients to server as well as peer (server to server / cluster) communication. @@ -16,7 +16,7 @@ etcd takes several certificate related configuration options, either through com `--key-file=`: Key for the certificate. Must be unencrypted. -`--client-cert-auth`: When this is set etcd will check all incoming HTTPS requests for a client certificate signed by the trusted CA, requests that don't supply a valid client certificate will fail. +`--client-cert-auth`: When this is set etcd will check all incoming HTTPS requests for a client certificate signed by the trusted CA, requests that don't supply a valid client certificate will fail. If [authentication][auth] is enabled, the certificate provides credentials for the user name given by the Common Name field. `--trusted-ca-file=`: Trusted certificate authority. 
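To make the interaction of these transport-security flags concrete, here is a minimal sketch (not part of this patch; certificate paths and URLs are placeholders) of serving clients over TLS with client-certificate authentication enabled:

```sh
# serve clients over HTTPS and require a client certificate signed by the trusted CA
etcd --name infra0 \
  --cert-file=/path/to/server.crt --key-file=/path/to/server.key \
  --client-cert-auth --trusted-ca-file=/path/to/ca.crt \
  --advertise-client-urls=https://127.0.0.1:2379 \
  --listen-client-urls=https://127.0.0.1:2379

# a request without a valid client certificate is rejected; with one, it succeeds
# and, if authentication is enabled, maps to the user named in the cert's Common Name
curl --cacert /path/to/ca.crt --cert /path/to/client.crt --key /path/to/client.key \
  -L https://127.0.0.1:2379/health
```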
@@ -222,3 +222,4 @@ The certificate needs to be signed for the member's FQDN in its Subject Name, us [tls-setup]: ../../hack/tls-setup [tls-guide]: https://github.com/coreos/docs/blob/master/os/generate-self-signed-certificates.md [alt-name]: http://wiki.cacert.org/FAQ/subjectAltName +[auth]: authentication.md diff --git a/github.com/coreos/etcd/Documentation/op-guide/supported-platform.md b/github.com/coreos/etcd/Documentation/op-guide/supported-platform.md index d42418dae5..37affd7c95 100644 --- a/github.com/coreos/etcd/Documentation/op-guide/supported-platform.md +++ b/github.com/coreos/etcd/Documentation/op-guide/supported-platform.md @@ -1,8 +1,8 @@ -## Supported platforms +# Supported systems -### Current support +## Current support -The following table lists etcd support status for common architectures and operating systems, +The following table lists etcd support status for common architectures and operating systems: | Architecture | Operating System | Status | Maintainers | | ------------ | ---------------- | ------------ | --------------------------- | @@ -18,7 +18,7 @@ The following table lists etcd support status for common architectures and opera Experimental platforms appear to work in practice and have some platform specific code in etcd, but do not fully conform to the stable support policy. Unstable platforms have been lightly tested, but less than experimental. Unlisted architecture and operating system pairs are currently unsupported; caveat emptor. -### Supporting a new platform +## Supporting a new system platform For etcd to officially support a new platform as stable, a few requirements are necessary to ensure acceptable quality: @@ -28,7 +28,7 @@ For etcd to officially support a new platform as stable, a few requirements are 4. Set up CI (TravisCI, SemaphoreCI or Jenkins) for running integration tests; etcd must pass intensive tests. 5. (Optional) Set up a functional testing cluster; an etcd cluster should survive stress testing. -### 32-bit and other unsupported systems +## 32-bit and other unsupported systems etcd has known issues on 32-bit systems due to a bug in the Go runtime. See the [Go issue][go-issue] and [atomic package][go-atomic] for more information. diff --git a/github.com/coreos/etcd/Documentation/op-guide/versioning.md b/github.com/coreos/etcd/Documentation/op-guide/versioning.md index 90c95a0ee1..192a39ac6c 100644 --- a/github.com/coreos/etcd/Documentation/op-guide/versioning.md +++ b/github.com/coreos/etcd/Documentation/op-guide/versioning.md @@ -1,6 +1,6 @@ -## Versioning +# Versioning -### Service versioning +## Service versioning etcd uses [semantic versioning](http://semver.org) New minor versions may add additional features to the API. @@ -11,7 +11,7 @@ Get the running etcd cluster version with `etcdctl`: ETCDCTL_API=3 etcdctl --endpoints=127.0.0.1:2379 endpoint status ``` -### API versioning +## API versioning The `v3` API responses should not change after the 3.0.0 release but new features will be added over time. diff --git a/github.com/coreos/etcd/Documentation/platforms/aws.md b/github.com/coreos/etcd/Documentation/platforms/aws.md index debb176bc1..fe5f38b168 100644 --- a/github.com/coreos/etcd/Documentation/platforms/aws.md +++ b/github.com/coreos/etcd/Documentation/platforms/aws.md @@ -1,4 +1,4 @@ -## Introduction +# Amazon Web Services This guide assumes operational knowledge of Amazon Web Services (AWS), specifically Amazon Elastic Compute Cloud (EC2). 
This guide provides an introduction to design considerations when designing an etcd deployment on AWS EC2 and how AWS specific features may be utilized in that context. diff --git a/github.com/coreos/etcd/Documentation/platforms/container-linux-systemd.md b/github.com/coreos/etcd/Documentation/platforms/container-linux-systemd.md new file mode 100644 index 0000000000..8945a85b98 --- /dev/null +++ b/github.com/coreos/etcd/Documentation/platforms/container-linux-systemd.md @@ -0,0 +1,203 @@ +# Container Linux with systemd + +The following guide shows how to run etcd with [systemd][systemd-docs] under [Container Linux][container-linux-docs]. + +## Provisioning an etcd cluster + +Cluster bootstrapping in Container Linux is simplest with [Ignition][container-linux-ignition]; `coreos-metadata.service` dynamically fetches the machine's IP for discovery. Note that etcd's discovery service protocol is only meant for bootstrapping, and cannot be used with runtime reconfiguration or cluster monitoring. + +The [Container Linux Config Transpiler][container-linux-ct] compiles etcd configuration files into Ignition configuration files: + +```yaml container-linux-config:norender +etcd: + version: 3.2.0 + name: s1 + data_dir: /var/lib/etcd + advertise_client_urls: http://{PUBLIC_IPV4}:2379 + initial_advertise_peer_urls: http://{PRIVATE_IPV4}:2380 + listen_client_urls: http://0.0.0.0:2379 + listen_peer_urls: http://{PRIVATE_IPV4}:2380 + discovery: https://discovery.etcd.io/ +``` + +`ct` would produce the following Ignition Config: + +``` +$ ct --platform=gce --in-file /tmp/ct-etcd.cnf +{"ignition":{"version":"2.0.0","config"... +``` + +```json ignition-config +{ + "ignition":{"version":"2.0.0","config":{}}, + "storage":{}, + "systemd":{ + "units":[{ + "name":"etcd-member.service", + "enable":true, + "dropins":[{ + "name":"20-clct-etcd-member.conf", + "contents":"[Unit]\nRequires=coreos-metadata.service\nAfter=coreos-metadata.service\n\n[Service]\nEnvironmentFile=/run/metadata/coreos\nEnvironment=\"ETCD_IMAGE_TAG=v3.1.8\"\nExecStart=\nExecStart=/usr/lib/coreos/etcd-wrapper $ETCD_OPTS \\\n --name=\"s1\" \\\n --data-dir=\"/var/lib/etcd\" \\\n --listen-peer-urls=\"http://${COREOS_GCE_IP_LOCAL_0}:2380\" \\\n --listen-client-urls=\"http://0.0.0.0:2379\" \\\n --initial-advertise-peer-urls=\"http://${COREOS_GCE_IP_LOCAL_0}:2380\" \\\n --advertise-client-urls=\"http://${COREOS_GCE_IP_EXTERNAL_0}:2379\" \\\n --discovery=\"https://discovery.etcd.io/\u003ctoken\u003e\""}]}]}, + "networkd":{}, + "passwd":{}} +``` + +To avoid accidental misconfiguration, the transpiler helpfully verifies etcd configurations when generating Ignition files: + +```yaml container-linux-config:norender +etcd: + version: 3.2.0 + name: s1 + data_dir_x: /var/lib/etcd + advertise_client_urls: http://{PUBLIC_IPV4}:2379 + initial_advertise_peer_urls: http://{PRIVATE_IPV4}:2380 + listen_client_urls: http://0.0.0.0:2379 + listen_peer_urls: http://{PRIVATE_IPV4}:2380 + discovery: https://discovery.etcd.io/ +``` + +``` +$ ct --platform=gce --in-file /tmp/ct-etcd.cnf +warning at line 3, column 2 +Config has unrecognized key: data_dir_x +``` + +See [Container Linux Provisioning][container-linux-provision] for more details. + +## etcd 3.x service + +[Container Linux][container-linux-docs] does not include etcd 3.x binaries by default. Different versions of etcd 3.x can be fetched via `etcd-member.service`. 
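If the unit was not already enabled at provisioning time (the Ignition config above sets `enable: true`), it can be enabled and started manually before running the checks below; a minimal sketch:

```sh
systemctl enable etcd-member.service
systemctl start etcd-member.service
```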
+ +Confirm unit file exists: + +``` +systemctl cat etcd-member.service +``` + +Check if the etcd service is running: + +``` +systemctl status etcd-member.service +``` + +Example systemd drop-in unit to override the default service settings: + +```bash +cat > /tmp/20-cl-etcd-member.conf < Put Body: - JSON struct, above, matching the appropriate name - * Starting password and roles when creating. + JSON struct, above, matching the appropriate name + * Starting password and roles when creating. * Grant/Revoke/Password filled in when updating (to grant roles, revoke roles, or change the password). Possible Status Codes: 200 OK @@ -345,7 +350,7 @@ PUT /v2/auth/roles/rkt 401 Unauthorized 404 Not Found (update non-existent roles) 409 Conflict (when granting duplicated permission or revoking non-existent permission) - 200 Body: + 200 Body: JSON state of the role **Remove A Role** diff --git a/github.com/coreos/etcd/Documentation/v2/authentication.md b/github.com/coreos/etcd/Documentation/v2/authentication.md index d7e0a1114b..05adf06002 100644 --- a/github.com/coreos/etcd/Documentation/v2/authentication.md +++ b/github.com/coreos/etcd/Documentation/v2/authentication.md @@ -1,3 +1,8 @@ +**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.** + +[v3-docs]: ../docs.md#documentation + + # Authentication Guide ## Overview @@ -14,7 +19,7 @@ There is one special user, `root`, and there are two special roles, `root` and ` ### User `root` -User `root` must be created before security can be activated. It has the `root` role and allows for the changing of anything inside etcd. The idea behind the `root` user is for recovery purposes -- a password is generated and stored somewhere -- and the root role is granted to the administrator accounts on the system. In the future, for troubleshooting and recovery, we will need to assume some access to the system, and future documentation will assume this root user (though anyone with the role will suffice). +User `root` must be created before security can be activated. It has the `root` role and allows for the changing of anything inside etcd. The idea behind the `root` user is for recovery purposes -- a password is generated and stored somewhere -- and the root role is granted to the administrator accounts on the system. In the future, for troubleshooting and recovery, we will need to assume some access to the system, and future documentation will assume this root user (though anyone with the role will suffice). ### Role `root` @@ -104,7 +109,7 @@ $ etcdctl role grant myrolename -path '/foo/bar' -write $ etcdctl role grant myrolename -path '/pub/*' -readwrite ``` -Beware that +Beware that ``` # Give full access to keys under /pub?? @@ -133,12 +138,12 @@ $ etcdctl role remove myrolename ## Enabling authentication -The minimal steps to enabling auth are as follows. The administrator can set up users and roles before or after enabling authentication, as a matter of preference. +The minimal steps to enabling auth are as follows. The administrator can set up users and roles before or after enabling authentication, as a matter of preference. 
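As a compact preview of the sequence, assuming placeholder user and role names, each command is covered step by step below:

```sh
$ etcdctl user add root                         # the root user must exist before enabling auth
$ etcdctl auth enable                           # turn authentication on
$ etcdctl -u root user add myusername           # then manage users ...
$ etcdctl -u root role add myrolename           # ... and roles as root
$ etcdctl -u root role grant myrolename -path '/foo/*' -readwrite
$ etcdctl -u root user grant myusername -roles myrolename
```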
Make sure the root user is created: ``` -$ etcdctl user add root +$ etcdctl user add root New password: ``` diff --git a/github.com/coreos/etcd/Documentation/v2/backward_compatibility.md b/github.com/coreos/etcd/Documentation/v2/backward_compatibility.md index a6bf506e42..3704325e7c 100644 --- a/github.com/coreos/etcd/Documentation/v2/backward_compatibility.md +++ b/github.com/coreos/etcd/Documentation/v2/backward_compatibility.md @@ -1,3 +1,8 @@ +**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.** + +[v3-docs]: ../docs.md#documentation + + # Backward Compatibility The main goal of etcd 2.0 release is to improve cluster safety around bootstrapping and dynamic reconfiguration. To do this, we deprecated the old error-prone APIs and provide a new set of APIs. @@ -32,7 +37,7 @@ The consistent flag for read operations is removed in etcd 2.0.0. The normal rea The read consistency guarantees are: -The consistent read guarantees the sequential consistency within one client that talks to one etcd server. Read/Write from one client to one etcd member should be observed in order. If one client write a value to an etcd server successfully, it should be able to get the value out of the server immediately. +The consistent read guarantees the sequential consistency within one client that talks to one etcd server. Read/Write from one client to one etcd member should be observed in order. If one client write a value to an etcd server successfully, it should be able to get the value out of the server immediately. Each etcd member will proxy the request to leader and only return the result to user after the result is applied on the local member. Thus after the write succeed, the user is guaranteed to see the value on the member it sent the request to. @@ -56,6 +61,7 @@ Proxy mode in 2.0 will provide similar functionality, and with improved control ## Discovery Service A size key needs to be provided inside a [discovery token][discoverytoken]. + [discoverytoken]: clustering.md#custom-etcd-discovery-service ## HTTP Admin API diff --git a/github.com/coreos/etcd/Documentation/v2/benchmarks/README.md b/github.com/coreos/etcd/Documentation/v2/benchmarks/README.md index 897112f328..881641a79c 100644 --- a/github.com/coreos/etcd/Documentation/v2/benchmarks/README.md +++ b/github.com/coreos/etcd/Documentation/v2/benchmarks/README.md @@ -1,3 +1,8 @@ +**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.** + +[v3-docs]: ../../docs.md#documentation + + # Benchmarks etcd benchmarks will be published regularly and tracked for each release below: diff --git a/github.com/coreos/etcd/Documentation/v2/benchmarks/etcd-2-1-0-alpha-benchmarks.md b/github.com/coreos/etcd/Documentation/v2/benchmarks/etcd-2-1-0-alpha-benchmarks.md index afa465e977..1fc808ec4d 100644 --- a/github.com/coreos/etcd/Documentation/v2/benchmarks/etcd-2-1-0-alpha-benchmarks.md +++ b/github.com/coreos/etcd/Documentation/v2/benchmarks/etcd-2-1-0-alpha-benchmarks.md @@ -1,3 +1,8 @@ +**This is the documentation for etcd2 releases. 
Read [etcd3 doc][v3-docs] for etcd3 releases.** + +[v3-docs]: ../../docs.md#documentation + + ## Physical machines GCE n1-highcpu-2 machine type diff --git a/github.com/coreos/etcd/Documentation/v2/benchmarks/etcd-2-2-0-benchmarks.md b/github.com/coreos/etcd/Documentation/v2/benchmarks/etcd-2-2-0-benchmarks.md index c15244e77f..2989c1a7da 100644 --- a/github.com/coreos/etcd/Documentation/v2/benchmarks/etcd-2-2-0-benchmarks.md +++ b/github.com/coreos/etcd/Documentation/v2/benchmarks/etcd-2-2-0-benchmarks.md @@ -1,3 +1,8 @@ +**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.** + +[v3-docs]: ../../docs.md#documentation + + # Benchmarking etcd v2.2.0 ## Physical Machines diff --git a/github.com/coreos/etcd/Documentation/v2/benchmarks/etcd-2-2-0-rc-benchmarks.md b/github.com/coreos/etcd/Documentation/v2/benchmarks/etcd-2-2-0-rc-benchmarks.md index 9d30cc44ab..9170a644ba 100644 --- a/github.com/coreos/etcd/Documentation/v2/benchmarks/etcd-2-2-0-rc-benchmarks.md +++ b/github.com/coreos/etcd/Documentation/v2/benchmarks/etcd-2-2-0-rc-benchmarks.md @@ -1,3 +1,8 @@ +**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.** + +[v3-docs]: ../../docs.md#documentation + + ## Physical machines GCE n1-highcpu-2 machine type diff --git a/github.com/coreos/etcd/Documentation/v2/benchmarks/etcd-2-2-0-rc-memory-benchmarks.md b/github.com/coreos/etcd/Documentation/v2/benchmarks/etcd-2-2-0-rc-memory-benchmarks.md index a8d9f14085..40c220eaa9 100644 --- a/github.com/coreos/etcd/Documentation/v2/benchmarks/etcd-2-2-0-rc-memory-benchmarks.md +++ b/github.com/coreos/etcd/Documentation/v2/benchmarks/etcd-2-2-0-rc-memory-benchmarks.md @@ -1,3 +1,8 @@ +**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.** + +[v3-docs]: ../../docs.md#documentation + + ## Physical machine GCE n1-standard-2 machine type diff --git a/github.com/coreos/etcd/Documentation/v2/benchmarks/etcd-3-demo-benchmarks.md b/github.com/coreos/etcd/Documentation/v2/benchmarks/etcd-3-demo-benchmarks.md index 16e941d479..cb59d173cc 100644 --- a/github.com/coreos/etcd/Documentation/v2/benchmarks/etcd-3-demo-benchmarks.md +++ b/github.com/coreos/etcd/Documentation/v2/benchmarks/etcd-3-demo-benchmarks.md @@ -1,3 +1,8 @@ +**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.** + +[v3-docs]: ../../docs.md#documentation + + ## Physical machines GCE n1-highcpu-2 machine type diff --git a/github.com/coreos/etcd/Documentation/v2/benchmarks/etcd-3-watch-memory-benchmark.md b/github.com/coreos/etcd/Documentation/v2/benchmarks/etcd-3-watch-memory-benchmark.md index 46507772f5..56ae1a2398 100644 --- a/github.com/coreos/etcd/Documentation/v2/benchmarks/etcd-3-watch-memory-benchmark.md +++ b/github.com/coreos/etcd/Documentation/v2/benchmarks/etcd-3-watch-memory-benchmark.md @@ -1,3 +1,8 @@ +**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.** + +[v3-docs]: ../../docs.md#documentation + + # Watch Memory Usage Benchmark *NOTE*: The watch features are under active development, and their memory usage may change as that development progresses. We do not expect it to significantly increase beyond the figures stated below. @@ -5,10 +10,10 @@ A primary goal of etcd is supporting a very large number of watchers doing a massively large amount of watching. 
etcd aims to support O(10k) clients, O(100K) watch streams (O(10) streams per client) and O(10M) total watchings (O(100) watching per stream). The memory consumed by each individual watching accounts for the largest portion of etcd's overall usage, and is therefore the focus of current and future optimizations. -Three related components of etcd watch consume physical memory: each `grpc.Conn`, each watch stream, and each instance of the watching activity. `grpc.Conn` maintains the actual TCP connection and other gRPC connection state. Each `grpc.Conn` consumes O(10kb) of memory, and might have multiple watch streams attached. +Three related components of etcd watch consume physical memory: each `grpc.Conn`, each watch stream, and each instance of the watching activity. `grpc.Conn` maintains the actual TCP connection and other gRPC connection state. Each `grpc.Conn` consumes O(10kb) of memory, and might have multiple watch streams attached. -Each watch stream is an independent HTTP2 connection which consumes another O(10kb) of memory. -Multiple watchings might share one watch stream. +Each watch stream is an independent HTTP2 connection which consumes another O(10kb) of memory. +Multiple watchings might share one watch stream. Watching is the actual struct that tracks the changes on the key-value store. Each watching should only consume < O(1kb). diff --git a/github.com/coreos/etcd/Documentation/v2/benchmarks/etcd-storage-memory-benchmark.md b/github.com/coreos/etcd/Documentation/v2/benchmarks/etcd-storage-memory-benchmark.md index 3834a1922a..3f75b79205 100644 --- a/github.com/coreos/etcd/Documentation/v2/benchmarks/etcd-storage-memory-benchmark.md +++ b/github.com/coreos/etcd/Documentation/v2/benchmarks/etcd-storage-memory-benchmark.md @@ -1,3 +1,8 @@ +**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.** + +[v3-docs]: ../../docs.md#documentation + + # Storage Memory Usage Benchmark @@ -60,7 +65,7 @@ GCE n1-standard-2 machine type In this test, we only benchmark the memory usage of the in-memory index. The goal is to find `c1` and `c2` mentioned above and to understand the hard limit of memory consumption of the storage. -We calculate the memory usage consumption via the Go runtime.ReadMemStats. We calculate the total allocated bytes difference before creating the index and after creating the index. It cannot perfectly reflect the memory usage of the in-memory index itself but can show the rough consumption pattern. +We calculate the memory usage consumption via the Go runtime.ReadMemStats. We calculate the total allocated bytes difference before creating the index and after creating the index. It cannot perfectly reflect the memory usage of the in-memory index itself but can show the rough consumption pattern. | N | versions | key size | memory usage | |------|----------|----------|--------------| diff --git a/github.com/coreos/etcd/Documentation/v2/branch_management.md b/github.com/coreos/etcd/Documentation/v2/branch_management.md index dcea5a36c6..45b2735423 100644 --- a/github.com/coreos/etcd/Documentation/v2/branch_management.md +++ b/github.com/coreos/etcd/Documentation/v2/branch_management.md @@ -1,3 +1,8 @@ +**This is the documentation for etcd2 releases. 
Read [etcd3 doc][v3-docs] for etcd3 releases.** + +[v3-docs]: ../docs.md#documentation + + # Branch Management ## Guide diff --git a/github.com/coreos/etcd/Documentation/v2/clustering.md b/github.com/coreos/etcd/Documentation/v2/clustering.md index 1151ef1227..f9c3e08f73 100644 --- a/github.com/coreos/etcd/Documentation/v2/clustering.md +++ b/github.com/coreos/etcd/Documentation/v2/clustering.md @@ -1,3 +1,8 @@ +**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.** + +[v3-docs]: ../docs.md#documentation + + # Clustering Guide ## Overview diff --git a/github.com/coreos/etcd/Documentation/v2/configuration.md b/github.com/coreos/etcd/Documentation/v2/configuration.md index 98a58057cb..0cc146dc0a 100644 --- a/github.com/coreos/etcd/Documentation/v2/configuration.md +++ b/github.com/coreos/etcd/Documentation/v2/configuration.md @@ -1,3 +1,8 @@ +**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.** + +[v3-docs]: ../docs.md#documentation + + # Configuration Flags etcd is configurable through command-line flags and environment variables. Options set on the command line take precedence over those from the environment. @@ -176,7 +181,10 @@ To start etcd automatically using custom settings at startup in Linux, using a [ The security flags help to [build a secure etcd cluster][security]. -### --ca-file [DEPRECATED] +### --ca-file + +**DEPRECATED** + + Path to the client server TLS CA file. `--ca-file ca.crt` could be replaced by `--trusted-ca-file ca.crt --client-cert-auth` and etcd will perform the same. + default: none + env variable: ETCD_CA_FILE @@ -201,7 +209,10 @@ The security flags help to [build a secure etcd cluster][security]. + default: none + env variable: ETCD_TRUSTED_CA_FILE -### --peer-ca-file [DEPRECATED] +### --peer-ca-file + +**DEPRECATED** + + Path to the peer server TLS CA file. `--peer-ca-file ca.crt` could be replaced by `--peer-trusted-ca-file ca.crt --peer-client-cert-auth` and etcd will perform the same. + default: none + env variable: ETCD_PEER_CA_FILE @@ -234,7 +245,7 @@ The security flags help to [build a secure etcd cluster][security]. + env variable: ETCD_DEBUG ### --log-package-levels -+ Set individual etcd subpackages to specific log levels. An example being `etcdserver=WARNING,security=DEBUG` ++ Set individual etcd subpackages to specific log levels. An example being `etcdserver=WARNING,security=DEBUG` + default: none (INFO for all packages) + env variable: ETCD_LOG_PACKAGE_LEVELS @@ -272,7 +283,7 @@ Follow the instructions when using these flags. [build-cluster]: clustering.md#static [reconfig]: runtime-configuration.md [discovery]: clustering.md#discovery -[iana-ports]: https://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=etcd +[iana-ports]: http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.txt [proxy]: proxy.md [reconfig]: runtime-configuration.md [restore]: admin_guide.md#restoring-a-backup diff --git a/github.com/coreos/etcd/Documentation/v2/dev/release.md b/github.com/coreos/etcd/Documentation/v2/dev/release.md index 1542371608..bbf061da7b 100644 --- a/github.com/coreos/etcd/Documentation/v2/dev/release.md +++ b/github.com/coreos/etcd/Documentation/v2/dev/release.md @@ -1,8 +1,13 @@ +**This is the documentation for etcd2 releases. 
Read [etcd3 doc][v3-docs] for etcd3 releases.** + +[v3-docs]: ../../docs.md#documentation + + # etcd release guide The guide talks about how to release a new version of etcd. -The procedure includes some manual steps for sanity checking but it can probably be further scripted. Please keep this document up-to-date if you want to make changes to the release process. +The procedure includes some manual steps for sanity checking but it can probably be further scripted. Please keep this document up-to-date if you want to make changes to the release process. ## Prepare Release @@ -70,7 +75,7 @@ cd release # personal GPG is okay for now for i in etcd-*{.zip,.tar.gz}; do gpg --sign ${i}; done # use `CoreOS ACI Builder ` secret key -gpg -u 88182190 -a --output etcd-${VERSION}-linux-amd64.aci.asc --detach-sig etcd-${VERSION}-linux-amd64.aci +for aci in etcd-${VERSION}.*.aci; do gpg -u 88182190 -a --output ${aci}.asc --detach-sig ${aci}; done ``` ## Publish Release Page in GitHub @@ -88,6 +93,7 @@ gpg -u 88182190 -a --output etcd-${VERSION}-linux-amd64.aci.asc --detach-sig etc ``` docker login quay.io docker push quay.io/coreos/etcd:${VERSION} +docker push quay.io/coreos/etcd:${VERSION}-${arch} ``` - Add `latest` tag to the new image on [quay.io](https://quay.io/repository/coreos/etcd?tag=latest&tab=tags) if this is a stable release. diff --git a/github.com/coreos/etcd/Documentation/v2/discovery_protocol.md b/github.com/coreos/etcd/Documentation/v2/discovery_protocol.md index c78a4c6166..b9479ac398 100644 --- a/github.com/coreos/etcd/Documentation/v2/discovery_protocol.md +++ b/github.com/coreos/etcd/Documentation/v2/discovery_protocol.md @@ -1,3 +1,8 @@ +**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.** + +[v3-docs]: ../docs.md#documentation + + # Discovery Service Protocol Discovery service protocol helps new etcd member to discover all other members in cluster bootstrap phase using a shared discovery URL. diff --git a/github.com/coreos/etcd/Documentation/v2/docker_guide.md b/github.com/coreos/etcd/Documentation/v2/docker_guide.md index 2600a9801b..74dd906885 100644 --- a/github.com/coreos/etcd/Documentation/v2/docker_guide.md +++ b/github.com/coreos/etcd/Documentation/v2/docker_guide.md @@ -1,3 +1,8 @@ +**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.** + +[v3-docs]: ../docs.md#documentation + + # Running etcd under Docker The following guide will show you how to run etcd under Docker using the [static bootstrap process](clustering.md#static). @@ -16,7 +21,7 @@ This will run the latest release version of etcd. 
You can specify version if nee ``` docker run -d -v /usr/share/ca-certificates/:/etc/ssl/certs -p 4001:4001 -p 2380:2380 -p 2379:2379 \ - --name etcd quay.io/coreos/etcd \ + --name etcd quay.io/coreos/etcd:v2.3.8 \ -name etcd0 \ -advertise-client-urls http://${HostIP}:2379,http://${HostIP}:4001 \ -listen-client-urls http://0.0.0.0:2379,http://0.0.0.0:4001 \ @@ -48,7 +53,7 @@ The main difference being the value used for the `-initial-cluster` flag, which ``` docker run -d -v /usr/share/ca-certificates/:/etc/ssl/certs -p 4001:4001 -p 2380:2380 -p 2379:2379 \ - --name etcd quay.io/coreos/etcd \ + --name etcd quay.io/coreos/etcd:v2.3.8 \ -name etcd0 \ -advertise-client-urls http://192.168.12.50:2379,http://192.168.12.50:4001 \ -listen-client-urls http://0.0.0.0:2379,http://0.0.0.0:4001 \ @@ -63,7 +68,7 @@ docker run -d -v /usr/share/ca-certificates/:/etc/ssl/certs -p 4001:4001 -p 2380 ``` docker run -d -v /usr/share/ca-certificates/:/etc/ssl/certs -p 4001:4001 -p 2380:2380 -p 2379:2379 \ - --name etcd quay.io/coreos/etcd \ + --name etcd quay.io/coreos/etcd:v2.3.8 \ -name etcd1 \ -advertise-client-urls http://192.168.12.51:2379,http://192.168.12.51:4001 \ -listen-client-urls http://0.0.0.0:2379,http://0.0.0.0:4001 \ @@ -78,7 +83,7 @@ docker run -d -v /usr/share/ca-certificates/:/etc/ssl/certs -p 4001:4001 -p 2380 ``` docker run -d -v /usr/share/ca-certificates/:/etc/ssl/certs -p 4001:4001 -p 2380:2380 -p 2379:2379 \ - --name etcd quay.io/coreos/etcd \ + --name etcd quay.io/coreos/etcd:v2.3.8 \ -name etcd2 \ -advertise-client-urls http://192.168.12.52:2379,http://192.168.12.52:4001 \ -listen-client-urls http://0.0.0.0:2379,http://0.0.0.0:4001 \ diff --git a/github.com/coreos/etcd/Documentation/v2/errorcode.md b/github.com/coreos/etcd/Documentation/v2/errorcode.md index 0078d7a045..4caf22a5b2 100644 --- a/github.com/coreos/etcd/Documentation/v2/errorcode.md +++ b/github.com/coreos/etcd/Documentation/v2/errorcode.md @@ -1,3 +1,8 @@ +**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.** + +[v3-docs]: ../docs.md#documentation + + # Error Code ====== diff --git a/github.com/coreos/etcd/Documentation/v2/etcd_alert.rules b/github.com/coreos/etcd/Documentation/v2/etcd_alert.rules index 1793cad49a..5493c56b66 100644 --- a/github.com/coreos/etcd/Documentation/v2/etcd_alert.rules +++ b/github.com/coreos/etcd/Documentation/v2/etcd_alert.rules @@ -62,7 +62,7 @@ ALERT HTTPRequestsSlow } ANNOTATIONS { summary = "slow HTTP requests", - description = "on etcd instance {{ $labels.instance }} HTTP requests to {{ $label.method }} are slow", + description = "on etcd instance {{ $labels.instance }} HTTP requests to {{ $labels.method }} are slow", } ### File descriptor alerts ### diff --git a/github.com/coreos/etcd/Documentation/v2/faq.md b/github.com/coreos/etcd/Documentation/v2/faq.md index 0e2a0ffc86..c0faa41e08 100644 --- a/github.com/coreos/etcd/Documentation/v2/faq.md +++ b/github.com/coreos/etcd/Documentation/v2/faq.md @@ -1,3 +1,8 @@ +**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.** + +[v3-docs]: ../docs.md#documentation + + # FAQ ## 1) Why can an etcd client read an old version of data when a majority of the etcd cluster members are down? 
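The Docker guide hunks above pin the container image to an explicit release tag rather than the floating default. A quick way to confirm what a pinned container is actually serving (not part of this patch; the host IP comes from the guide's examples and the response shown is only indicative):

```sh
# confirm the pinned container serves the expected release
curl -L http://192.168.12.50:2379/version
# a 2.3.x server replies with something like:
# {"etcdserver":"2.3.8","etcdcluster":"2.3.0"}
```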
diff --git a/github.com/coreos/etcd/Documentation/v2/glossary.md b/github.com/coreos/etcd/Documentation/v2/glossary.md index e9ed840e8e..70c2d40ee8 100644 --- a/github.com/coreos/etcd/Documentation/v2/glossary.md +++ b/github.com/coreos/etcd/Documentation/v2/glossary.md @@ -1,3 +1,8 @@ +**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.** + +[v3-docs]: ../docs.md#documentation + + # Glossary This document defines the various terms used in etcd documentation, command line and source code. diff --git a/github.com/coreos/etcd/Documentation/v2/implementation-faq.md b/github.com/coreos/etcd/Documentation/v2/implementation-faq.md index d6d68d7134..027c47aaf0 100644 --- a/github.com/coreos/etcd/Documentation/v2/implementation-faq.md +++ b/github.com/coreos/etcd/Documentation/v2/implementation-faq.md @@ -1,3 +1,8 @@ +**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.** + +[v3-docs]: ../docs.md#documentation + + # FAQ ## Initial Bootstrapping UX diff --git a/github.com/coreos/etcd/Documentation/v2/internal-protocol-versioning.md b/github.com/coreos/etcd/Documentation/v2/internal-protocol-versioning.md index 6df1fd402d..68d716e5f9 100644 --- a/github.com/coreos/etcd/Documentation/v2/internal-protocol-versioning.md +++ b/github.com/coreos/etcd/Documentation/v2/internal-protocol-versioning.md @@ -1,3 +1,8 @@ +**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.** + +[v3-docs]: ../docs.md#documentation + + # Versioning Goal: We want to be able to upgrade an individual peer in an etcd cluster to a newer version of etcd. diff --git a/github.com/coreos/etcd/Documentation/v2/libraries-and-tools.md b/github.com/coreos/etcd/Documentation/v2/libraries-and-tools.md index 620b2f1000..806a5d9028 100644 --- a/github.com/coreos/etcd/Documentation/v2/libraries-and-tools.md +++ b/github.com/coreos/etcd/Documentation/v2/libraries-and-tools.md @@ -1,3 +1,8 @@ +**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.** + +[v3-docs]: ../docs.md#documentation + + # Libraries and Tools **Tools** @@ -115,7 +120,6 @@ - [mattn/etcdenv](https://github.com/mattn/etcdenv) - "env" shebang with etcd integration - [kelseyhightower/confd](https://github.com/kelseyhightower/confd) - Manage local app config files using templates and data from etcd - [configdb](https://git.autistici.org/ai/configdb/tree/master) - A REST relational abstraction on top of arbitrary database backends, aimed at storing configs and inventories. -- [scrz](https://github.com/scrz/scrz) - Container manager, stores configuration in etcd. - [fleet](https://github.com/coreos/fleet) - Distributed init system - [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes) - Container cluster manager introduced by Google. - [mailgun/vulcand](https://github.com/mailgun/vulcand) - HTTP proxy that uses etcd as a configuration backend. diff --git a/github.com/coreos/etcd/Documentation/v2/members_api.md b/github.com/coreos/etcd/Documentation/v2/members_api.md index 9c52fe790a..a9ff6a0439 100644 --- a/github.com/coreos/etcd/Documentation/v2/members_api.md +++ b/github.com/coreos/etcd/Documentation/v2/members_api.md @@ -1,3 +1,8 @@ +**This is the documentation for etcd2 releases. 
Read [etcd3 doc][v3-docs] for etcd3 releases.** + +[v3-docs]: ../docs.md#documentation + + # Members API * [List members](#list-members) diff --git a/github.com/coreos/etcd/Documentation/v2/metrics.md b/github.com/coreos/etcd/Documentation/v2/metrics.md index 857d1934ed..596c14b643 100644 --- a/github.com/coreos/etcd/Documentation/v2/metrics.md +++ b/github.com/coreos/etcd/Documentation/v2/metrics.md @@ -1,3 +1,8 @@ +**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.** + +[v3-docs]: ../docs.md#documentation + + # Metrics etcd uses [Prometheus][prometheus] for metrics reporting. The metrics can be used for real-time monitoring and debugging. etcd does not persist its metrics; if a member restarts, the metrics will be reset. @@ -14,9 +19,9 @@ The metrics under the `etcd` prefix are for monitoring and alerting. They are st ### http requests -These metrics describe the serving of requests (non-watch events) served by etcd members in non-proxy mode: total +These metrics describe the serving of requests (non-watch events) served by etcd members in non-proxy mode: total incoming requests, request failures and processing latency (inc. raft rounds for storage). They are useful for tracking - user-generated traffic hitting the etcd cluster . + user-generated traffic hitting the etcd cluster . All these metrics are prefixed with `etcd_http_` @@ -28,20 +33,20 @@ All these metrics are prefixed with `etcd_http_` Example Prometheus queries that may be useful from these metrics (across all etcd members): - - * `sum(rate(etcd_http_failed_total{job="etcd"}[1m]) by (method) / sum(rate(etcd_http_events_received_total{job="etcd"})[1m]) by (method)` - + + * `sum(rate(etcd_http_failed_total{job="etcd"}[1m]) by (method) / sum(rate(etcd_http_events_received_total{job="etcd"})[1m]) by (method)` + Shows the fraction of events that failed by HTTP method across all members, across a time window of `1m`. - + * `sum(rate(etcd_http_received_total{job="etcd",method="GET})[1m]) by (method)` `sum(rate(etcd_http_received_total{job="etcd",method~="GET})[1m]) by (method)` - + Shows the rate of successful readonly/write queries across all servers, across a time window of `1m`. - + * `histogram_quantile(0.9, sum(rate(etcd_http_successful_duration_seconds{job="etcd",method="GET"}[5m]) ) by (le))` `histogram_quantile(0.9, sum(rate(etcd_http_successful_duration_seconds{job="etcd",method!="GET"}[5m]) ) by (le))` - - Show the 0.90-tile latency (in seconds) of read/write (respectively) event handling across all members, with a window of `5m`. + + Show the 0.90-tile latency (in seconds) of read/write (respectively) event handling across all members, with a window of `5m`. ### proxy @@ -56,21 +61,21 @@ All these metrics are prefixed with `etcd_proxy_` | requests_total | Total number of requests by this proxy instance. | Counter(method) | | handled_total | Total number of fully handled requests, with responses from etcd members. | Counter(method) | | dropped_total | Total number of dropped requests due to forwarding errors to etcd members.  | Counter(method,error) | -| handling_duration_seconds | Bucketed handling times by HTTP method, including round trip to member instances. | Histogram(method) | +| handling_duration_seconds | Bucketed handling times by HTTP method, including round trip to member instances. 
| Histogram(method) | Example Prometheus queries that may be useful from these metrics (across all etcd servers): * `sum(rate(etcd_proxy_handled_total{job="etcd"}[1m])) by (method)` - - Rate of requests (by HTTP method) handled by all proxies, across a window of `1m`. + + Rate of requests (by HTTP method) handled by all proxies, across a window of `1m`. * `histogram_quantile(0.9, sum(rate(handling_duration_seconds{job="etcd",method="GET"}[5m])) by (le))` `histogram_quantile(0.9, sum(rate(handling_duration_seconds{job="etcd",method!="GET"}[5m])) by (le))` - - Show the 0.90-tile latency (in seconds) of handling of user requests across all proxy machines, with a window of `5m`. - + + Show the 0.90-tile latency (in seconds) of handling of user requests across all proxy machines, with a window of `5m`. + * `sum(rate(etcd_proxy_dropped_total{job="etcd"}[1m])) by (proxying_error)` - + Number of failed request on the proxy. This should be 0, spikes here indicate connectivity issues to the etcd cluster. ## etcd_debugging namespace metrics diff --git a/github.com/coreos/etcd/Documentation/v2/other_apis.md b/github.com/coreos/etcd/Documentation/v2/other_apis.md index 29866a445c..339d9f8e55 100644 --- a/github.com/coreos/etcd/Documentation/v2/other_apis.md +++ b/github.com/coreos/etcd/Documentation/v2/other_apis.md @@ -1,3 +1,8 @@ +**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.** + +[v3-docs]: ../docs.md#documentation + + # Miscellaneous APIs * [Getting the etcd version](#getting-the-etcd-version) diff --git a/github.com/coreos/etcd/Documentation/v2/platforms/freebsd.md b/github.com/coreos/etcd/Documentation/v2/platforms/freebsd.md index c84ac5a19d..891ea6f53d 100644 --- a/github.com/coreos/etcd/Documentation/v2/platforms/freebsd.md +++ b/github.com/coreos/etcd/Documentation/v2/platforms/freebsd.md @@ -1,3 +1,8 @@ +**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.** + +[v3-docs]: ../../docs.md#documentation + + # FreeBSD Starting with version 0.1.2 both etcd and etcdctl have been ported to FreeBSD and can diff --git a/github.com/coreos/etcd/Documentation/v2/production-users.md b/github.com/coreos/etcd/Documentation/v2/production-users.md index 893fe66c29..addef2a926 100644 --- a/github.com/coreos/etcd/Documentation/v2/production-users.md +++ b/github.com/coreos/etcd/Documentation/v2/production-users.md @@ -1,3 +1,8 @@ +**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.** + +[v3-docs]: ../docs.md#documentation + + # Production Users This document tracks people and use cases for etcd in production. By creating a list of production use cases we hope to build a community of advisors that we can reach out to with experience using various etcd applications, operation environments, and cluster sizes. The etcd development team may reach out periodically to check-in on your experience and update this list. diff --git a/github.com/coreos/etcd/Documentation/v2/proxy.md b/github.com/coreos/etcd/Documentation/v2/proxy.md index af6a477332..1489b01525 100644 --- a/github.com/coreos/etcd/Documentation/v2/proxy.md +++ b/github.com/coreos/etcd/Documentation/v2/proxy.md @@ -1,3 +1,8 @@ +**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.** + +[v3-docs]: ../docs.md#documentation + + # Proxy etcd can run as a transparent proxy. Doing so allows for easy discovery of etcd within your infrastructure, since it can run on each machine as a local service. 
In this mode, etcd acts as a reverse proxy and forwards client requests to an active etcd cluster. The etcd proxy does not participate in the consensus replication of the etcd cluster, thus it neither increases the resilience nor decreases the write performance of the etcd cluster. diff --git a/github.com/coreos/etcd/Documentation/v2/reporting_bugs.md b/github.com/coreos/etcd/Documentation/v2/reporting_bugs.md index 0187ab0207..1f5880faa8 100644 --- a/github.com/coreos/etcd/Documentation/v2/reporting_bugs.md +++ b/github.com/coreos/etcd/Documentation/v2/reporting_bugs.md @@ -1,6 +1,11 @@ +**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.** + +[v3-docs]: ../docs.md#documentation + + # Reporting Bugs -If you find bugs or documentation mistakes in the etcd project, please let us know by [opening an issue][issue]. We treat bugs and mistakes very seriously and believe no issue is too small. Before creating a bug report, please check that an issue reporting the same problem does not already exist. +If you find bugs or documentation mistakes in the etcd project, please let us know by [opening an issue][etcd-issue]. We treat bugs and mistakes very seriously and believe no issue is too small. Before creating a bug report, please check that an issue reporting the same problem does not already exist. To make your bug report accurate and easy to understand, please try to create bug reports that are: diff --git a/github.com/coreos/etcd/Documentation/v2/rfc/v3api.md b/github.com/coreos/etcd/Documentation/v2/rfc/v3api.md index cab075e2bf..18567d36b0 100644 --- a/github.com/coreos/etcd/Documentation/v2/rfc/v3api.md +++ b/github.com/coreos/etcd/Documentation/v2/rfc/v3api.md @@ -1,3 +1,8 @@ +**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.** + +[v3-docs]: ../../docs.md#documentation + + # Overview The etcd v3 API is designed to give users a more efficient and cleaner abstraction compared to etcd v2. There are a number of semantic and protocol changes in this new API. For an overview [see Xiang Li's video](https://youtu.be/J5AioGtEPeQ?t=211). @@ -7,25 +12,25 @@ To prove out the design of the v3 API the team has also built [a number of examp # Design 1. Flatten binary key-value space - + 2. Keep the event history until compaction - access to old version of keys - user controlled history compaction - + 3. Support range query - Pagination support with limit argument - Support consistency guarantee across multiple range queries - + 4. Replace TTL key with Lease - more efficient/ low cost keep alive - a logical group of TTL keys - + 5. Replace CAS/CAD with multi-object Txn - MUCH MORE powerful and flexible - + 6. Support efficient watching with multiple ranges -7. RPC API supports the completed set of APIs. +7. RPC API supports the completed set of APIs. - more efficient than JSON/HTTP - additional txn/lease support @@ -56,7 +61,7 @@ the size in the future a little bit or make it configurable. 
// A put is always successful Put( PutRequest { key = foo, value = bar } ) -PutResponse { +PutResponse { cluster_id = 0x1000, member_id = 0x1, revision = 1, @@ -119,7 +124,7 @@ RangeResponse { Txn(TxnRequest { // mod_revision of foo0 is equal to 1, mod_revision of foo1 is greater than 1 compare = { - {compareType = equal, key = foo0, mod_revision = 1}, + {compareType = equal, key = foo0, mod_revision = 1}, {compareType = greater, key = foo1, mod_revision = 1}} }, // if the comparison succeeds, put foo2 = bar2 @@ -156,7 +161,7 @@ Watch( WatchRequest{ end_revision = 10000, // server decided notification frequency progress_notification = true, - } + } … // this can be a watch request stream ) @@ -176,7 +181,7 @@ WatchResponse { }, } … - + // a notification at 2000 WatchResponse { cluster_id = 0x1000, @@ -185,9 +190,9 @@ WatchResponse { raft_term = 0x1, // nil event as notification } - - … - + + … + // put (foo0=bar3000) event at 3000 WatchResponse { cluster_id = 0x1000, @@ -204,8 +209,8 @@ WatchResponse { }, } … - + ``` -[api-protobuf]: https://github.com/coreos/etcd/blob/master/etcdserver/etcdserverpb/rpc.proto -[kv-protobuf]: https://github.com/coreos/etcd/blob/master/storage/storagepb/kv.proto +[api-protobuf]: https://github.com/coreos/etcd/blob/release-2.3/etcdserver/etcdserverpb/rpc.proto +[kv-protobuf]: https://github.com/coreos/etcd/blob/release-2.3/storage/storagepb/kv.proto diff --git a/github.com/coreos/etcd/Documentation/v2/runtime-configuration.md b/github.com/coreos/etcd/Documentation/v2/runtime-configuration.md index c15a4896b0..a6b57b9162 100644 --- a/github.com/coreos/etcd/Documentation/v2/runtime-configuration.md +++ b/github.com/coreos/etcd/Documentation/v2/runtime-configuration.md @@ -1,3 +1,8 @@ +**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.** + +[v3-docs]: ../docs.md#documentation + + # Runtime Reconfiguration etcd comes with support for incremental runtime reconfiguration, which allows users to update the membership of the cluster at run time. @@ -61,9 +66,9 @@ A wrongly updated client URL will not affect the health of the etcd cluster. #### Update advertise peer URLs -If you would like to update the advertise peer URLs of a member, you have to first update +If you would like to update the advertise peer URLs of a member, you have to first update it explicitly via member command and then restart the member. The additional action is required -since updating peer URLs changes the cluster wide configuration and can affect the health of the etcd cluster. +since updating peer URLs changes the cluster wide configuration and can affect the health of the etcd cluster. To update the peer URLs, first, we need to find the target member's ID. You can list all members with `etcdctl`: diff --git a/github.com/coreos/etcd/Documentation/v2/runtime-reconf-design.md b/github.com/coreos/etcd/Documentation/v2/runtime-reconf-design.md index d577278656..6ee9bd6b3a 100644 --- a/github.com/coreos/etcd/Documentation/v2/runtime-reconf-design.md +++ b/github.com/coreos/etcd/Documentation/v2/runtime-reconf-design.md @@ -1,3 +1,8 @@ +**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.** + +[v3-docs]: ../docs.md#documentation + + # Design of Runtime Reconfiguration Runtime reconfiguration is one of the hardest and most error prone features in a distributed system, especially in a consensus based system like etcd. 
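For the v2 runtime-configuration guide patched above, the complete peer-URL update might look like this sketch (the member ID and URL reuse the placeholders from the v3 example earlier in this patch; v2 `etcdctl` takes the peer URL positionally):

```sh
$ etcdctl member list
$ etcdctl member update a8266ecf031671f3 http://10.0.1.10:2380
# then restart the updated member so it advertises the new peer URL
```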
@@ -10,13 +15,13 @@ In etcd, every runtime reconfiguration has to go through [two phases][add-member Phase 1 - Inform cluster of new configuration -To add a member into etcd cluster, you need to make an API call to request a new member to be added to the cluster. And this is only way that you can add a new member into an existing cluster. The API call returns when the cluster agrees on the configuration change. +To add a member into etcd cluster, you need to make an API call to request a new member to be added to the cluster. And this is the only way that you can add a new member into an existing cluster. The API call returns when the cluster agrees on the configuration change. Phase 2 - Start new member -To join the etcd member into the existing cluster, you need to specify the correct `initial-cluster` and set `initial-cluster-state` to `existing`. When the member starts, it will contact the existing cluster first and verify the current cluster configuration matches the expected one specified in `initial-cluster`. When the new member successfully starts, you know your cluster reached the expected configuration. +To join the new etcd member into the existing cluster, you need to specify the correct `initial-cluster` and set `initial-cluster-state` to `existing`. When the member starts, it will contact the existing cluster first and verify the current cluster configuration matches the expected one specified in `initial-cluster`. When the new member successfully starts, you know your cluster reached the expected configuration. -By splitting the process into two discrete phases users are forced to be explicit regarding cluster membership changes. This actually gives users more flexibility and makes things easier to reason about. For example, if there is an attempt to add a new member with the same ID as an existing member in an etcd cluster, the action will fail immediately during phase one without impacting the running cluster. Similar protection is provided to prevent adding new members by mistake. If a new etcd member attempts to join the cluster before the cluster has accepted the configuration change,, it will not be accepted by the cluster. +By splitting the process into two discrete phases users are forced to be explicit regarding cluster membership changes. This actually gives users more flexibility and makes things easier to reason about. For example, if there is an attempt to add a new member with the same ID as an existing member in an etcd cluster, the action will fail immediately during phase one without impacting the running cluster. Similar protection is provided to prevent adding new members by mistake. If a new etcd member attempts to join the cluster before the cluster has accepted the configuration change, it will not be accepted by the cluster. Without the explicit workflow around cluster membership etcd would be vulnerable to unexpected cluster membership changes. For example, if etcd is running under an init system such as systemd, etcd would be restarted after being removed via the membership API, and attempt to rejoin the cluster on startup. This cycle would continue every time a member is removed via the API and systemd is set to restart etcd after failing, which is unexpected. @@ -26,21 +31,21 @@ We think runtime reconfiguration should be a low frequent operation. We made the If a cluster permanently loses a majority of its members, a new cluster will need to be started from an old data directory to recover the previous state. 
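A sketch of that recovery path (names, URLs, and the data directory are placeholders; the linked disaster recovery document remains the authoritative procedure):

```sh
# start a one-member cluster from a surviving member's data directory,
# discarding the old membership information
$ etcd --name infra0 --data-dir /var/lib/etcd \
    --force-new-cluster \
    --initial-advertise-peer-urls http://10.0.1.10:2380 \
    --listen-peer-urls http://10.0.1.10:2380

# then grow the cluster back one member at a time using the normal
# two-phase add-member workflow described above
$ etcdctl member add infra1 http://10.0.1.11:2380
```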
-It is entirely possible to force removing the failed members from the existing cluster to recover. However, we decided not to support this method since it bypasses the normal consensus committing phase, which is unsafe. If the member to remove is not actually dead or you force to remove different members through different members in the same cluster, you will end up with diverged cluster with same clusterID. This is very dangerous and hard to debug/fix afterwards. +It is entirely possible to force removing the failed members from the existing cluster to recover. However, we decided not to support this method since it bypasses the normal consensus committing phase, which is unsafe. If the member to remove is not actually dead or you force to remove different members through different members in the same cluster, you will end up with diverged cluster with same clusterID. This is very dangerous and hard to debug/fix afterwards. If you have a correct deployment, the possibility of permanent majority lose is very low. But it is a severe enough problem that worth special care. We strongly suggest you to read the [disaster recovery documentation][disaster-recovery] and prepare for permanent majority lose before you put etcd into production. ## Do Not Use Public Discovery Service For Runtime Reconfiguration -The public discovery service should only be used for bootstrapping a cluster. To join member into an existing cluster, you should use runtime reconfiguration API. +The public discovery service should only be used for bootstrapping a cluster. To join member into an existing cluster, you should use runtime reconfiguration API. Discovery service is designed for bootstrapping an etcd cluster in the cloud environment, when you do not know the IP addresses of all the members beforehand. After you successfully bootstrap a cluster, the IP addresses of all the members are known. Technically, you should not need the discovery service any more. -It seems that using public discovery service is a convenient way to do runtime reconfiguration, after all discovery service already has all the cluster configuration information. However relying on public discovery service brings troubles: +It seems that using public discovery service is a convenient way to do runtime reconfiguration, after all discovery service already has all the cluster configuration information. However relying on public discovery service brings troubles: 1. it introduces external dependencies for the entire life-cycle of your cluster, not just bootstrap time. If there is a network issue between your cluster and public discovery service, your cluster will suffer from it. - -2. public discovery service must reflect correct runtime configuration of your cluster during it life-cycle. It has to provide security mechanism to avoid bad actions, and it is hard. + +2. public discovery service must reflect correct runtime configuration of your cluster during it life-cycle. It has to provide security mechanism to avoid bad actions, and it is hard. 3. public discovery service has to keep tens of thousands of cluster configurations. Our public discovery service backend is not ready for that workload. diff --git a/github.com/coreos/etcd/Documentation/v2/security.md b/github.com/coreos/etcd/Documentation/v2/security.md index 5800c06530..2fd196fd06 100644 --- a/github.com/coreos/etcd/Documentation/v2/security.md +++ b/github.com/coreos/etcd/Documentation/v2/security.md @@ -1,3 +1,8 @@ +**This is the documentation for etcd2 releases. 
Read [etcd3 doc][v3-docs] for etcd3 releases.** + +[v3-docs]: ../docs.md#documentation + + # Security Model etcd supports SSL/TLS as well as authentication through client certificates, both for clients to server as well as peer (server to server / cluster) communication. @@ -16,7 +21,7 @@ etcd takes several certificate related configuration options, either through com `--key-file=`: Key for the certificate. Must be unencrypted. -`--client-cert-auth`: When this is set etcd will check all incoming HTTPS requests for a client certificate signed by the trusted CA, requests that don't supply a valid client certificate will fail. +`--client-cert-auth`: When this is set etcd will check all incoming HTTPS requests for a client certificate signed by the trusted CA, requests that don't supply a valid client certificate will fail. If [authentication][auth] is enabled, the certificate provides credentials for the user name given by the Common Name field. `--trusted-ca-file=`: Trusted certificate authority. @@ -191,3 +196,4 @@ If you need your certificate to be signed for your member's FQDN in its Subject [tls-setup]: ../../hack/tls-setup [tls-guide]: https://github.com/coreos/docs/blob/master/os/generate-self-signed-certificates.md [alt-name]: http://wiki.cacert.org/FAQ/subjectAltName +[auth]: authentication.md diff --git a/github.com/coreos/etcd/Documentation/v2/tuning.md b/github.com/coreos/etcd/Documentation/v2/tuning.md index 8513b5206a..290e887cda 100644 --- a/github.com/coreos/etcd/Documentation/v2/tuning.md +++ b/github.com/coreos/etcd/Documentation/v2/tuning.md @@ -1,3 +1,8 @@ +**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.** + +[v3-docs]: ../docs.md#documentation + + # Tuning The default settings in etcd should work well for installations on a local network where the average network latency is low. diff --git a/github.com/coreos/etcd/Documentation/v2/upgrade_2_1.md b/github.com/coreos/etcd/Documentation/v2/upgrade_2_1.md index 8c83db953d..07ce357766 100644 --- a/github.com/coreos/etcd/Documentation/v2/upgrade_2_1.md +++ b/github.com/coreos/etcd/Documentation/v2/upgrade_2_1.md @@ -1,3 +1,8 @@ +**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.** + +[v3-docs]: ../docs.md#documentation + + # Upgrade etcd to 2.1 In the general case, upgrading from etcd 2.0 to 2.1 can be a zero-downtime, rolling upgrade: @@ -12,11 +17,11 @@ Before [starting an upgrade](#upgrade-procedure), read through the rest of this To upgrade an existing etcd deployment to 2.1, you must be running 2.0. If you’re running a version of etcd before 2.0, you must upgrade to [2.0][v2.0] before upgrading to 2.1. -Also, to ensure a smooth rolling upgrade, your running cluster must be healthy. You can check the health of the cluster by using `etcdctl cluster-health` command. +Also, to ensure a smooth rolling upgrade, your running cluster must be healthy. You can check the health of the cluster by using `etcdctl cluster-health` command. -### Preparedness +### Preparedness -Before upgrading etcd, always test the services relying on etcd in a staging environment before deploying the upgrade to the production environment. +Before upgrading etcd, always test the services relying on etcd in a staging environment before deploying the upgrade to the production environment. You might also want to [backup your data directory][backup-datastore] for a potential [downgrade](#downgrade). @@ -38,7 +43,7 @@ If you have even more data, this might take more time. 
If you have a data size l ### Downgrade -If all members have been upgraded to v2.1, the cluster will be upgraded to v2.1, and downgrade is **not possible**. If any member is still v2.0, the cluster will remain in v2.0, and you can go back to use v2.0 binary. +If all members have been upgraded to v2.1, the cluster will be upgraded to v2.1, and downgrade is **not possible**. If any member is still v2.0, the cluster will remain in v2.0, and you can go back to use v2.0 binary. Please [backup your data directory][backup-datastore] of all etcd members if you want to downgrade the cluster, even if it is upgraded. @@ -96,7 +101,7 @@ member 924e2e83e93f2560 is healthy member a8266ecf031671f3 is healthy ``` -#### 4. Repeat step 2 to step 3 for all other members +#### 4. Repeat step 2 to step 3 for all other members #### 5. Finish diff --git a/github.com/coreos/etcd/Documentation/v2/upgrade_2_2.md b/github.com/coreos/etcd/Documentation/v2/upgrade_2_2.md index 2f3edb0059..76fcf811ea 100644 --- a/github.com/coreos/etcd/Documentation/v2/upgrade_2_2.md +++ b/github.com/coreos/etcd/Documentation/v2/upgrade_2_2.md @@ -1,3 +1,8 @@ +**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.** + +[v3-docs]: ../docs.md#documentation + + # Upgrade etcd from 2.1 to 2.2 In the general case, upgrading from etcd 2.1 to 2.2 can be a zero-downtime, rolling upgrade: @@ -13,11 +18,11 @@ Before [starting an upgrade](#upgrade-procedure), read through the rest of this To upgrade an existing etcd deployment to 2.2, you must be running 2.1. If you’re running a version of etcd before 2.1, you must upgrade to [2.1][v2.1] before upgrading to 2.2. -Also, to ensure a smooth rolling upgrade, your running cluster must be healthy. You can check the health of the cluster by using `etcdctl cluster-health` command. +Also, to ensure a smooth rolling upgrade, your running cluster must be healthy. You can check the health of the cluster by using `etcdctl cluster-health` command. -### Preparedness +### Preparedness -Before upgrading etcd, always test the services relying on etcd in a staging environment before deploying the upgrade to the production environment. +Before upgrading etcd, always test the services relying on etcd in a staging environment before deploying the upgrade to the production environment. You might also want to [backup the data directory][backup-datastore] for a potential [downgrade]. @@ -31,11 +36,11 @@ Internally, etcd members negotiate with each other to determine the overall etcd If you have a data size larger than 100MB you should contact us before upgrading, so we can make sure the upgrades work smoothly. -Every etcd 2.2 member will do health checking across the cluster periodically. etcd 2.1 member does not support health checking. During the upgrade, etcd 2.2 member will log warning about the unhealthy state of etcd 2.1 member. You can ignore the warning. +Every etcd 2.2 member will do health checking across the cluster periodically. etcd 2.1 member does not support health checking. During the upgrade, etcd 2.2 member will log warning about the unhealthy state of etcd 2.1 member. You can ignore the warning. ### Downgrade -If all members have been upgraded to v2.2, the cluster will be upgraded to v2.2, and downgrade is **not possible**. If any member is still v2.1, the cluster will remain in v2.1, and you can go back to use v2.1 binary. +If all members have been upgraded to v2.2, the cluster will be upgraded to v2.2, and downgrade is **not possible**. 
If any member is still v2.1, the cluster will remain in v2.1, and you can go back to use v2.1 binary. Please [backup the data directory][backup-datastore] of all etcd members if you want to downgrade the cluster, even if it is upgraded. @@ -112,7 +117,7 @@ member a8266ecf031671f3 is healthy: got healthy result from http://localhost:123 cluster is healthy ``` -#### 4. Repeat step 2 to step 3 for all other members +#### 4. Repeat step 2 to step 3 for all other members #### 5. Finish diff --git a/github.com/coreos/etcd/Documentation/v2/upgrade_2_3.md b/github.com/coreos/etcd/Documentation/v2/upgrade_2_3.md index 34d948e6ad..95ddbbf416 100644 --- a/github.com/coreos/etcd/Documentation/v2/upgrade_2_3.md +++ b/github.com/coreos/etcd/Documentation/v2/upgrade_2_3.md @@ -1,3 +1,8 @@ +**This is the documentation for etcd2 releases. Read [etcd3 doc][v3-docs] for etcd3 releases.** + +[v3-docs]: ../docs.md#documentation + + ## Upgrade etcd from 2.2 to 2.3 In the general case, upgrading from etcd 2.2 to 2.3 can be a zero-downtime, rolling upgrade: diff --git a/github.com/coreos/etcd/NEWS b/github.com/coreos/etcd/NEWS index e46a4cd442..1f6c71ff20 100644 --- a/github.com/coreos/etcd/NEWS +++ b/github.com/coreos/etcd/NEWS @@ -1,3 +1,93 @@ +etcd v3.2.4 (2017-07-19) +- do not block on active client stream when stopping server +- fix gRPC proxy Snapshot RPC error handling + +etcd v3.2.3 (2017-07-14) +- let clients establish unlimited streams +- add docker tags for minor versions + e.g. docker pull quay.io/coreos/etcd:v3.2 to fetch latest v3.2 versions + +etcd v3.1.10 (2017-07-14) +- use Go 1.8.3 to fix panic on net/http.CloseNotify +- add docker tags for minor versions + e.g. docker pull quay.io/coreos/etcd:v3.1 to fetch latest v3.1 versions + +etcd v3.2.2 (2017-07-07) +- rate-limit lease revoke on expiration +- extend leases on promote to avoid queueing effect on lease expiration +- use user-provided listen address to connect to gRPC gateway + - net.Listener rewrites IPv4 0.0.0.0 to IPv6 [::], breaking IPv6 disabled hosts + - only v3.2.0, v3.2.1 are affected +- accept connection with matched IP SAN but no DNS match + - don't check DNS entries in certs if there's a matching IP +- fix 'tools/benchmark' watch command + +etcd v3.2.1 (2017-06-23) +- fix backend database in-memory index corruption issue on restore (only 3.2.0 is affected) +- fix gRPC gateway Txn marshaling issue +- fix backend database size debugging metrics + +etcd v3.2.0 (2017-06-09) +- improved backend read concurrency +- embedded etcd + - Etcd.Peers field is now []*peerListener +- RPCs + - add Election, Lock service +- native client etcdserver/api/v3client + - client "embedded" in the server +- v3 client + - LeaseTimeToLive returns TTL=-1 resp on lease not found + - clientv3.NewFromConfigFile is moved to clientv3/yaml.NewConfig + - STM prefetching + - add namespace feature + - concurrency package's elections updated to match RPC interfaces + - let client dial endpoints not in the balancer + - add ErrOldCluster with server version checking + - translate WithPrefix() into WithFromKey() for empty key +- v3 etcdctl + - add check perf command + - add --from-key flag to role grant-permission command + - lock command takes an optional command to execute +- etcd flags + - add --enable-v2 flag to configure v2 backend (enabled by default) + - add --auth-token flag +- gRPC proxy + - proxy endpoint discovery + - namespaces + - coalesce lease requests +- gateway + - support DNS SRV priority +- auth + - support Watch API + - JWT tokens +- logging, 
monitoring + - server warns large snapshot operations + - add 'etcd_debugging_server_lease_expired_total' metrics +- security + - deny incoming peer certs with wrong IP SAN + - resolve TLS DNSNames when SAN checking + - reload TLS certificates on every client connection +- release + - annotate acbuild with supports-systemd-notify + - add nsswitch.conf to Docker container image + - add ppc64le, arm64(experimental) builds + - Go 1.8.3 + - gRPC v1.2.1 + - grpc-gateway to v1.2.0 +- v2 + - allow snapshot over 512MB + +etcd v3.1.9 (2017-06-09) +- allow v2 snapshot over 512MB + +etcd v3.1.8 (2017-05-19) + +etcd v3.1.7 (2017-04-28) + +etcd v3.1.6 (2017-04-19) +- remove auth check in Status API +- fill in Auth API response header + etcd v3.1.5 (2017-03-27) - add '/etc/nsswitch.conf' file to alpine-based Docker image - fix raft memory leak issue diff --git a/github.com/coreos/etcd/README.md b/github.com/coreos/etcd/README.md index 24467d06d7..8ab28492c7 100644 --- a/github.com/coreos/etcd/README.md +++ b/github.com/coreos/etcd/README.md @@ -11,7 +11,7 @@ ![etcd Logo](logos/etcd-horizontal-color.png) -etcd is a distributed, consistent key-value store for shared configuration and service discovery, with a focus on being: +etcd is a distributed reliable key-value store for the most critical data of a distributed system, with a focus on being: * *Simple*: well-defined, user-facing API (gRPC) * *Secure*: automatic TLS with optional client cert authentication @@ -75,7 +75,7 @@ That's it! etcd is now running and serving client requests. For more The [official etcd ports][iana-ports] are 2379 for client requests, and 2380 for peer communication. -[iana-ports]: https://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=etcd +[iana-ports]: http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.txt ### Running a local etcd cluster @@ -133,5 +133,3 @@ See [reporting bugs](Documentation/reporting_bugs.md) for details about reportin ### License etcd is under the Apache 2.0 license. See the [LICENSE](LICENSE) file for details. - - diff --git a/github.com/coreos/etcd/ROADMAP.md b/github.com/coreos/etcd/ROADMAP.md index 213d125a20..f7ae890d35 100644 --- a/github.com/coreos/etcd/ROADMAP.md +++ b/github.com/coreos/etcd/ROADMAP.md @@ -6,7 +6,7 @@ This document defines a high level roadmap for etcd development. The dates below should not be considered authoritative, but rather indicative of the projected timeline of the project. The [milestones defined in GitHub](https://github.com/coreos/etcd/milestones) represent the most up-to-date and issue-for-issue plans. -etcd 3.1 is our current stable branch. The roadmap below outlines new features that will be added to etcd, and while subject to change, define what future stable will look like. +etcd 3.2 is our current stable branch. The roadmap below outlines new features that will be added to etcd, and while subject to change, define what future stable will look like. 
### etcd 3.2 (2017-May) - Stable scalable proxy diff --git a/github.com/coreos/etcd/auth/jwt.go b/github.com/coreos/etcd/auth/jwt.go index 214ae48c83..12ccc62c69 100644 --- a/github.com/coreos/etcd/auth/jwt.go +++ b/github.com/coreos/etcd/auth/jwt.go @@ -97,7 +97,9 @@ func prepareOpts(opts map[string]string) (jwtSignMethod, jwtPubKeyPath, jwtPrivK return "", "", "", ErrInvalidAuthOpts } } - + if len(jwtSignMethod) == 0 { + return "", "", "", ErrInvalidAuthOpts + } return jwtSignMethod, jwtPubKeyPath, jwtPrivKeyPath, nil } diff --git a/github.com/coreos/etcd/auth/jwt_test.go b/github.com/coreos/etcd/auth/jwt_test.go new file mode 100644 index 0000000000..35e11dee25 --- /dev/null +++ b/github.com/coreos/etcd/auth/jwt_test.go @@ -0,0 +1,94 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package auth + +import ( + "context" + "testing" +) + +const ( + jwtPubKey = "../integration/fixtures/server.crt" + jwtPrivKey = "../integration/fixtures/server.key.insecure" +) + +func TestJWTInfo(t *testing.T) { + opts := map[string]string{ + "pub-key": jwtPubKey, + "priv-key": jwtPrivKey, + "sign-method": "RS256", + } + jwt, err := newTokenProviderJWT(opts) + if err != nil { + t.Fatal(err) + } + token, aerr := jwt.assign(context.TODO(), "abc", 123) + if aerr != nil { + t.Fatal(err) + } + ai, ok := jwt.info(context.TODO(), token, 123) + if !ok { + t.Fatalf("failed to authenticate with token %s", token) + } + if ai.Revision != 123 { + t.Fatalf("expected revision 123, got %d", ai.Revision) + } + ai, ok = jwt.info(context.TODO(), "aaa", 120) + if ok || ai != nil { + t.Fatalf("expected aaa to fail to authenticate, got %+v", ai) + } +} + +func TestJWTBad(t *testing.T) { + opts := map[string]string{ + "pub-key": jwtPubKey, + "priv-key": jwtPrivKey, + "sign-method": "RS256", + } + // private key instead of public key + opts["pub-key"] = jwtPrivKey + if _, err := newTokenProviderJWT(opts); err == nil { + t.Fatalf("expected failure on missing public key") + } + opts["pub-key"] = jwtPubKey + + // public key instead of private key + opts["priv-key"] = jwtPubKey + if _, err := newTokenProviderJWT(opts); err == nil { + t.Fatalf("expected failure on missing public key") + } + opts["priv-key"] = jwtPrivKey + + // missing signing option + delete(opts, "sign-method") + if _, err := newTokenProviderJWT(opts); err == nil { + t.Fatal("expected error on missing option") + } + opts["sign-method"] = "RS256" + + // bad file for pubkey + opts["pub-key"] = "whatever" + if _, err := newTokenProviderJWT(opts); err == nil { + t.Fatalf("expected failure on missing public key") + } + opts["pub-key"] = jwtPubKey + + // bad file for private key + opts["priv-key"] = "whatever" + if _, err := newTokenProviderJWT(opts); err == nil { + t.Fatalf("expeceted failure on missing private key") + } + opts["priv-key"] = jwtPrivKey +} diff --git a/github.com/coreos/etcd/auth/simple_token.go b/github.com/coreos/etcd/auth/simple_token.go index 94d92a115e..e39678d6cf 100644 --- 
a/github.com/coreos/etcd/auth/simple_token.go +++ b/github.com/coreos/etcd/auth/simple_token.go @@ -118,6 +118,8 @@ func (t *tokenSimple) genTokenPrefix() (string, error) { func (t *tokenSimple) assignSimpleTokenToUser(username, token string) { t.simpleTokensMu.Lock() + defer t.simpleTokensMu.Unlock() + _, ok := t.simpleTokens[token] if ok { plog.Panicf("token %s is alredy used", token) @@ -125,7 +127,6 @@ func (t *tokenSimple) assignSimpleTokenToUser(username, token string) { t.simpleTokens[token] = username t.simpleTokenKeeper.addSimpleToken(token) - t.simpleTokensMu.Unlock() } func (t *tokenSimple) invalidateUser(username string) { diff --git a/github.com/coreos/etcd/auth/store.go b/github.com/coreos/etcd/auth/store.go index 33a388a796..4efa5bdd1d 100644 --- a/github.com/coreos/etcd/auth/store.go +++ b/github.com/coreos/etcd/auth/store.go @@ -162,6 +162,12 @@ type AuthStore interface { // AuthInfoFromTLS gets AuthInfo from TLS info of gRPC's context AuthInfoFromTLS(ctx context.Context) *AuthInfo + + // WithRoot generates and installs a token that can be used as a root credential + WithRoot(ctx context.Context) context.Context + + // HasRole checks that user has role + HasRole(user, role string) bool } type TokenProvider interface { @@ -282,6 +288,10 @@ func (as *authStore) Authenticate(ctx context.Context, username, password string } func (as *authStore) CheckPassword(username, password string) (uint64, error) { + if !as.isAuthEnabled() { + return 0, ErrAuthNotEnabled + } + tx := as.be.BatchTx() tx.Lock() defer tx.Unlock() @@ -988,13 +998,17 @@ func (as *authStore) AuthInfoFromTLS(ctx context.Context) *AuthInfo { } func (as *authStore) AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) { - md, ok := metadata.FromContext(ctx) + md, ok := metadata.FromIncomingContext(ctx) if !ok { return nil, nil } - ts, tok := md["token"] - if !tok { + //TODO(mitake|hexfusion) review unifying key names + ts, ok := md["token"] + if !ok { + ts, ok = md["authorization"] + } + if !ok { return nil, nil } @@ -1004,6 +1018,7 @@ func (as *authStore) AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) { plog.Warningf("invalid auth token: %s", token) return nil, ErrInvalidAuthToken } + return authInfo, nil } @@ -1053,3 +1068,55 @@ func NewTokenProvider(tokenOpts string, indexWaiter func(uint64) <-chan struct{} return nil, ErrInvalidAuthOpts } } + +func (as *authStore) WithRoot(ctx context.Context) context.Context { + if !as.isAuthEnabled() { + return ctx + } + + var ctxForAssign context.Context + if ts := as.tokenProvider.(*tokenSimple); ts != nil { + ctx1 := context.WithValue(ctx, "index", uint64(0)) + prefix, err := ts.genTokenPrefix() + if err != nil { + plog.Errorf("failed to generate prefix of internally used token") + return ctx + } + ctxForAssign = context.WithValue(ctx1, "simpleToken", prefix) + } else { + ctxForAssign = ctx + } + + token, err := as.tokenProvider.assign(ctxForAssign, "root", as.Revision()) + if err != nil { + // this must not happen + plog.Errorf("failed to assign token for lease revoking: %s", err) + return ctx + } + + mdMap := map[string]string{ + "token": token, + } + tokenMD := metadata.New(mdMap) + return metadata.NewContext(ctx, tokenMD) +} + +func (as *authStore) HasRole(user, role string) bool { + tx := as.be.BatchTx() + tx.Lock() + defer tx.Unlock() + + u := getUser(tx, user) + if u == nil { + plog.Warningf("tried to check user %s has role %s, but user %s doesn't exist", user, role, user) + return false + } + + for _, r := range u.Roles { + if role == r { + return true + 
} + } + + return false +} diff --git a/github.com/coreos/etcd/auth/store_test.go b/github.com/coreos/etcd/auth/store_test.go index bf0a4fc931..f2a25aac6b 100644 --- a/github.com/coreos/etcd/auth/store_test.go +++ b/github.com/coreos/etcd/auth/store_test.go @@ -453,7 +453,8 @@ func TestAuthInfoFromCtx(t *testing.T) { t.Errorf("expected (nil, nil), got (%v, %v)", ai, err) } - ctx = metadata.NewContext(context.Background(), metadata.New(map[string]string{"tokens": "dummy"})) + // as if it came from RPC + ctx = metadata.NewIncomingContext(context.Background(), metadata.New(map[string]string{"tokens": "dummy"})) ai, err = as.AuthInfoFromCtx(ctx) if err != nil && ai != nil { t.Errorf("expected (nil, nil), got (%v, %v)", ai, err) @@ -465,19 +466,19 @@ func TestAuthInfoFromCtx(t *testing.T) { t.Error(err) } - ctx = metadata.NewContext(context.Background(), metadata.New(map[string]string{"token": "Invalid Token"})) + ctx = metadata.NewIncomingContext(context.Background(), metadata.New(map[string]string{"token": "Invalid Token"})) _, err = as.AuthInfoFromCtx(ctx) if err != ErrInvalidAuthToken { t.Errorf("expected %v, got %v", ErrInvalidAuthToken, err) } - ctx = metadata.NewContext(context.Background(), metadata.New(map[string]string{"token": "Invalid.Token"})) + ctx = metadata.NewIncomingContext(context.Background(), metadata.New(map[string]string{"token": "Invalid.Token"})) _, err = as.AuthInfoFromCtx(ctx) if err != ErrInvalidAuthToken { t.Errorf("expected %v, got %v", ErrInvalidAuthToken, err) } - ctx = metadata.NewContext(context.Background(), metadata.New(map[string]string{"token": resp.Token})) + ctx = metadata.NewIncomingContext(context.Background(), metadata.New(map[string]string{"token": resp.Token})) ai, err = as.AuthInfoFromCtx(ctx) if err != nil { t.Error(err) @@ -521,7 +522,7 @@ func TestAuthInfoFromCtxRace(t *testing.T) { donec := make(chan struct{}) go func() { defer close(donec) - ctx := metadata.NewContext(context.Background(), metadata.New(map[string]string{"token": "test"})) + ctx := metadata.NewIncomingContext(context.Background(), metadata.New(map[string]string{"token": "test"})) as.AuthInfoFromCtx(ctx) }() as.UserAdd(&pb.AuthUserAddRequest{Name: "test"}) diff --git a/github.com/coreos/etcd/bill-of-materials.json b/github.com/coreos/etcd/bill-of-materials.json new file mode 100644 index 0000000000..578361371c --- /dev/null +++ b/github.com/coreos/etcd/bill-of-materials.json @@ -0,0 +1,388 @@ +[ + { + "project": "bitbucket.org/ww/goautoneg", + "licenses": [ + { + "type": "BSD 3-clause \"New\" or \"Revised\" License", + "confidence": 1 + } + ] + }, + { + "project": "github.com/beorn7/perks/quantile", + "licenses": [ + { + "type": "MIT License", + "confidence": 0.9891304347826086 + } + ] + }, + { + "project": "github.com/bgentry/speakeasy", + "licenses": [ + { + "type": "MIT License", + "confidence": 0.9441624365482234 + } + ] + }, + { + "project": "github.com/cockroachdb/cmux", + "licenses": [ + { + "type": "Apache License 2.0", + "confidence": 1 + } + ] + }, + { + "project": "github.com/coreos/bbolt", + "licenses": [ + { + "type": "MIT License", + "confidence": 1 + } + ] + }, + { + "project": "github.com/coreos/etcd", + "licenses": [ + { + "type": "Apache License 2.0", + "confidence": 1 + } + ] + }, + { + "project": "github.com/coreos/go-semver/semver", + "licenses": [ + { + "type": "Apache License 2.0", + "confidence": 1 + } + ] + }, + { + "project": "github.com/coreos/go-systemd", + "licenses": [ + { + "type": "Apache License 2.0", + "confidence": 0.9966703662597114 + } + ] + }, 
+ { + "project": "github.com/coreos/pkg", + "licenses": [ + { + "type": "Apache License 2.0", + "confidence": 1 + } + ] + }, + { + "project": "github.com/cpuguy83/go-md2man/md2man", + "licenses": [ + { + "type": "MIT License", + "confidence": 1 + } + ] + }, + { + "project": "github.com/dgrijalva/jwt-go", + "licenses": [ + { + "type": "MIT License", + "confidence": 0.9891304347826086 + } + ] + }, + { + "project": "github.com/dustin/go-humanize", + "licenses": [ + { + "type": "MIT License", + "confidence": 0.96875 + } + ] + }, + { + "project": "github.com/ghodss/yaml", + "licenses": [ + { + "type": "MIT License and BSD 3-clause \"New\" or \"Revised\" License", + "confidence": 1 + } + ] + }, + { + "project": "github.com/gogo/protobuf/proto", + "licenses": [ + { + "type": "BSD 3-clause \"New\" or \"Revised\" License", + "confidence": 0.9090909090909091 + } + ] + }, + { + "project": "github.com/golang/groupcache/lru", + "licenses": [ + { + "type": "Apache License 2.0", + "confidence": 0.9966703662597114 + } + ] + }, + { + "project": "github.com/golang/protobuf", + "licenses": [ + { + "type": "BSD 3-clause \"New\" or \"Revised\" License", + "confidence": 0.92 + } + ] + }, + { + "project": "github.com/google/btree", + "licenses": [ + { + "type": "Apache License 2.0", + "confidence": 1 + } + ] + }, + { + "project": "github.com/grpc-ecosystem/go-grpc-prometheus", + "licenses": [ + { + "type": "Apache License 2.0", + "confidence": 1 + } + ] + }, + { + "project": "github.com/grpc-ecosystem/grpc-gateway", + "licenses": [ + { + "type": "BSD 3-clause \"New\" or \"Revised\" License", + "confidence": 0.979253112033195 + } + ] + }, + { + "project": "github.com/inconshreveable/mousetrap", + "licenses": [ + { + "type": "MIT License and BSD 3-clause \"New\" or \"Revised\" License", + "confidence": 1 + }, + { + "type": "Apache License 2.0", + "confidence": 1 + } + ] + }, + { + "project": "github.com/jonboulle/clockwork", + "licenses": [ + { + "type": "Apache License 2.0", + "confidence": 1 + } + ] + }, + { + "project": "github.com/mattn/go-runewidth", + "licenses": [ + { + "type": "MIT License", + "confidence": 1 + } + ] + }, + { + "project": "github.com/matttproud/golang_protobuf_extensions/pbutil", + "licenses": [ + { + "type": "Apache License 2.0", + "confidence": 1 + } + ] + }, + { + "project": "github.com/olekukonko/tablewriter", + "licenses": [ + { + "type": "MIT License", + "confidence": 0.9891304347826086 + } + ] + }, + { + "project": "github.com/prometheus/client_golang/prometheus", + "licenses": [ + { + "type": "Apache License 2.0", + "confidence": 1 + } + ] + }, + { + "project": "github.com/prometheus/client_model/go", + "licenses": [ + { + "type": "Apache License 2.0", + "confidence": 1 + } + ] + }, + { + "project": "github.com/prometheus/common", + "licenses": [ + { + "type": "Apache License 2.0", + "confidence": 1 + } + ] + }, + { + "project": "github.com/prometheus/procfs", + "licenses": [ + { + "type": "Apache License 2.0", + "confidence": 1 + } + ] + }, + { + "project": "github.com/russross/blackfriday", + "licenses": [ + { + "type": "BSD 2-clause \"Simplified\" License", + "confidence": 0.9626168224299065 + } + ] + }, + { + "project": "github.com/spf13/cobra", + "licenses": [ + { + "type": "Apache License 2.0", + "confidence": 0.9573241061130334 + } + ] + }, + { + "project": "github.com/spf13/pflag", + "licenses": [ + { + "type": "BSD 3-clause \"New\" or \"Revised\" License", + "confidence": 0.9663865546218487 + } + ] + }, + { + "project": "github.com/ugorji/go/codec", + "licenses": [ + { + 
"type": "MIT License", + "confidence": 0.9946524064171123 + } + ] + }, + { + "project": "github.com/urfave/cli", + "licenses": [ + { + "type": "MIT License", + "confidence": 1 + } + ] + }, + { + "project": "github.com/xiang90/probing", + "licenses": [ + { + "type": "MIT License", + "confidence": 1 + } + ] + }, + { + "project": "golang.org/x/crypto", + "licenses": [ + { + "type": "BSD 3-clause \"New\" or \"Revised\" License", + "confidence": 0.9663865546218487 + } + ] + }, + { + "project": "golang.org/x/net", + "licenses": [ + { + "type": "BSD 3-clause \"New\" or \"Revised\" License", + "confidence": 0.9663865546218487 + } + ] + }, + { + "project": "golang.org/x/text", + "licenses": [ + { + "type": "BSD 3-clause \"New\" or \"Revised\" License", + "confidence": 0.9663865546218487 + } + ] + }, + { + "project": "golang.org/x/time/rate", + "licenses": [ + { + "type": "BSD 3-clause \"New\" or \"Revised\" License", + "confidence": 0.9663865546218487 + } + ] + }, + { + "project": "google.golang.org/genproto/googleapis/rpc/status", + "licenses": [ + { + "type": "Apache License 2.0", + "confidence": 1 + } + ] + }, + { + "project": "google.golang.org/grpc", + "licenses": [ + { + "type": "BSD 3-clause \"New\" or \"Revised\" License", + "confidence": 0.979253112033195 + } + ] + }, + { + "project": "gopkg.in/cheggaaa/pb.v1", + "licenses": [ + { + "type": "BSD 3-clause \"New\" or \"Revised\" License", + "confidence": 0.9916666666666667 + } + ] + }, + { + "project": "gopkg.in/yaml.v2", + "licenses": [ + { + "type": "The Unlicense", + "confidence": 0.35294117647058826 + }, + { + "type": "MIT License", + "confidence": 0.8975609756097561 + } + ] + } +] diff --git a/github.com/coreos/etcd/bill-of-materials.override.json b/github.com/coreos/etcd/bill-of-materials.override.json new file mode 100644 index 0000000000..34de90e714 --- /dev/null +++ b/github.com/coreos/etcd/bill-of-materials.override.json @@ -0,0 +1,26 @@ +[ + { + "project": "bitbucket.org/ww/goautoneg", + "licenses": [ + { + "type": "BSD 3-clause \"New\" or \"Revised\" License" + } + ] + }, + { + "project": "github.com/ghodss/yaml", + "licenses": [ + { + "type": "MIT License and BSD 3-clause \"New\" or \"Revised\" License" + } + ] + }, + { + "project": "github.com/inconshreveable/mousetrap", + "licenses": [ + { + "type": "Apache License 2.0" + } + ] + } +] diff --git a/github.com/coreos/etcd/build b/github.com/coreos/etcd/build index 4f5b805748..36be42eb20 100755 --- a/github.com/coreos/etcd/build +++ b/github.com/coreos/etcd/build @@ -3,9 +3,7 @@ # set some environment variables ORG_PATH="github.com/coreos" REPO_PATH="${ORG_PATH}/etcd" -export GO15VENDOREXPERIMENT="1" -eval $(go env) GIT_SHA=`git rev-parse --short HEAD || echo "GitNotFound"` if [ ! -z "$FAILPOINTS" ]; then GIT_SHA="$GIT_SHA"-FAILPOINTS @@ -17,11 +15,7 @@ GO_LDFLAGS="$GO_LDFLAGS -X ${REPO_PATH}/cmd/vendor/${REPO_PATH}/version.GitSHA=$ # enable/disable failpoints toggle_failpoints() { FAILPKGS="etcdserver/ mvcc/backend/" - - mode="disable" - if [ ! -z "$FAILPOINTS" ]; then mode="enable"; fi - if [ ! -z "$1" ]; then mode="$1"; fi - + mode="$1" if which gofail >/dev/null 2>&1; then gofail "$mode" $FAILPKGS elif [ "$mode" != "disable" ]; then @@ -30,19 +24,26 @@ toggle_failpoints() { fi } +toggle_failpoints_default() { + mode="disable" + if [ ! 
-z "$FAILPOINTS" ]; then mode="enable"; fi + toggle_failpoints "$mode" +} + etcd_build() { out="bin" if [ -n "${BINDIR}" ]; then out="${BINDIR}"; fi - toggle_failpoints + toggle_failpoints_default # Static compilation is useful when etcd is run in a container CGO_ENABLED=0 go build $GO_BUILD_FLAGS -installsuffix cgo -ldflags "$GO_LDFLAGS" -o ${out}/etcd ${REPO_PATH}/cmd/etcd || return CGO_ENABLED=0 go build $GO_BUILD_FLAGS -installsuffix cgo -ldflags "$GO_LDFLAGS" -o ${out}/etcdctl ${REPO_PATH}/cmd/etcdctl || return } etcd_setup_gopath() { - CDIR=$(cd `dirname "$0"` && pwd) + d=$(dirname "$0") + CDIR=$(cd "$d" && pwd) cd "$CDIR" - etcdGOPATH=${CDIR}/gopath + etcdGOPATH="${CDIR}/gopath" # preserve old gopath to support building with unvendored tooling deps (e.g., gofail) if [ -n "$GOPATH" ]; then GOPATH=":$GOPATH" @@ -53,7 +54,7 @@ etcd_setup_gopath() { ln -s ${CDIR}/cmd/vendor ${etcdGOPATH}/src } -toggle_failpoints +toggle_failpoints_default # only build when called directly, not sourced if echo "$0" | grep "build$" >/dev/null; then diff --git a/github.com/coreos/etcd/client/discover.go b/github.com/coreos/etcd/client/discover.go index bfd7aec93f..442e35fe54 100644 --- a/github.com/coreos/etcd/client/discover.go +++ b/github.com/coreos/etcd/client/discover.go @@ -14,8 +14,27 @@ package client +import ( + "github.com/coreos/etcd/pkg/srv" +) + // Discoverer is an interface that wraps the Discover method. type Discoverer interface { // Discover looks up the etcd servers for the domain. Discover(domain string) ([]string, error) } + +type srvDiscover struct{} + +// NewSRVDiscover constructs a new Discoverer that uses the stdlib to lookup SRV records. +func NewSRVDiscover() Discoverer { + return &srvDiscover{} +} + +func (d *srvDiscover) Discover(domain string) ([]string, error) { + srvs, err := srv.GetClient("etcd-client", domain) + if err != nil { + return nil, err + } + return srvs.Endpoints, nil +} diff --git a/github.com/coreos/etcd/client/doc.go b/github.com/coreos/etcd/client/doc.go index 32fdfb52c6..dd336d1887 100644 --- a/github.com/coreos/etcd/client/doc.go +++ b/github.com/coreos/etcd/client/doc.go @@ -59,7 +59,7 @@ Use a custom context to set timeouts on your operations: ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() - // set a new key, ignoring it's previous state + // set a new key, ignoring its previous state _, err := kAPI.Set(ctx, "/ping", "pong", nil) if err != nil { if err == context.DeadlineExceeded { diff --git a/github.com/coreos/etcd/client/example_keys_test.go b/github.com/coreos/etcd/client/example_keys_test.go new file mode 100644 index 0000000000..105a74791b --- /dev/null +++ b/github.com/coreos/etcd/client/example_keys_test.go @@ -0,0 +1,93 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package client_test + +import ( + "fmt" + "log" + "sort" + + "github.com/coreos/etcd/client" + "golang.org/x/net/context" +) + +func ExampleKeysAPI_directory() { + c, err := client.New(client.Config{ + Endpoints: exampleEndpoints, + Transport: exampleTransport, + }) + if err != nil { + log.Fatal(err) + } + kapi := client.NewKeysAPI(c) + + // Setting '/myNodes' to create a directory that will hold some keys. + o := client.SetOptions{Dir: true} + resp, err := kapi.Set(context.Background(), "/myNodes", "", &o) + if err != nil { + log.Fatal(err) + } + + // Add keys to /myNodes directory. + resp, err = kapi.Set(context.Background(), "/myNodes/key1", "value1", nil) + if err != nil { + log.Fatal(err) + } + resp, err = kapi.Set(context.Background(), "/myNodes/key2", "value2", nil) + if err != nil { + log.Fatal(err) + } + + // fetch directory + resp, err = kapi.Get(context.Background(), "/myNodes", nil) + if err != nil { + log.Fatal(err) + } + // print directory keys + sort.Sort(resp.Node.Nodes) + for _, n := range resp.Node.Nodes { + fmt.Printf("Key: %q, Value: %q\n", n.Key, n.Value) + } + + // Output: + // Key: "/myNodes/key1", Value: "value1" + // Key: "/myNodes/key2", Value: "value2" +} + +func ExampleKeysAPI_setget() { + c, err := client.New(client.Config{ + Endpoints: exampleEndpoints, + Transport: exampleTransport, + }) + if err != nil { + log.Fatal(err) + } + kapi := client.NewKeysAPI(c) + + // Set key "/foo" to value "bar". + resp, err := kapi.Set(context.Background(), "/foo", "bar", nil) + if err != nil { + log.Fatal(err) + } + // Get key "/foo" + resp, err = kapi.Get(context.Background(), "/foo", nil) + if err != nil { + log.Fatal(err) + } + + fmt.Printf("%q key has %q value\n", resp.Node.Key, resp.Node.Value) + + // Output: "/foo" key has "bar" value +} diff --git a/github.com/coreos/etcd/client/main_test.go b/github.com/coreos/etcd/client/main_test.go new file mode 100644 index 0000000000..747740601b --- /dev/null +++ b/github.com/coreos/etcd/client/main_test.go @@ -0,0 +1,77 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client_test + +import ( + "fmt" + "net/http" + "os" + "regexp" + "strings" + "testing" + "time" + + "github.com/coreos/etcd/integration" + "github.com/coreos/etcd/pkg/testutil" + "github.com/coreos/etcd/pkg/transport" +) + +var exampleEndpoints []string +var exampleTransport *http.Transport + +// TestMain sets up an etcd cluster if running the examples. +func TestMain(m *testing.M) { + useCluster, hasRunArg := false, false // default to running only Test* + for _, arg := range os.Args { + if strings.HasPrefix(arg, "-test.run=") { + exp := strings.Split(arg, "=")[1] + match, err := regexp.MatchString(exp, "Example") + useCluster = (err == nil && match) || strings.Contains(exp, "Example") + hasRunArg = true + break + } + } + if !hasRunArg { + // force only running Test* if no args given to avoid leak false + // positives from having a long-running cluster for the examples. 
+ os.Args = append(os.Args, "-test.run=Test") + } + + v := 0 + if useCluster { + tr, trerr := transport.NewTransport(transport.TLSInfo{}, time.Second) + if trerr != nil { + fmt.Fprintf(os.Stderr, "%v", trerr) + os.Exit(1) + } + cfg := integration.ClusterConfig{Size: 1} + clus := integration.NewClusterV3(nil, &cfg) + exampleEndpoints = []string{clus.Members[0].URL()} + exampleTransport = tr + v = m.Run() + clus.Terminate(nil) + if err := testutil.CheckAfterTest(time.Second); err != nil { + fmt.Fprintf(os.Stderr, "%v", err) + os.Exit(1) + } + } else { + v = m.Run() + } + + if v == 0 && testutil.CheckLeakedGoroutine() { + os.Exit(1) + } + os.Exit(v) +} diff --git a/github.com/coreos/etcd/client/members.go b/github.com/coreos/etcd/client/members.go index 23adf07ad9..2054895606 100644 --- a/github.com/coreos/etcd/client/members.go +++ b/github.com/coreos/etcd/client/members.go @@ -44,7 +44,7 @@ type Member struct { PeerURLs []string `json:"peerURLs"` // ClientURLs represents the HTTP(S) endpoints on which this Member - // serves it's client-facing APIs. + // serves its client-facing APIs. ClientURLs []string `json:"clientURLs"` } diff --git a/github.com/coreos/etcd/client/srv.go b/github.com/coreos/etcd/client/srv.go deleted file mode 100644 index fdfa343592..0000000000 --- a/github.com/coreos/etcd/client/srv.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "fmt" - "net" - "net/url" -) - -var ( - // indirection for testing - lookupSRV = net.LookupSRV -) - -type srvDiscover struct{} - -// NewSRVDiscover constructs a new Discoverer that uses the stdlib to lookup SRV records. -func NewSRVDiscover() Discoverer { - return &srvDiscover{} -} - -// Discover looks up the etcd servers for the domain. -func (d *srvDiscover) Discover(domain string) ([]string, error) { - var urls []*url.URL - - updateURLs := func(service, scheme string) error { - _, addrs, err := lookupSRV(service, "tcp", domain) - if err != nil { - return err - } - for _, srv := range addrs { - urls = append(urls, &url.URL{ - Scheme: scheme, - Host: net.JoinHostPort(srv.Target, fmt.Sprintf("%d", srv.Port)), - }) - } - return nil - } - - errHTTPS := updateURLs("etcd-client-ssl", "https") - errHTTP := updateURLs("etcd-client", "http") - - if errHTTPS != nil && errHTTP != nil { - return nil, fmt.Errorf("dns lookup errors: %s and %s", errHTTPS, errHTTP) - } - - endpoints := make([]string, len(urls)) - for i := range urls { - endpoints[i] = urls[i].String() - } - return endpoints, nil -} diff --git a/github.com/coreos/etcd/client/srv_test.go b/github.com/coreos/etcd/client/srv_test.go deleted file mode 100644 index 64cf603232..0000000000 --- a/github.com/coreos/etcd/client/srv_test.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "errors" - "net" - "reflect" - "testing" -) - -func TestSRVDiscover(t *testing.T) { - defer func() { lookupSRV = net.LookupSRV }() - - tests := []struct { - withSSL []*net.SRV - withoutSSL []*net.SRV - expected []string - }{ - { - []*net.SRV{}, - []*net.SRV{}, - []string{}, - }, - { - []*net.SRV{ - {Target: "10.0.0.1", Port: 2480}, - {Target: "10.0.0.2", Port: 2480}, - {Target: "10.0.0.3", Port: 2480}, - }, - []*net.SRV{}, - []string{"https://10.0.0.1:2480", "https://10.0.0.2:2480", "https://10.0.0.3:2480"}, - }, - { - []*net.SRV{ - {Target: "10.0.0.1", Port: 2480}, - {Target: "10.0.0.2", Port: 2480}, - {Target: "10.0.0.3", Port: 2480}, - }, - []*net.SRV{ - {Target: "10.0.0.1", Port: 7001}, - }, - []string{"https://10.0.0.1:2480", "https://10.0.0.2:2480", "https://10.0.0.3:2480", "http://10.0.0.1:7001"}, - }, - { - []*net.SRV{ - {Target: "10.0.0.1", Port: 2480}, - {Target: "10.0.0.2", Port: 2480}, - {Target: "10.0.0.3", Port: 2480}, - }, - []*net.SRV{ - {Target: "10.0.0.1", Port: 7001}, - }, - []string{"https://10.0.0.1:2480", "https://10.0.0.2:2480", "https://10.0.0.3:2480", "http://10.0.0.1:7001"}, - }, - { - []*net.SRV{ - {Target: "a.example.com", Port: 2480}, - {Target: "b.example.com", Port: 2480}, - {Target: "c.example.com", Port: 2480}, - }, - []*net.SRV{}, - []string{"https://a.example.com:2480", "https://b.example.com:2480", "https://c.example.com:2480"}, - }, - } - - for i, tt := range tests { - lookupSRV = func(service string, proto string, domain string) (string, []*net.SRV, error) { - if service == "etcd-client-ssl" { - return "", tt.withSSL, nil - } - if service == "etcd-client" { - return "", tt.withoutSSL, nil - } - return "", nil, errors.New("Unknown service in mock") - } - - d := NewSRVDiscover() - - endpoints, err := d.Discover("example.com") - if err != nil { - t.Fatalf("%d: err: %#v", i, err) - } - - if !reflect.DeepEqual(endpoints, tt.expected) { - t.Errorf("#%d: endpoints = %v, want %v", i, endpoints, tt.expected) - } - - } -} diff --git a/github.com/coreos/etcd/clientv3/balancer.go b/github.com/coreos/etcd/clientv3/balancer.go index b7767221ac..6ae047e984 100644 --- a/github.com/coreos/etcd/clientv3/balancer.go +++ b/github.com/coreos/etcd/clientv3/balancer.go @@ -77,7 +77,6 @@ func newSimpleBalancer(eps []string) *simpleBalancer { for i := range eps { addrs[i].Addr = getHost(eps[i]) } - notifyCh <- addrs sb := &simpleBalancer{ addrs: addrs, notifyCh: notifyCh, @@ -89,6 +88,7 @@ func newSimpleBalancer(eps []string) *simpleBalancer { updateAddrsC: make(chan struct{}, 1), host2ep: getHost2ep(eps), } + close(sb.downc) go sb.updateNotifyLoop() return sb } @@ -170,38 +170,51 @@ func (b *simpleBalancer) updateNotifyLoop() { for { b.mu.RLock() - upc := b.upc + upc, downc, addr := b.upc, b.downc, b.pinAddr b.mu.RUnlock() - var downc chan struct{} + // downc or upc should be closed + select { + case <-downc: + downc = nil + default: + } select { case <-upc: - var addr string - b.mu.RLock() - addr = b.pinAddr - // Up() sets pinAddr and downc as a pair under b.mu - downc = b.downc - b.mu.RUnlock() - if addr == "" { - break + upc 
= nil + default: + } + switch { + case downc == nil && upc == nil: + // stale + select { + case <-b.stopc: + return + default: } - // close opened connections that are not pinAddr - // this ensures only one connection is open per client + case downc == nil: + b.notifyAddrs() select { + case <-upc: + case <-b.updateAddrsC: + b.notifyAddrs() + case <-b.stopc: + return + } + case upc == nil: + select { + // close connections that are not the pinned address case b.notifyCh <- []grpc.Address{{Addr: addr}}: + case <-downc: + case <-b.stopc: + return + } + select { + case <-downc: + case <-b.updateAddrsC: case <-b.stopc: return } - case <-b.updateAddrsC: - b.notifyAddrs() - continue - } - select { - case <-downc: - b.notifyAddrs() - case <-b.updateAddrsC: b.notifyAddrs() - case <-b.stopc: - return } } } @@ -231,23 +244,20 @@ func (b *simpleBalancer) Up(addr grpc.Address) func(error) { if !hasAddr(b.addrs, addr.Addr) { return func(err error) {} } - - if b.pinAddr == "" { - // notify waiting Get()s and pin first connected address - close(b.upc) - b.downc = make(chan struct{}) - b.pinAddr = addr.Addr - // notify client that a connection is up - b.readyOnce.Do(func() { close(b.readyc) }) + if b.pinAddr != "" { + return func(err error) {} } - + // notify waiting Get()s and pin first connected address + close(b.upc) + b.downc = make(chan struct{}) + b.pinAddr = addr.Addr + // notify client that a connection is up + b.readyOnce.Do(func() { close(b.readyc) }) return func(err error) { b.mu.Lock() - if b.pinAddr == addr.Addr { - b.upc = make(chan struct{}) - close(b.downc) - b.pinAddr = "" - } + b.upc = make(chan struct{}) + close(b.downc) + b.pinAddr = "" b.mu.Unlock() } } @@ -280,6 +290,8 @@ func (b *simpleBalancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) b.mu.RUnlock() select { case <-ch: + case <-b.donec: + return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing case <-ctx.Done(): return grpc.Address{Addr: ""}, nil, ctx.Err() } diff --git a/github.com/coreos/etcd/clientv3/client.go b/github.com/coreos/etcd/clientv3/client.go index 1b6d9b5ba6..9dbc34b1eb 100644 --- a/github.com/coreos/etcd/clientv3/client.go +++ b/github.com/coreos/etcd/clientv3/client.go @@ -31,6 +31,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" ) @@ -182,7 +183,7 @@ func parseEndpoint(endpoint string) (proto string, host string, scheme string) { host = url.Host switch url.Scheme { case "http", "https": - case "unix": + case "unix", "unixs": proto = "unix" host = url.Host + url.Path default: @@ -197,7 +198,7 @@ func (c *Client) processCreds(scheme string) (creds *credentials.TransportCreden case "unix": case "http": creds = nil - case "https": + case "https", "unixs": if creds != nil { break } @@ -215,6 +216,16 @@ func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) (opts if c.cfg.DialTimeout > 0 { opts = []grpc.DialOption{grpc.WithTimeout(c.cfg.DialTimeout)} } + if c.cfg.DialKeepAliveTime > 0 { + params := keepalive.ClientParameters{ + Time: c.cfg.DialKeepAliveTime, + } + // Only relevant when KeepAliveTime is non-zero + if c.cfg.DialKeepAliveTimeout > 0 { + params.Timeout = c.cfg.DialKeepAliveTimeout + } + opts = append(opts, grpc.WithKeepaliveParams(params)) + } opts = append(opts, dopts...) 
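	// A minimal usage sketch for the keepalive options handled above (the
	// endpoint and durations are illustrative, not defaults): with
	// DialKeepAliveTime set, the client pings the server periodically, and
	// with DialKeepAliveTimeout set, it closes the connection when a ping
	// goes unanswered for that long.
	//
	//	cli, err := clientv3.New(clientv3.Config{
	//		Endpoints:            []string{"localhost:2379"},
	//		DialTimeout:          5 * time.Second,
	//		DialKeepAliveTime:    10 * time.Second,
	//		DialKeepAliveTimeout: 3 * time.Second,
	//	})
	//	if err != nil {
	//		// handle error
	//	}
	//	defer cli.Close()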
f := func(host string, t time.Duration) (net.Conn, error) { @@ -306,19 +317,23 @@ func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientCo defer cancel() ctx = cctx } - if err := c.getToken(ctx); err != nil { - if err == ctx.Err() && ctx.Err() != c.ctx.Err() { - err = grpc.ErrClientConnTimeout + + err := c.getToken(ctx) + if err != nil { + if toErr(ctx, err) != rpctypes.ErrAuthNotEnabled { + if err == ctx.Err() && ctx.Err() != c.ctx.Err() { + err = grpc.ErrClientConnTimeout + } + return nil, err } - return nil, err + } else { + opts = append(opts, grpc.WithPerRPCCredentials(c.tokenCred)) } - - opts = append(opts, grpc.WithPerRPCCredentials(c.tokenCred)) } opts = append(opts, c.cfg.DialOptions...) - conn, err := grpc.Dial(host, opts...) + conn, err := grpc.DialContext(c.ctx, host, opts...) if err != nil { return nil, err } @@ -329,7 +344,7 @@ func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientCo // when the cluster has a leader. func WithRequireLeader(ctx context.Context) context.Context { md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader) - return metadata.NewContext(ctx, md) + return metadata.NewOutgoingContext(ctx, md) } func newClient(cfg *Config) (*Client, error) { @@ -363,7 +378,9 @@ func newClient(cfg *Config) (*Client, error) { } client.balancer = newSimpleBalancer(cfg.Endpoints) - conn, err := client.dial("", grpc.WithBalancer(client.balancer)) + // use Endpoints[0] so that for https:// without any tls config given, then + // grpc will assume the ServerName is in the endpoint. + conn, err := client.dial(cfg.Endpoints[0], grpc.WithBalancer(client.balancer)) if err != nil { client.cancel() client.balancer.Close() @@ -499,3 +516,11 @@ func toErr(ctx context.Context, err error) error { } return err } + +func canceledByCaller(stopCtx context.Context, err error) bool { + if stopCtx.Err() == nil || err == nil { + return false + } + + return err == context.Canceled || err == context.DeadlineExceeded +} diff --git a/github.com/coreos/etcd/clientv3/clientv3util/util.go b/github.com/coreos/etcd/clientv3/clientv3util/util.go index 3afbffe546..3b296343eb 100644 --- a/github.com/coreos/etcd/clientv3/clientv3util/util.go +++ b/github.com/coreos/etcd/clientv3/clientv3util/util.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +// Package clientv3util contains utility functions derived from clientv3. package clientv3util import ( diff --git a/github.com/coreos/etcd/clientv3/compare.go b/github.com/coreos/etcd/clientv3/compare.go index c55228cc0b..e01489c06f 100644 --- a/github.com/coreos/etcd/clientv3/compare.go +++ b/github.com/coreos/etcd/clientv3/compare.go @@ -99,6 +99,18 @@ func (cmp *Cmp) ValueBytes() []byte { // WithValueBytes sets the byte slice for the comparison's value. func (cmp *Cmp) WithValueBytes(v []byte) { cmp.TargetUnion.(*pb.Compare_Value).Value = v } +// WithRange sets the comparison to scan the range [key, end). +func (cmp Cmp) WithRange(end string) Cmp { + cmp.RangeEnd = []byte(end) + return cmp +} + +// WithPrefix sets the comparison to scan all keys prefixed by the key. 
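// A sketch of a prefixed comparison in a transaction (the "job/" prefix, key,
// and client variable are illustrative): the Then branch is taken only when
// the comparison holds for the keys covered by the prefix.
//
//	cmp := clientv3.Compare(clientv3.Version("job/"), "=", 0).WithPrefix()
//	resp, err := cli.Txn(ctx).If(cmp).Then(clientv3.OpPut("job/1", "v")).Commit()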
+func (cmp Cmp) WithPrefix() Cmp { + cmp.RangeEnd = getPrefix(cmp.Key) + return cmp +} + func mustInt64(val interface{}) int64 { if v, ok := val.(int64); ok { return v diff --git a/github.com/coreos/etcd/clientv3/concurrency/election.go b/github.com/coreos/etcd/clientv3/concurrency/election.go index 90c7c667c5..1d75dde3d8 100644 --- a/github.com/coreos/etcd/clientv3/concurrency/election.go +++ b/github.com/coreos/etcd/clientv3/concurrency/election.go @@ -148,9 +148,13 @@ func (e *Election) Leader(ctx context.Context) (*v3.GetResponse, error) { return resp, nil } -// Observe returns a channel that observes all leader proposal values as -// GetResponse values on the current leader key. The channel closes when -// the context is cancelled or the underlying watcher is otherwise disrupted. +// Observe returns a channel that reliably observes ordered leader proposals +// as GetResponse values on every current elected leader key. It will not +// necessarily fetch all historical leader updates, but will always post the +// most recent leader value. +// +// The channel closes when the context is canceled or the underlying watcher +// is otherwise disrupted. func (e *Election) Observe(ctx context.Context) <-chan v3.GetResponse { retc := make(chan v3.GetResponse) go e.observe(ctx, retc) @@ -161,15 +165,14 @@ func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) { client := e.session.Client() defer close(ch) - lastRev := int64(0) for { - opts := append(v3.WithFirstCreate(), v3.WithRev(lastRev)) - resp, err := client.Get(ctx, e.keyPrefix, opts...) + resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...) if err != nil { return } var kv *mvccpb.KeyValue + var hdr *pb.ResponseHeader if len(resp.Kvs) == 0 { cctx, cancel := context.WithCancel(ctx) @@ -185,18 +188,27 @@ func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) { // only accept PUTs; a DELETE will make observe() spin for _, ev := range wr.Events { if ev.Type == mvccpb.PUT { - kv = ev.Kv + hdr, kv = &wr.Header, ev.Kv + // may have multiple revs; hdr.rev = the last rev + // set to kv's rev in case batch has multiple PUTs + hdr.Revision = kv.ModRevision break } } } cancel() } else { - kv = resp.Kvs[0] + hdr, kv = resp.Header, resp.Kvs[0] + } + + select { + case ch <- v3.GetResponse{Header: hdr, Kvs: []*mvccpb.KeyValue{kv}}: + case <-ctx.Done(): + return } cctx, cancel := context.WithCancel(ctx) - wch := client.Watch(cctx, string(kv.Key), v3.WithRev(kv.ModRevision)) + wch := client.Watch(cctx, string(kv.Key), v3.WithRev(hdr.Revision+1)) keyDeleted := false for !keyDeleted { wr, ok := <-wch @@ -205,7 +217,6 @@ func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) { } for _, ev := range wr.Events { if ev.Type == mvccpb.DELETE { - lastRev = ev.Kv.ModRevision keyDeleted = true break } diff --git a/github.com/coreos/etcd/clientv3/concurrency/mutex.go b/github.com/coreos/etcd/clientv3/concurrency/mutex.go index cee15db7b3..83df27c799 100644 --- a/github.com/coreos/etcd/clientv3/concurrency/mutex.go +++ b/github.com/coreos/etcd/clientv3/concurrency/mutex.go @@ -49,7 +49,9 @@ func (m *Mutex) Lock(ctx context.Context) error { put := v3.OpPut(m.myKey, "", v3.WithLease(s.Lease())) // reuse key in case this session already holds the lock get := v3.OpGet(m.myKey) - resp, err := client.Txn(ctx).If(cmp).Then(put).Else(get).Commit() + // fetch current holder to complete uncontended path with only one RPC + getOwner := v3.OpGet(m.pfx, v3.WithFirstCreate()...) 
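	// The transaction below implements Lock; a typical caller never builds it
	// directly. A minimal usage sketch of the mutex (the session TTL and lock
	// prefix are illustrative):
	//
	//	s, err := concurrency.NewSession(cli, concurrency.WithTTL(10))
	//	if err != nil {
	//		// handle error
	//	}
	//	defer s.Close()
	//	m := concurrency.NewMutex(s, "/my-lock/")
	//	if err := m.Lock(context.TODO()); err != nil {
	//		// handle error
	//	}
	//	// critical section
	//	if err := m.Unlock(context.TODO()); err != nil {
	//		// handle error
	//	}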
+ resp, err := client.Txn(ctx).If(cmp).Then(put, getOwner).Else(get, getOwner).Commit() if err != nil { return err } @@ -57,6 +59,12 @@ func (m *Mutex) Lock(ctx context.Context) error { if !resp.Succeeded { m.myRev = resp.Responses[0].GetResponseRange().Kvs[0].CreateRevision } + // if no key on prefix / the minimum rev is key, already hold the lock + ownerKey := resp.Responses[1].GetResponseRange().Kvs + if len(ownerKey) == 0 || ownerKey[0].CreateRevision == m.myRev { + m.hdr = resp.Header + return nil + } // wait for deletion revisions prior to myKey hdr, werr := waitDeletes(ctx, client, m.pfx, m.myRev-1) diff --git a/github.com/coreos/etcd/clientv3/concurrency/stm.go b/github.com/coreos/etcd/clientv3/concurrency/stm.go index 7ab019e995..a9e69e8021 100644 --- a/github.com/coreos/etcd/clientv3/concurrency/stm.go +++ b/github.com/coreos/etcd/clientv3/concurrency/stm.go @@ -46,7 +46,7 @@ const ( // SerializableSnapshot provides serializable isolation and also checks // for write conflicts. SerializableSnapshot Isolation = iota - // Serializable reads within the same transactiona attempt return data + // Serializable reads within the same transaction attempt return data // from the at the revision of the first read. Serializable // RepeatableReads reads within the same transaction attempt always @@ -85,7 +85,7 @@ func WithPrefetch(keys ...string) stmOption { return func(so *stmOptions) { so.prefetch = append(so.prefetch, keys...) } } -// NewSTM initiates a new STM instance, using snapshot isolation by default. +// NewSTM initiates a new STM instance, using serializable snapshot isolation by default. func NewSTM(c *v3.Client, apply func(STM) error, so ...stmOption) (*v3.TxnResponse, error) { opts := &stmOptions{ctx: c.Ctx()} for _, f := range so { @@ -369,3 +369,18 @@ func respToValue(resp *v3.GetResponse) string { } return string(resp.Kvs[0].Value) } + +// NewSTMRepeatable is deprecated. +func NewSTMRepeatable(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) { + return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(RepeatableReads)) +} + +// NewSTMSerializable is deprecated. +func NewSTMSerializable(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) { + return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(Serializable)) +} + +// NewSTMReadCommitted is deprecated. +func NewSTMReadCommitted(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) { + return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(ReadCommitted)) +} diff --git a/github.com/coreos/etcd/clientv3/config.go b/github.com/coreos/etcd/clientv3/config.go index dda72a748e..d9545e430c 100644 --- a/github.com/coreos/etcd/clientv3/config.go +++ b/github.com/coreos/etcd/clientv3/config.go @@ -33,6 +33,14 @@ type Config struct { // DialTimeout is the timeout for failing to establish a connection. DialTimeout time.Duration `json:"dial-timeout"` + // DialKeepAliveTime is the time in seconds after which client pings the server to see if + // transport is alive. + DialKeepAliveTime time.Duration `json:"dial-keep-alive-time"` + + // DialKeepAliveTimeout is the time in seconds that the client waits for a response for the + // keep-alive probe. If the response is not received in this time, the connection is closed. + DialKeepAliveTimeout time.Duration `json:"dial-keep-alive-timeout"` + // TLS holds the client secure credentials, if any. 
TLS *tls.Config diff --git a/github.com/coreos/etcd/clientv3/example_metrics_test.go b/github.com/coreos/etcd/clientv3/example_metrics_test.go index 21c5f07a6d..6e5fd5e095 100644 --- a/github.com/coreos/etcd/clientv3/example_metrics_test.go +++ b/github.com/coreos/etcd/clientv3/example_metrics_test.go @@ -30,7 +30,7 @@ import ( "google.golang.org/grpc" ) -func ExampleMetrics_range() { +func ExampleClient_metrics() { cli, err := clientv3.New(clientv3.Config{ Endpoints: endpoints, DialOptions: []grpc.DialOption{ diff --git a/github.com/coreos/etcd/clientv3/integration/dial_test.go b/github.com/coreos/etcd/clientv3/integration/dial_test.go index a2ca4ab9f8..4d8075de25 100644 --- a/github.com/coreos/etcd/clientv3/integration/dial_test.go +++ b/github.com/coreos/etcd/clientv3/integration/dial_test.go @@ -23,9 +23,65 @@ import ( pb "github.com/coreos/etcd/etcdserver/etcdserverpb" "github.com/coreos/etcd/integration" "github.com/coreos/etcd/pkg/testutil" + "github.com/coreos/etcd/pkg/transport" + "golang.org/x/net/context" + "google.golang.org/grpc" +) + +var ( + testTLSInfo = transport.TLSInfo{ + KeyFile: "../../integration/fixtures/server.key.insecure", + CertFile: "../../integration/fixtures/server.crt", + TrustedCAFile: "../../integration/fixtures/ca.crt", + ClientCertAuth: true, + } + + testTLSInfoExpired = transport.TLSInfo{ + KeyFile: "../../integration/fixtures-expired/server-key.pem", + CertFile: "../../integration/fixtures-expired/server.pem", + TrustedCAFile: "../../integration/fixtures-expired/etcd-root-ca.pem", + ClientCertAuth: true, + } ) +// TestDialTLSExpired tests client with expired certs fails to dial. +func TestDialTLSExpired(t *testing.T) { + defer testutil.AfterTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, PeerTLS: &testTLSInfo, ClientTLS: &testTLSInfo}) + defer clus.Terminate(t) + + tls, err := testTLSInfoExpired.ClientConfig() + if err != nil { + t.Fatal(err) + } + // expect remote errors 'tls: bad certificate' + _, err = clientv3.New(clientv3.Config{ + Endpoints: []string{clus.Members[0].GRPCAddr()}, + DialTimeout: 3 * time.Second, + TLS: tls, + }) + if err != grpc.ErrClientConnTimeout { + t.Fatalf("expected %v, got %v", grpc.ErrClientConnTimeout, err) + } +} + +// TestDialTLSNoConfig ensures the client fails to dial / times out +// when TLS endpoints (https, unixs) are given but no tls config. +func TestDialTLSNoConfig(t *testing.T) { + defer testutil.AfterTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, ClientTLS: &testTLSInfo}) + defer clus.Terminate(t) + // expect 'signed by unknown authority' + _, err := clientv3.New(clientv3.Config{ + Endpoints: []string{clus.Members[0].GRPCAddr()}, + DialTimeout: time.Second, + }) + if err != grpc.ErrClientConnTimeout { + t.Fatalf("expected %v, got %v", grpc.ErrClientConnTimeout, err) + } +} + // TestDialSetEndpoints ensures SetEndpoints can replace unavailable endpoints with available ones. 
func TestDialSetEndpointsBeforeFail(t *testing.T) { testDialSetEndpoints(t, true) diff --git a/github.com/coreos/etcd/clientv3/integration/kv_test.go b/github.com/coreos/etcd/clientv3/integration/kv_test.go index f9f87fe20a..49f107f76d 100644 --- a/github.com/coreos/etcd/clientv3/integration/kv_test.go +++ b/github.com/coreos/etcd/clientv3/integration/kv_test.go @@ -381,6 +381,36 @@ func TestKVRange(t *testing.T) { {Key: []byte("fop"), Value: nil, CreateRevision: 9, ModRevision: 9, Version: 1}, }, }, + // fetch entire keyspace using WithFromKey + { + "\x00", "", + 0, + []clientv3.OpOption{clientv3.WithFromKey(), clientv3.WithSort(clientv3.SortByKey, clientv3.SortAscend)}, + + []*mvccpb.KeyValue{ + {Key: []byte("a"), Value: nil, CreateRevision: 2, ModRevision: 2, Version: 1}, + {Key: []byte("b"), Value: nil, CreateRevision: 3, ModRevision: 3, Version: 1}, + {Key: []byte("c"), Value: nil, CreateRevision: 4, ModRevision: 6, Version: 3}, + {Key: []byte("foo"), Value: nil, CreateRevision: 7, ModRevision: 7, Version: 1}, + {Key: []byte("foo/abc"), Value: nil, CreateRevision: 8, ModRevision: 8, Version: 1}, + {Key: []byte("fop"), Value: nil, CreateRevision: 9, ModRevision: 9, Version: 1}, + }, + }, + // fetch entire keyspace using WithPrefix + { + "", "", + 0, + []clientv3.OpOption{clientv3.WithPrefix(), clientv3.WithSort(clientv3.SortByKey, clientv3.SortAscend)}, + + []*mvccpb.KeyValue{ + {Key: []byte("a"), Value: nil, CreateRevision: 2, ModRevision: 2, Version: 1}, + {Key: []byte("b"), Value: nil, CreateRevision: 3, ModRevision: 3, Version: 1}, + {Key: []byte("c"), Value: nil, CreateRevision: 4, ModRevision: 6, Version: 3}, + {Key: []byte("foo"), Value: nil, CreateRevision: 7, ModRevision: 7, Version: 1}, + {Key: []byte("foo/abc"), Value: nil, CreateRevision: 8, ModRevision: 8, Version: 1}, + {Key: []byte("fop"), Value: nil, CreateRevision: 9, ModRevision: 9, Version: 1}, + }, + }, } for i, tt := range tests { diff --git a/github.com/coreos/etcd/clientv3/integration/lease_test.go b/github.com/coreos/etcd/clientv3/integration/lease_test.go index ee1402d5fd..65f8adcb3c 100644 --- a/github.com/coreos/etcd/clientv3/integration/lease_test.go +++ b/github.com/coreos/etcd/clientv3/integration/lease_test.go @@ -482,7 +482,8 @@ func TestLeaseTimeToLive(t *testing.T) { clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) defer clus.Terminate(t) - lapi := clus.RandClient() + c := clus.RandClient() + lapi := c resp, err := lapi.Grant(context.Background(), 10) if err != nil { @@ -497,6 +498,11 @@ func TestLeaseTimeToLive(t *testing.T) { } } + // linearized read to ensure Puts propagated to server backing lapi + if _, err := c.Get(context.TODO(), "abc"); err != nil { + t.Fatal(err) + } + lresp, lerr := lapi.TimeToLive(context.Background(), resp.ID, clientv3.WithAttachedKeys()) if lerr != nil { t.Fatal(lerr) @@ -586,17 +592,24 @@ func TestLeaseRenewLostQuorum(t *testing.T) { } // consume first keepalive so next message sends when cluster is down <-ka + lastKa := time.Now() // force keepalive stream message to timeout clus.Members[1].Stop(t) clus.Members[2].Stop(t) - // Use TTL-1 since the client closes the keepalive channel if no - // keepalive arrives before the lease deadline. - // The cluster has 1 second to recover and reply to the keepalive. 
- time.Sleep(time.Duration(r.TTL-1) * time.Second) + // Use TTL-2 since the client closes the keepalive channel if no + // keepalive arrives before the lease deadline; the client will + // try to resend a keepalive after TTL/3 seconds, so for a TTL of 4, + // sleeping for 2s should be sufficient time for issuing a retry. + // The cluster has two seconds to recover and reply to the keepalive. + time.Sleep(time.Duration(r.TTL-2) * time.Second) clus.Members[1].Restart(t) clus.Members[2].Restart(t) + if time.Since(lastKa) > time.Duration(r.TTL)*time.Second { + t.Skip("waited too long for server stop and restart") + } + select { case _, ok := <-ka: if !ok { @@ -722,6 +735,12 @@ func TestLeaseWithRequireLeader(t *testing.T) { } clus.Members[1].Stop(t) + // kaReqLeader may issue multiple requests while waiting for the first + // response from proxy server; drain any stray keepalive responses + time.Sleep(100 * time.Millisecond) + for len(kaReqLeader) > 0 { + <-kaReqLeader + } select { case resp, ok := <-kaReqLeader: diff --git a/github.com/coreos/etcd/clientv3/integration/maintenance_test.go b/github.com/coreos/etcd/clientv3/integration/maintenance_test.go new file mode 100644 index 0000000000..e5496406ed --- /dev/null +++ b/github.com/coreos/etcd/clientv3/integration/maintenance_test.go @@ -0,0 +1,53 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
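// A minimal sketch of the keep-alive flow the timing comments above depend on,
// assuming the standard clientv3 lease API (Grant plus KeepAlive). The 4-second
// TTL mirrors the lease used in the quorum-loss test.
func keepLeaseAlive(ctx context.Context, cli *clientv3.Client) error {
	grant, err := cli.Grant(ctx, 4) // TTL in seconds; keepalives are resent roughly every TTL/3
	if err != nil {
		return err
	}
	ka, err := cli.KeepAlive(ctx, grant.ID)
	if err != nil {
		return err
	}
	for range ka {
		// each response refreshes the lease; the channel closes once the lease
		// expires or the context is canceled
	}
	return ctx.Err()
}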
+ +package integration + +import ( + "context" + "testing" + + "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" + "github.com/coreos/etcd/integration" + "github.com/coreos/etcd/pkg/testutil" +) + +func TestMaintenanceMoveLeader(t *testing.T) { + defer testutil.AfterTest(t) + + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + defer clus.Terminate(t) + + oldLeadIdx := clus.WaitLeader(t) + targetIdx := (oldLeadIdx + 1) % 3 + target := uint64(clus.Members[targetIdx].ID()) + + cli := clus.Client(targetIdx) + _, err := cli.MoveLeader(context.Background(), target) + if err != rpctypes.ErrNotLeader { + t.Fatalf("error expected %v, got %v", rpctypes.ErrNotLeader, err) + } + + cli = clus.Client(oldLeadIdx) + _, err = cli.MoveLeader(context.Background(), target) + if err != nil { + t.Fatal(err) + } + + leadIdx := clus.WaitLeader(t) + lead := uint64(clus.Members[leadIdx].ID()) + if target != lead { + t.Fatalf("new leader expected %d, got %d", target, lead) + } +} diff --git a/github.com/coreos/etcd/clientv3/integration/txn_test.go b/github.com/coreos/etcd/clientv3/integration/txn_test.go index b5cdea7e64..e895b5b625 100644 --- a/github.com/coreos/etcd/clientv3/integration/txn_test.go +++ b/github.com/coreos/etcd/clientv3/integration/txn_test.go @@ -20,7 +20,7 @@ import ( "time" "github.com/coreos/etcd/clientv3" - "github.com/coreos/etcd/etcdserver/api/v3rpc" + "github.com/coreos/etcd/embed" "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" "github.com/coreos/etcd/integration" "github.com/coreos/etcd/pkg/testutil" @@ -41,7 +41,7 @@ func TestTxnError(t *testing.T) { t.Fatalf("expected %v, got %v", rpctypes.ErrDuplicateKey, err) } - ops := make([]clientv3.Op, v3rpc.MaxOpsPerTxn+10) + ops := make([]clientv3.Op, int(embed.DefaultMaxTxnOps+10)) for i := range ops { ops[i] = clientv3.OpPut(fmt.Sprintf("foo%d", i), "") } @@ -106,27 +106,35 @@ func TestTxnReadRetry(t *testing.T) { defer clus.Terminate(t) kv := clus.Client(0) - clus.Members[0].Stop(t) - <-clus.Members[0].StopNotify() - donec := make(chan struct{}) - go func() { - ctx := context.TODO() - _, err := kv.Txn(ctx).Then(clientv3.OpGet("foo")).Commit() - if err != nil { - t.Fatalf("expected response, got error %v", err) + thenOps := [][]clientv3.Op{ + {clientv3.OpGet("foo")}, + {clientv3.OpTxn(nil, []clientv3.Op{clientv3.OpGet("foo")}, nil)}, + {clientv3.OpTxn(nil, nil, nil)}, + {}, + } + for i := range thenOps { + clus.Members[0].Stop(t) + <-clus.Members[0].StopNotify() + + donec := make(chan struct{}) + go func() { + _, err := kv.Txn(context.TODO()).Then(thenOps[i]...).Commit() + if err != nil { + t.Fatalf("expected response, got error %v", err) + } + donec <- struct{}{} + }() + // wait for txn to fail on disconnect + time.Sleep(100 * time.Millisecond) + + // restart node; client should resume + clus.Members[0].Restart(t) + select { + case <-donec: + case <-time.After(2 * clus.Members[1].ServerConfig.ReqTimeout()): + t.Fatalf("waited too long") } - donec <- struct{}{} - }() - // wait for txn to fail on disconnect - time.Sleep(100 * time.Millisecond) - - // restart node; client should resume - clus.Members[0].Restart(t) - select { - case <-donec: - case <-time.After(2 * clus.Members[1].ServerConfig.ReqTimeout()): - t.Fatalf("waited too long") } } @@ -152,3 +160,68 @@ func TestTxnSuccess(t *testing.T) { t.Fatalf("unexpected Get response %v", resp) } } + +func TestTxnCompareRange(t *testing.T) { + defer testutil.AfterTest(t) + + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + defer 
clus.Terminate(t) + + kv := clus.Client(0) + fooResp, err := kv.Put(context.TODO(), "foo/", "bar") + if err != nil { + t.Fatal(err) + } + if _, err = kv.Put(context.TODO(), "foo/a", "baz"); err != nil { + t.Fatal(err) + } + tresp, terr := kv.Txn(context.TODO()).If( + clientv3.Compare( + clientv3.CreateRevision("foo/"), "=", fooResp.Header.Revision). + WithPrefix(), + ).Commit() + if terr != nil { + t.Fatal(terr) + } + if tresp.Succeeded { + t.Fatal("expected prefix compare to false, got compares as true") + } +} + +func TestTxnNested(t *testing.T) { + defer testutil.AfterTest(t) + + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + defer clus.Terminate(t) + + kv := clus.Client(0) + + tresp, err := kv.Txn(context.TODO()). + If(clientv3.Compare(clientv3.Version("foo"), "=", 0)). + Then( + clientv3.OpPut("foo", "bar"), + clientv3.OpTxn(nil, []clientv3.Op{clientv3.OpPut("abc", "123")}, nil)). + Else(clientv3.OpPut("foo", "baz")).Commit() + if err != nil { + t.Fatal(err) + } + if len(tresp.Responses) != 2 { + t.Errorf("expected 2 top-level txn responses, got %+v", tresp.Responses) + } + + // check txn writes were applied + resp, err := kv.Get(context.TODO(), "foo") + if err != nil { + t.Fatal(err) + } + if len(resp.Kvs) != 1 || string(resp.Kvs[0].Value) != "bar" { + t.Errorf("unexpected Get response %+v", resp) + } + resp, err = kv.Get(context.TODO(), "abc") + if err != nil { + t.Fatal(err) + } + if len(resp.Kvs) != 1 || string(resp.Kvs[0].Value) != "123" { + t.Errorf("unexpected Get response %+v", resp) + } +} diff --git a/github.com/coreos/etcd/clientv3/integration/watch_test.go b/github.com/coreos/etcd/clientv3/integration/watch_test.go index c4dd86ea99..4c9f14757c 100644 --- a/github.com/coreos/etcd/clientv3/integration/watch_test.go +++ b/github.com/coreos/etcd/clientv3/integration/watch_test.go @@ -343,7 +343,57 @@ func putAndWatch(t *testing.T, wctx *watchctx, key, val string) { } } -// TestWatchResumeComapcted checks that the watcher gracefully closes in case +func TestWatchResumeInitRev(t *testing.T) { + defer testutil.AfterTest(t) + clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}) + defer clus.Terminate(t) + + cli := clus.Client(0) + if _, err := cli.Put(context.TODO(), "b", "2"); err != nil { + t.Fatal(err) + } + if _, err := cli.Put(context.TODO(), "a", "3"); err != nil { + t.Fatal(err) + } + // if resume is broken, it'll pick up this key first instead of a=3 + if _, err := cli.Put(context.TODO(), "a", "4"); err != nil { + t.Fatal(err) + } + + wch := clus.Client(0).Watch(context.Background(), "a", clientv3.WithRev(1), clientv3.WithCreatedNotify()) + if resp, ok := <-wch; !ok || resp.Header.Revision != 4 { + t.Fatalf("got (%v, %v), expected create notification rev=4", resp, ok) + } + // pause wch + clus.Members[0].DropConnections() + clus.Members[0].PauseConnections() + + select { + case resp, ok := <-wch: + t.Skipf("wch should block, got (%+v, %v); drop not fast enough", resp, ok) + case <-time.After(100 * time.Millisecond): + } + + // resume wch + clus.Members[0].UnpauseConnections() + + select { + case resp, ok := <-wch: + if !ok { + t.Fatal("unexpected watch close") + } + if len(resp.Events) == 0 { + t.Fatal("expected event on watch") + } + if string(resp.Events[0].Kv.Value) != "3" { + t.Fatalf("expected value=3, got event %+v", resp.Events[0]) + } + case <-time.After(5 * time.Second): + t.Fatal("watch timed out") + } +} + +// TestWatchResumeCompacted checks that the watcher gracefully closes in case // that it tries to resume to a 
revision that's been compacted out of the store. // Since the watcher's server restarts with stale data, the watcher will receive // either a compaction error or all keys by staying in sync before the compaction @@ -806,6 +856,20 @@ func TestWatchCancelOnServer(t *testing.T) { client := cluster.RandClient() numWatches := 10 + // grpcproxy starts watches to detect leadership after the proxy server + // returns as started; to avoid racing on the proxy's internal watches, wait + // until require leader watches get create responses to ensure the leadership + // watches have started. + for { + ctx, cancel := context.WithCancel(clientv3.WithRequireLeader(context.TODO())) + ww := client.Watch(ctx, "a", clientv3.WithCreatedNotify()) + wresp := <-ww + cancel() + if wresp.Err() == nil { + break + } + } + cancels := make([]context.CancelFunc, numWatches) for i := 0; i < numWatches; i++ { // use WithTimeout to force separate streams in client @@ -835,7 +899,7 @@ func TestWatchCancelOnServer(t *testing.T) { t.Fatalf("expected n=2 and err=nil, got n=%d and err=%v", n, serr) } - if maxWatchV-minWatchV != numWatches { + if maxWatchV-minWatchV < numWatches { t.Fatalf("expected %d canceled watchers, got %d", numWatches, maxWatchV-minWatchV) } } diff --git a/github.com/coreos/etcd/clientv3/kv.go b/github.com/coreos/etcd/clientv3/kv.go index 6578dbe357..f887e04410 100644 --- a/github.com/coreos/etcd/clientv3/kv.go +++ b/github.com/coreos/etcd/clientv3/kv.go @@ -51,11 +51,6 @@ type KV interface { // Compact compacts etcd KV history before the given rev. Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) - // Do applies a single Op on KV without a transaction. - // Do is useful when declaring operations to be issued at a later time - // whereas Get/Put/Delete are for better suited for when the operation - // should be immediately issued at time of declaration. - // Do applies a single Op on KV without a transaction. // Do is useful when creating arbitrary operations to be issued at a // later time; the user can range over the operations, calling Do to @@ -71,11 +66,13 @@ type OpResponse struct { put *PutResponse get *GetResponse del *DeleteResponse + txn *TxnResponse } func (op OpResponse) Put() *PutResponse { return op.put } func (op OpResponse) Get() *GetResponse { return op.get } func (op OpResponse) Del() *DeleteResponse { return op.del } +func (op OpResponse) Txn() *TxnResponse { return op.txn } type kv struct { remote pb.KVClient @@ -139,7 +136,6 @@ func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) { func (kv *kv) do(ctx context.Context, op Op) (OpResponse, error) { var err error switch op.t { - // TODO: handle other ops case tRange: var resp *pb.RangeResponse resp, err = kv.remote.Range(ctx, op.toRangeRequest(), grpc.FailFast(false)) @@ -160,6 +156,12 @@ func (kv *kv) do(ctx context.Context, op Op) (OpResponse, error) { if err == nil { return OpResponse{del: (*DeleteResponse)(resp)}, nil } + case tTxn: + var resp *pb.TxnResponse + resp, err = kv.remote.Txn(ctx, op.toTxnRequest()) + if err == nil { + return OpResponse{txn: (*TxnResponse)(resp)}, nil + } default: panic("Unknown op") } diff --git a/github.com/coreos/etcd/clientv3/lease.go b/github.com/coreos/etcd/clientv3/lease.go index a6494ceee4..f624793c4b 100644 --- a/github.com/coreos/etcd/clientv3/lease.go +++ b/github.com/coreos/etcd/clientv3/lease.go @@ -69,7 +69,7 @@ const ( // NoLease is a lease ID for the absence of a lease. 
NoLease LeaseID = 0 - // retryConnWait is how long to wait before retrying on a lost leader + // retryConnWait is how long to wait before retrying request due to an error retryConnWait = 500 * time.Millisecond ) @@ -323,7 +323,7 @@ func (l *lessor) closeRequireLeader() { reqIdxs := 0 // find all required leader channels, close, mark as nil for i, ctx := range ka.ctxs { - md, ok := metadata.FromContext(ctx) + md, ok := metadata.FromOutgoingContext(ctx) if !ok { continue } @@ -386,40 +386,51 @@ func (l *lessor) recvKeepAliveLoop() (gerr error) { close(l.donec) l.loopErr = gerr for _, ka := range l.keepAlives { - ka.Close() + ka.close() } l.keepAlives = make(map[LeaseID]*keepAlive) l.mu.Unlock() }() - stream, serr := l.resetRecv() - for serr == nil { - resp, err := stream.Recv() - if err == nil { - l.recvKeepAlive(resp) - continue - } - err = toErr(l.stopCtx, err) - if err == rpctypes.ErrNoLeader { - l.closeRequireLeader() - select { - case <-time.After(retryConnWait): - case <-l.stopCtx.Done(): + for { + stream, err := l.resetRecv() + if err != nil { + if canceledByCaller(l.stopCtx, err) { return err } - } else if isHaltErr(l.stopCtx, err) { - return err + } else { + for { + resp, err := stream.Recv() + + if err != nil { + if canceledByCaller(l.stopCtx, err) { + return err + } + + if toErr(l.stopCtx, err) == rpctypes.ErrNoLeader { + l.closeRequireLeader() + } + break + } + + l.recvKeepAlive(resp) + } + } + + select { + case <-time.After(retryConnWait): + continue + case <-l.stopCtx.Done(): + return l.stopCtx.Err() } - stream, serr = l.resetRecv() } - return serr } // resetRecv opens a new lease stream and starts sending LeaseKeepAliveRequests func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) { sctx, cancel := context.WithCancel(l.stopCtx) stream, err := l.remote.LeaseKeepAlive(sctx, grpc.FailFast(false)) - if err = toErr(sctx, err); err != nil { + if err != nil { cancel() return nil, err } @@ -456,7 +467,7 @@ func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) { if karesp.TTL <= 0 { // lease expired; close all keep alive channels delete(l.keepAlives, karesp.ID) - ka.Close() + ka.close() return } @@ -486,7 +497,7 @@ func (l *lessor) deadlineLoop() { for id, ka := range l.keepAlives { if ka.deadline.Before(now) { // waited too long for response; lease may be expired - ka.Close() + ka.close() delete(l.keepAlives, id) } } @@ -528,7 +539,7 @@ func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) { } } -func (ka *keepAlive) Close() { +func (ka *keepAlive) close() { close(ka.donec) for _, ch := range ka.chs { close(ch) diff --git a/github.com/coreos/etcd/clientv3/main_test.go b/github.com/coreos/etcd/clientv3/main_test.go index eb5a7135b0..6df3f63349 100644 --- a/github.com/coreos/etcd/clientv3/main_test.go +++ b/github.com/coreos/etcd/clientv3/main_test.go @@ -32,15 +32,21 @@ func init() { auth.BcryptCost = bcrypt.MinCost } // TestMain sets up an etcd cluster if running the examples. func TestMain(m *testing.M) { - useCluster := true // default to running all tests + useCluster, hasRunArg := false, false // default to running only Test* for _, arg := range os.Args { if strings.HasPrefix(arg, "-test.run=") { exp := strings.Split(arg, "=")[1] match, err := regexp.MatchString(exp, "Example") useCluster = (err == nil && match) || strings.Contains(exp, "Example") + hasRunArg = true break } } + if !hasRunArg { + // force only running Test* if no args given to avoid leak false + // positives from having a long-running cluster for the examples. 
+ os.Args = append(os.Args, "-test.run=Test") + } v := 0 if useCluster { diff --git a/github.com/coreos/etcd/clientv3/maintenance.go b/github.com/coreos/etcd/clientv3/maintenance.go index 2a75b7e9c5..236ab261dd 100644 --- a/github.com/coreos/etcd/clientv3/maintenance.go +++ b/github.com/coreos/etcd/clientv3/maintenance.go @@ -28,6 +28,7 @@ type ( AlarmResponse pb.AlarmResponse AlarmMember pb.AlarmMember StatusResponse pb.StatusResponse + MoveLeaderResponse pb.MoveLeaderResponse ) type Maintenance interface { @@ -51,6 +52,10 @@ type Maintenance interface { // Snapshot provides a reader for a snapshot of a backend. Snapshot(ctx context.Context) (io.ReadCloser, error) + + // MoveLeader requests current leader to transfer its leadership to the transferee. + // Request must be made to the leader. + MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error) } type maintenance struct { @@ -180,3 +185,8 @@ func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) { }() return pr, nil } + +func (m *maintenance) MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error) { + resp, err := m.remote.MoveLeader(ctx, &pb.MoveLeaderRequest{TargetID: transfereeID}, grpc.FailFast(false)) + return (*MoveLeaderResponse)(resp), toErr(ctx, err) +} diff --git a/github.com/coreos/etcd/clientv3/namespace/kv.go b/github.com/coreos/etcd/clientv3/namespace/kv.go index f3e82d6b89..b5ec5e964e 100644 --- a/github.com/coreos/etcd/clientv3/namespace/kv.go +++ b/github.com/coreos/etcd/clientv3/namespace/kv.go @@ -74,7 +74,7 @@ func (kv *kvPrefix) Delete(ctx context.Context, key string, opts ...clientv3.OpO } func (kv *kvPrefix) Do(ctx context.Context, op clientv3.Op) (clientv3.OpResponse, error) { - if len(op.KeyBytes()) == 0 { + if len(op.KeyBytes()) == 0 && !op.IsTxn() { return clientv3.OpResponse{}, rpctypes.ErrEmptyKey } r, err := kv.KV.Do(ctx, kv.prefixOp(op)) @@ -88,6 +88,8 @@ func (kv *kvPrefix) Do(ctx context.Context, op clientv3.Op) (clientv3.OpResponse kv.unprefixPutResponse(r.Put()) case r.Del() != nil: kv.unprefixDeleteResponse(r.Del()) + case r.Txn() != nil: + kv.unprefixTxnResponse(r.Txn()) } return r, nil } @@ -102,31 +104,17 @@ func (kv *kvPrefix) Txn(ctx context.Context) clientv3.Txn { } func (txn *txnPrefix) If(cs ...clientv3.Cmp) clientv3.Txn { - newCmps := make([]clientv3.Cmp, len(cs)) - for i := range cs { - newCmps[i] = cs[i] - pfxKey, _ := txn.kv.prefixInterval(cs[i].KeyBytes(), nil) - newCmps[i].WithKeyBytes(pfxKey) - } - txn.Txn = txn.Txn.If(newCmps...) + txn.Txn = txn.Txn.If(txn.kv.prefixCmps(cs)...) return txn } func (txn *txnPrefix) Then(ops ...clientv3.Op) clientv3.Txn { - newOps := make([]clientv3.Op, len(ops)) - for i := range ops { - newOps[i] = txn.kv.prefixOp(ops[i]) - } - txn.Txn = txn.Txn.Then(newOps...) + txn.Txn = txn.Txn.Then(txn.kv.prefixOps(ops)...) return txn } func (txn *txnPrefix) Else(ops ...clientv3.Op) clientv3.Txn { - newOps := make([]clientv3.Op, len(ops)) - for i := range ops { - newOps[i] = txn.kv.prefixOp(ops[i]) - } - txn.Txn = txn.Txn.Else(newOps...) + txn.Txn = txn.Txn.Else(txn.kv.prefixOps(ops)...) 
return txn } @@ -140,10 +128,14 @@ func (txn *txnPrefix) Commit() (*clientv3.TxnResponse, error) { } func (kv *kvPrefix) prefixOp(op clientv3.Op) clientv3.Op { - begin, end := kv.prefixInterval(op.KeyBytes(), op.RangeBytes()) - op.WithKeyBytes(begin) - op.WithRangeBytes(end) - return op + if !op.IsTxn() { + begin, end := kv.prefixInterval(op.KeyBytes(), op.RangeBytes()) + op.WithKeyBytes(begin) + op.WithRangeBytes(end) + return op + } + cmps, thenOps, elseOps := op.Txn() + return clientv3.OpTxn(kv.prefixCmps(cmps), kv.prefixOps(thenOps), kv.prefixOps(elseOps)) } func (kv *kvPrefix) unprefixGetResponse(resp *clientv3.GetResponse) { @@ -179,6 +171,10 @@ func (kv *kvPrefix) unprefixTxnResponse(resp *clientv3.TxnResponse) { if tv.ResponseDeleteRange != nil { kv.unprefixDeleteResponse((*clientv3.DeleteResponse)(tv.ResponseDeleteRange)) } + case *pb.ResponseOp_ResponseTxn: + if tv.ResponseTxn != nil { + kv.unprefixTxnResponse((*clientv3.TxnResponse)(tv.ResponseTxn)) + } default: } } @@ -187,3 +183,24 @@ func (kv *kvPrefix) unprefixTxnResponse(resp *clientv3.TxnResponse) { func (p *kvPrefix) prefixInterval(key, end []byte) (pfxKey []byte, pfxEnd []byte) { return prefixInterval(p.pfx, key, end) } + +func (kv *kvPrefix) prefixCmps(cs []clientv3.Cmp) []clientv3.Cmp { + newCmps := make([]clientv3.Cmp, len(cs)) + for i := range cs { + newCmps[i] = cs[i] + pfxKey, endKey := kv.prefixInterval(cs[i].KeyBytes(), cs[i].RangeEnd) + newCmps[i].WithKeyBytes(pfxKey) + if len(cs[i].RangeEnd) != 0 { + newCmps[i].RangeEnd = endKey + } + } + return newCmps +} + +func (kv *kvPrefix) prefixOps(ops []clientv3.Op) []clientv3.Op { + newOps := make([]clientv3.Op, len(ops)) + for i := range ops { + newOps[i] = kv.prefixOp(ops[i]) + } + return newOps +} diff --git a/github.com/coreos/etcd/clientv3/naming/doc.go b/github.com/coreos/etcd/clientv3/naming/doc.go new file mode 100644 index 0000000000..71608cc738 --- /dev/null +++ b/github.com/coreos/etcd/clientv3/naming/doc.go @@ -0,0 +1,56 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package naming provides an etcd-backed gRPC resolver for discovering gRPC services. 
+//
+// To use, first import the packages:
+//
+//	import (
+//		"github.com/coreos/etcd/clientv3"
+//		etcdnaming "github.com/coreos/etcd/clientv3/naming"
+//
+//		"google.golang.org/grpc"
+//		"google.golang.org/grpc/naming"
+//	)
+//
+// First, register new endpoint addresses for a service:
+//
+//	func etcdAdd(c *clientv3.Client, service, addr string) error {
+//		r := &etcdnaming.GRPCResolver{Client: c}
+//		return r.Update(c.Ctx(), service, naming.Update{Op: naming.Add, Addr: addr})
+//	}
+//
+// Dial an RPC service using the etcd gRPC resolver and a gRPC Balancer:
+//
+//	func etcdDial(c *clientv3.Client, service string) (*grpc.ClientConn, error) {
+//		r := &etcdnaming.GRPCResolver{Client: c}
+//		b := grpc.RoundRobin(r)
+//		return grpc.Dial(service, grpc.WithBalancer(b))
+//	}
+//
+// Optionally, force delete an endpoint:
+//
+//	func etcdDelete(c *clientv3.Client, service, addr string) error {
+//		r := &etcdnaming.GRPCResolver{Client: c}
+//		return r.Update(c.Ctx(), service, naming.Update{Op: naming.Delete, Addr: addr})
+//	}
+//
+// Or register an expiring endpoint with a lease:
+//
+//	func etcdLeaseAdd(c *clientv3.Client, lid clientv3.LeaseID, service, addr string) error {
+//		r := &etcdnaming.GRPCResolver{Client: c}
+//		return r.Update(c.Ctx(), service, naming.Update{Op: naming.Add, Addr: addr}, clientv3.WithLease(lid))
+//	}
+//
+package naming
diff --git a/github.com/coreos/etcd/clientv3/op.go b/github.com/coreos/etcd/clientv3/op.go
index e8218924ba..ef6f04368a 100644
--- a/github.com/coreos/etcd/clientv3/op.go
+++ b/github.com/coreos/etcd/clientv3/op.go
@@ -23,6 +23,7 @@ const (
 	tRange opType = iota + 1
 	tPut
 	tDeleteRange
+	tTxn
 )
 
 var (
@@ -67,10 +68,18 @@ type Op struct {
 	// for put
 	val     []byte
 	leaseID LeaseID
+
+	// txn
+	cmps    []Cmp
+	thenOps []Op
+	elseOps []Op
 }
 
 // accessors / mutators
 
+func (op Op) IsTxn() bool              { return op.t == tTxn }
+func (op Op) Txn() ([]Cmp, []Op, []Op) { return op.cmps, op.thenOps, op.elseOps }
+
 // KeyBytes returns the byte slice holding the Op's key.
func (op Op) KeyBytes() []byte { return op.key } @@ -113,6 +122,22 @@ func (op Op) toRangeRequest() *pb.RangeRequest { return r } +func (op Op) toTxnRequest() *pb.TxnRequest { + thenOps := make([]*pb.RequestOp, len(op.thenOps)) + for i, tOp := range op.thenOps { + thenOps[i] = tOp.toRequestOp() + } + elseOps := make([]*pb.RequestOp, len(op.elseOps)) + for i, eOp := range op.elseOps { + elseOps[i] = eOp.toRequestOp() + } + cmps := make([]*pb.Compare, len(op.cmps)) + for i := range op.cmps { + cmps[i] = (*pb.Compare)(&op.cmps[i]) + } + return &pb.TxnRequest{Compare: cmps, Success: thenOps, Failure: elseOps} +} + func (op Op) toRequestOp() *pb.RequestOp { switch op.t { case tRange: @@ -123,12 +148,27 @@ func (op Op) toRequestOp() *pb.RequestOp { case tDeleteRange: r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV} return &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{RequestDeleteRange: r}} + case tTxn: + return &pb.RequestOp{Request: &pb.RequestOp_RequestTxn{RequestTxn: op.toTxnRequest()}} default: panic("Unknown Op") } } func (op Op) isWrite() bool { + if op.t == tTxn { + for _, tOp := range op.thenOps { + if tOp.isWrite() { + return true + } + } + for _, tOp := range op.elseOps { + if tOp.isWrite() { + return true + } + } + return false + } return op.t != tRange } @@ -194,6 +234,10 @@ func OpPut(key, val string, opts ...OpOption) Op { return ret } +func OpTxn(cmps []Cmp, thenOps []Op, elseOps []Op) Op { + return Op{t: tTxn, cmps: cmps, thenOps: thenOps, elseOps: elseOps} +} + func opWatch(key string, opts ...OpOption) Op { ret := Op{t: tRange, key: []byte(key)} ret.applyOpts(opts) @@ -282,6 +326,10 @@ func getPrefix(key []byte) []byte { // can return 'foo1', 'foo2', and so on. func WithPrefix() OpOption { return func(op *Op) { + if len(op.key) == 0 { + op.key, op.end = []byte{0}, []byte{0} + return + } op.end = getPrefix(op.key) } } diff --git a/github.com/coreos/etcd/clientv3/watch.go b/github.com/coreos/etcd/clientv3/watch.go index 7847b03b32..ee43b2afeb 100644 --- a/github.com/coreos/etcd/clientv3/watch.go +++ b/github.com/coreos/etcd/clientv3/watch.go @@ -24,6 +24,7 @@ import ( mvccpb "github.com/coreos/etcd/mvcc/mvccpb" "golang.org/x/net/context" "google.golang.org/grpc" + "google.golang.org/grpc/codes" ) const ( @@ -39,10 +40,9 @@ type WatchChan <-chan WatchResponse type Watcher interface { // Watch watches on a key or prefix. The watched events will be returned - // through the returned channel. - // If the watch is slow or the required rev is compacted, the watch request - // might be canceled from the server-side and the chan will be closed. - // 'opts' can be: 'WithRev' and/or 'WithPrefix'. + // through the returned channel. If revisions waiting to be sent over the + // watch are compacted, then the watch will be canceled by the server, the + // client will post a compacted error watch response, and the channel will close. Watch(ctx context.Context, key string, opts ...OpOption) WatchChan // Close closes the watcher and cancels all watch requests. @@ -65,6 +65,9 @@ type WatchResponse struct { Created bool closeErr error + + // cancelReason is a reason of canceling watch + cancelReason string } // IsCreate returns true if the event tells that the key is newly created. 
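// A minimal consumer-side sketch of the cancellation semantics described above,
// using only the exported WatchResponse fields; restarting from CompactRevision
// is one possible recovery policy and is an assumption of this example.
func watchFrom(ctx context.Context, cli *clientv3.Client, key string, rev int64) {
	for ctx.Err() == nil {
		wch := cli.Watch(ctx, key, clientv3.WithRev(rev))
		for wr := range wch {
			if wr.CompactRevision != 0 {
				// the requested revision was compacted away; the channel will be
				// closed, so resume from the oldest revision still available
				rev = wr.CompactRevision
				break
			}
			for _, ev := range wr.Events {
				rev = ev.Kv.ModRevision + 1
			}
		}
	}
}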
@@ -85,6 +88,9 @@ func (wr *WatchResponse) Err() error { case wr.CompactRevision != 0: return v3rpc.ErrCompacted case wr.Canceled: + if len(wr.cancelReason) != 0 { + return v3rpc.Error(grpc.Errorf(codes.FailedPrecondition, "%s", wr.cancelReason)) + } return v3rpc.ErrFutureRev } return nil @@ -310,14 +316,14 @@ func (w *watcher) Close() (err error) { w.streams = nil w.mu.Unlock() for _, wgs := range streams { - if werr := wgs.Close(); werr != nil { + if werr := wgs.close(); werr != nil { err = werr } } return err } -func (w *watchGrpcStream) Close() (err error) { +func (w *watchGrpcStream) close() (err error) { w.cancel() <-w.donec select { @@ -520,10 +526,6 @@ func (w *watchGrpcStream) nextResume() *watcherStream { // dispatchEvent sends a WatchResponse to the appropriate watcher stream func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool { - ws, ok := w.substreams[pbresp.WatchId] - if !ok { - return false - } events := make([]*Event, len(pbresp.Events)) for i, ev := range pbresp.Events { events[i] = (*Event)(ev) @@ -534,6 +536,11 @@ func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool { CompactRevision: pbresp.CompactRevision, Created: pbresp.Created, Canceled: pbresp.Canceled, + cancelReason: pbresp.CancelReason, + } + ws, ok := w.substreams[pbresp.WatchId] + if !ok { + return false } select { case ws.recvc <- wr: @@ -616,10 +623,24 @@ func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{ if ws.initReq.createdNotify { ws.outc <- *wr } + // once the watch channel is returned, a current revision + // watch must resume at the store revision. This is necessary + // for the following case to work as expected: + // wch := m1.Watch("a") + // m2.Put("a", "b") + // <-wch + // If the revision is only bound on the first observed event, + // if wch is disconnected before the Put is issued, then reconnects + // after it is committed, it'll miss the Put. + if ws.initReq.rev == 0 { + nextRev = wr.Header.Revision + } } + } else { + // current progress of watch; <= store revision + nextRev = wr.Header.Revision } - nextRev = wr.Header.Revision if len(wr.Events) > 0 { nextRev = wr.Events[len(wr.Events)-1].Kv.ModRevision + 1 } diff --git a/github.com/coreos/etcd/clientv3/watch_test.go b/github.com/coreos/etcd/clientv3/watch_test.go index 448bfd814e..8d3380522f 100644 --- a/github.com/coreos/etcd/clientv3/watch_test.go +++ b/github.com/coreos/etcd/clientv3/watch_test.go @@ -42,7 +42,7 @@ func TestEvent(t *testing.T) { ModRevision: 4, }, }, - isModify: false, + isModify: true, }} for i, tt := range tests { if tt.isCreate && !tt.ev.IsCreate() { diff --git a/github.com/coreos/etcd/clientv3/yaml/config.go b/github.com/coreos/etcd/clientv3/yaml/config.go index 21aa6375ad..aa6df1c183 100644 --- a/github.com/coreos/etcd/clientv3/yaml/config.go +++ b/github.com/coreos/etcd/clientv3/yaml/config.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +// Package yaml handles yaml-formatted clientv3 configuration data. 
package yaml import ( diff --git a/github.com/coreos/etcd/compactor/compactor.go b/github.com/coreos/etcd/compactor/compactor.go index 322a098701..e25fd7e417 100644 --- a/github.com/coreos/etcd/compactor/compactor.go +++ b/github.com/coreos/etcd/compactor/compactor.go @@ -15,14 +15,13 @@ package compactor import ( - "sync" + "fmt" "time" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/mvcc" "github.com/coreos/pkg/capnslog" - "github.com/jonboulle/clockwork" "golang.org/x/net/context" + + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" ) var ( @@ -30,9 +29,26 @@ var ( ) const ( - checkCompactionInterval = 5 * time.Minute + checkCompactionInterval = 5 * time.Minute + executeCompactionInterval = time.Hour + + ModePeriodic = "periodic" + ModeRevision = "revision" ) +// Compactor purges old log from the storage periodically. +type Compactor interface { + // Run starts the main loop of the compactor in background. + // Use Stop() to halt the loop and release the resource. + Run() + // Stop halts the main loop of the compactor. + Stop() + // Pause temporally suspend the compactor not to run compaction. Resume() to unpose. + Pause() + // Resume restarts the compactor suspended by Pause(). + Resume() +} + type Compactable interface { Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) } @@ -41,93 +57,13 @@ type RevGetter interface { Rev() int64 } -type Periodic struct { - clock clockwork.Clock - periodInHour int - - rg RevGetter - c Compactable - - revs []int64 - ctx context.Context - cancel context.CancelFunc - - mu sync.Mutex - paused bool -} - -func NewPeriodic(h int, rg RevGetter, c Compactable) *Periodic { - return &Periodic{ - clock: clockwork.NewRealClock(), - periodInHour: h, - rg: rg, - c: c, - } -} - -func (t *Periodic) Run() { - t.ctx, t.cancel = context.WithCancel(context.Background()) - t.revs = make([]int64, 0) - clock := t.clock - - go func() { - last := clock.Now() - for { - t.revs = append(t.revs, t.rg.Rev()) - select { - case <-t.ctx.Done(): - return - case <-clock.After(checkCompactionInterval): - t.mu.Lock() - p := t.paused - t.mu.Unlock() - if p { - continue - } - } - if clock.Now().Sub(last) < time.Duration(t.periodInHour)*time.Hour { - continue - } - - rev := t.getRev(t.periodInHour) - if rev < 0 { - continue - } - - plog.Noticef("Starting auto-compaction at revision %d", rev) - _, err := t.c.Compact(t.ctx, &pb.CompactionRequest{Revision: rev}) - if err == nil || err == mvcc.ErrCompacted { - t.revs = make([]int64, 0) - last = clock.Now() - plog.Noticef("Finished auto-compaction at revision %d", rev) - } else { - plog.Noticef("Failed auto-compaction at revision %d (%v)", err, rev) - plog.Noticef("Retry after %v", checkCompactionInterval) - } - } - }() -} - -func (t *Periodic) Stop() { - t.cancel() -} - -func (t *Periodic) Pause() { - t.mu.Lock() - defer t.mu.Unlock() - t.paused = true -} - -func (t *Periodic) Resume() { - t.mu.Lock() - defer t.mu.Unlock() - t.paused = false -} - -func (t *Periodic) getRev(h int) int64 { - i := len(t.revs) - int(time.Duration(h)*time.Hour/checkCompactionInterval) - if i < 0 { - return -1 +func New(mode string, retention int, rg RevGetter, c Compactable) (Compactor, error) { + switch mode { + case ModePeriodic: + return NewPeriodic(retention, rg, c), nil + case ModeRevision: + return NewRevision(int64(retention), rg, c), nil + default: + return nil, fmt.Errorf("unsupported compaction mode %s", mode) } - return t.revs[i] } diff --git 
a/github.com/coreos/etcd/compactor/compactor_test.go b/github.com/coreos/etcd/compactor/compactor_test.go index dcf9aabf49..c38ef4b2e5 100644 --- a/github.com/coreos/etcd/compactor/compactor_test.go +++ b/github.com/coreos/etcd/compactor/compactor_test.go @@ -15,110 +15,13 @@ package compactor import ( - "reflect" - "testing" - "time" + "sync/atomic" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" "github.com/coreos/etcd/pkg/testutil" - "github.com/jonboulle/clockwork" "golang.org/x/net/context" ) -func TestPeriodic(t *testing.T) { - fc := clockwork.NewFakeClock() - rg := &fakeRevGetter{testutil.NewRecorderStream(), 0} - compactable := &fakeCompactable{testutil.NewRecorderStream()} - tb := &Periodic{ - clock: fc, - periodInHour: 1, - rg: rg, - c: compactable, - } - - tb.Run() - defer tb.Stop() - - n := int(time.Hour / checkCompactionInterval) - // collect 3 hours of revisions - for i := 0; i < 3; i++ { - // advance one (hour - checkCompactionInterval), one revision for each interval - for j := 0; j < n-1; j++ { - _, err := rg.Wait(1) - if err != nil { - t.Fatal(err) - } - fc.Advance(checkCompactionInterval) - } - _, err := rg.Wait(1) - if err != nil { - t.Fatal(err) - } - // ready to acknowledge hour "i" - // block until compactor calls clock.After() - fc.BlockUntil(1) - // unblock the After() - fc.Advance(checkCompactionInterval) - a, err := compactable.Wait(1) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(a[0].Params[0], &pb.CompactionRequest{Revision: int64(i*n) + 1}) { - t.Errorf("compact request = %v, want %v", a[0].Params[0], &pb.CompactionRequest{Revision: int64(i*n) + 1}) - } - } - - // unblock the rev getter, so we can stop the compactor routine. - _, err := rg.Wait(1) - if err != nil { - t.Fatal(err) - } -} - -func TestPeriodicPause(t *testing.T) { - fc := clockwork.NewFakeClock() - compactable := &fakeCompactable{testutil.NewRecorderStream()} - rg := &fakeRevGetter{testutil.NewRecorderStream(), 0} - tb := &Periodic{ - clock: fc, - periodInHour: 1, - rg: rg, - c: compactable, - } - - tb.Run() - tb.Pause() - - // tb will collect 3 hours of revisions but not compact since paused - n := int(time.Hour / checkCompactionInterval) - for i := 0; i < 3*n; i++ { - fc.Advance(checkCompactionInterval) - rg.Wait(1) - } - // tb ends up waiting for the clock - - select { - case a := <-compactable.Chan(): - t.Fatalf("unexpected action %v", a) - case <-time.After(10 * time.Millisecond): - } - - // tb resumes to being blocked on the clock - tb.Resume() - - // unblock clock, will kick off a compaction at hour 3 - fc.Advance(checkCompactionInterval) - a, err := compactable.Wait(1) - if err != nil { - t.Fatal(err) - } - // compact the revision from hour 2 - wreq := &pb.CompactionRequest{Revision: int64(2*n + 1)} - if !reflect.DeepEqual(a[0].Params[0], wreq) { - t.Errorf("compact request = %v, want %v", a[0].Params[0], wreq.Revision) - } -} - type fakeCompactable struct { testutil.Recorder } @@ -135,6 +38,10 @@ type fakeRevGetter struct { func (fr *fakeRevGetter) Rev() int64 { fr.Record(testutil.Action{Name: "g"}) - fr.rev++ - return fr.rev + rev := atomic.AddInt64(&fr.rev, 1) + return rev +} + +func (fr *fakeRevGetter) SetRev(rev int64) { + atomic.StoreInt64(&fr.rev, rev) } diff --git a/github.com/coreos/etcd/compactor/periodic.go b/github.com/coreos/etcd/compactor/periodic.go new file mode 100644 index 0000000000..7eb7cf7a8c --- /dev/null +++ b/github.com/coreos/etcd/compactor/periodic.go @@ -0,0 +1,122 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache 
License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compactor + +import ( + "sync" + "time" + + "github.com/jonboulle/clockwork" + "golang.org/x/net/context" + + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "github.com/coreos/etcd/mvcc" +) + +// Periodic compacts the log by purging revisions older than +// the configured retention time. Compaction happens hourly. +type Periodic struct { + clock clockwork.Clock + periodInHour int + + rg RevGetter + c Compactable + + revs []int64 + ctx context.Context + cancel context.CancelFunc + + mu sync.Mutex + paused bool +} + +// NewPeriodic creates a new instance of Periodic compactor that purges +// the log older than h hours. +func NewPeriodic(h int, rg RevGetter, c Compactable) *Periodic { + return &Periodic{ + clock: clockwork.NewRealClock(), + periodInHour: h, + rg: rg, + c: c, + } +} + +func (t *Periodic) Run() { + t.ctx, t.cancel = context.WithCancel(context.Background()) + t.revs = make([]int64, 0) + clock := t.clock + + go func() { + last := clock.Now() + for { + t.revs = append(t.revs, t.rg.Rev()) + select { + case <-t.ctx.Done(): + return + case <-clock.After(checkCompactionInterval): + t.mu.Lock() + p := t.paused + t.mu.Unlock() + if p { + continue + } + } + + if clock.Now().Sub(last) < executeCompactionInterval { + continue + } + + rev, remaining := t.getRev(t.periodInHour) + if rev < 0 { + continue + } + + plog.Noticef("Starting auto-compaction at revision %d (retention: %d hours)", rev, t.periodInHour) + _, err := t.c.Compact(t.ctx, &pb.CompactionRequest{Revision: rev}) + if err == nil || err == mvcc.ErrCompacted { + t.revs = remaining + last = clock.Now() + plog.Noticef("Finished auto-compaction at revision %d", rev) + } else { + plog.Noticef("Failed auto-compaction at revision %d (%v)", err, rev) + plog.Noticef("Retry after %v", checkCompactionInterval) + } + } + }() +} + +func (t *Periodic) Stop() { + t.cancel() +} + +func (t *Periodic) Pause() { + t.mu.Lock() + defer t.mu.Unlock() + t.paused = true +} + +func (t *Periodic) Resume() { + t.mu.Lock() + defer t.mu.Unlock() + t.paused = false +} + +func (t *Periodic) getRev(h int) (int64, []int64) { + i := len(t.revs) - int(time.Duration(h)*time.Hour/checkCompactionInterval) + if i < 0 { + return -1, t.revs + } + return t.revs[i], t.revs[i+1:] +} diff --git a/github.com/coreos/etcd/compactor/periodic_test.go b/github.com/coreos/etcd/compactor/periodic_test.go new file mode 100644 index 0000000000..d0bb7f6eef --- /dev/null +++ b/github.com/coreos/etcd/compactor/periodic_test.go @@ -0,0 +1,117 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package compactor + +import ( + "reflect" + "testing" + "time" + + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "github.com/coreos/etcd/pkg/testutil" + "github.com/jonboulle/clockwork" +) + +func TestPeriodic(t *testing.T) { + retentionHours := 2 + + fc := clockwork.NewFakeClock() + rg := &fakeRevGetter{testutil.NewRecorderStream(), 0} + compactable := &fakeCompactable{testutil.NewRecorderStream()} + tb := &Periodic{ + clock: fc, + periodInHour: retentionHours, + rg: rg, + c: compactable, + } + + tb.Run() + defer tb.Stop() + + n := int(time.Hour / checkCompactionInterval) + // collect 5 hours of revisions + for i := 0; i < 5; i++ { + // advance one hour, one revision for each interval + for j := 0; j < n; j++ { + rg.Wait(1) + fc.Advance(checkCompactionInterval) + } + + // compaction doesn't happen til 2 hours elapses + if i+1 < retentionHours { + continue + } + + a, err := compactable.Wait(1) + if err != nil { + t.Fatal(err) + } + expectedRevision := int64(1 + (i+1)*n - retentionHours*n) + if !reflect.DeepEqual(a[0].Params[0], &pb.CompactionRequest{Revision: expectedRevision}) { + t.Errorf("compact request = %v, want %v", a[0].Params[0], &pb.CompactionRequest{Revision: expectedRevision}) + } + } + + // unblock the rev getter, so we can stop the compactor routine. + _, err := rg.Wait(1) + if err != nil { + t.Fatal(err) + } +} + +func TestPeriodicPause(t *testing.T) { + fc := clockwork.NewFakeClock() + compactable := &fakeCompactable{testutil.NewRecorderStream()} + rg := &fakeRevGetter{testutil.NewRecorderStream(), 0} + tb := &Periodic{ + clock: fc, + periodInHour: 1, + rg: rg, + c: compactable, + } + + tb.Run() + tb.Pause() + + // tb will collect 3 hours of revisions but not compact since paused + n := int(time.Hour / checkCompactionInterval) + for i := 0; i < 3*n; i++ { + rg.Wait(1) + fc.Advance(checkCompactionInterval) + } + // tb ends up waiting for the clock + + select { + case a := <-compactable.Chan(): + t.Fatalf("unexpected action %v", a) + case <-time.After(10 * time.Millisecond): + } + + // tb resumes to being blocked on the clock + tb.Resume() + + // unblock clock, will kick off a compaction at hour 3:05 + rg.Wait(1) + fc.Advance(checkCompactionInterval) + a, err := compactable.Wait(1) + if err != nil { + t.Fatal(err) + } + // compact the revision from hour 2:05 + wreq := &pb.CompactionRequest{Revision: int64(1 + 2*n + 1)} + if !reflect.DeepEqual(a[0].Params[0], wreq) { + t.Errorf("compact request = %v, want %v", a[0].Params[0], wreq.Revision) + } +} diff --git a/github.com/coreos/etcd/compactor/revision.go b/github.com/coreos/etcd/compactor/revision.go new file mode 100644 index 0000000000..fd80c278dd --- /dev/null +++ b/github.com/coreos/etcd/compactor/revision.go @@ -0,0 +1,106 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
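// A minimal wiring sketch for the constructor introduced in this package,
// assuming a server-provided RevGetter and Compactable; the one-hour retention
// value is only an example.
func startPeriodicCompaction(rg RevGetter, c Compactable) (Compactor, error) {
	cp, err := New(ModePeriodic, 1, rg, c) // retain roughly one hour of history
	if err != nil {
		return nil, err
	}
	// Run returns immediately; revisions are sampled every 5 minutes and
	// revisions older than the retention window are compacted hourly.
	cp.Run()
	return cp, nil
}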
+
+package compactor
+
+import (
+	"sync"
+
+	"github.com/jonboulle/clockwork"
+	"golang.org/x/net/context"
+
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+	"github.com/coreos/etcd/mvcc"
+)
+
+// Revision compacts the log by purging revisions older than the current
+// revision minus the configured retention. Compaction happens every 5 minutes.
+type Revision struct {
+	clock     clockwork.Clock
+	retention int64
+
+	rg RevGetter
+	c  Compactable
+
+	ctx    context.Context
+	cancel context.CancelFunc
+
+	mu     sync.Mutex
+	paused bool
+}
+
+// NewRevision creates a new instance of the Revision compactor that purges
+// revisions more than retention revisions behind the current revision.
+func NewRevision(retention int64, rg RevGetter, c Compactable) *Revision {
+	return &Revision{
+		clock:     clockwork.NewRealClock(),
+		retention: retention,
+		rg:        rg,
+		c:         c,
+	}
+}
+
+func (t *Revision) Run() {
+	t.ctx, t.cancel = context.WithCancel(context.Background())
+	clock := t.clock
+	previous := int64(0)
+
+	go func() {
+		for {
+			select {
+			case <-t.ctx.Done():
+				return
+			case <-clock.After(checkCompactionInterval):
+				t.mu.Lock()
+				p := t.paused
+				t.mu.Unlock()
+				if p {
+					continue
+				}
+			}
+
+			rev := t.rg.Rev() - t.retention
+
+			if rev <= 0 || rev == previous {
+				continue
+			}
+
+			plog.Noticef("Starting auto-compaction at revision %d (retention: %d revisions)", rev, t.retention)
+			_, err := t.c.Compact(t.ctx, &pb.CompactionRequest{Revision: rev})
+			if err == nil || err == mvcc.ErrCompacted {
+				previous = rev
+				plog.Noticef("Finished auto-compaction at revision %d", rev)
+			} else {
+				plog.Noticef("Failed auto-compaction at revision %d (%v)", rev, err)
+				plog.Noticef("Retry after %v", checkCompactionInterval)
+			}
+		}
+	}()
+}
+
+func (t *Revision) Stop() {
+	t.cancel()
+}
+
+func (t *Revision) Pause() {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	t.paused = true
+}
+
+func (t *Revision) Resume() {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	t.paused = false
+}
diff --git a/github.com/coreos/etcd/compactor/revision_test.go b/github.com/coreos/etcd/compactor/revision_test.go
new file mode 100644
index 0000000000..766afaee0b
--- /dev/null
+++ b/github.com/coreos/etcd/compactor/revision_test.go
@@ -0,0 +1,117 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
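// A minimal sketch of the Pause/Resume contract shared by both compactor modes:
// pausing only skips scheduled compactions, while the background loop keeps
// ticking and picks up again after Resume. The callback is a placeholder for
// work during which automatic compaction is undesirable.
func withCompactionPaused(c Compactor, fn func()) {
	c.Pause()
	defer c.Resume()
	fn()
}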
+ +package compactor + +import ( + "reflect" + "testing" + "time" + + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "github.com/coreos/etcd/pkg/testutil" + "github.com/jonboulle/clockwork" +) + +func TestRevision(t *testing.T) { + fc := clockwork.NewFakeClock() + rg := &fakeRevGetter{testutil.NewRecorderStream(), 0} + compactable := &fakeCompactable{testutil.NewRecorderStream()} + tb := &Revision{ + clock: fc, + retention: 10, + rg: rg, + c: compactable, + } + + tb.Run() + defer tb.Stop() + + fc.Advance(checkCompactionInterval) + rg.Wait(1) + // nothing happens + + rg.SetRev(99) // will be 100 + expectedRevision := int64(90) + fc.Advance(checkCompactionInterval) + rg.Wait(1) + a, err := compactable.Wait(1) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(a[0].Params[0], &pb.CompactionRequest{Revision: expectedRevision}) { + t.Errorf("compact request = %v, want %v", a[0].Params[0], &pb.CompactionRequest{Revision: expectedRevision}) + } + + // skip the same revision + rg.SetRev(99) // will be 100 + expectedRevision = int64(90) + rg.Wait(1) + // nothing happens + + rg.SetRev(199) // will be 200 + expectedRevision = int64(190) + fc.Advance(checkCompactionInterval) + rg.Wait(1) + a, err = compactable.Wait(1) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(a[0].Params[0], &pb.CompactionRequest{Revision: expectedRevision}) { + t.Errorf("compact request = %v, want %v", a[0].Params[0], &pb.CompactionRequest{Revision: expectedRevision}) + } +} + +func TestRevisionPause(t *testing.T) { + fc := clockwork.NewFakeClock() + compactable := &fakeCompactable{testutil.NewRecorderStream()} + rg := &fakeRevGetter{testutil.NewRecorderStream(), 99} // will be 100 + tb := &Revision{ + clock: fc, + retention: 10, + rg: rg, + c: compactable, + } + + tb.Run() + tb.Pause() + + // tb will collect 3 hours of revisions but not compact since paused + n := int(time.Hour / checkCompactionInterval) + for i := 0; i < 3*n; i++ { + fc.Advance(checkCompactionInterval) + } + // tb ends up waiting for the clock + + select { + case a := <-compactable.Chan(): + t.Fatalf("unexpected action %v", a) + case <-time.After(10 * time.Millisecond): + } + + // tb resumes to being blocked on the clock + tb.Resume() + + // unblock clock, will kick off a compaction at hour 3:05 + fc.Advance(checkCompactionInterval) + rg.Wait(1) + a, err := compactable.Wait(1) + if err != nil { + t.Fatal(err) + } + wreq := &pb.CompactionRequest{Revision: int64(90)} + if !reflect.DeepEqual(a[0].Params[0], wreq) { + t.Errorf("compact request = %v, want %v", a[0].Params[0], wreq.Revision) + } +} diff --git a/github.com/coreos/etcd/contrib/raftexample/kvstore.go b/github.com/coreos/etcd/contrib/raftexample/kvstore.go index b378ec3364..d877bc7ad5 100644 --- a/github.com/coreos/etcd/contrib/raftexample/kvstore.go +++ b/github.com/coreos/etcd/contrib/raftexample/kvstore.go @@ -58,7 +58,7 @@ func (s *kvstore) Propose(k string, v string) { if err := gob.NewEncoder(&buf).Encode(kv{k, v}); err != nil { log.Fatal(err) } - s.proposeC <- string(buf.Bytes()) + s.proposeC <- buf.String() } func (s *kvstore) readCommits(commitC <-chan *string, errorC <-chan error) { diff --git a/github.com/coreos/etcd/contrib/raftexample/raft.go b/github.com/coreos/etcd/contrib/raftexample/raft.go index 3d6878eb5c..b212dcb7c0 100644 --- a/github.com/coreos/etcd/contrib/raftexample/raft.go +++ b/github.com/coreos/etcd/contrib/raftexample/raft.go @@ -107,9 +107,9 @@ func newRaftNode(id int, peers []string, join bool, getSnapshot func() ([]byte, } func (rc 
*raftNode) saveSnap(snap raftpb.Snapshot) error { - if err := rc.snapshotter.SaveSnap(snap); err != nil { - return err - } + // must save the snapshot index to the WAL before saving the + // snapshot to maintain the invariant that we only Open the + // wal at previously-saved snapshot indexes. walSnap := walpb.Snapshot{ Index: snap.Metadata.Index, Term: snap.Metadata.Term, @@ -117,6 +117,9 @@ func (rc *raftNode) saveSnap(snap raftpb.Snapshot) error { if err := rc.wal.SaveSnapshot(walSnap); err != nil { return err } + if err := rc.snapshotter.SaveSnap(snap); err != nil { + return err + } return rc.wal.ReleaseLockTo(snap.Metadata.Index) } @@ -288,14 +291,11 @@ func (rc *raftNode) startRaft() { rc.node = raft.StartNode(c, startPeers) } - ss := &stats.ServerStats{} - ss.Initialize() - rc.transport = &rafthttp.Transport{ ID: types.ID(rc.id), ClusterID: 0x1000, Raft: rc, - ServerStats: ss, + ServerStats: stats.NewServerStats("", ""), LeaderStats: stats.NewLeaderStats(strconv.Itoa(rc.id)), ErrorC: make(chan error), } diff --git a/github.com/coreos/etcd/contrib/recipes/doc.go b/github.com/coreos/etcd/contrib/recipes/doc.go new file mode 100644 index 0000000000..386be975d1 --- /dev/null +++ b/github.com/coreos/etcd/contrib/recipes/doc.go @@ -0,0 +1,17 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package recipe contains experimental client-side distributed +// synchronization primitives. +package recipe diff --git a/github.com/coreos/etcd/discovery/srv.go b/github.com/coreos/etcd/discovery/srv.go deleted file mode 100644 index 782b6888f5..0000000000 --- a/github.com/coreos/etcd/discovery/srv.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package discovery - -import ( - "fmt" - "net" - "net/url" - "strings" - - "github.com/coreos/etcd/pkg/types" -) - -var ( - // indirection for testing - lookupSRV = net.LookupSRV - resolveTCPAddr = net.ResolveTCPAddr -) - -// SRVGetCluster gets the cluster information via DNS discovery. -// TODO(barakmich): Currently ignores priority and weight (as they don't make as much sense for a bootstrap) -// Also sees each entry as a separate instance. 
-func SRVGetCluster(name, dns string, apurls types.URLs) (string, error) { - tempName := int(0) - tcp2ap := make(map[string]url.URL) - - // First, resolve the apurls - for _, url := range apurls { - tcpAddr, err := resolveTCPAddr("tcp", url.Host) - if err != nil { - plog.Errorf("couldn't resolve host %s during SRV discovery", url.Host) - return "", err - } - tcp2ap[tcpAddr.String()] = url - } - - stringParts := []string{} - updateNodeMap := func(service, scheme string) error { - _, addrs, err := lookupSRV(service, "tcp", dns) - if err != nil { - return err - } - for _, srv := range addrs { - port := fmt.Sprintf("%d", srv.Port) - host := net.JoinHostPort(srv.Target, port) - tcpAddr, err := resolveTCPAddr("tcp", host) - if err != nil { - plog.Warningf("couldn't resolve host %s during SRV discovery", host) - continue - } - n := "" - url, ok := tcp2ap[tcpAddr.String()] - if ok { - n = name - } - if n == "" { - n = fmt.Sprintf("%d", tempName) - tempName++ - } - // SRV records have a trailing dot but URL shouldn't. - shortHost := strings.TrimSuffix(srv.Target, ".") - urlHost := net.JoinHostPort(shortHost, port) - stringParts = append(stringParts, fmt.Sprintf("%s=%s://%s", n, scheme, urlHost)) - plog.Noticef("got bootstrap from DNS for %s at %s://%s", service, scheme, urlHost) - if ok && url.Scheme != scheme { - plog.Errorf("bootstrap at %s from DNS for %s has scheme mismatch with expected peer %s", scheme+"://"+urlHost, service, url.String()) - } - } - return nil - } - - failCount := 0 - err := updateNodeMap("etcd-server-ssl", "https") - srvErr := make([]string, 2) - if err != nil { - srvErr[0] = fmt.Sprintf("error querying DNS SRV records for _etcd-server-ssl %s", err) - failCount++ - } - err = updateNodeMap("etcd-server", "http") - if err != nil { - srvErr[1] = fmt.Sprintf("error querying DNS SRV records for _etcd-server %s", err) - failCount++ - } - if failCount == 2 { - plog.Warningf(srvErr[0]) - plog.Warningf(srvErr[1]) - plog.Errorf("SRV discovery failed: too many errors querying DNS SRV records") - return "", err - } - return strings.Join(stringParts, ","), nil -} diff --git a/github.com/coreos/etcd/e2e/cluster_direct_test.go b/github.com/coreos/etcd/e2e/cluster_direct_test.go new file mode 100644 index 0000000000..15a16c9257 --- /dev/null +++ b/github.com/coreos/etcd/e2e/cluster_direct_test.go @@ -0,0 +1,21 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !cluster_proxy + +package e2e + +func newEtcdProcess(cfg *etcdServerProcessConfig) (etcdProcess, error) { + return newEtcdServerProcess(cfg) +} diff --git a/github.com/coreos/etcd/e2e/cluster_proxy_test.go b/github.com/coreos/etcd/e2e/cluster_proxy_test.go new file mode 100644 index 0000000000..a2bab6f587 --- /dev/null +++ b/github.com/coreos/etcd/e2e/cluster_proxy_test.go @@ -0,0 +1,278 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build cluster_proxy + +package e2e + +import ( + "fmt" + "net" + "net/url" + "os" + "strconv" + "strings" + + "github.com/coreos/etcd/pkg/expect" +) + +type proxyEtcdProcess struct { + etcdProc etcdProcess + proxyV2 *proxyV2Proc + proxyV3 *proxyV3Proc +} + +func newEtcdProcess(cfg *etcdServerProcessConfig) (etcdProcess, error) { + return newProxyEtcdProcess(cfg) +} + +func newProxyEtcdProcess(cfg *etcdServerProcessConfig) (*proxyEtcdProcess, error) { + ep, err := newEtcdServerProcess(cfg) + if err != nil { + return nil, err + } + pep := &proxyEtcdProcess{ + etcdProc: ep, + proxyV2: newProxyV2Proc(cfg), + proxyV3: newProxyV3Proc(cfg), + } + return pep, nil +} + +func (p *proxyEtcdProcess) Config() *etcdServerProcessConfig { return p.etcdProc.Config() } + +func (p *proxyEtcdProcess) EndpointsV2() []string { return p.proxyV2.endpoints() } +func (p *proxyEtcdProcess) EndpointsV3() []string { return p.proxyV3.endpoints() } + +func (p *proxyEtcdProcess) Start() error { + if err := p.etcdProc.Start(); err != nil { + return err + } + if err := p.proxyV2.Start(); err != nil { + return err + } + return p.proxyV3.Start() +} + +func (p *proxyEtcdProcess) Restart() error { + if err := p.etcdProc.Restart(); err != nil { + return err + } + if err := p.proxyV2.Restart(); err != nil { + return err + } + return p.proxyV3.Restart() +} + +func (p *proxyEtcdProcess) Stop() error { + err := p.proxyV2.Stop() + if v3err := p.proxyV3.Stop(); err == nil { + err = v3err + } + if eerr := p.etcdProc.Stop(); eerr != nil && err == nil { + // fails on go-grpc issue #1384 + if !strings.Contains(eerr.Error(), "exit status 2") { + err = eerr + } + } + return err +} + +func (p *proxyEtcdProcess) Close() error { + err := p.proxyV2.Close() + if v3err := p.proxyV3.Close(); err == nil { + err = v3err + } + if eerr := p.etcdProc.Close(); eerr != nil && err == nil { + // fails on go-grpc issue #1384 + if !strings.Contains(eerr.Error(), "exit status 2") { + err = eerr + } + } + return err +} + +func (p *proxyEtcdProcess) WithStopSignal(sig os.Signal) os.Signal { + p.proxyV3.WithStopSignal(sig) + p.proxyV3.WithStopSignal(sig) + return p.etcdProc.WithStopSignal(sig) +} + +type proxyProc struct { + execPath string + args []string + ep string + donec chan struct{} + + proc *expect.ExpectProcess +} + +func (pp *proxyProc) endpoints() []string { return []string{pp.ep} } + +func (pp *proxyProc) start() error { + if pp.proc != nil { + panic("already started") + } + proc, err := spawnCmd(append([]string{pp.execPath}, pp.args...)) + if err != nil { + return err + } + pp.proc = proc + return nil +} + +func (pp *proxyProc) waitReady(readyStr string) error { + defer close(pp.donec) + return waitReadyExpectProc(pp.proc, []string{readyStr}) +} + +func (pp *proxyProc) Stop() error { + if pp.proc == nil { + return nil + } + if err := pp.proc.Stop(); err != nil && !strings.Contains(err.Error(), "exit status 1") { + // v2proxy exits with status 1 on auto tls; not sure why + return err + } + pp.proc = nil + <-pp.donec + pp.donec = make(chan struct{}) + return nil +} + +func (pp *proxyProc) WithStopSignal(sig os.Signal) 
os.Signal { + ret := pp.proc.StopSignal + pp.proc.StopSignal = sig + return ret +} + +func (pp *proxyProc) Close() error { return pp.Stop() } + +type proxyV2Proc struct { + proxyProc + dataDir string +} + +func proxyListenURL(cfg *etcdServerProcessConfig, portOffset int) string { + u, err := url.Parse(cfg.acurl) + if err != nil { + panic(err) + } + host, port, _ := net.SplitHostPort(u.Host) + p, _ := strconv.ParseInt(port, 10, 16) + u.Host = fmt.Sprintf("%s:%d", host, int(p)+portOffset) + return u.String() +} + +func newProxyV2Proc(cfg *etcdServerProcessConfig) *proxyV2Proc { + listenAddr := proxyListenURL(cfg, 2) + name := fmt.Sprintf("testname-proxy-%p", cfg) + args := []string{ + "--name", name, + "--proxy", "on", + "--listen-client-urls", listenAddr, + "--initial-cluster", cfg.name + "=" + cfg.purl.String(), + } + return &proxyV2Proc{ + proxyProc{ + execPath: cfg.execPath, + args: append(args, cfg.tlsArgs...), + ep: listenAddr, + donec: make(chan struct{}), + }, + name + ".etcd", + } +} + +func (v2p *proxyV2Proc) Start() error { + os.RemoveAll(v2p.dataDir) + if err := v2p.start(); err != nil { + return err + } + return v2p.waitReady("httpproxy: endpoints found") +} + +func (v2p *proxyV2Proc) Restart() error { + if err := v2p.Stop(); err != nil { + return err + } + return v2p.Start() +} + +func (v2p *proxyV2Proc) Stop() error { + if err := v2p.proxyProc.Stop(); err != nil { + return err + } + // v2 proxy caches members; avoid reuse of directory + return os.RemoveAll(v2p.dataDir) +} + +type proxyV3Proc struct { + proxyProc +} + +func newProxyV3Proc(cfg *etcdServerProcessConfig) *proxyV3Proc { + listenAddr := proxyListenURL(cfg, 3) + args := []string{ + "grpc-proxy", + "start", + "--listen-addr", strings.Split(listenAddr, "/")[2], + "--endpoints", cfg.acurl, + // pass-through member RPCs + "--advertise-client-url", "", + } + tlsArgs := []string{} + for i := 0; i < len(cfg.tlsArgs); i++ { + switch cfg.tlsArgs[i] { + case "--cert-file": + tlsArgs = append(tlsArgs, "--cert", cfg.tlsArgs[i+1], "--cert-file", cfg.tlsArgs[i+1]) + i++ + case "--key-file": + tlsArgs = append(tlsArgs, "--key", cfg.tlsArgs[i+1], "--key-file", cfg.tlsArgs[i+1]) + i++ + case "--ca-file": + tlsArgs = append(tlsArgs, "--cacert", cfg.tlsArgs[i+1], "--trusted-ca-file", cfg.tlsArgs[i+1]) + i++ + case "--auto-tls": + tlsArgs = append(tlsArgs, "--auto-tls", "--insecure-skip-tls-verify") + case "--peer-ca-file", "--peer-cert-file", "--peer-key-file": + i++ // skip arg + case "--client-cert-auth", "--peer-auto-tls": + default: + tlsArgs = append(tlsArgs, cfg.tlsArgs[i]) + } + } + return &proxyV3Proc{ + proxyProc{ + execPath: cfg.execPath, + args: append(args, tlsArgs...), + ep: listenAddr, + donec: make(chan struct{}), + }, + } +} + +func (v3p *proxyV3Proc) Restart() error { + if err := v3p.Stop(); err != nil { + return err + } + return v3p.Start() +} + +func (v3p *proxyV3Proc) Start() error { + if err := v3p.start(); err != nil { + return err + } + return v3p.waitReady("listening for grpc-proxy client requests") +} diff --git a/github.com/coreos/etcd/e2e/etcd_test.go b/github.com/coreos/etcd/e2e/cluster_test.go similarity index 50% rename from github.com/coreos/etcd/e2e/etcd_test.go rename to github.com/coreos/etcd/e2e/cluster_test.go index c4efb3f8c2..ebd2c265d7 100644 --- a/github.com/coreos/etcd/e2e/etcd_test.go +++ b/github.com/coreos/etcd/e2e/cluster_test.go @@ -22,20 +22,10 @@ import ( "strings" "github.com/coreos/etcd/etcdserver" - "github.com/coreos/etcd/pkg/expect" - "github.com/coreos/etcd/pkg/fileutil" ) const 
etcdProcessBasePort = 20000 -var ( - binPath string - ctlBinPath string - certPath string - privateKeyPath string - caPath string -) - type clientConnType int const ( @@ -47,7 +37,6 @@ const ( var ( configNoTLS = etcdProcessClusterConfig{ clusterSize: 3, - proxySize: 0, initialToken: "new", } configAutoTLS = etcdProcessClusterConfig{ @@ -58,57 +47,33 @@ var ( } configTLS = etcdProcessClusterConfig{ clusterSize: 3, - proxySize: 0, clientTLS: clientTLS, isPeerTLS: true, initialToken: "new", } configClientTLS = etcdProcessClusterConfig{ clusterSize: 3, - proxySize: 0, clientTLS: clientTLS, initialToken: "new", } configClientBoth = etcdProcessClusterConfig{ clusterSize: 1, - proxySize: 0, clientTLS: clientTLSAndNonTLS, initialToken: "new", } configClientAutoTLS = etcdProcessClusterConfig{ clusterSize: 1, - proxySize: 0, isClientAutoTLS: true, clientTLS: clientTLS, initialToken: "new", } configPeerTLS = etcdProcessClusterConfig{ clusterSize: 3, - proxySize: 0, - isPeerTLS: true, - initialToken: "new", - } - configWithProxy = etcdProcessClusterConfig{ - clusterSize: 3, - proxySize: 1, - initialToken: "new", - } - configWithProxyTLS = etcdProcessClusterConfig{ - clusterSize: 3, - proxySize: 1, - clientTLS: clientTLS, - isPeerTLS: true, - initialToken: "new", - } - configWithProxyPeerTLS = etcdProcessClusterConfig{ - clusterSize: 3, - proxySize: 1, isPeerTLS: true, initialToken: "new", } configClientTLSCertAuth = etcdProcessClusterConfig{ clusterSize: 1, - proxySize: 0, clientTLS: clientTLS, initialToken: "new", clientCertAuthEnabled: true, @@ -123,36 +88,7 @@ func configStandalone(cfg etcdProcessClusterConfig) *etcdProcessClusterConfig { type etcdProcessCluster struct { cfg *etcdProcessClusterConfig - procs []*etcdProcess -} - -type etcdProcess struct { - cfg *etcdProcessConfig - proc *expect.ExpectProcess - donec chan struct{} // closed when Interact() terminates -} - -type etcdProcessConfig struct { - execPath string - args []string - - dataDirPath string - keepDataDir bool - - name string - - purl url.URL - - acurl string - // additional url for tls connection when the etcd process - // serves both http and https - acurltls string - acurlHost string - - initialToken string - initialCluster string - - isProxy bool + procs []etcdProcess } type etcdProcessClusterConfig struct { @@ -165,8 +101,6 @@ type etcdProcessClusterConfig struct { baseScheme string basePort int - proxySize int - snapCount int // default is 10000 clientTLS clientConnType @@ -174,19 +108,21 @@ type etcdProcessClusterConfig struct { isPeerTLS bool isPeerAutoTLS bool isClientAutoTLS bool - forceNewCluster bool - initialToken string - quotaBackendBytes int64 - noStrictReconfig bool + isClientCRL bool + + forceNewCluster bool + initialToken string + quotaBackendBytes int64 + noStrictReconfig bool } // newEtcdProcessCluster launches a new cluster from etcd processes, returning // a new etcdProcessCluster once all nodes are ready to accept client requests. 
func newEtcdProcessCluster(cfg *etcdProcessClusterConfig) (*etcdProcessCluster, error) { - etcdCfgs := cfg.etcdProcessConfigs() + etcdCfgs := cfg.etcdServerProcessConfigs() epc := &etcdProcessCluster{ cfg: cfg, - procs: make([]*etcdProcess, cfg.clusterSize+cfg.proxySize), + procs: make([]etcdProcess, cfg.clusterSize), } // launch etcd processes @@ -199,38 +135,34 @@ func newEtcdProcessCluster(cfg *etcdProcessClusterConfig) (*etcdProcessCluster, epc.procs[i] = proc } - return epc, epc.Start() + if err := epc.Start(); err != nil { + return nil, err + } + return epc, nil } -func newEtcdProcess(cfg *etcdProcessConfig) (*etcdProcess, error) { - if !fileutil.Exist(cfg.execPath) { - return nil, fmt.Errorf("could not find etcd binary") +func (cfg *etcdProcessClusterConfig) clientScheme() string { + if cfg.clientTLS == clientTLS { + return "https" } + return "http" +} - if !cfg.keepDataDir { - if err := os.RemoveAll(cfg.dataDirPath); err != nil { - return nil, err - } +func (cfg *etcdProcessClusterConfig) peerScheme() string { + peerScheme := cfg.baseScheme + if peerScheme == "" { + peerScheme = "http" } - - child, err := spawnCmd(append([]string{cfg.execPath}, cfg.args...)) - if err != nil { - return nil, err + if cfg.isPeerTLS { + peerScheme += "s" } - return &etcdProcess{cfg: cfg, proc: child, donec: make(chan struct{})}, nil + return peerScheme } -func (cfg *etcdProcessClusterConfig) etcdProcessConfigs() []*etcdProcessConfig { - binPath = binDir + "/etcd" - ctlBinPath = binDir + "/etcdctl" - certPath = certDir + "/server.crt" - privateKeyPath = certDir + "/server.key.insecure" - caPath = certDir + "/ca.crt" - +func (cfg *etcdProcessClusterConfig) etcdServerProcessConfigs() []*etcdServerProcessConfig { if cfg.basePort == 0 { cfg.basePort = etcdProcessBasePort } - if cfg.execPath == "" { cfg.execPath = binPath } @@ -238,29 +170,17 @@ func (cfg *etcdProcessClusterConfig) etcdProcessConfigs() []*etcdProcessConfig { cfg.snapCount = etcdserver.DefaultSnapCount } - clientScheme := "http" - if cfg.clientTLS == clientTLS { - clientScheme = "https" - } - peerScheme := cfg.baseScheme - if peerScheme == "" { - peerScheme = "http" - } - if cfg.isPeerTLS { - peerScheme += "s" - } - - etcdCfgs := make([]*etcdProcessConfig, cfg.clusterSize+cfg.proxySize) + etcdCfgs := make([]*etcdServerProcessConfig, cfg.clusterSize) initialCluster := make([]string, cfg.clusterSize) for i := 0; i < cfg.clusterSize; i++ { var curls []string var curl, curltls string - port := cfg.basePort + 2*i + port := cfg.basePort + 4*i curlHost := fmt.Sprintf("localhost:%d", port) switch cfg.clientTLS { case clientNonTLS, clientTLS: - curl = (&url.URL{Scheme: clientScheme, Host: curlHost}).String() + curl = (&url.URL{Scheme: cfg.clientScheme(), Host: curlHost}).String() curls = []string{curl} case clientTLSAndNonTLS: curl = (&url.URL{Scheme: "http", Host: curlHost}).String() @@ -268,7 +188,7 @@ func (cfg *etcdProcessClusterConfig) etcdProcessConfigs() []*etcdProcessConfig { curls = []string{curl, curltls} } - purl := url.URL{Scheme: peerScheme, Host: fmt.Sprintf("localhost:%d", port+1)} + purl := url.URL{Scheme: cfg.peerScheme(), Host: fmt.Sprintf("localhost:%d", port+1)} name := fmt.Sprintf("testname%d", i) dataDirPath := cfg.dataDirPath if cfg.dataDirPath == "" { @@ -303,46 +223,18 @@ func (cfg *etcdProcessClusterConfig) etcdProcessConfigs() []*etcdProcessConfig { } args = append(args, cfg.tlsArgs()...) 
- etcdCfgs[i] = &etcdProcessConfig{ + etcdCfgs[i] = &etcdServerProcessConfig{ execPath: cfg.execPath, args: args, + tlsArgs: cfg.tlsArgs(), dataDirPath: dataDirPath, keepDataDir: cfg.keepDataDir, name: name, purl: purl, acurl: curl, - acurltls: curltls, - acurlHost: curlHost, initialToken: cfg.initialToken, } } - for i := 0; i < cfg.proxySize; i++ { - port := cfg.basePort + 2*cfg.clusterSize + i + 1 - curlHost := fmt.Sprintf("localhost:%d", port) - curl := url.URL{Scheme: clientScheme, Host: curlHost} - name := fmt.Sprintf("testname-proxy%d", i) - dataDirPath, derr := ioutil.TempDir("", name+".etcd") - if derr != nil { - panic("could not get tempdir for datadir") - } - args := []string{ - "--name", name, - "--proxy", "on", - "--listen-client-urls", curl.String(), - "--data-dir", dataDirPath, - } - args = append(args, cfg.tlsArgs()...) - etcdCfgs[cfg.clusterSize+i] = &etcdProcessConfig{ - execPath: cfg.execPath, - args: args, - dataDirPath: dataDirPath, - keepDataDir: cfg.keepDataDir, - name: name, - acurl: curl.String(), - acurlHost: curlHost, - isProxy: true, - } - } initialClusterArgs := []string{"--initial-cluster", strings.Join(initialCluster, ",")} for i := range etcdCfgs { @@ -356,7 +248,7 @@ func (cfg *etcdProcessClusterConfig) etcdProcessConfigs() []*etcdProcessConfig { func (cfg *etcdProcessClusterConfig) tlsArgs() (args []string) { if cfg.clientTLS != clientNonTLS { if cfg.isClientAutoTLS { - args = append(args, "--auto-tls=true") + args = append(args, "--auto-tls") } else { tlsClientArgs := []string{ "--cert-file", certPath, @@ -373,7 +265,7 @@ func (cfg *etcdProcessClusterConfig) tlsArgs() (args []string) { if cfg.isPeerTLS { if cfg.isPeerAutoTLS { - args = append(args, "--peer-auto-tls=true") + args = append(args, "--peer-auto-tls") } else { tlsPeerArgs := []string{ "--peer-cert-file", certPath, @@ -383,13 +275,41 @@ func (cfg *etcdProcessClusterConfig) tlsArgs() (args []string) { args = append(args, tlsPeerArgs...) } } + + if cfg.isClientCRL { + args = append(args, "--client-crl-file", crlPath, "--client-cert-auth") + } + return args } -func (epc *etcdProcessCluster) Start() (err error) { - readyC := make(chan error, epc.cfg.clusterSize+epc.cfg.proxySize) +func (epc *etcdProcessCluster) EndpointsV2() []string { + return epc.endpoints(func(ep etcdProcess) []string { return ep.EndpointsV2() }) +} + +func (epc *etcdProcessCluster) EndpointsV3() []string { + return epc.endpoints(func(ep etcdProcess) []string { return ep.EndpointsV3() }) +} + +func (epc *etcdProcessCluster) endpoints(f func(ep etcdProcess) []string) (ret []string) { + for _, p := range epc.procs { + ret = append(ret, f(p)...) 
+ } + return ret +} + +func (epc *etcdProcessCluster) Start() error { + return epc.start(func(ep etcdProcess) error { return ep.Start() }) +} + +func (epc *etcdProcessCluster) Restart() error { + return epc.start(func(ep etcdProcess) error { return ep.Restart() }) +} + +func (epc *etcdProcessCluster) start(f func(ep etcdProcess) error) error { + readyC := make(chan error, len(epc.procs)) for i := range epc.procs { - go func(n int) { readyC <- epc.procs[n].waitReady() }(i) + go func(n int) { readyC <- f(epc.procs[n]) }(i) } for range epc.procs { if err := <-readyC; err != nil { @@ -400,19 +320,7 @@ func (epc *etcdProcessCluster) Start() (err error) { return nil } -func (epc *etcdProcessCluster) RestartAll() error { - for i := range epc.procs { - proc, err := newEtcdProcess(epc.procs[i].cfg) - if err != nil { - epc.Close() - return err - } - epc.procs[i] = proc - } - return epc.Start() -} - -func (epc *etcdProcessCluster) StopAll() (err error) { +func (epc *etcdProcessCluster) Stop() (err error) { for _, p := range epc.procs { if p == nil { continue @@ -429,127 +337,23 @@ func (epc *etcdProcessCluster) StopAll() (err error) { } func (epc *etcdProcessCluster) Close() error { - err := epc.StopAll() + err := epc.Stop() for _, p := range epc.procs { // p is nil when newEtcdProcess fails in the middle // Close still gets called to clean up test data if p == nil { continue } - os.RemoveAll(p.cfg.dataDirPath) - } - return err -} - -func (ep *etcdProcess) Restart() error { - newEp, err := newEtcdProcess(ep.cfg) - if err != nil { - ep.Stop() - return err - } - *ep = *newEp - if err = ep.waitReady(); err != nil { - ep.Stop() - return err - } - return nil -} - -func (ep *etcdProcess) Stop() error { - if ep == nil { - return nil - } - if err := ep.proc.Stop(); err != nil { - return err - } - <-ep.donec - - if ep.cfg.purl.Scheme == "unix" || ep.cfg.purl.Scheme == "unixs" { - os.Remove(ep.cfg.purl.Host + ep.cfg.purl.Path) - } - return nil -} - -func (ep *etcdProcess) waitReady() error { - defer close(ep.donec) - return waitReadyExpectProc(ep.proc, ep.cfg.isProxy) -} - -func waitReadyExpectProc(exproc *expect.ExpectProcess, isProxy bool) error { - readyStrs := []string{"enabled capabilities for version", "published"} - if isProxy { - readyStrs = []string{"httpproxy: endpoints found"} - } - c := 0 - matchSet := func(l string) bool { - for _, s := range readyStrs { - if strings.Contains(l, s) { - c++ - break - } + if cerr := p.Close(); cerr != nil { + err = cerr } - return c == len(readyStrs) } - _, err := exproc.ExpectFunc(matchSet) return err } -func spawnWithExpect(args []string, expected string) error { - return spawnWithExpects(args, []string{expected}...) -} - -func spawnWithExpects(args []string, xs ...string) error { - proc, err := spawnCmd(args) - if err != nil { - return err - } - // process until either stdout or stderr contains - // the expected string - var ( - lines []string - lineFunc = func(txt string) bool { return true } - ) - for _, txt := range xs { - for { - l, lerr := proc.ExpectFunc(lineFunc) - if lerr != nil { - proc.Close() - return fmt.Errorf("%v (expected %q, got %q)", lerr, txt, lines) - } - lines = append(lines, l) - if strings.Contains(l, txt) { - break - } - } - } - perr := proc.Close() - if len(xs) == 0 && proc.LineCount() != noOutputLineCount { // expect no output - return fmt.Errorf("unexpected output (got lines %q, line count %d)", lines, proc.LineCount()) - } - return perr -} - -// proxies returns only the proxy etcdProcess. 
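
The cluster refactor above replaces the concrete *etcdProcess struct with an etcdProcess interface, so the same e2e tests can drive either a plain etcd server or a server fronted by the v2 and grpc proxies, selected by build tag (cluster_direct_test.go vs. cluster_proxy_test.go). The sketch below shows the shape implied by the call sites in this hunk; the authoritative definition is the one added in e2e/etcd_process.go later in this patch:

    package e2e

    import "os"

    // etcdProcess as inferred from the call sites in this refactor (sketch only;
    // see e2e/etcd_process.go for the real definition).
    type etcdProcess interface {
        EndpointsV2() []string
        EndpointsV3() []string
        Start() error
        Restart() error
        Stop() error
        Close() error
        WithStopSignal(sig os.Signal) os.Signal
        Config() *etcdServerProcessConfig
    }

newEtcdProcess is provided twice behind build tags: cluster_direct_test.go (tagged !cluster_proxy) returns the raw server process, while cluster_proxy_test.go (tagged cluster_proxy) wraps it with the v2 and v3 proxies, so the proxied variant is presumably exercised by building the e2e tests with the cluster_proxy tag.
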
-func (epc *etcdProcessCluster) proxies() []*etcdProcess { - return epc.procs[epc.cfg.clusterSize:] -} - -func (epc *etcdProcessCluster) processes() []*etcdProcess { - return epc.procs[:epc.cfg.clusterSize] -} - -func (epc *etcdProcessCluster) endpoints() []string { - eps := make([]string, epc.cfg.clusterSize) - for i, ep := range epc.processes() { - eps[i] = ep.cfg.acurl - } - return eps -} - -func (epc *etcdProcessCluster) grpcEndpoints() []string { - eps := make([]string, epc.cfg.clusterSize) - for i, ep := range epc.processes() { - eps[i] = ep.cfg.acurlHost +func (epc *etcdProcessCluster) WithStopSignal(sig os.Signal) (ret os.Signal) { + for _, p := range epc.procs { + ret = p.WithStopSignal(sig) } - return eps + return ret } diff --git a/github.com/coreos/etcd/e2e/ctl_v2_test.go b/github.com/coreos/etcd/e2e/ctl_v2_test.go index 294a478f0d..f986eb1f31 100644 --- a/github.com/coreos/etcd/e2e/ctl_v2_test.go +++ b/github.com/coreos/etcd/e2e/ctl_v2_test.go @@ -128,10 +128,9 @@ func testCtlV2Ls(t *testing.T, cfg *etcdProcessClusterConfig, quorum bool) { } } -func TestCtlV2Watch(t *testing.T) { testCtlV2Watch(t, &configNoTLS, false) } -func TestCtlV2WatchTLS(t *testing.T) { testCtlV2Watch(t, &configTLS, false) } -func TestCtlV2WatchWithProxy(t *testing.T) { testCtlV2Watch(t, &configWithProxy, false) } -func TestCtlV2WatchWithProxyNoSync(t *testing.T) { testCtlV2Watch(t, &configWithProxy, true) } +func TestCtlV2Watch(t *testing.T) { testCtlV2Watch(t, &configNoTLS, false) } +func TestCtlV2WatchTLS(t *testing.T) { testCtlV2Watch(t, &configTLS, false) } + func testCtlV2Watch(t *testing.T, cfg *etcdProcessClusterConfig, noSync bool) { defer testutil.AfterTest(t) @@ -158,12 +157,10 @@ func testCtlV2Watch(t *testing.T, cfg *etcdProcessClusterConfig, noSync bool) { } } -func TestCtlV2GetRoleUser(t *testing.T) { testCtlV2GetRoleUser(t, &configNoTLS) } -func TestCtlV2GetRoleUserWithProxy(t *testing.T) { testCtlV2GetRoleUser(t, &configWithProxy) } -func testCtlV2GetRoleUser(t *testing.T, cfg *etcdProcessClusterConfig) { +func TestCtlV2GetRoleUser(t *testing.T) { defer testutil.AfterTest(t) - epc := setupEtcdctlTest(t, cfg, true) + epc := setupEtcdctlTest(t, &configNoTLS, false) defer func() { if err := epc.Close(); err != nil { t.Fatalf("error closing etcd processes (%v)", err) @@ -196,7 +193,7 @@ func TestCtlV2UserListRoot(t *testing.T) { testCtlV2UserList(t, "root") } func testCtlV2UserList(t *testing.T, username string) { defer testutil.AfterTest(t) - epc := setupEtcdctlTest(t, &configWithProxy, false) + epc := setupEtcdctlTest(t, &configNoTLS, false) defer func() { if err := epc.Close(); err != nil { t.Fatalf("error closing etcd processes (%v)", err) @@ -214,7 +211,7 @@ func testCtlV2UserList(t *testing.T, username string) { func TestCtlV2RoleList(t *testing.T) { defer testutil.AfterTest(t) - epc := setupEtcdctlTest(t, &configWithProxy, false) + epc := setupEtcdctlTest(t, &configNoTLS, false) defer func() { if err := epc.Close(); err != nil { t.Fatalf("error closing etcd processes (%v)", err) @@ -243,7 +240,7 @@ func TestCtlV2Backup(t *testing.T) { // For https://github.com/coreos/etcd/issue t.Fatal(err) } - if err := etcdctlBackup(epc1, epc1.procs[0].cfg.dataDirPath, backupDir); err != nil { + if err := etcdctlBackup(epc1, epc1.procs[0].Config().dataDirPath, backupDir); err != nil { t.Fatal(err) } @@ -321,31 +318,36 @@ func TestCtlV2ClusterHealth(t *testing.T) { } }() - // has quorum + // all members available if err := etcdctlClusterHealth(epc, "cluster is healthy"); err != nil { 
t.Fatalf("cluster-health expected to be healthy (%v)", err) } - // cut quorum + // missing members, has quorum epc.procs[0].Stop() + + for i := 0; i < 3; i++ { + err := etcdctlClusterHealth(epc, "cluster is degraded") + if err == nil { + break + } else if i == 2 { + t.Fatalf("cluster-health expected to be degraded (%v)", err) + } + // possibly no leader yet; retry + time.Sleep(time.Second) + } + + // no quorum epc.procs[1].Stop() - if err := etcdctlClusterHealth(epc, "cluster is unhealthy"); err != nil { - t.Fatalf("cluster-health expected to be unhealthy (%v)", err) + if err := etcdctlClusterHealth(epc, "cluster is unavailable"); err != nil { + t.Fatalf("cluster-health expected to be unavailable (%v)", err) } + epc.procs[0], epc.procs[1] = nil, nil } func etcdctlPrefixArgs(clus *etcdProcessCluster) []string { - endpoints := "" - if proxies := clus.proxies(); len(proxies) != 0 { - endpoints = proxies[0].cfg.acurl - } else if processes := clus.processes(); len(processes) != 0 { - es := []string{} - for _, b := range processes { - es = append(es, b.cfg.acurl) - } - endpoints = strings.Join(es, ",") - } + endpoints := strings.Join(clus.EndpointsV2(), ",") cmdArgs := []string{ctlBinPath, "--endpoints", endpoints} if clus.cfg.clientTLS == clientTLS { cmdArgs = append(cmdArgs, "--ca-file", caPath, "--cert-file", certPath, "--key-file", privateKeyPath) diff --git a/github.com/coreos/etcd/e2e/ctl_v3_alarm_test.go b/github.com/coreos/etcd/e2e/ctl_v3_alarm_test.go index a8555400da..50baae5e9e 100644 --- a/github.com/coreos/etcd/e2e/ctl_v3_alarm_test.go +++ b/github.com/coreos/etcd/e2e/ctl_v3_alarm_test.go @@ -52,6 +52,11 @@ func alarmTest(cx ctlCtx) { cx.t.Fatal(err) } + // '/health' handler should return 'false' + if err := cURLGet(cx.epc, cURLReq{endpoint: "/health", expected: `{"health": "false"}`}); err != nil { + cx.t.Fatalf("failed get with curl (%v)", err) + } + // check that Put is rejected when alarm is on if err := ctlV3Put(cx, "3rd_test", smallbuf, ""); err != nil { if !strings.Contains(err.Error(), "etcdserver: mvcc: database space exceeded") { @@ -59,7 +64,7 @@ func alarmTest(cx ctlCtx) { } } - eps := cx.epc.grpcEndpoints() + eps := cx.epc.EndpointsV3() // get latest revision to compact cli, err := clientv3.New(clientv3.Config{ diff --git a/github.com/coreos/etcd/e2e/ctl_v3_auth_test.go b/github.com/coreos/etcd/e2e/ctl_v3_auth_test.go index 27207d8ac5..e8fc31f81d 100644 --- a/github.com/coreos/etcd/e2e/ctl_v3_auth_test.go +++ b/github.com/coreos/etcd/e2e/ctl_v3_auth_test.go @@ -12,10 +12,14 @@ // See the License for the specific language governing permissions and // limitations under the License. +// Skip proxy tests for now since auth is broken on grpcproxy. 
+// +build !cluster_proxy + package e2e import ( "fmt" + "os" "testing" "github.com/coreos/etcd/clientv3" @@ -38,6 +42,17 @@ func TestCtlV3AuthCertCN(t *testing.T) { testCtl(t, authTestCertCN, wi func TestCtlV3AuthRevokeWithDelete(t *testing.T) { testCtl(t, authTestRevokeWithDelete) } func TestCtlV3AuthInvalidMgmt(t *testing.T) { testCtl(t, authTestInvalidMgmt) } func TestCtlV3AuthFromKeyPerm(t *testing.T) { testCtl(t, authTestFromKeyPerm) } +func TestCtlV3AuthAndWatch(t *testing.T) { testCtl(t, authTestWatch) } + +func TestCtlV3AuthRoleGet(t *testing.T) { testCtl(t, authTestRoleGet) } +func TestCtlV3AuthUserGet(t *testing.T) { testCtl(t, authTestUserGet) } +func TestCtlV3AuthRoleList(t *testing.T) { testCtl(t, authTestRoleList) } + +func TestCtlV3AuthDefrag(t *testing.T) { testCtl(t, authTestDefrag) } +func TestCtlV3AuthEndpointHealth(t *testing.T) { + testCtl(t, authTestEndpointHealth, withQuorum()) +} +func TestCtlV3AuthSnapshot(t *testing.T) { testCtl(t, authTestSnapshot) } func authEnableTest(cx ctlCtx) { if err := authEnable(cx); err != nil { @@ -88,9 +103,9 @@ func authDisableTest(cx ctlCtx) { cx.t.Fatalf("authDisableTest ctlV3AuthDisable error (%v)", err) } - // now auth fails unconditionally, note that failed RPC is Authenticate(), not Put() + // now ErrAuthNotEnabled of Authenticate() is simply ignored cx.user, cx.pass = "test-user", "pass" - if err := ctlV3PutFailAuthDisabled(cx, "hoo", "bar"); err != nil { + if err := ctlV3Put(cx, "hoo", "bar", ""); err != nil { cx.t.Fatal(err) } @@ -330,10 +345,6 @@ func ctlV3PutFailPerm(cx ctlCtx, key, val string) error { return spawnWithExpect(append(cx.PrefixArgs(), "put", key, val), "permission denied") } -func ctlV3PutFailAuthDisabled(cx ctlCtx, key, val string) error { - return spawnWithExpect(append(cx.PrefixArgs(), "put", key, val), "authentication is not enabled") -} - func authSetupTestUser(cx ctlCtx) { if err := ctlV3User(cx, []string{"add", "test-user", "--interactive=false"}, "User test-user created", []string{"pass"}); err != nil { cx.t.Fatal(err) @@ -402,7 +413,7 @@ func authTestTxn(cx ctlCtx) { compare: []string{`version("c1") = "1"`}, ifSucess: []string{"get s2"}, ifFail: []string{"get f2"}, - results: []string{"Error: etcdserver: permission denied"}, + results: []string{"Error: etcdserver: permission denied"}, } if err := ctlV3Txn(cx, rqs); err != nil { cx.t.Fatal(err) @@ -413,7 +424,7 @@ func authTestTxn(cx ctlCtx) { compare: []string{`version("c2") = "1"`}, ifSucess: []string{"get s1"}, ifFail: []string{"get f2"}, - results: []string{"Error: etcdserver: permission denied"}, + results: []string{"Error: etcdserver: permission denied"}, } if err := ctlV3Txn(cx, rqs); err != nil { cx.t.Fatal(err) @@ -424,7 +435,7 @@ func authTestTxn(cx ctlCtx) { compare: []string{`version("c2") = "1"`}, ifSucess: []string{"get s2"}, ifFail: []string{"get f1"}, - results: []string{"Error: etcdserver: permission denied"}, + results: []string{"Error: etcdserver: permission denied"}, } if err := ctlV3Txn(cx, rqs); err != nil { cx.t.Fatal(err) @@ -606,11 +617,11 @@ func authTestInvalidMgmt(cx ctlCtx) { cx.t.Fatal(err) } - if err := ctlV3Role(cx, []string{"delete", "root"}, "Error: etcdserver: invalid auth management"); err == nil { + if err := ctlV3Role(cx, []string{"delete", "root"}, "Error: etcdserver: invalid auth management"); err == nil { cx.t.Fatal("deleting the role root must not be allowed") } - if err := ctlV3User(cx, []string{"revoke-role", "root", "root"}, "Error: etcdserver: invalid auth management", []string{}); err == nil { + if err := 
ctlV3User(cx, []string{"revoke-role", "root", "root"}, "Error: etcdserver: invalid auth management", []string{}); err == nil { cx.t.Fatal("revoking the role root from the user root must not be allowed") } } @@ -665,3 +676,242 @@ func authTestFromKeyPerm(cx ctlCtx) { } } } + +func authTestWatch(cx ctlCtx) { + if err := authEnable(cx); err != nil { + cx.t.Fatal(err) + } + + cx.user, cx.pass = "root", "root" + authSetupTestUser(cx) + + // grant a key range + if err := ctlV3RoleGrantPermission(cx, "test-role", grantingPerm{true, true, "key", "key4", false}); err != nil { + cx.t.Fatal(err) + } + + tests := []struct { + puts []kv + args []string + + wkv []kv + want bool + }{ + { // watch 1 key, should be successful + []kv{{"key", "value"}}, + []string{"key", "--rev", "1"}, + []kv{{"key", "value"}}, + true, + }, + { // watch 3 keys by range, should be successful + []kv{{"key1", "val1"}, {"key3", "val3"}, {"key2", "val2"}}, + []string{"key", "key3", "--rev", "1"}, + []kv{{"key1", "val1"}, {"key2", "val2"}}, + true, + }, + + { // watch 1 key, should not be successful + []kv{}, + []string{"key5", "--rev", "1"}, + []kv{}, + false, + }, + { // watch 3 keys by range, should not be successful + []kv{}, + []string{"key", "key6", "--rev", "1"}, + []kv{}, + false, + }, + } + + cx.user, cx.pass = "test-user", "pass" + for i, tt := range tests { + donec := make(chan struct{}) + go func(i int, puts []kv) { + defer close(donec) + for j := range puts { + if err := ctlV3Put(cx, puts[j].key, puts[j].val, ""); err != nil { + cx.t.Fatalf("watchTest #%d-%d: ctlV3Put error (%v)", i, j, err) + } + } + }(i, tt.puts) + + var err error + if tt.want { + err = ctlV3Watch(cx, tt.args, tt.wkv...) + } else { + err = ctlV3WatchFailPerm(cx, tt.args) + } + + if err != nil { + if cx.dialTimeout > 0 && !isGRPCTimedout(err) { + cx.t.Errorf("watchTest #%d: ctlV3Watch error (%v)", i, err) + } + } + + <-donec + } + +} + +func authTestRoleGet(cx ctlCtx) { + if err := authEnable(cx); err != nil { + cx.t.Fatal(err) + } + cx.user, cx.pass = "root", "root" + authSetupTestUser(cx) + + expected := []string{ + "Role test-role", + "KV Read:", "foo", + "KV Write:", "foo", + } + if err := spawnWithExpects(append(cx.PrefixArgs(), "role", "get", "test-role"), expected...); err != nil { + cx.t.Fatal(err) + } + + // test-user can get the information of test-role because it belongs to the role + cx.user, cx.pass = "test-user", "pass" + if err := spawnWithExpects(append(cx.PrefixArgs(), "role", "get", "test-role"), expected...); err != nil { + cx.t.Fatal(err) + } + + // test-user cannot get the information of root because it doesn't belong to the role + expected = []string{ + "Error: etcdserver: permission denied", + } + if err := spawnWithExpects(append(cx.PrefixArgs(), "role", "get", "root"), expected...); err != nil { + cx.t.Fatal(err) + } +} + +func authTestUserGet(cx ctlCtx) { + if err := authEnable(cx); err != nil { + cx.t.Fatal(err) + } + cx.user, cx.pass = "root", "root" + authSetupTestUser(cx) + + expected := []string{ + "User: test-user", + "Roles: test-role", + } + + if err := spawnWithExpects(append(cx.PrefixArgs(), "user", "get", "test-user"), expected...); err != nil { + cx.t.Fatal(err) + } + + // test-user can get the information of test-user itself + cx.user, cx.pass = "test-user", "pass" + if err := spawnWithExpects(append(cx.PrefixArgs(), "user", "get", "test-user"), expected...); err != nil { + cx.t.Fatal(err) + } + + // test-user cannot get the information of root + expected = []string{ + "Error: etcdserver: permission denied", + } 
+ if err := spawnWithExpects(append(cx.PrefixArgs(), "user", "get", "root"), expected...); err != nil { + cx.t.Fatal(err) + } +} + +func authTestRoleList(cx ctlCtx) { + if err := authEnable(cx); err != nil { + cx.t.Fatal(err) + } + cx.user, cx.pass = "root", "root" + authSetupTestUser(cx) + if err := spawnWithExpect(append(cx.PrefixArgs(), "role", "list"), "test-role"); err != nil { + cx.t.Fatal(err) + } +} + +func authTestDefrag(cx ctlCtx) { + maintenanceInitKeys(cx) + + if err := authEnable(cx); err != nil { + cx.t.Fatal(err) + } + + cx.user, cx.pass = "root", "root" + authSetupTestUser(cx) + + // ordinary user cannot defrag + cx.user, cx.pass = "test-user", "pass" + if err := ctlV3Defrag(cx); err == nil { + cx.t.Fatal("ordinary user should not be able to issue a defrag request") + } + + // root can defrag + cx.user, cx.pass = "root", "root" + if err := ctlV3Defrag(cx); err != nil { + cx.t.Fatal(err) + } +} + +func authTestSnapshot(cx ctlCtx) { + maintenanceInitKeys(cx) + + if err := authEnable(cx); err != nil { + cx.t.Fatal(err) + } + + cx.user, cx.pass = "root", "root" + authSetupTestUser(cx) + + fpath := "test.snapshot" + defer os.RemoveAll(fpath) + + // ordinary user cannot save a snapshot + cx.user, cx.pass = "test-user", "pass" + if err := ctlV3SnapshotSave(cx, fpath); err == nil { + cx.t.Fatal("ordinary user should not be able to save a snapshot") + } + + // root can save a snapshot + cx.user, cx.pass = "root", "root" + if err := ctlV3SnapshotSave(cx, fpath); err != nil { + cx.t.Fatalf("snapshotTest ctlV3SnapshotSave error (%v)", err) + } + + st, err := getSnapshotStatus(cx, fpath) + if err != nil { + cx.t.Fatalf("snapshotTest getSnapshotStatus error (%v)", err) + } + if st.Revision != 4 { + cx.t.Fatalf("expected 4, got %d", st.Revision) + } + if st.TotalKey < 3 { + cx.t.Fatalf("expected at least 3, got %d", st.TotalKey) + } +} + +func authTestEndpointHealth(cx ctlCtx) { + if err := authEnable(cx); err != nil { + cx.t.Fatal(err) + } + + cx.user, cx.pass = "root", "root" + authSetupTestUser(cx) + + if err := ctlV3EndpointHealth(cx); err != nil { + cx.t.Fatalf("endpointStatusTest ctlV3EndpointHealth error (%v)", err) + } + + // health checking with an ordinary user "succeeds" since permission denial goes through consensus + cx.user, cx.pass = "test-user", "pass" + if err := ctlV3EndpointHealth(cx); err != nil { + cx.t.Fatalf("endpointStatusTest ctlV3EndpointHealth error (%v)", err) + } + + // succeed if permissions granted for ordinary user + cx.user, cx.pass = "root", "root" + if err := ctlV3RoleGrantPermission(cx, "test-role", grantingPerm{true, true, "health", "", false}); err != nil { + cx.t.Fatal(err) + } + cx.user, cx.pass = "test-user", "pass" + if err := ctlV3EndpointHealth(cx); err != nil { + cx.t.Fatalf("endpointStatusTest ctlV3EndpointHealth error (%v)", err) + } +} diff --git a/github.com/coreos/etcd/e2e/ctl_v3_defrag_test.go b/github.com/coreos/etcd/e2e/ctl_v3_defrag_test.go index cc197d3628..64c3bb9f0c 100644 --- a/github.com/coreos/etcd/e2e/ctl_v3_defrag_test.go +++ b/github.com/coreos/etcd/e2e/ctl_v3_defrag_test.go @@ -16,8 +16,7 @@ package e2e import "testing" -func TestCtlV3Defrag(t *testing.T) { testCtl(t, defragTest) } -func TestCtlV3DefragWithAuth(t *testing.T) { testCtl(t, defragTestWithAuth) } +func TestCtlV3Defrag(t *testing.T) { testCtl(t, defragTest) } func maintenanceInitKeys(cx ctlCtx) { var kvs = []kv{{"key", "val1"}, {"key", "val2"}, {"key", "val3"}} @@ -40,29 +39,6 @@ func defragTest(cx ctlCtx) { } } -func defragTestWithAuth(cx ctlCtx) { - 
maintenanceInitKeys(cx) - - if err := authEnable(cx); err != nil { - cx.t.Fatal(err) - } - - cx.user, cx.pass = "root", "root" - authSetupTestUser(cx) - - // ordinary user cannot defrag - cx.user, cx.pass = "test-user", "pass" - if err := ctlV3Defrag(cx); err == nil { - cx.t.Fatal("ordinary user should not be able to issue a defrag request") - } - - // root can defrag - cx.user, cx.pass = "root", "root" - if err := ctlV3Defrag(cx); err != nil { - cx.t.Fatal(err) - } -} - func ctlV3Defrag(cx ctlCtx) error { cmdArgs := append(cx.PrefixArgs(), "defrag") lines := make([]string, cx.epc.cfg.clusterSize) diff --git a/github.com/coreos/etcd/e2e/ctl_v3_elect_test.go b/github.com/coreos/etcd/e2e/ctl_v3_elect_test.go index 3da5427809..410c00f813 100644 --- a/github.com/coreos/etcd/e2e/ctl_v3_elect_test.go +++ b/github.com/coreos/etcd/e2e/ctl_v3_elect_test.go @@ -23,9 +23,19 @@ import ( "github.com/coreos/etcd/pkg/expect" ) -func TestCtlV3Elect(t *testing.T) { testCtl(t, testElect) } +func TestCtlV3Elect(t *testing.T) { + oldenv := os.Getenv("EXPECT_DEBUG") + defer os.Setenv("EXPECT_DEBUG", oldenv) + os.Setenv("EXPECT_DEBUG", "1") + + testCtl(t, testElect) +} func testElect(cx ctlCtx) { + // debugging for #6934 + sig := cx.epc.WithStopSignal(debugLockSignal) + defer cx.epc.WithStopSignal(sig) + name := "a" holder, ch, err := ctlV3Elect(cx, name, "p1") @@ -70,7 +80,7 @@ func testElect(cx ctlCtx) { if err = blocked.Signal(os.Interrupt); err != nil { cx.t.Fatal(err) } - if err = blocked.Close(); err != nil { + if err = closeWithTimeout(blocked, time.Second); err != nil { cx.t.Fatal(err) } @@ -78,7 +88,7 @@ func testElect(cx ctlCtx) { if err = holder.Signal(os.Interrupt); err != nil { cx.t.Fatal(err) } - if err = holder.Close(); err != nil { + if err = closeWithTimeout(holder, time.Second); err != nil { cx.t.Fatal(err) } @@ -102,6 +112,7 @@ func ctlV3Elect(cx ctlCtx, name, proposal string) (*expect.ExpectProcess, <-chan close(outc) return proc, outc, err } + proc.StopSignal = debugLockSignal go func() { s, xerr := proc.ExpectFunc(func(string) bool { return true }) if xerr != nil { diff --git a/github.com/coreos/etcd/e2e/ctl_v3_endpoint_test.go b/github.com/coreos/etcd/e2e/ctl_v3_endpoint_test.go index 3a42c1c9b4..74a2ebb7a1 100644 --- a/github.com/coreos/etcd/e2e/ctl_v3_endpoint_test.go +++ b/github.com/coreos/etcd/e2e/ctl_v3_endpoint_test.go @@ -21,9 +21,6 @@ import ( func TestCtlV3EndpointHealth(t *testing.T) { testCtl(t, endpointHealthTest, withQuorum()) } func TestCtlV3EndpointStatus(t *testing.T) { testCtl(t, endpointStatusTest, withQuorum()) } -func TestCtlV3EndpointHealthWithAuth(t *testing.T) { - testCtl(t, endpointHealthTestWithAuth, withQuorum()) -} func endpointHealthTest(cx ctlCtx) { if err := ctlV3EndpointHealth(cx); err != nil { @@ -49,38 +46,9 @@ func endpointStatusTest(cx ctlCtx) { func ctlV3EndpointStatus(cx ctlCtx) error { cmdArgs := append(cx.PrefixArgs(), "endpoint", "status") var eps []string - for _, ep := range cx.epc.endpoints() { + for _, ep := range cx.epc.EndpointsV3() { u, _ := url.Parse(ep) eps = append(eps, u.Host) } return spawnWithExpects(cmdArgs, eps...) 
} - -func endpointHealthTestWithAuth(cx ctlCtx) { - if err := authEnable(cx); err != nil { - cx.t.Fatal(err) - } - - cx.user, cx.pass = "root", "root" - authSetupTestUser(cx) - - if err := ctlV3EndpointHealth(cx); err != nil { - cx.t.Fatalf("endpointStatusTest ctlV3EndpointHealth error (%v)", err) - } - - // health checking with an ordinary user "succeeds" since permission denial goes through consensus - cx.user, cx.pass = "test-user", "pass" - if err := ctlV3EndpointHealth(cx); err != nil { - cx.t.Fatalf("endpointStatusTest ctlV3EndpointHealth error (%v)", err) - } - - // succeed if permissions granted for ordinary user - cx.user, cx.pass = "root", "root" - if err := ctlV3RoleGrantPermission(cx, "test-role", grantingPerm{true, true, "health", "", false}); err != nil { - cx.t.Fatal(err) - } - cx.user, cx.pass = "test-user", "pass" - if err := ctlV3EndpointHealth(cx); err != nil { - cx.t.Fatalf("endpointStatusTest ctlV3EndpointHealth error (%v)", err) - } -} diff --git a/github.com/coreos/etcd/e2e/ctl_v3_kv_test.go b/github.com/coreos/etcd/e2e/ctl_v3_kv_test.go index 1fcc38ecce..d05a69ccf4 100644 --- a/github.com/coreos/etcd/e2e/ctl_v3_kv_test.go +++ b/github.com/coreos/etcd/e2e/ctl_v3_kv_test.go @@ -16,6 +16,7 @@ package e2e import ( "fmt" + "strings" "testing" ) @@ -49,6 +50,29 @@ func TestCtlV3DelClientTLS(t *testing.T) { testCtl(t, delTest, withCfg(configCli func TestCtlV3DelPeerTLS(t *testing.T) { testCtl(t, delTest, withCfg(configPeerTLS)) } func TestCtlV3DelTimeout(t *testing.T) { testCtl(t, delTest, withDialTimeout(0)) } +func TestCtlV3GetRevokedCRL(t *testing.T) { + cfg := etcdProcessClusterConfig{ + clusterSize: 1, + initialToken: "new", + clientTLS: clientTLS, + isClientCRL: true, + clientCertAuthEnabled: true, + } + testCtl(t, testGetRevokedCRL, withCfg(cfg)) +} + +func testGetRevokedCRL(cx ctlCtx) { + // test reject + if err := ctlV3Put(cx, "k", "v", ""); err == nil || !strings.Contains(err.Error(), "Error:") { + cx.t.Fatalf("expected reset connection on put, got %v", err) + } + // test accept + cx.epc.cfg.isClientCRL = false + if err := ctlV3Put(cx, "k", "v", ""); err != nil { + cx.t.Fatal(err) + } +} + func putTest(cx ctlCtx) { key, value := "foo", "bar" diff --git a/github.com/coreos/etcd/e2e/ctl_v3_lock_test.go b/github.com/coreos/etcd/e2e/ctl_v3_lock_test.go index e380f7cd84..ddda3fc03c 100644 --- a/github.com/coreos/etcd/e2e/ctl_v3_lock_test.go +++ b/github.com/coreos/etcd/e2e/ctl_v3_lock_test.go @@ -16,16 +16,49 @@ package e2e import ( "os" + "runtime" "strings" + "syscall" "testing" "time" "github.com/coreos/etcd/pkg/expect" ) -func TestCtlV3Lock(t *testing.T) { testCtl(t, testLock) } +// debugLockSignal forces SIGQUIT to debug etcdctl elect and lock failures +var debugLockSignal os.Signal + +func init() { + // hacks to ignore SIGQUIT debugging for some builds + switch { + case os.Getenv("COVERDIR") != "": + // SIGQUIT interferes with coverage collection + debugLockSignal = syscall.SIGTERM + case runtime.GOARCH == "ppc64le": + // ppc64le's signal handling won't kill processes with SIGQUIT + // in the same way as amd64/i386, so processes won't terminate + // as expected. Since this debugging code for CI, just ignore + // ppc64le. 
+ debugLockSignal = syscall.SIGKILL + default: + // stack dumping OK + debugLockSignal = syscall.SIGQUIT + } +} + +func TestCtlV3Lock(t *testing.T) { + oldenv := os.Getenv("EXPECT_DEBUG") + defer os.Setenv("EXPECT_DEBUG", oldenv) + os.Setenv("EXPECT_DEBUG", "1") + + testCtl(t, testLock) +} func testLock(cx ctlCtx) { + // debugging for #6464 + sig := cx.epc.WithStopSignal(debugLockSignal) + defer cx.epc.WithStopSignal(sig) + name := "a" holder, ch, err := ctlV3Lock(cx, name) @@ -70,7 +103,7 @@ func testLock(cx ctlCtx) { if err = blocked.Signal(os.Interrupt); err != nil { cx.t.Fatal(err) } - if err = blocked.Close(); err != nil { + if err = closeWithTimeout(blocked, time.Second); err != nil { cx.t.Fatal(err) } @@ -78,7 +111,7 @@ func testLock(cx ctlCtx) { if err = holder.Signal(os.Interrupt); err != nil { cx.t.Fatal(err) } - if err = holder.Close(); err != nil { + if err = closeWithTimeout(holder, time.Second); err != nil { cx.t.Fatal(err) } @@ -102,6 +135,7 @@ func ctlV3Lock(cx ctlCtx, name string) (*expect.ExpectProcess, <-chan string, er close(outc) return proc, outc, err } + proc.StopSignal = debugLockSignal go func() { s, xerr := proc.ExpectFunc(func(string) bool { return true }) if xerr != nil { diff --git a/github.com/coreos/etcd/e2e/ctl_v3_migrate_test.go b/github.com/coreos/etcd/e2e/ctl_v3_migrate_test.go index 3136c4920f..7252fe0a3a 100644 --- a/github.com/coreos/etcd/e2e/ctl_v3_migrate_test.go +++ b/github.com/coreos/etcd/e2e/ctl_v3_migrate_test.go @@ -48,8 +48,8 @@ func TestCtlV3Migrate(t *testing.T) { } } - dataDir := epc.procs[0].cfg.dataDirPath - if err := epc.StopAll(); err != nil { + dataDir := epc.procs[0].Config().dataDirPath + if err := epc.Stop(); err != nil { t.Fatalf("error closing etcd processes (%v)", err) } @@ -65,8 +65,8 @@ func TestCtlV3Migrate(t *testing.T) { t.Fatal(err) } - epc.procs[0].cfg.keepDataDir = true - if err := epc.RestartAll(); err != nil { + epc.procs[0].Config().keepDataDir = true + if err := epc.Restart(); err != nil { t.Fatal(err) } @@ -75,7 +75,7 @@ func TestCtlV3Migrate(t *testing.T) { t.Fatal(err) } cli, err := clientv3.New(clientv3.Config{ - Endpoints: epc.grpcEndpoints(), + Endpoints: epc.EndpointsV3(), DialTimeout: 3 * time.Second, }) if err != nil { diff --git a/github.com/coreos/etcd/e2e/ctl_v3_move_leader_test.go b/github.com/coreos/etcd/e2e/ctl_v3_move_leader_test.go new file mode 100644 index 0000000000..eb2afda5f0 --- /dev/null +++ b/github.com/coreos/etcd/e2e/ctl_v3_move_leader_test.go @@ -0,0 +1,92 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
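
The EXPECT_DEBUG setting (presumably enabling verbose output from the expect package) and the debugLockSignal machinery above exist to diagnose hangs in the lock and elect tests (#6464, #6934): when a wedged etcdctl child has to be stopped, sending SIGQUIT makes the Go runtime dump every goroutine's stack instead of exiting silently. A standalone sketch of the same pattern outside the expect harness (illustrative only; the binary name, lock key, and timeout are assumptions):

    package main

    import (
        "log"
        "os"
        "os/exec"
        "syscall"
        "time"
    )

    func main() {
        cmd := exec.Command("etcdctl", "lock", "demo-lock")
        cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
        if err := cmd.Start(); err != nil {
            log.Fatal(err)
        }
        // If the child wedges, SIGQUIT makes the Go runtime print all goroutine
        // stacks before the process dies, which is the effect debugLockSignal
        // has on the spawned etcdctl in TestCtlV3Lock and TestCtlV3Elect.
        timer := time.AfterFunc(10*time.Second, func() {
            _ = cmd.Process.Signal(syscall.SIGQUIT)
        })
        defer timer.Stop()
        if err := cmd.Wait(); err != nil {
            log.Printf("etcdctl exited: %v", err)
        }
    }
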
+ +package e2e + +import ( + "context" + "fmt" + "os" + "testing" + "time" + + "github.com/coreos/etcd/clientv3" + "github.com/coreos/etcd/pkg/testutil" + "github.com/coreos/etcd/pkg/types" +) + +func TestCtlV3MoveLeader(t *testing.T) { + defer testutil.AfterTest(t) + + epc := setupEtcdctlTest(t, &configNoTLS, true) + defer func() { + if errC := epc.Close(); errC != nil { + t.Fatalf("error closing etcd processes (%v)", errC) + } + }() + + var leadIdx int + var leaderID uint64 + var transferee uint64 + for i, ep := range epc.EndpointsV3() { + cli, err := clientv3.New(clientv3.Config{ + Endpoints: []string{ep}, + DialTimeout: 3 * time.Second, + }) + if err != nil { + t.Fatal(err) + } + resp, err := cli.Status(context.Background(), ep) + if err != nil { + t.Fatal(err) + } + cli.Close() + + if resp.Header.GetMemberId() == resp.Leader { + leadIdx = i + leaderID = resp.Leader + } else { + transferee = resp.Header.GetMemberId() + } + } + + os.Setenv("ETCDCTL_API", "3") + defer os.Unsetenv("ETCDCTL_API") + cx := ctlCtx{ + t: t, + cfg: configNoTLS, + dialTimeout: 7 * time.Second, + epc: epc, + } + + tests := []struct { + prefixes []string + expect string + }{ + { // request to non-leader + cx.prefixArgs([]string{cx.epc.EndpointsV3()[(leadIdx+1)%3]}), + "no leader endpoint given at ", + }, + { // request to leader + cx.prefixArgs([]string{cx.epc.EndpointsV3()[leadIdx]}), + fmt.Sprintf("Leadership transferred from %s to %s", types.ID(leaderID), types.ID(transferee)), + }, + } + for i, tc := range tests { + cmdArgs := append(tc.prefixes, "move-leader", types.ID(transferee).String()) + if err := spawnWithExpect(cmdArgs, tc.expect); err != nil { + t.Fatalf("#%d: %v", i, err) + } + } +} diff --git a/github.com/coreos/etcd/e2e/ctl_v3_snapshot_test.go b/github.com/coreos/etcd/e2e/ctl_v3_snapshot_test.go index d2394885d6..234d5b037d 100644 --- a/github.com/coreos/etcd/e2e/ctl_v3_snapshot_test.go +++ b/github.com/coreos/etcd/e2e/ctl_v3_snapshot_test.go @@ -152,7 +152,7 @@ func TestIssue6361(t *testing.T) { }() dialTimeout := 7 * time.Second - prefixArgs := []string{ctlBinPath, "--endpoints", strings.Join(epc.grpcEndpoints(), ","), "--dial-timeout", dialTimeout.String()} + prefixArgs := []string{ctlBinPath, "--endpoints", strings.Join(epc.EndpointsV3(), ","), "--dial-timeout", dialTimeout.String()} // write some keys kvs := []kv{{"foo1", "val1"}, {"foo2", "val2"}, {"foo3", "val3"}} @@ -170,7 +170,7 @@ func TestIssue6361(t *testing.T) { t.Fatal(err) } - if err = epc.processes()[0].Stop(); err != nil { + if err = epc.procs[0].Stop(); err != nil { t.Fatal(err) } @@ -178,19 +178,19 @@ func TestIssue6361(t *testing.T) { defer os.RemoveAll(newDataDir) // etcdctl restore the snapshot - err = spawnWithExpect([]string{ctlBinPath, "snapshot", "restore", fpath, "--name", epc.procs[0].cfg.name, "--initial-cluster", epc.procs[0].cfg.initialCluster, "--initial-cluster-token", epc.procs[0].cfg.initialToken, "--initial-advertise-peer-urls", epc.procs[0].cfg.purl.String(), "--data-dir", newDataDir}, "membership: added member") + err = spawnWithExpect([]string{ctlBinPath, "snapshot", "restore", fpath, "--name", epc.procs[0].Config().name, "--initial-cluster", epc.procs[0].Config().initialCluster, "--initial-cluster-token", epc.procs[0].Config().initialToken, "--initial-advertise-peer-urls", epc.procs[0].Config().purl.String(), "--data-dir", newDataDir}, "membership: added member") if err != nil { t.Fatal(err) } // start the etcd member using the restored snapshot - epc.procs[0].cfg.dataDirPath = newDataDir - for i := range 
epc.procs[0].cfg.args { - if epc.procs[0].cfg.args[i] == "--data-dir" { - epc.procs[0].cfg.args[i+1] = newDataDir + epc.procs[0].Config().dataDirPath = newDataDir + for i := range epc.procs[0].Config().args { + if epc.procs[0].Config().args[i] == "--data-dir" { + epc.procs[0].Config().args[i+1] = newDataDir } } - if err = epc.processes()[0].Restart(); err != nil { + if err = epc.procs[0].Restart(); err != nil { t.Fatal(err) } @@ -217,11 +217,11 @@ func TestIssue6361(t *testing.T) { defer os.RemoveAll(newDataDir2) name2 := "infra2" - initialCluster2 := epc.procs[0].cfg.initialCluster + fmt.Sprintf(",%s=%s", name2, peerURL) + initialCluster2 := epc.procs[0].Config().initialCluster + fmt.Sprintf(",%s=%s", name2, peerURL) // start the new member var nepc *expect.ExpectProcess - nepc, err = spawnCmd([]string{epc.procs[0].cfg.execPath, "--name", name2, + nepc, err = spawnCmd([]string{epc.procs[0].Config().execPath, "--name", name2, "--listen-client-urls", clientURL, "--advertise-client-urls", clientURL, "--listen-peer-urls", peerURL, "--initial-advertise-peer-urls", peerURL, "--initial-cluster", initialCluster2, "--initial-cluster-state", "existing", "--data-dir", newDataDir2}) @@ -245,42 +245,3 @@ func TestIssue6361(t *testing.T) { t.Fatal(err) } } - -func TestCtlV3SnapshotWithAuth(t *testing.T) { testCtl(t, snapshotTestWithAuth) } - -func snapshotTestWithAuth(cx ctlCtx) { - maintenanceInitKeys(cx) - - if err := authEnable(cx); err != nil { - cx.t.Fatal(err) - } - - cx.user, cx.pass = "root", "root" - authSetupTestUser(cx) - - fpath := "test.snapshot" - defer os.RemoveAll(fpath) - - // ordinary user cannot save a snapshot - cx.user, cx.pass = "test-user", "pass" - if err := ctlV3SnapshotSave(cx, fpath); err == nil { - cx.t.Fatal("ordinary user should not be able to save a snapshot") - } - - // root can save a snapshot - cx.user, cx.pass = "root", "root" - if err := ctlV3SnapshotSave(cx, fpath); err != nil { - cx.t.Fatalf("snapshotTest ctlV3SnapshotSave error (%v)", err) - } - - st, err := getSnapshotStatus(cx, fpath) - if err != nil { - cx.t.Fatalf("snapshotTest getSnapshotStatus error (%v)", err) - } - if st.Revision != 4 { - cx.t.Fatalf("expected 4, got %d", st.Revision) - } - if st.TotalKey < 3 { - cx.t.Fatalf("expected at least 3, got %d", st.TotalKey) - } -} diff --git a/github.com/coreos/etcd/e2e/ctl_v3_test.go b/github.com/coreos/etcd/e2e/ctl_v3_test.go index a4eab96822..45e2abeecb 100644 --- a/github.com/coreos/etcd/e2e/ctl_v3_test.go +++ b/github.com/coreos/etcd/e2e/ctl_v3_test.go @@ -45,7 +45,7 @@ func TestCtlV3DialWithHTTPScheme(t *testing.T) { } func dialWithSchemeTest(cx ctlCtx) { - cmdArgs := append(cx.prefixArgs(cx.epc.endpoints()), "put", "foo", "bar") + cmdArgs := append(cx.prefixArgs(cx.epc.EndpointsV3()), "put", "foo", "bar") if err := spawnWithExpect(cmdArgs, "OK"); err != nil { cx.t.Fatal(err) } @@ -169,10 +169,6 @@ func testCtl(t *testing.T, testFunc func(ctlCtx), opts ...ctlOption) { } func (cx *ctlCtx) prefixArgs(eps []string) []string { - if len(cx.epc.proxies()) > 0 { // TODO: add proxy check as in v2 - panic("v3 proxy not implemented") - } - fmap := make(map[string]string) fmap["endpoints"] = strings.Join(eps, ",") fmap["dial-timeout"] = cx.dialTimeout.String() @@ -180,6 +176,10 @@ func (cx *ctlCtx) prefixArgs(eps []string) []string { if cx.epc.cfg.isClientAutoTLS { fmap["insecure-transport"] = "false" fmap["insecure-skip-tls-verify"] = "true" + } else if cx.epc.cfg.isClientCRL { + fmap["cacert"] = caPath + fmap["cert"] = revokedCertPath + fmap["key"] = 
revokedPrivateKeyPath } else { fmap["cacert"] = caPath fmap["cert"] = certPath @@ -208,7 +208,7 @@ func (cx *ctlCtx) prefixArgs(eps []string) []string { // PrefixArgs prefixes etcdctl command. // Make sure to unset environment variables after tests. func (cx *ctlCtx) PrefixArgs() []string { - return cx.prefixArgs(cx.epc.grpcEndpoints()) + return cx.prefixArgs(cx.epc.EndpointsV3()) } func isGRPCTimedout(err error) bool { diff --git a/github.com/coreos/etcd/e2e/ctl_v3_txn_test.go b/github.com/coreos/etcd/e2e/ctl_v3_txn_test.go index c6e0b12de5..960e7c06bd 100644 --- a/github.com/coreos/etcd/e2e/ctl_v3_txn_test.go +++ b/github.com/coreos/etcd/e2e/ctl_v3_txn_test.go @@ -119,7 +119,7 @@ func ctlV3Txn(cx ctlCtx, rqs txnRequests) error { return err } - _, err = proc.Expect("success requests (get, put, delete):") + _, err = proc.Expect("success requests (get, put, del):") if err != nil { return err } @@ -132,7 +132,7 @@ func ctlV3Txn(cx ctlCtx, rqs txnRequests) error { return err } - _, err = proc.Expect("failure requests (get, put, delete):") + _, err = proc.Expect("failure requests (get, put, del):") if err != nil { return err } diff --git a/github.com/coreos/etcd/e2e/ctl_v3_watch_test.go b/github.com/coreos/etcd/e2e/ctl_v3_watch_test.go index e8a892e0ea..bc9d64ac97 100644 --- a/github.com/coreos/etcd/e2e/ctl_v3_watch_test.go +++ b/github.com/coreos/etcd/e2e/ctl_v3_watch_test.go @@ -86,7 +86,7 @@ func watchTest(cx ctlCtx) { } } -func ctlV3Watch(cx ctlCtx, args []string, kvs ...kv) error { +func setupWatchArgs(cx ctlCtx, args []string) []string { cmdArgs := append(cx.PrefixArgs(), "watch") if cx.interactive { cmdArgs = append(cmdArgs, "--interactive") @@ -94,6 +94,12 @@ func ctlV3Watch(cx ctlCtx, args []string, kvs ...kv) error { cmdArgs = append(cmdArgs, args...) } + return cmdArgs +} + +func ctlV3Watch(cx ctlCtx, args []string, kvs ...kv) error { + cmdArgs := setupWatchArgs(cx, args) + proc, err := spawnCmd(cmdArgs) if err != nil { return err @@ -116,3 +122,28 @@ func ctlV3Watch(cx ctlCtx, args []string, kvs ...kv) error { } return proc.Stop() } + +func ctlV3WatchFailPerm(cx ctlCtx, args []string) error { + cmdArgs := setupWatchArgs(cx, args) + + proc, err := spawnCmd(cmdArgs) + if err != nil { + return err + } + + if cx.interactive { + wl := strings.Join(append([]string{"watch"}, args...), " ") + "\r" + if err = proc.Send(wl); err != nil { + return err + } + } + + // TODO(mitake): after printing accurate error message that includes + // "permission denied", the above string argument of proc.Expect() + // should be updated. 
+ _, err = proc.Expect("watch is canceled by the server") + if err != nil { + return err + } + return proc.Close() +} diff --git a/github.com/coreos/etcd/e2e/docker/Dockerfile b/github.com/coreos/etcd/e2e/docker/Dockerfile new file mode 100644 index 0000000000..c94e1612a2 --- /dev/null +++ b/github.com/coreos/etcd/e2e/docker/Dockerfile @@ -0,0 +1,12 @@ +FROM golang:1.8.3-stretch +LABEL Description="Image for etcd DNS testing" +RUN apt update -y +RUN go get github.com/mattn/goreman +RUN apt install -y bind9 +RUN mkdir /var/bind +RUN chown bind /var/bind +ADD Procfile.tls /Procfile.tls +ADD run.sh /run.sh +ADD named.conf etcd.zone rdns.zone /etc/bind/ +ADD resolv.conf /etc/resolv.conf +CMD ["/run.sh"] \ No newline at end of file diff --git a/github.com/coreos/etcd/e2e/docker/Makefile b/github.com/coreos/etcd/e2e/docker/Makefile new file mode 100644 index 0000000000..7ec14d42cb --- /dev/null +++ b/github.com/coreos/etcd/e2e/docker/Makefile @@ -0,0 +1,7 @@ +# run makefile from repo root + +docker-dns-build: + docker build -t etcd-dns e2e/docker/ + +docker-dns-test: docker-dns-build + docker run --dns 127.0.0.1 --rm -v `pwd`/bin/:/etcd -v `pwd`/integration/fixtures:/certs -w /etcd -t etcd-dns diff --git a/github.com/coreos/etcd/e2e/docker/Procfile.tls b/github.com/coreos/etcd/e2e/docker/Procfile.tls new file mode 100644 index 0000000000..d8b7902056 --- /dev/null +++ b/github.com/coreos/etcd/e2e/docker/Procfile.tls @@ -0,0 +1,6 @@ +# Use goreman to run `go get github.com/mattn/goreman` +etcd1: ./etcd --name infra1 --listen-client-urls https://127.0.0.1:2379 --advertise-client-urls https://m1.etcd.local:2379 --listen-peer-urls https://127.0.0.1:12380 --initial-advertise-peer-urls=https://m1.etcd.local:12380 --initial-cluster-token etcd-cluster-1 --initial-cluster=infra1=https://m1.etcd.local:12380,infra2=https://m2.etcd.local:22380,infra3=https://m3.etcd.local:32380 --initial-cluster-state new --enable-pprof --peer-cert-file=/certs/server-wildcard.crt --peer-key-file=/certs/server-wildcard.key.insecure --peer-client-cert-auth --cert-file=/certs/server-wildcard.crt --key-file=/certs/server-wildcard.key.insecure --peer-trusted-ca-file=/certs/ca.crt --trusted-ca-file=/certs/ca.crt + +etcd2: ./etcd --name infra2 --listen-client-urls https://127.0.0.1:22379 --advertise-client-urls https://m2.etcd.local:22379 --listen-peer-urls https://127.0.0.1:22380 --initial-advertise-peer-urls=https://m2.etcd.local:22380 --initial-cluster-token etcd-cluster-1 --initial-cluster=infra1=https://m1.etcd.local:12380,infra2=https://m2.etcd.local:22380,infra3=https://m3.etcd.local:32380 --initial-cluster-state new --enable-pprof --peer-cert-file=/certs/server-wildcard.crt -peer-key-file=/certs/server-wildcard.key.insecure --peer-client-cert-auth --cert-file=/certs/server-wildcard.crt --key-file=/certs/server-wildcard.key.insecure --peer-trusted-ca-file=/certs/ca.crt --trusted-ca-file=/certs/ca.crt + +etcd3: ./etcd --name infra3 --listen-client-urls https://127.0.0.1:32379 --advertise-client-urls https://m3.etcd.local:32379 --listen-peer-urls https://127.0.0.1:32380 --initial-advertise-peer-urls=https://m3.etcd.local:32380 --initial-cluster-token etcd-cluster-1 --initial-cluster=infra1=https://m1.etcd.local:12380,infra2=https://m2.etcd.local:22380,infra3=https://m3.etcd.local:32380 --initial-cluster-state new --enable-pprof --peer-cert-file=/certs/server-wildcard.crt --peer-key-file=/certs/server-wildcard.key.insecure --peer-client-cert-auth --cert-file=/certs/server-wildcard.crt 
--key-file=/certs/server-wildcard.key.insecure --peer-trusted-ca-file=/certs/ca.crt --trusted-ca-file=/certs/ca.crt diff --git a/github.com/coreos/etcd/e2e/docker/etcd.zone b/github.com/coreos/etcd/e2e/docker/etcd.zone new file mode 100644 index 0000000000..03c15fe8e6 --- /dev/null +++ b/github.com/coreos/etcd/e2e/docker/etcd.zone @@ -0,0 +1,14 @@ +$TTL 86400 +@ IN SOA etcdns.local. root.etcdns.local. ( + 100500 ; Serial + 604800 ; Refresh + 86400 ; Retry + 2419200 ; Expire + 86400 ) ; Negative Cache TTL + IN NS ns.etcdns.local. + IN A 127.0.0.1 + +ns IN A 127.0.0.1 +m1 IN A 127.0.0.1 +m2 IN A 127.0.0.1 +m3 IN A 127.0.0.1 diff --git a/github.com/coreos/etcd/e2e/docker/named.conf b/github.com/coreos/etcd/e2e/docker/named.conf new file mode 100644 index 0000000000..83549305c3 --- /dev/null +++ b/github.com/coreos/etcd/e2e/docker/named.conf @@ -0,0 +1,23 @@ +options { + directory "/var/bind"; + listen-on { 127.0.0.1; }; + listen-on-v6 { none; }; + allow-transfer { + none; + }; + // If you have problems and are behind a firewall: + query-source address * port 53; + pid-file "/var/run/named/named.pid"; + allow-recursion { none; }; + recursion no; +}; + +zone "etcd.local" IN { + type master; + file "/etc/bind/etcd.zone"; +}; + +zone "0.0.127.in-addr.arpa" { + type master; + file "/etc/bind/rdns.zone"; +}; diff --git a/github.com/coreos/etcd/e2e/docker/rdns.zone b/github.com/coreos/etcd/e2e/docker/rdns.zone new file mode 100644 index 0000000000..fb71b30b1f --- /dev/null +++ b/github.com/coreos/etcd/e2e/docker/rdns.zone @@ -0,0 +1,13 @@ +$TTL 86400 +@ IN SOA etcdns.local. root.etcdns.local. ( + 100500 ; Serial + 604800 ; Refresh + 86400 ; Retry + 2419200 ; Expire + 86400 ) ; Negative Cache TTL + IN NS ns.etcdns.local. + IN A 127.0.0.1 + +1 IN PTR m1.etcd.local. +1 IN PTR m2.etcd.local. +1 IN PTR m3.etcd.local. diff --git a/github.com/coreos/etcd/e2e/docker/resolv.conf b/github.com/coreos/etcd/e2e/docker/resolv.conf new file mode 100644 index 0000000000..bbc8559cd5 --- /dev/null +++ b/github.com/coreos/etcd/e2e/docker/resolv.conf @@ -0,0 +1 @@ +nameserver 127.0.0.1 diff --git a/github.com/coreos/etcd/e2e/docker/run.sh b/github.com/coreos/etcd/e2e/docker/run.sh new file mode 100755 index 0000000000..e020bcbeca --- /dev/null +++ b/github.com/coreos/etcd/e2e/docker/run.sh @@ -0,0 +1,8 @@ +#!/bin/sh + +/etc/init.d/bind9 start +# get rid of hosts so go lookup won't resolve 127.0.0.1 to localhost +cat /dev/null >/etc/hosts +goreman -f /Procfile.tls start & +sleep 5s +ETCDCTL_API=3 ./etcdctl --cacert=/certs/ca.crt --endpoints=https://m1.etcd.local:2379 put abc def diff --git a/github.com/coreos/etcd/e2e/etcd_config_test.go b/github.com/coreos/etcd/e2e/etcd_config_test.go index 9e308c3bcb..e7531866ad 100644 --- a/github.com/coreos/etcd/e2e/etcd_config_test.go +++ b/github.com/coreos/etcd/e2e/etcd_config_test.go @@ -25,7 +25,7 @@ func TestEtcdExampleConfig(t *testing.T) { if err != nil { t.Fatal(err) } - if err = waitReadyExpectProc(proc, false); err != nil { + if err = waitReadyExpectProc(proc, etcdServerReadyLines); err != nil { t.Fatal(err) } if err = proc.Stop(); err != nil { diff --git a/github.com/coreos/etcd/e2e/etcd_process.go b/github.com/coreos/etcd/e2e/etcd_process.go new file mode 100644 index 0000000000..cfde0255a6 --- /dev/null +++ b/github.com/coreos/etcd/e2e/etcd_process.go @@ -0,0 +1,134 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package e2e + +import ( + "fmt" + "net/url" + "os" + + "github.com/coreos/etcd/pkg/expect" + "github.com/coreos/etcd/pkg/fileutil" +) + +var etcdServerReadyLines = []string{"enabled capabilities for version", "published"} + +// etcdProcess is a process that serves etcd requests. +type etcdProcess interface { + EndpointsV2() []string + EndpointsV3() []string + + Start() error + Restart() error + Stop() error + Close() error + WithStopSignal(sig os.Signal) os.Signal + Config() *etcdServerProcessConfig +} + +type etcdServerProcess struct { + cfg *etcdServerProcessConfig + proc *expect.ExpectProcess + donec chan struct{} // closed when Interact() terminates +} + +type etcdServerProcessConfig struct { + execPath string + args []string + tlsArgs []string + + dataDirPath string + keepDataDir bool + + name string + + purl url.URL + + acurl string + + initialToken string + initialCluster string +} + +func newEtcdServerProcess(cfg *etcdServerProcessConfig) (*etcdServerProcess, error) { + if !fileutil.Exist(cfg.execPath) { + return nil, fmt.Errorf("could not find etcd binary") + } + if !cfg.keepDataDir { + if err := os.RemoveAll(cfg.dataDirPath); err != nil { + return nil, err + } + } + return &etcdServerProcess{cfg: cfg, donec: make(chan struct{})}, nil +} + +func (ep *etcdServerProcess) EndpointsV2() []string { return []string{ep.cfg.acurl} } +func (ep *etcdServerProcess) EndpointsV3() []string { return ep.EndpointsV2() } + +func (ep *etcdServerProcess) Start() error { + if ep.proc != nil { + panic("already started") + } + proc, err := spawnCmd(append([]string{ep.cfg.execPath}, ep.cfg.args...)) + if err != nil { + return err + } + ep.proc = proc + return ep.waitReady() +} + +func (ep *etcdServerProcess) Restart() error { + if err := ep.Stop(); err != nil { + return err + } + ep.donec = make(chan struct{}) + return ep.Start() +} + +func (ep *etcdServerProcess) Stop() error { + if ep == nil || ep.proc == nil { + return nil + } + if err := ep.proc.Stop(); err != nil { + return err + } + ep.proc = nil + <-ep.donec + ep.donec = make(chan struct{}) + if ep.cfg.purl.Scheme == "unix" || ep.cfg.purl.Scheme == "unixs" { + os.Remove(ep.cfg.purl.Host + ep.cfg.purl.Path) + } + return nil +} + +func (ep *etcdServerProcess) Close() error { + if err := ep.Stop(); err != nil { + return err + } + return os.RemoveAll(ep.cfg.dataDirPath) +} + +func (ep *etcdServerProcess) WithStopSignal(sig os.Signal) os.Signal { + ret := ep.proc.StopSignal + ep.proc.StopSignal = sig + return ret +} + +func (ep *etcdServerProcess) waitReady() error { + defer close(ep.donec) + return waitReadyExpectProc(ep.proc, etcdServerReadyLines) +} + +func (ep *etcdServerProcess) Config() *etcdServerProcessConfig { return ep.cfg } diff --git a/github.com/coreos/etcd/e2e/etcd_release_upgrade_test.go b/github.com/coreos/etcd/e2e/etcd_release_upgrade_test.go index cb2ee34d3f..6b1d42323e 100644 --- a/github.com/coreos/etcd/e2e/etcd_release_upgrade_test.go +++ b/github.com/coreos/etcd/e2e/etcd_release_upgrade_test.go @@ -23,6 +23,7 @@ import ( "github.com/coreos/etcd/pkg/fileutil" "github.com/coreos/etcd/pkg/testutil" + 
"github.com/coreos/etcd/version" ) // TestReleaseUpgrade ensures that changes to master branch does not affect @@ -53,7 +54,7 @@ func TestReleaseUpgrade(t *testing.T) { // so there's a window at boot time where it doesn't have V3rpcCapability enabled // poll /version until etcdcluster is >2.3.x before making v3 requests for i := 0; i < 7; i++ { - if err = cURLGet(epc, cURLReq{endpoint: "/version", expected: `"etcdcluster":"3.1`}); err != nil { + if err = cURLGet(epc, cURLReq{endpoint: "/version", expected: `"etcdcluster":"` + version.Cluster(version.Version)}); err != nil { t.Logf("#%d: v3 is not ready yet (%v)", i, err) time.Sleep(time.Second) continue @@ -87,8 +88,8 @@ func TestReleaseUpgrade(t *testing.T) { if err := epc.procs[i].Stop(); err != nil { t.Fatalf("#%d: error closing etcd process (%v)", i, err) } - epc.procs[i].cfg.execPath = binDir + "/etcd" - epc.procs[i].cfg.keepDataDir = true + epc.procs[i].Config().execPath = binDir + "/etcd" + epc.procs[i].Config().keepDataDir = true if err := epc.procs[i].Restart(); err != nil { t.Fatalf("error restarting etcd process (%v)", err) @@ -154,8 +155,8 @@ func TestReleaseUpgradeWithRestart(t *testing.T) { wg.Add(len(epc.procs)) for i := range epc.procs { go func(i int) { - epc.procs[i].cfg.execPath = binDir + "/etcd" - epc.procs[i].cfg.keepDataDir = true + epc.procs[i].Config().execPath = binDir + "/etcd" + epc.procs[i].Config().keepDataDir = true if err := epc.procs[i].Restart(); err != nil { t.Fatalf("error restarting etcd process (%v)", err) } diff --git a/github.com/coreos/etcd/e2e/etcd_spawn_cov.go b/github.com/coreos/etcd/e2e/etcd_spawn_cov.go index 840cde4935..ca45a571ef 100644 --- a/github.com/coreos/etcd/e2e/etcd_spawn_cov.go +++ b/github.com/coreos/etcd/e2e/etcd_spawn_cov.go @@ -33,20 +33,7 @@ const noOutputLineCount = 2 // cov-enabled binaries emit PASS and coverage count func spawnCmd(args []string) (*expect.ExpectProcess, error) { if args[0] == binPath { - covArgs, err := getCovArgs() - if err != nil { - return nil, err - } - ep, err := expect.NewExpectWithEnv(binDir+"/etcd_test", covArgs, args2env(args[1:])) - if err != nil { - return nil, err - } - // ep sends SIGTERM to etcd_test process on ep.close() - // allowing the process to exit gracefully in order to generate a coverage report. - // note: go runtime ignores SIGINT but not SIGTERM - // if e2e test is run as a background process. - ep.StopSignal = syscall.SIGTERM - return ep, nil + return spawnEtcd(args) } if args[0] == ctlBinPath { @@ -56,7 +43,8 @@ func spawnCmd(args []string) (*expect.ExpectProcess, error) { } // avoid test flag conflicts in coverage enabled etcdctl by putting flags in ETCDCTL_ARGS ctl_cov_env := []string{ - "ETCDCTL_ARGS" + "=" + strings.Join(args, "\xff"), + // was \xff, but that's used for testing boundary conditions; 0xe7cd should be safe + "ETCDCTL_ARGS=" + strings.Join(args, "\xe7\xcd"), } // when withFlagByEnv() is used in testCtl(), env variables for ctl is set to os.env. // they must be included in ctl_cov_env. @@ -72,6 +60,32 @@ func spawnCmd(args []string) (*expect.ExpectProcess, error) { return expect.NewExpect(args[0], args[1:]...) 
} +func spawnEtcd(args []string) (*expect.ExpectProcess, error) { + covArgs, err := getCovArgs() + if err != nil { + return nil, err + } + + env := []string{} + if args[1] == "grpc-proxy" { + // avoid test flag conflicts in coverage enabled etcd by putting flags in ETCDCOV_ARGS + env = append(os.Environ(), "ETCDCOV_ARGS="+strings.Join(args, "\xe7\xcd")) + } else { + env = args2env(args[1:]) + } + + ep, err := expect.NewExpectWithEnv(binDir+"/etcd_test", covArgs, env) + if err != nil { + return nil, err + } + // ep sends SIGTERM to etcd_test process on ep.close() + // allowing the process to exit gracefully in order to generate a coverage report. + // note: go runtime ignores SIGINT but not SIGTERM + // if e2e test is run as a background process. + ep.StopSignal = syscall.SIGTERM + return ep, nil +} + func getCovArgs() ([]string, error) { coverPath := os.Getenv("COVERDIR") if !filepath.IsAbs(coverPath) { @@ -91,7 +105,7 @@ func getCovArgs() ([]string, error) { func args2env(args []string) []string { var covEnvs []string - for i := range args[1:] { + for i := range args { if !strings.HasPrefix(args[i], "--") { continue } diff --git a/github.com/coreos/etcd/e2e/gateway_test.go b/github.com/coreos/etcd/e2e/gateway_test.go index 9eee0170e3..6539e6f84c 100644 --- a/github.com/coreos/etcd/e2e/gateway_test.go +++ b/github.com/coreos/etcd/e2e/gateway_test.go @@ -31,9 +31,9 @@ func TestGateway(t *testing.T) { if err != nil { t.Fatal(err) } - defer ec.StopAll() + defer ec.Stop() - eps := strings.Join(ec.grpcEndpoints(), ",") + eps := strings.Join(ec.EndpointsV3(), ",") p := startGateway(t, eps) defer p.Stop() diff --git a/github.com/coreos/etcd/e2e/main_test.go b/github.com/coreos/etcd/e2e/main_test.go index 5958950784..858018a26a 100644 --- a/github.com/coreos/etcd/e2e/main_test.go +++ b/github.com/coreos/etcd/e2e/main_test.go @@ -13,8 +13,20 @@ import ( "github.com/coreos/etcd/pkg/testutil" ) -var binDir string -var certDir string +var ( + binDir string + certDir string + + binPath string + ctlBinPath string + certPath string + privateKeyPath string + caPath string + + crlPath string + revokedCertPath string + revokedPrivateKeyPath string +) func TestMain(m *testing.M) { os.Setenv("ETCD_UNSUPPORTED_ARCH", runtime.GOARCH) @@ -24,6 +36,15 @@ func TestMain(m *testing.M) { flag.StringVar(&certDir, "cert-dir", "../integration/fixtures", "The directory for store certificate files.") flag.Parse() + binPath = binDir + "/etcd" + ctlBinPath = binDir + "/etcdctl" + certPath = certDir + "/server.crt" + privateKeyPath = certDir + "/server.key.insecure" + caPath = certDir + "/ca.crt" + revokedCertPath = certDir + "/server-revoked.crt" + revokedPrivateKeyPath = certDir + "/server-revoked.key.insecure" + crlPath = certDir + "/revoke.crl" + v := m.Run() if v == 0 && testutil.CheckLeakedGoroutine() { os.Exit(1) diff --git a/github.com/coreos/etcd/e2e/util.go b/github.com/coreos/etcd/e2e/util.go new file mode 100644 index 0000000000..e4125137a6 --- /dev/null +++ b/github.com/coreos/etcd/e2e/util.go @@ -0,0 +1,91 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package e2e + +import ( + "fmt" + "strings" + "time" + + "github.com/coreos/etcd/pkg/expect" +) + +func waitReadyExpectProc(exproc *expect.ExpectProcess, readyStrs []string) error { + c := 0 + matchSet := func(l string) bool { + for _, s := range readyStrs { + if strings.Contains(l, s) { + c++ + break + } + } + return c == len(readyStrs) + } + _, err := exproc.ExpectFunc(matchSet) + return err +} + +func spawnWithExpect(args []string, expected string) error { + return spawnWithExpects(args, []string{expected}...) +} + +func spawnWithExpects(args []string, xs ...string) error { + proc, err := spawnCmd(args) + if err != nil { + return err + } + // process until either stdout or stderr contains + // the expected string + var ( + lines []string + lineFunc = func(txt string) bool { return true } + ) + for _, txt := range xs { + for { + l, lerr := proc.ExpectFunc(lineFunc) + if lerr != nil { + proc.Close() + return fmt.Errorf("%v (expected %q, got %q)", lerr, txt, lines) + } + lines = append(lines, l) + if strings.Contains(l, txt) { + break + } + } + } + perr := proc.Close() + if len(xs) == 0 && proc.LineCount() != noOutputLineCount { // expect no output + return fmt.Errorf("unexpected output (got lines %q, line count %d)", lines, proc.LineCount()) + } + return perr +} + +func closeWithTimeout(p *expect.ExpectProcess, d time.Duration) error { + errc := make(chan error, 1) + go func() { errc <- p.Close() }() + select { + case err := <-errc: + return err + case <-time.After(d): + p.Stop() + // retry close after stopping to collect SIGQUIT data, if any + closeWithTimeout(p, time.Second) + } + return fmt.Errorf("took longer than %v to Close process %+v", d, p) +} + +func toTLS(s string) string { + return strings.Replace(s, "http://", "https://", 1) +} diff --git a/github.com/coreos/etcd/e2e/v2_curl_test.go b/github.com/coreos/etcd/e2e/v2_curl_test.go index 289d64c0d2..2322a8549f 100644 --- a/github.com/coreos/etcd/e2e/v2_curl_test.go +++ b/github.com/coreos/etcd/e2e/v2_curl_test.go @@ -23,15 +23,12 @@ import ( "github.com/coreos/etcd/pkg/testutil" ) -func TestV2CurlNoTLS(t *testing.T) { testCurlPutGet(t, &configNoTLS) } -func TestV2CurlAutoTLS(t *testing.T) { testCurlPutGet(t, &configAutoTLS) } -func TestV2CurlAllTLS(t *testing.T) { testCurlPutGet(t, &configTLS) } -func TestV2CurlPeerTLS(t *testing.T) { testCurlPutGet(t, &configPeerTLS) } -func TestV2CurlClientTLS(t *testing.T) { testCurlPutGet(t, &configClientTLS) } -func TestV2CurlProxyNoTLS(t *testing.T) { testCurlPutGet(t, &configWithProxy) } -func TestV2CurlProxyTLS(t *testing.T) { testCurlPutGet(t, &configWithProxyTLS) } -func TestV2CurlProxyPeerTLS(t *testing.T) { testCurlPutGet(t, &configWithProxyPeerTLS) } -func TestV2CurlClientBoth(t *testing.T) { testCurlPutGet(t, &configClientBoth) } +func TestV2CurlNoTLS(t *testing.T) { testCurlPutGet(t, &configNoTLS) } +func TestV2CurlAutoTLS(t *testing.T) { testCurlPutGet(t, &configAutoTLS) } +func TestV2CurlAllTLS(t *testing.T) { testCurlPutGet(t, &configTLS) } +func TestV2CurlPeerTLS(t *testing.T) { testCurlPutGet(t, &configPeerTLS) } +func TestV2CurlClientTLS(t *testing.T) { testCurlPutGet(t, &configClientTLS) } +func TestV2CurlClientBoth(t *testing.T) { testCurlPutGet(t, &configClientBoth) } func testCurlPutGet(t *testing.T, cfg *etcdProcessClusterConfig) { defer testutil.AfterTest(t) @@ -127,6 +124,7 @@ type cURLReq struct { value string expected string + header string } // cURLPrefixArgs 
builds the beginning of a curl command for a given key @@ -134,14 +132,14 @@ type cURLReq struct { func cURLPrefixArgs(clus *etcdProcessCluster, method string, req cURLReq) []string { var ( cmdArgs = []string{"curl"} - acurl = clus.procs[rand.Intn(clus.cfg.clusterSize)].cfg.acurl + acurl = clus.procs[rand.Intn(clus.cfg.clusterSize)].Config().acurl ) if req.isTLS { if clus.cfg.clientTLS != clientTLSAndNonTLS { panic("should not use cURLPrefixArgsUseTLS when serving only TLS or non-TLS") } cmdArgs = append(cmdArgs, "--cacert", caPath, "--cert", certPath, "--key", privateKeyPath) - acurl = clus.procs[rand.Intn(clus.cfg.clusterSize)].cfg.acurltls + acurl = toTLS(clus.procs[rand.Intn(clus.cfg.clusterSize)].Config().acurl) } else if clus.cfg.clientTLS == clientTLS { cmdArgs = append(cmdArgs, "--cacert", caPath, "--cert", certPath, "--key", privateKeyPath) } @@ -156,6 +154,10 @@ func cURLPrefixArgs(clus *etcdProcessCluster, method string, req cURLReq) []stri cmdArgs = append(cmdArgs, "-m", fmt.Sprintf("%d", req.timeout)) } + if req.header != "" { + cmdArgs = append(cmdArgs, "-H", req.header) + } + switch method { case "POST", "PUT": dt := req.value diff --git a/github.com/coreos/etcd/e2e/v3_curl_test.go b/github.com/coreos/etcd/e2e/v3_curl_test.go index 0f7193a857..a15fa21421 100644 --- a/github.com/coreos/etcd/e2e/v3_curl_test.go +++ b/github.com/coreos/etcd/e2e/v3_curl_test.go @@ -20,6 +20,8 @@ import ( pb "github.com/coreos/etcd/etcdserver/etcdserverpb" "github.com/coreos/etcd/pkg/testutil" + + "github.com/grpc-ecosystem/grpc-gateway/runtime" ) func TestV3CurlPutGetNoTLS(t *testing.T) { testCurlPutGetGRPCGateway(t, &configNoTLS) } @@ -111,3 +113,136 @@ func TestV3CurlWatch(t *testing.T) { t.Fatal(err) } } + +func TestV3CurlTxn(t *testing.T) { + defer testutil.AfterTest(t) + epc, err := newEtcdProcessCluster(&configNoTLS) + if err != nil { + t.Fatalf("could not start etcd process cluster (%v)", err) + } + defer func() { + if cerr := epc.Close(); err != nil { + t.Fatalf("error closing etcd processes (%v)", cerr) + } + }() + + txn := &pb.TxnRequest{ + Compare: []*pb.Compare{ + { + Key: []byte("foo"), + Result: pb.Compare_EQUAL, + Target: pb.Compare_CREATE, + TargetUnion: &pb.Compare_CreateRevision{0}, + }, + }, + Success: []*pb.RequestOp{ + { + Request: &pb.RequestOp_RequestPut{ + RequestPut: &pb.PutRequest{ + Key: []byte("foo"), + Value: []byte("bar"), + }, + }, + }, + }, + } + m := &runtime.JSONPb{} + jsonDat, jerr := m.Marshal(txn) + if jerr != nil { + t.Fatal(jerr) + } + expected := `"succeeded":true,"responses":[{"response_put":{"header":{"revision":"2"}}}]` + if err = cURLPost(epc, cURLReq{endpoint: "/v3alpha/kv/txn", value: string(jsonDat), expected: expected}); err != nil { + t.Fatalf("failed txn with curl (%v)", err) + } + + // was crashing etcd server + malformed := `{"compare":[{"result":0,"target":1,"key":"Zm9v","TargetUnion":null}],"success":[{"Request":{"RequestPut":{"key":"Zm9v","value":"YmFy"}}}]}` + if err = cURLPost(epc, cURLReq{endpoint: "/v3alpha/kv/txn", value: malformed, expected: "error"}); err != nil { + t.Fatalf("failed put with curl (%v)", err) + } +} + +func TestV3CurlAuth(t *testing.T) { + defer testutil.AfterTest(t) + epc, err := newEtcdProcessCluster(&configNoTLS) + if err != nil { + t.Fatalf("could not start etcd process cluster (%v)", err) + } + defer func() { + if cerr := epc.Close(); err != nil { + t.Fatalf("error closing etcd processes (%v)", cerr) + } + }() + + // create root user + userreq, err := json.Marshal(&pb.AuthUserAddRequest{Name: string("root"), 
Password: string("toor")}) + testutil.AssertNil(t, err) + + if err = cURLPost(epc, cURLReq{endpoint: "/v3alpha/auth/user/add", value: string(userreq), expected: "revision"}); err != nil { + t.Fatalf("failed add user with curl (%v)", err) + } + + // create root role + rolereq, err := json.Marshal(&pb.AuthRoleAddRequest{Name: string("root")}) + testutil.AssertNil(t, err) + + if err = cURLPost(epc, cURLReq{endpoint: "/v3alpha/auth/role/add", value: string(rolereq), expected: "revision"}); err != nil { + t.Fatalf("failed create role with curl (%v)", err) + } + + // grant root role + grantrolereq, err := json.Marshal(&pb.AuthUserGrantRoleRequest{User: string("root"), Role: string("root")}) + testutil.AssertNil(t, err) + + if err = cURLPost(epc, cURLReq{endpoint: "/v3alpha/auth/user/grant", value: string(grantrolereq), expected: "revision"}); err != nil { + t.Fatalf("failed grant role with curl (%v)", err) + } + + // enable auth + if err = cURLPost(epc, cURLReq{endpoint: "/v3alpha/auth/enable", value: string("{}"), expected: "revision"}); err != nil { + t.Fatalf("failed enable auth with curl (%v)", err) + } + + // put "bar" into "foo" + putreq, err := json.Marshal(&pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}) + testutil.AssertNil(t, err) + + // fail put no auth + if err = cURLPost(epc, cURLReq{endpoint: "/v3alpha/kv/put", value: string(putreq), expected: "error"}); err != nil { + t.Fatalf("failed no auth put with curl (%v)", err) + } + + // auth request + authreq, err := json.Marshal(&pb.AuthenticateRequest{Name: string("root"), Password: string("toor")}) + testutil.AssertNil(t, err) + + var ( + authHeader string + cmdArgs []string + lineFunc = func(txt string) bool { return true } + ) + + cmdArgs = cURLPrefixArgs(epc, "POST", cURLReq{endpoint: "/v3alpha/auth/authenticate", value: string(authreq)}) + proc, err := spawnCmd(cmdArgs) + testutil.AssertNil(t, err) + + cURLRes, err := proc.ExpectFunc(lineFunc) + testutil.AssertNil(t, err) + + authRes := make(map[string]interface{}) + testutil.AssertNil(t, json.Unmarshal([]byte(cURLRes), &authRes)) + + token, ok := authRes["token"].(string) + if !ok { + t.Fatalf("failed invalid token in authenticate response with curl") + } + + authHeader = "Authorization : " + token + + // put with auth + if err = cURLPost(epc, cURLReq{endpoint: "/v3alpha/kv/put", value: string(putreq), header: authHeader, expected: "revision"}); err != nil { + t.Fatalf("failed auth put with curl (%v)", err) + } + +} diff --git a/github.com/coreos/etcd/embed/config.go b/github.com/coreos/etcd/embed/config.go index e06a61472a..2fb2a3280e 100644 --- a/github.com/coreos/etcd/embed/config.go +++ b/github.com/coreos/etcd/embed/config.go @@ -20,12 +20,13 @@ import ( "net" "net/http" "net/url" + "path/filepath" "strings" - "github.com/coreos/etcd/discovery" "github.com/coreos/etcd/etcdserver" "github.com/coreos/etcd/pkg/cors" "github.com/coreos/etcd/pkg/netutil" + "github.com/coreos/etcd/pkg/srv" "github.com/coreos/etcd/pkg/transport" "github.com/coreos/etcd/pkg/types" @@ -37,9 +38,11 @@ const ( ClusterStateFlagNew = "new" ClusterStateFlagExisting = "existing" - DefaultName = "default" - DefaultMaxSnapshots = 5 - DefaultMaxWALs = 5 + DefaultName = "default" + DefaultMaxSnapshots = 5 + DefaultMaxWALs = 5 + DefaultMaxTxnOps = uint(128) + DefaultMaxRequestBytes = 1.5 * 1024 * 1024 DefaultListenPeerURLs = "http://localhost:2380" DefaultListenClientURLs = "http://localhost:2379" @@ -78,6 +81,7 @@ type Config struct { Name string `json:"name"` SnapCount uint64 `json:"snapshot-count"` 
AutoCompactionRetention int `json:"auto-compaction-retention"` + AutoCompactionMode string `json:"auto-compaction-mode"` // TickMs is the number of milliseconds between heartbeat ticks. // TODO: decouple tickMs and heartbeat tick (current heartbeat tick = 1). @@ -85,6 +89,8 @@ type Config struct { TickMs uint `json:"heartbeat-interval"` ElectionMs uint `json:"election-timeout"` QuotaBackendBytes int64 `json:"quota-backend-bytes"` + MaxTxnOps uint `json:"max-txn-ops"` + MaxRequestBytes uint `json:"max-request-bytes"` // clustering @@ -107,10 +113,12 @@ type Config struct { // debug - Debug bool `json:"debug"` - LogPkgLevels string `json:"log-package-levels"` - EnablePprof bool - Metrics string `json:"metrics"` + Debug bool `json:"debug"` + LogPkgLevels string `json:"log-package-levels"` + EnablePprof bool + Metrics string `json:"metrics"` + ListenMetricsUrls []url.URL + ListenMetricsUrlsJSON string `json:"listen-metrics-urls"` // ForceNewCluster starts a new cluster even if previously started; unsafe. ForceNewCluster bool `json:"force-new-cluster"` @@ -172,6 +180,8 @@ func NewConfig() *Config { MaxWalFiles: DefaultMaxWALs, Name: DefaultName, SnapCount: etcdserver.DefaultSnapCount, + MaxTxnOps: DefaultMaxTxnOps, + MaxRequestBytes: DefaultMaxRequestBytes, TickMs: 100, ElectionMs: 1000, LPUrls: []url.URL{*lpurl}, @@ -248,6 +258,14 @@ func (cfg *configYAML) configFromFile(path string) error { cfg.ACUrls = []url.URL(u) } + if cfg.ListenMetricsUrlsJSON != "" { + u, err := types.NewURLs(strings.Split(cfg.ListenMetricsUrlsJSON, ",")) + if err != nil { + plog.Fatalf("unexpected error setting up listen-metrics-urls: %v", err) + } + cfg.ListenMetricsUrls = []url.URL(u) + } + // If a discovery flag is set, clear default initial cluster set by InitialClusterFromName if (cfg.Durl != "" || cfg.DNSCluster != "") && cfg.InitialCluster == defaultInitialCluster { cfg.InitialCluster = "" @@ -278,6 +296,9 @@ func (cfg *Config) Validate() error { if err := checkBindURLs(cfg.LCUrls); err != nil { return err } + if err := checkBindURLs(cfg.ListenMetricsUrls); err != nil { + return err + } // Check if conflicting flags are passed. 
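The embed.Config additions above expose transaction and request size limits (MaxTxnOps, MaxRequestBytes) and a dedicated metrics listener (ListenMetricsUrls / listen-metrics-urls). A minimal sketch of wiring these from Go, assuming the post-change embed package; the port and limit values are arbitrary examples.

```go
package main

import (
	"log"
	"net/url"

	"github.com/coreos/etcd/embed"
)

func main() {
	cfg := embed.NewConfig()
	cfg.Dir = "default.etcd"

	// New limits added above; the values are arbitrary.
	cfg.MaxTxnOps = 256
	cfg.MaxRequestBytes = 2 * 1024 * 1024

	// Serve Prometheus metrics on a dedicated listener instead of the client port.
	u, err := url.Parse("http://127.0.0.1:2381")
	if err != nil {
		log.Fatal(err)
	}
	cfg.ListenMetricsUrls = []url.URL{*u}

	e, err := embed.StartEtcd(cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer e.Close()
	<-e.Server.ReadyNotify()
	log.Println("embedded etcd ready; metrics on", cfg.ListenMetricsUrls[0].String())
}
```

With a metrics URL set, the serve path added later in this patch registers /metrics on that extra listener, so a scraper never has to touch the client port.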
nSet := 0 @@ -321,11 +342,15 @@ func (cfg *Config) PeerURLsMapAndToken(which string) (urlsmap types.URLsMap, tok urlsmap[cfg.Name] = cfg.APUrls token = cfg.Durl case cfg.DNSCluster != "": - var clusterStr string - clusterStr, err = discovery.SRVGetCluster(cfg.Name, cfg.DNSCluster, cfg.APUrls) - if err != nil { - return nil, "", err + clusterStrs, cerr := srv.GetCluster("etcd-server", cfg.Name, cfg.DNSCluster, cfg.APUrls) + if cerr != nil { + plog.Errorf("couldn't resolve during SRV discovery (%v)", cerr) + return nil, "", cerr + } + for _, s := range clusterStrs { + plog.Noticef("got bootstrap from DNS for etcd-server at %s", s) } + clusterStr := strings.Join(clusterStrs, ",") if strings.Contains(clusterStr, "https://") && cfg.PeerTLSInfo.CAFile == "" { cfg.PeerTLSInfo.ServerName = cfg.DNSCluster } @@ -369,6 +394,34 @@ func (cfg Config) defaultClientHost() bool { return len(cfg.ACUrls) == 1 && cfg.ACUrls[0].String() == DefaultAdvertiseClientURLs } +func (cfg *Config) ClientSelfCert() (err error) { + if cfg.ClientAutoTLS && cfg.ClientTLSInfo.Empty() { + chosts := make([]string, len(cfg.LCUrls)) + for i, u := range cfg.LCUrls { + chosts[i] = u.Host + } + cfg.ClientTLSInfo, err = transport.SelfCert(filepath.Join(cfg.Dir, "fixtures", "client"), chosts) + return err + } else if cfg.ClientAutoTLS { + plog.Warningf("ignoring client auto TLS since certs given") + } + return nil +} + +func (cfg *Config) PeerSelfCert() (err error) { + if cfg.PeerAutoTLS && cfg.PeerTLSInfo.Empty() { + phosts := make([]string, len(cfg.LPUrls)) + for i, u := range cfg.LPUrls { + phosts[i] = u.Host + } + cfg.PeerTLSInfo, err = transport.SelfCert(filepath.Join(cfg.Dir, "fixtures", "peer"), phosts) + return err + } else if cfg.PeerAutoTLS { + plog.Warningf("ignoring peer auto TLS since certs given") + } + return nil +} + // UpdateDefaultClusterFromName updates cluster advertise URLs with, if available, default host, // if advertise URLs are default values(localhost:2379,2380) AND if listen URL is 0.0.0.0. // e.g. 
advertise peer URL localhost:2380 or listen peer URL 0.0.0.0:2380 @@ -387,7 +440,7 @@ func (cfg *Config) UpdateDefaultClusterFromName(defaultInitialCluster string) (s } used := false - pip, pport, _ := net.SplitHostPort(cfg.LPUrls[0].Host) + pip, pport := cfg.LPUrls[0].Hostname(), cfg.LPUrls[0].Port() if cfg.defaultPeerHost() && pip == "0.0.0.0" { cfg.APUrls[0] = url.URL{Scheme: cfg.APUrls[0].Scheme, Host: fmt.Sprintf("%s:%s", defaultHostname, pport)} used = true @@ -397,7 +450,7 @@ func (cfg *Config) UpdateDefaultClusterFromName(defaultInitialCluster string) (s cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name) } - cip, cport, _ := net.SplitHostPort(cfg.LCUrls[0].Host) + cip, cport := cfg.LCUrls[0].Hostname(), cfg.LCUrls[0].Port() if cfg.defaultClientHost() && cip == "0.0.0.0" { cfg.ACUrls[0] = url.URL{Scheme: cfg.ACUrls[0].Scheme, Host: fmt.Sprintf("%s:%s", defaultHostname, cport)} used = true diff --git a/github.com/coreos/etcd/embed/config_test.go b/github.com/coreos/etcd/embed/config_test.go index 1be6bcd9c1..27b73238ea 100644 --- a/github.com/coreos/etcd/embed/config_test.go +++ b/github.com/coreos/etcd/embed/config_test.go @@ -17,7 +17,6 @@ package embed import ( "fmt" "io/ioutil" - "net" "net/url" "os" "testing" @@ -74,7 +73,7 @@ func TestUpdateDefaultClusterFromName(t *testing.T) { origadvc := cfg.ACUrls[0].String() cfg.Name = "abc" - _, lpport, _ := net.SplitHostPort(cfg.LPUrls[0].Host) + lpport := cfg.LPUrls[0].Port() // in case of 'etcd --name=abc' exp := fmt.Sprintf("%s=%s://localhost:%s", cfg.Name, oldscheme, lpport) @@ -105,13 +104,13 @@ func TestUpdateDefaultClusterFromNameOverwrite(t *testing.T) { origadvc := cfg.ACUrls[0].String() cfg.Name = "abc" - _, lpport, _ := net.SplitHostPort(cfg.LPUrls[0].Host) + lpport := cfg.LPUrls[0].Port() cfg.LPUrls[0] = url.URL{Scheme: cfg.LPUrls[0].Scheme, Host: fmt.Sprintf("0.0.0.0:%s", lpport)} dhost, _ := cfg.UpdateDefaultClusterFromName(defaultInitialCluster) if dhost != defaultHostname { t.Fatalf("expected default host %q, got %q", defaultHostname, dhost) } - aphost, apport, _ := net.SplitHostPort(cfg.APUrls[0].Host) + aphost, apport := cfg.APUrls[0].Hostname(), cfg.APUrls[0].Port() if apport != lpport { t.Fatalf("advertise peer url got different port %s, expected %s", apport, lpport) } diff --git a/github.com/coreos/etcd/embed/etcd.go b/github.com/coreos/etcd/embed/etcd.go index 4d23a04009..b48caa8989 100644 --- a/github.com/coreos/etcd/embed/etcd.go +++ b/github.com/coreos/etcd/embed/etcd.go @@ -15,14 +15,18 @@ package embed import ( - "crypto/tls" + "context" "fmt" + "io/ioutil" + defaultLog "log" "net" "net/http" - "path/filepath" + "net/url" "sync" + "time" "github.com/coreos/etcd/etcdserver" + "github.com/coreos/etcd/etcdserver/api/etcdhttp" "github.com/coreos/etcd/etcdserver/api/v2http" "github.com/coreos/etcd/pkg/cors" "github.com/coreos/etcd/pkg/debugutil" @@ -31,6 +35,7 @@ import ( "github.com/coreos/etcd/pkg/types" "github.com/coreos/etcd/rafthttp" "github.com/coreos/pkg/capnslog" + "github.com/prometheus/client_golang/prometheus" ) var plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "embed") @@ -51,9 +56,10 @@ const ( // Etcd contains a running etcd server and its listeners. 
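The DNS bootstrap path in PeerURLsMapAndToken above now resolves the cluster through pkg/srv's GetCluster with the "etcd-server" service instead of the old discovery.SRVGetCluster. Underneath, that discovery is driven by SRV records; the sketch below only shows the raw lookup for the plain-HTTP variant, and the domain is a placeholder.

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// SRV discovery consults _etcd-server._tcp.<domain> (plus an -ssl variant
	// for TLS peers); example.com stands in for the real discovery domain.
	_, srvs, err := net.LookupSRV("etcd-server", "tcp", "example.com")
	if err != nil {
		fmt.Println("SRV lookup failed:", err)
		return
	}
	for _, s := range srvs {
		fmt.Printf("peer candidate %s:%d (priority=%d weight=%d)\n",
			s.Target, s.Port, s.Priority, s.Weight)
	}
}
```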
type Etcd struct { - Peers []net.Listener - Clients []net.Listener - Server *etcdserver.EtcdServer + Peers []*peerListener + Clients []net.Listener + metricsListeners []net.Listener + Server *etcdserver.EtcdServer cfg Config stopc chan struct{} @@ -63,6 +69,12 @@ type Etcd struct { closeOnce sync.Once } +type peerListener struct { + net.Listener + serve func() error + close func(context.Context) error +} + // StartEtcd launches the etcd server and HTTP handlers for client/server communication. // The returned Etcd.Server is not guaranteed to have joined the cluster. Wait // on the Etcd.Server.ReadyNotify() channel to know when it completes and is ready for use. @@ -70,13 +82,21 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) { if err = inCfg.Validate(); err != nil { return nil, err } + serving := false e = &Etcd{cfg: *inCfg, stopc: make(chan struct{})} cfg := &e.cfg defer func() { - if e != nil && err != nil { - e.Close() - e = nil + if e == nil || err == nil { + return } + if !serving { + // errored before starting gRPC server for serveCtx.grpcServerC + for _, sctx := range e.sctxs { + close(sctx.grpcServerC) + } + } + e.Close() + e = nil }() if e.Peers, err = startPeerListeners(cfg); err != nil { @@ -101,7 +121,7 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) { } } - srvcfg := &etcdserver.ServerConfig{ + srvcfg := etcdserver.ServerConfig{ Name: cfg.Name, ClientURLs: cfg.ACUrls, PeerURLs: cfg.APUrls, @@ -120,7 +140,10 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) { TickMs: cfg.TickMs, ElectionTicks: cfg.ElectionTicks(), AutoCompactionRetention: cfg.AutoCompactionRetention, + AutoCompactionMode: cfg.AutoCompactionMode, QuotaBackendBytes: cfg.QuotaBackendBytes, + MaxTxnOps: cfg.MaxTxnOps, + MaxRequestBytes: cfg.MaxRequestBytes, StrictReconfigCheck: cfg.StrictReconfigCheck, ClientCertAuthEnabled: cfg.ClientTLSInfo.ClientCertAuth, AuthToken: cfg.AuthToken, @@ -130,6 +153,25 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) { return } + // configure peer handlers after rafthttp.Transport started + ph := etcdhttp.NewPeerHandler(e.Server) + for i := range e.Peers { + srv := &http.Server{ + Handler: ph, + ReadTimeout: 5 * time.Minute, + ErrorLog: defaultLog.New(ioutil.Discard, "", 0), // do not log user error + } + e.Peers[i].serve = func() error { + return srv.Serve(e.Peers[i].Listener) + } + e.Peers[i].close = func(ctx context.Context) error { + // gracefully shutdown http.Server + // close open listeners, idle connections + // until context cancel or time-out + return srv.Shutdown(ctx) + } + } + // buffer channel so goroutines on closed connections won't wait forever e.errc = make(chan error, len(e.Peers)+len(e.Clients)+2*len(e.sctxs)) @@ -137,6 +179,7 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) { if err = e.serve(); err != nil { return } + serving = true return } @@ -148,63 +191,79 @@ func (e *Etcd) Config() Config { func (e *Etcd) Close() { e.closeOnce.Do(func() { close(e.stopc) }) - // (gRPC server) stops accepting new connections, - // RPCs, and blocks until all pending RPCs are finished + timeout := 2 * time.Second + if e.Server != nil { + timeout = e.Server.Cfg.ReqTimeout() + } for _, sctx := range e.sctxs { for gs := range sctx.grpcServerC { - gs.GracefulStop() + ch := make(chan struct{}) + go func() { + defer close(ch) + // close listeners to stop accepting new connections, + // will block on any existing transports + gs.GracefulStop() + }() + // wait until all pending RPCs are finished + select { + case <-ch: + case <-time.After(timeout): + // took 
too long, manually close open transports + // e.g. watch streams + gs.Stop() + // concurrent GracefulStop should be interrupted + <-ch + } } } for _, sctx := range e.sctxs { sctx.cancel() } - for i := range e.Peers { - if e.Peers[i] != nil { - e.Peers[i].Close() - } - } for i := range e.Clients { if e.Clients[i] != nil { e.Clients[i].Close() } } + for i := range e.metricsListeners { + e.metricsListeners[i].Close() + } + + // close rafthttp transports if e.Server != nil { e.Server.Stop() } + + // close all idle connections in peer handler (wait up to 1-second) + for i := range e.Peers { + if e.Peers[i] != nil && e.Peers[i].close != nil { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + e.Peers[i].close(ctx) + cancel() + } + } } func (e *Etcd) Err() <-chan error { return e.errc } -func startPeerListeners(cfg *Config) (plns []net.Listener, err error) { - if cfg.PeerAutoTLS && cfg.PeerTLSInfo.Empty() { - phosts := make([]string, len(cfg.LPUrls)) - for i, u := range cfg.LPUrls { - phosts[i] = u.Host - } - cfg.PeerTLSInfo, err = transport.SelfCert(filepath.Join(cfg.Dir, "fixtures", "peer"), phosts) - if err != nil { - plog.Fatalf("could not get certs (%v)", err) - } - } else if cfg.PeerAutoTLS { - plog.Warningf("ignoring peer auto TLS since certs given") +func startPeerListeners(cfg *Config) (peers []*peerListener, err error) { + if err = cfg.PeerSelfCert(); err != nil { + plog.Fatalf("could not get certs (%v)", err) } - if !cfg.PeerTLSInfo.Empty() { plog.Infof("peerTLS: %s", cfg.PeerTLSInfo) } - plns = make([]net.Listener, len(cfg.LPUrls)) + peers = make([]*peerListener, len(cfg.LPUrls)) defer func() { if err == nil { return } - for i := range plns { - if plns[i] == nil { - continue + for i := range peers { + if peers[i] != nil && peers[i].close != nil { + plog.Info("stopping listening for peers on ", cfg.LPUrls[i].String()) + peers[i].close(context.Background()) } - plns[i].Close() - plog.Info("stopping listening for peers on ", cfg.LPUrls[i].String()) } }() @@ -217,28 +276,24 @@ func startPeerListeners(cfg *Config) (plns []net.Listener, err error) { plog.Warningf("The scheme of peer url %s is HTTP while client cert auth (--peer-client-cert-auth) is enabled. 
Ignored client cert auth for this url.", u.String()) } } - if plns[i], err = rafthttp.NewListener(u, &cfg.PeerTLSInfo); err != nil { + peers[i] = &peerListener{close: func(context.Context) error { return nil }} + peers[i].Listener, err = rafthttp.NewListener(u, &cfg.PeerTLSInfo) + if err != nil { return nil, err } + // once serve, overwrite with 'http.Server.Shutdown' + peers[i].close = func(context.Context) error { + return peers[i].Listener.Close() + } plog.Info("listening for peers on ", u.String()) } - return plns, nil + return peers, nil } func startClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err error) { - if cfg.ClientAutoTLS && cfg.ClientTLSInfo.Empty() { - chosts := make([]string, len(cfg.LCUrls)) - for i, u := range cfg.LCUrls { - chosts[i] = u.Host - } - cfg.ClientTLSInfo, err = transport.SelfCert(filepath.Join(cfg.Dir, "fixtures", "client"), chosts) - if err != nil { - plog.Fatalf("could not get certs (%v)", err) - } - } else if cfg.ClientAutoTLS { - plog.Warningf("ignoring client auto TLS since certs given") + if err = cfg.ClientSelfCert(); err != nil { + plog.Fatalf("could not get certs (%v)", err) } - if cfg.EnablePprof { plog.Infof("pprof is enabled under %s", debugutil.HTTPPrefixPProf) } @@ -277,6 +332,9 @@ func startClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err error) { if sctx.l, err = net.Listen(proto, addr); err != nil { return nil, err } + // net.Listener will rewrite ipv4 0.0.0.0 to ipv6 [::], breaking + // hosts that disable ipv6. So, use the address given by the user. + sctx.addr = addr if fdLimit, fderr := runtimeutil.FDLimit(); fderr == nil { if fdLimit <= reservedInternalFDNum { @@ -314,12 +372,8 @@ func startClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err error) { } func (e *Etcd) serve() (err error) { - var ctlscfg *tls.Config if !e.cfg.ClientTLSInfo.Empty() { plog.Infof("ClientTLS: %s", e.cfg.ClientTLSInfo) - if ctlscfg, err = e.cfg.ClientTLSInfo.ServerConfig(); err != nil { - return err - } } if e.cfg.CorsInfo.String() != "" { @@ -327,26 +381,47 @@ func (e *Etcd) serve() (err error) { } // Start the peer server in a goroutine - ph := v2http.NewPeerHandler(e.Server) - for _, l := range e.Peers { - go func(l net.Listener) { - e.errHandler(servePeerHTTP(l, ph)) - }(l) + for _, pl := range e.Peers { + go func(l *peerListener) { + e.errHandler(l.serve()) + }(pl) } // Start a client server goroutine for each listen address - var v2h http.Handler + var h http.Handler if e.Config().EnableV2 { - v2h = http.Handler(&cors.CORSHandler{ - Handler: v2http.NewClientHandler(e.Server, e.Server.Cfg.ReqTimeout()), - Info: e.cfg.CorsInfo, - }) + h = v2http.NewClientHandler(e.Server, e.Server.Cfg.ReqTimeout()) + } else { + mux := http.NewServeMux() + etcdhttp.HandleBasic(mux, e.Server) + h = mux } + h = http.Handler(&cors.CORSHandler{Handler: h, Info: e.cfg.CorsInfo}) + for _, sctx := range e.sctxs { go func(s *serveCtx) { - e.errHandler(s.serve(e.Server, ctlscfg, v2h, e.errHandler)) + e.errHandler(s.serve(e.Server, &e.cfg.ClientTLSInfo, h, e.errHandler)) }(sctx) } + + if len(e.cfg.ListenMetricsUrls) > 0 { + // TODO: maybe etcdhttp.MetricsPath or get the path from the user-provided URL + metricsMux := http.NewServeMux() + metricsMux.Handle("/metrics", prometheus.Handler()) + + for _, murl := range e.cfg.ListenMetricsUrls { + ml, err := transport.NewListener(murl.Host, murl.Scheme, &e.cfg.ClientTLSInfo) + if err != nil { + return err + } + e.metricsListeners = append(e.metricsListeners, ml) + go func(u url.URL, ln net.Listener) { + 
plog.Info("listening for metrics on ", u.String()) + e.errHandler(http.Serve(ln, metricsMux)) + }(murl, ml) + } + } + return nil } diff --git a/github.com/coreos/etcd/embed/serve.go b/github.com/coreos/etcd/embed/serve.go index 07d2159ce8..3e9b37ea07 100644 --- a/github.com/coreos/etcd/embed/serve.go +++ b/github.com/coreos/etcd/embed/serve.go @@ -15,23 +15,24 @@ package embed import ( - "crypto/tls" "io/ioutil" defaultLog "log" "net" "net/http" "strings" - "time" "github.com/coreos/etcd/etcdserver" "github.com/coreos/etcd/etcdserver/api/v3client" "github.com/coreos/etcd/etcdserver/api/v3election" "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb" + v3electiongw "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw" "github.com/coreos/etcd/etcdserver/api/v3lock" "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb" + v3lockgw "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw" "github.com/coreos/etcd/etcdserver/api/v3rpc" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + etcdservergw "github.com/coreos/etcd/etcdserver/etcdserverpb/gw" "github.com/coreos/etcd/pkg/debugutil" + "github.com/coreos/etcd/pkg/transport" "github.com/cockroachdb/cmux" gw "github.com/grpc-ecosystem/grpc-gateway/runtime" @@ -43,6 +44,7 @@ import ( type serveCtx struct { l net.Listener + addr string secure bool insecure bool @@ -64,7 +66,7 @@ func newServeCtx() *serveCtx { // serve accepts incoming connections on the listener l, // creating a new service goroutine for each. The service goroutines // read requests and then call handler to reply to them. -func (sctx *serveCtx) serve(s *etcdserver.EtcdServer, tlscfg *tls.Config, handler http.Handler, errHandler func(error)) error { +func (sctx *serveCtx) serve(s *etcdserver.EtcdServer, tlsinfo *transport.TLSInfo, handler http.Handler, errHandler func(error)) error { logger := defaultLog.New(ioutil.Discard, "etcdhttp", 0) <-s.ReadyNotify() plog.Info("ready to serve client requests") @@ -74,8 +76,6 @@ func (sctx *serveCtx) serve(s *etcdserver.EtcdServer, tlscfg *tls.Config, handle servElection := v3election.NewElectionServer(v3c) servLock := v3lock.NewLockServer(v3c) - defer close(sctx.grpcServerC) - if sctx.insecure { gs := v3rpc.Server(s, nil) sctx.grpcServerC <- gs @@ -107,6 +107,10 @@ func (sctx *serveCtx) serve(s *etcdserver.EtcdServer, tlscfg *tls.Config, handle } if sctx.secure { + tlscfg, tlsErr := tlsinfo.ServerConfig() + if tlsErr != nil { + return tlsErr + } gs := v3rpc.Server(s, tlscfg) sctx.grpcServerC <- gs v3electionpb.RegisterElectionServer(gs, servElection) @@ -126,7 +130,10 @@ func (sctx *serveCtx) serve(s *etcdserver.EtcdServer, tlscfg *tls.Config, handle return err } - tlsl := tls.NewListener(m.Match(cmux.Any()), tlscfg) + tlsl, lerr := transport.NewTLSListener(m.Match(cmux.Any()), tlsinfo) + if lerr != nil { + return lerr + } // TODO: add debug flag; enable logging when debug flag is set httpmux := sctx.createMux(gwmux, handler) @@ -140,6 +147,7 @@ func (sctx *serveCtx) serve(s *etcdserver.EtcdServer, tlscfg *tls.Config, handle plog.Infof("serving client requests on %s", sctx.l.Addr().String()) } + close(sctx.grpcServerC) return m.Serve() } @@ -160,39 +168,38 @@ func grpcHandlerFunc(grpcServer *grpc.Server, otherHandler http.Handler) http.Ha }) } -func servePeerHTTP(l net.Listener, handler http.Handler) error { - logger := defaultLog.New(ioutil.Discard, "etcdhttp", 0) - // TODO: add debug flag; enable logging when debug flag is set - srv := &http.Server{ - Handler: handler, - ReadTimeout: 5 * time.Minute, - 
ErrorLog: logger, // do not log user error - } - return srv.Serve(l) -} - -type registerHandlerFunc func(context.Context, *gw.ServeMux, string, []grpc.DialOption) error +type registerHandlerFunc func(context.Context, *gw.ServeMux, *grpc.ClientConn) error func (sctx *serveCtx) registerGateway(opts []grpc.DialOption) (*gw.ServeMux, error) { ctx := sctx.ctx - addr := sctx.l.Addr().String() + conn, err := grpc.DialContext(ctx, sctx.addr, opts...) + if err != nil { + return nil, err + } gwmux := gw.NewServeMux() handlers := []registerHandlerFunc{ - pb.RegisterKVHandlerFromEndpoint, - pb.RegisterWatchHandlerFromEndpoint, - pb.RegisterLeaseHandlerFromEndpoint, - pb.RegisterClusterHandlerFromEndpoint, - pb.RegisterMaintenanceHandlerFromEndpoint, - pb.RegisterAuthHandlerFromEndpoint, - v3lockpb.RegisterLockHandlerFromEndpoint, - v3electionpb.RegisterElectionHandlerFromEndpoint, + etcdservergw.RegisterKVHandler, + etcdservergw.RegisterWatchHandler, + etcdservergw.RegisterLeaseHandler, + etcdservergw.RegisterClusterHandler, + etcdservergw.RegisterMaintenanceHandler, + etcdservergw.RegisterAuthHandler, + v3lockgw.RegisterLockHandler, + v3electiongw.RegisterElectionHandler, } for _, h := range handlers { - if err := h(ctx, gwmux, addr, opts); err != nil { + if err := h(ctx, gwmux, conn); err != nil { return nil, err } } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + plog.Warningf("failed to close conn to %s: %v", sctx.l.Addr().String(), cerr) + } + }() + return gwmux, nil } diff --git a/github.com/coreos/etcd/wal/wal_windows.go b/github.com/coreos/etcd/embed/serve_test.go similarity index 50% rename from github.com/coreos/etcd/wal/wal_windows.go rename to github.com/coreos/etcd/embed/serve_test.go index 0b9e434cf5..d46631fcfd 100644 --- a/github.com/coreos/etcd/wal/wal_windows.go +++ b/github.com/coreos/etcd/embed/serve_test.go @@ -1,4 +1,4 @@ -// Copyright 2016 The etcd Authors +// Copyright 2017 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,30 +12,27 @@ // See the License for the specific language governing permissions and // limitations under the License. -package wal +package embed import ( + "io/ioutil" "os" + "testing" - "github.com/coreos/etcd/wal/walpb" + "github.com/coreos/etcd/auth" ) -func (w *WAL) renameWal(tmpdirpath string) (*WAL, error) { - // rename of directory with locked files doesn't work on - // windows; close the WAL to release the locks so the directory - // can be renamed - w.Close() - if err := os.Rename(tmpdirpath, w.dir); err != nil { - return nil, err +// TestStartEtcdWrongToken ensures that StartEtcd with wrong configs returns with error. 
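The rewritten Close path in embed/etcd.go above bounds gRPC shutdown: it attempts GracefulStop first and falls back to Stop when draining exceeds the request timeout (taken from Server.Cfg.ReqTimeout()), so long-lived watch streams cannot hang the process. A standalone sketch of that pattern; the function name is mine.

```go
package shutdown

import (
	"time"

	"google.golang.org/grpc"
)

// stopGRPCServer sketches the shutdown pattern used by (*Etcd).Close above:
// drain gracefully first, then force-stop if draining exceeds the timeout.
func stopGRPCServer(gs *grpc.Server, timeout time.Duration) {
	done := make(chan struct{})
	go func() {
		defer close(done)
		// stop accepting new connections and wait for pending RPCs to finish
		gs.GracefulStop()
	}()
	select {
	case <-done:
	case <-time.After(timeout):
		// drain took too long (e.g. open watch streams); close transports,
		// which also interrupts the concurrent GracefulStop
		gs.Stop()
		<-done
	}
}
```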
+func TestStartEtcdWrongToken(t *testing.T) { + tdir, err := ioutil.TempDir(os.TempDir(), "token-test") + if err != nil { + t.Fatal(err) } - // reopen and relock - newWAL, oerr := Open(w.dir, walpb.Snapshot{}) - if oerr != nil { - return nil, oerr + defer os.RemoveAll(tdir) + cfg := NewConfig() + cfg.Dir = tdir + cfg.AuthToken = "wrong-token" + if _, err = StartEtcd(cfg); err != auth.ErrInvalidAuthOpts { + t.Fatalf("expected %v, got %v", auth.ErrInvalidAuthOpts, err) } - if _, _, _, err := newWAL.ReadAll(); err != nil { - newWAL.Close() - return nil, err - } - return newWAL, nil } diff --git a/github.com/coreos/etcd/error/error.go b/github.com/coreos/etcd/error/error.go index 8cf83cc716..b541a628b8 100644 --- a/github.com/coreos/etcd/error/error.go +++ b/github.com/coreos/etcd/error/error.go @@ -154,9 +154,10 @@ func (e Error) StatusCode() int { return status } -func (e Error) WriteTo(w http.ResponseWriter) { +func (e Error) WriteTo(w http.ResponseWriter) error { w.Header().Add("X-Etcd-Index", fmt.Sprint(e.Index)) w.Header().Set("Content-Type", "application/json") w.WriteHeader(e.StatusCode()) - fmt.Fprintln(w, e.toJsonString()) + _, err := w.Write([]byte(e.toJsonString() + "\n")) + return err } diff --git a/github.com/coreos/etcd/etcdctl/README.md b/github.com/coreos/etcd/etcdctl/README.md index 508c9198ba..948617d1a5 100644 --- a/github.com/coreos/etcd/etcdctl/README.md +++ b/github.com/coreos/etcd/etcdctl/README.md @@ -565,6 +565,10 @@ Prints a humanized table of the member IDs, statuses, names, peer addresses, and ENDPOINT provides commands for querying individual endpoints. +#### Options + +- cluster -- fetch and use all endpoints from the etcd cluster member list + ### ENDPOINT HEALTH ENDPOINT HEALTH checks the health of the list of endpoints with respect to cluster. 
An endpoint is unhealthy @@ -576,11 +580,20 @@ If an endpoint can participate in consensus, prints a message indicating the end #### Example +Check the default endpoint's health: + ```bash ./etcdctl endpoint health -# 127.0.0.1:32379 is healthy: successfully committed proposal: took = 2.130877ms # 127.0.0.1:2379 is healthy: successfully committed proposal: took = 2.095242ms -# 127.0.0.1:22379 is healthy: successfully committed proposal: took = 2.083263ms +``` + +Check all endpoints for the cluster associated with the default endpoint: + +```bash +./etcdctl endpoint --cluster health +# http://127.0.0.1:2379 is healthy: successfully committed proposal: took = 1.060091ms +# http://127.0.0.1:22379 is healthy: successfully committed proposal: took = 903.138µs +# http://127.0.0.1:32379 is healthy: successfully committed proposal: took = 1.113848ms ``` ### ENDPOINT STATUS @@ -599,27 +612,31 @@ Prints a line of JSON encoding each endpoint URL, ID, version, database size, le #### Examples +Get the status for the default endpoint: + ```bash ./etcdctl endpoint status # 127.0.0.1:2379, 8211f1d0f64f3269, 3.0.0, 25 kB, false, 2, 63 -# 127.0.0.1:22379, 91bc3c398fb3c146, 3.0.0, 25 kB, false, 2, 63 -# 127.0.0.1:32379, fd422379fda50e48, 3.0.0, 25 kB, true, 2, 63 ``` +Get the status for the default endpoint as JSON: + ```bash ./etcdctl -w json endpoint status -# [{"Endpoint":"127.0.0.1:2379","Status":{"header":{"cluster_id":17237436991929493444,"member_id":9372538179322589801,"revision":2,"raft_term":2},"version":"3.0.0","dbSize":24576,"leader":18249187646912138824,"raftIndex":32623,"raftTerm":2}},{"Endpoint":"127.0.0.1:22379","Status":{"header":{"cluster_id":17237436991929493444,"member_id":10501334649042878790,"revision":2,"raft_term":2},"version":"3.0.0","dbSize":24576,"leader":18249187646912138824,"raftIndex":32623,"raftTerm":2}},{"Endpoint":"127.0.0.1:32379","Status":{"header":{"cluster_id":17237436991929493444,"member_id":18249187646912138824,"revision":2,"raft_term":2},"version":"3.0.0","dbSize":24576,"leader":18249187646912138824,"raftIndex":32623,"raftTerm":2}}] +# [{"Endpoint":"127.0.0.1:2379","Status":{"header":{"cluster_id":17237436991929493444,"member_id":9372538179322589801,"revision":2,"raft_term":2},"version":"3.0.0","dbSize":24576,"leader":18249187646912138824,"raftIndex":32623,"raftTerm":2}}] ``` +Get the status for all endpoints in the cluster associated with the default endpoint: + ```bash -./etcdctl -w table endpoint status -+-----------------+------------------+---------+---------+-----------+-----------+------------+ -| ENDPOINT | ID | VERSION | DB SIZE | IS LEADER | RAFT TERM | RAFT INDEX | -+-----------------+------------------+---------+---------+-----------+-----------+------------+ -| 127.0.0.1:2379 | 8211f1d0f64f3269 | 3.0.0 | 25 kB | false | 2 | 52 | -| 127.0.0.1:22379 | 91bc3c398fb3c146 | 3.0.0 | 25 kB | false | 2 | 52 | -| 127.0.0.1:32379 | fd422379fda50e48 | 3.0.0 | 25 kB | true | 2 | 52 | -+-----------------+------------------+---------+---------+-----------+-----------+------------+ +./etcdctl -w table endpoint --cluster status ++------------------------+------------------+----------------+---------+-----------+-----------+------------+ +| ENDPOINT | ID | VERSION | DB SIZE | IS LEADER | RAFT TERM | RAFT INDEX | ++------------------------+------------------+----------------+---------+-----------+-----------+------------+ +| http://127.0.0.1:2379 | 8211f1d0f64f3269 | 3.2.0-rc.1+git | 25 kB | false | 2 | 8 | +| http://127.0.0.1:22379 | 91bc3c398fb3c146 | 3.2.0-rc.1+git | 25 kB 
| false | 2 | 8 | +| http://127.0.0.1:32379 | fd422379fda50e48 | 3.2.0-rc.1+git | 25 kB | true | 2 | 8 | ++------------------------+------------------+----------------+---------+-----------+-----------+------------+ ``` ### ALARM \ @@ -788,9 +805,32 @@ Prints a line of JSON encoding the database hash, revision, total keys, and size +----------+----------+------------+------------+ ``` +### MOVE-LEADER \ + +MOVE-LEADER transfers leadership from the leader to another member in the cluster. + +#### Example + +```bash +# to choose transferee +transferee_id=$(./etcdctl \ + --endpoints localhost:12379,localhost:22379,localhost:32379 \ + endpoint status | grep -m 1 "false" | awk -F', ' '{print $2}') +echo ${transferee_id} +# c89feb932daef420 + +# endpoints should include leader node +./etcdctl --endpoints ${transferee_ep} move-leader ${transferee_id} +# Error: no leader endpoint given at [localhost:22379 localhost:32379] + +# request to leader with target node ID +./etcdctl --endpoints ${leader_ep} move-leader ${transferee_id} +# Leadership transferred from 45ddc0e800e20b93 to c89feb932daef420 +``` + ## Concurrency commands -### LOCK \ +### LOCK \ [command arg1 arg2 ...] LOCK acquires a distributed named mutex with a given name. Once the lock is acquired, it will be held until etcdctl is terminated. @@ -798,13 +838,24 @@ LOCK acquires a distributed named mutex with a given name. Once the lock is acqu Once the lock is acquired, the result for the GET on the unique lock holder key is displayed. +If a command is given, it will be launched with environment variables `ETCD_LOCK_KEY` and `ETCD_LOCK_REV` set to the lock's holder key and revision. + #### Example +Acquire lock with standard output display: + ```bash ./etcdctl lock mylock # mylock/1234534535445 ``` +Acquire lock and execute `echo lock acquired`: + +```bash +./etcdctl lock mylock echo lock acquired +# lock acquired +``` + #### Remarks LOCK returns a zero exit code only if it is terminated by a signal and releases the lock. @@ -961,25 +1012,42 @@ RPC: RoleGrantPermission #### Options +- from-key -- grant a permission of keys that are greater than or equal to the given key using byte compare + - prefix -- grant a prefix permission -#### Ouptut +#### Output -`Role updated`. +`Role updated`. #### Examples +Grant read and write permission on the key `foo` to role `myrole`: + ```bash ./etcdctl --user=root:123 role grant-permission myrole readwrite foo # Role myrole updated ``` +Grant read permission on the wildcard key pattern `foo/*` to role `myrole`: + +```bash +./etcdctl --user=root:123 role grant-permission --prefix myrole readwrite foo/ +# Role myrole updated +``` + ### ROLE REVOKE-PERMISSION \ \ \ [endkey] `role revoke-permission` revokes a key from a role. RPC: RoleRevokePermission +#### Options + +- from-key -- revoke a permission of keys that are greater than or equal to the given key using byte compare + +- prefix -- revoke a prefix permission + #### Output `Permission of key is revoked from role ` for single key. `Permission of range [, ) is revoked from role ` for a key range. Exit code is zero. diff --git a/github.com/coreos/etcd/etcdctl/READMEv2.md b/github.com/coreos/etcd/etcdctl/READMEv2.md index 608c1963f9..5ee2329e3e 100644 --- a/github.com/coreos/etcd/etcdctl/READMEv2.md +++ b/github.com/coreos/etcd/etcdctl/READMEv2.md @@ -331,6 +331,6 @@ etcdctl is under the Apache 2.0 license. 
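The LOCK documentation above now covers running a command with ETCD_LOCK_KEY and ETCD_LOCK_REV set while the lock is held. The same primitive is available programmatically through clientv3/concurrency, which is what etcdctl's lock command is built on; a minimal sketch against a local endpoint, with the endpoint address assumed.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/clientv3/concurrency"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	s, err := concurrency.NewSession(cli)
	if err != nil {
		log.Fatal(err)
	}
	defer s.Close()

	m := concurrency.NewMutex(s, "mylock")
	if err := m.Lock(context.TODO()); err != nil {
		log.Fatal(err)
	}
	// The holder key is what `etcdctl lock mylock` prints on acquisition.
	fmt.Println("holding", m.Key())

	if err := m.Unlock(context.TODO()); err != nil {
		log.Fatal(err)
	}
}
```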
See the [LICENSE][license] file for det [authentication]: ../Documentation/v2/authentication.md [etcd]: https://github.com/coreos/etcd [github-release]: https://github.com/coreos/etcd/releases/ -[license]: https://github.com/coreos/etcdctl/blob/master/LICENSE +[license]: ../LICENSE [semver]: http://semver.org/ [username-flag]: #--username--u diff --git a/github.com/coreos/etcd/etcdctl/ctlv2/command/cluster_health.go b/github.com/coreos/etcd/etcdctl/ctlv2/command/cluster_health.go index 95101785dc..d1429649ae 100644 --- a/github.com/coreos/etcd/etcdctl/ctlv2/command/cluster_health.go +++ b/github.com/coreos/etcd/etcdctl/ctlv2/command/cluster_health.go @@ -70,7 +70,7 @@ func handleClusterHealth(c *cli.Context) error { } for { - health := false + healthyMembers := 0 for _, m := range ms { if len(m.ClientURLs) == 0 { fmt.Printf("member %s is unreachable: no available published client urls\n", m.ID) @@ -105,8 +105,8 @@ func handleClusterHealth(c *cli.Context) error { checked = true if result.Health == "true" || nresult.Health { - health = true fmt.Printf("member %s is healthy: got healthy result from %s\n", m.ID, url) + healthyMembers++ } else { fmt.Printf("member %s is unhealthy: got unhealthy result from %s\n", m.ID, url) } @@ -116,19 +116,20 @@ func handleClusterHealth(c *cli.Context) error { fmt.Printf("member %s is unreachable: %v are all unreachable\n", m.ID, m.ClientURLs) } } - if health { + switch healthyMembers { + case len(ms): fmt.Println("cluster is healthy") - } else { - fmt.Println("cluster is unhealthy") + case 0: + fmt.Println("cluster is unavailable") + default: + fmt.Println("cluster is degraded") } if !forever { - if health { + if healthyMembers == len(ms) { os.Exit(ExitSuccess) - return nil } os.Exit(ExitClusterNotHealthy) - return nil } fmt.Printf("\nnext check after 10 second...\n\n") diff --git a/github.com/coreos/etcd/etcdctl/ctlv2/ctl_cov.go b/github.com/coreos/etcd/etcdctl/ctlv2/ctl_cov.go index f76125dce7..e9f22f25d8 100644 --- a/github.com/coreos/etcd/etcdctl/ctlv2/ctl_cov.go +++ b/github.com/coreos/etcd/etcdctl/ctlv2/ctl_cov.go @@ -24,5 +24,5 @@ import ( ) func runCtlV2(app *cli.App) error { - return app.Run(strings.Split(os.Getenv("ETCDCTL_ARGS"), "\xff")) + return app.Run(strings.Split(os.Getenv("ETCDCTL_ARGS"), "\xe7\xcd")) } diff --git a/github.com/coreos/etcd/etcdctl/ctlv3/command/check.go b/github.com/coreos/etcd/etcdctl/ctlv3/command/check.go index 0a40f0b6f2..590234ee57 100644 --- a/github.com/coreos/etcd/etcdctl/ctlv3/command/check.go +++ b/github.com/coreos/etcd/etcdctl/ctlv3/command/check.go @@ -150,8 +150,8 @@ func newCheckPerfCommand(cmd *cobra.Command, args []string) { } go func() { - cctx, _ := context.WithTimeout(context.Background(), time.Duration(cfg.duration)*time.Second) - + cctx, ccancel := context.WithTimeout(context.Background(), time.Duration(cfg.duration)*time.Second) + defer ccancel() for limit.Wait(cctx) == nil { binary.PutVarint(k, int64(rand.Int63n(math.MaxInt64))) requests <- v3.OpPut(checkPerfPrefix+string(k), v) diff --git a/github.com/coreos/etcd/etcdctl/ctlv3/command/compaction_command.go b/github.com/coreos/etcd/etcdctl/ctlv3/command/compaction_command.go index 1b791b9d16..59e8990fb7 100644 --- a/github.com/coreos/etcd/etcdctl/ctlv3/command/compaction_command.go +++ b/github.com/coreos/etcd/etcdctl/ctlv3/command/compaction_command.go @@ -57,7 +57,6 @@ func compactionCommandFunc(cmd *cobra.Command, args []string) { cancel() if cerr != nil { ExitWithError(ExitError, cerr) - return } fmt.Println("compacted revision", rev) } diff --git 
a/github.com/coreos/etcd/etcdctl/ctlv3/command/ep_command.go b/github.com/coreos/etcd/etcdctl/ctlv3/command/ep_command.go index dab6d20dfd..329875cd79 100644 --- a/github.com/coreos/etcd/etcdctl/ctlv3/command/ep_command.go +++ b/github.com/coreos/etcd/etcdctl/ctlv3/command/ep_command.go @@ -27,6 +27,8 @@ import ( "github.com/spf13/cobra" ) +var epClusterEndpoints bool + // NewEndpointCommand returns the cobra command for "endpoint". func NewEndpointCommand() *cobra.Command { ec := &cobra.Command{ @@ -34,6 +36,7 @@ func NewEndpointCommand() *cobra.Command { Short: "Endpoint related commands", } + ec.PersistentFlags().BoolVar(&epClusterEndpoints, "cluster", false, "use all endpoints from the cluster member list") ec.AddCommand(newEpHealthCommand()) ec.AddCommand(newEpStatusCommand()) @@ -64,16 +67,12 @@ The items in the lists are endpoint, ID, version, db size, is leader, raft term, // epHealthCommandFunc executes the "endpoint-health" command. func epHealthCommandFunc(cmd *cobra.Command, args []string) { flags.SetPflagsFromEnv("ETCDCTL", cmd.InheritedFlags()) - endpoints, err := cmd.Flags().GetStringSlice("endpoints") - if err != nil { - ExitWithError(ExitError, err) - } sec := secureCfgFromCmd(cmd) dt := dialTimeoutFromCmd(cmd) auth := authCfgFromCmd(cmd) cfgs := []*v3.Config{} - for _, ep := range endpoints { + for _, ep := range endpointsFromCluster(cmd) { cfg, err := newClientCfg([]string{ep}, dt, sec, auth) if err != nil { ExitWithError(ExitBadArgs, err) @@ -121,7 +120,7 @@ func epStatusCommandFunc(cmd *cobra.Command, args []string) { statusList := []epStatus{} var err error - for _, ep := range c.Endpoints() { + for _, ep := range endpointsFromCluster(cmd) { ctx, cancel := commandCtx(cmd) resp, serr := c.Status(ctx, ep) cancel() @@ -139,3 +138,30 @@ func epStatusCommandFunc(cmd *cobra.Command, args []string) { os.Exit(ExitError) } } + +func endpointsFromCluster(cmd *cobra.Command) []string { + if !epClusterEndpoints { + endpoints, err := cmd.Flags().GetStringSlice("endpoints") + if err != nil { + ExitWithError(ExitError, err) + } + return endpoints + } + c := mustClientFromCmd(cmd) + ctx, cancel := commandCtx(cmd) + defer func() { + c.Close() + cancel() + }() + membs, err := c.MemberList(ctx) + if err != nil { + err = fmt.Errorf("failed to fetch endpoints from etcd cluster member list: %v", err) + ExitWithError(ExitError, err) + } + + ret := []string{} + for _, m := range membs.Members { + ret = append(ret, m.ClientURLs...) 
+ } + return ret +} diff --git a/github.com/coreos/etcd/etcdctl/ctlv3/command/error.go b/github.com/coreos/etcd/etcdctl/ctlv3/command/error.go index 3188cd5e46..314b07f052 100644 --- a/github.com/coreos/etcd/etcdctl/ctlv3/command/error.go +++ b/github.com/coreos/etcd/etcdctl/ctlv3/command/error.go @@ -34,7 +34,7 @@ const ( ) func ExitWithError(code int, err error) { - fmt.Fprintln(os.Stderr, "Error: ", err) + fmt.Fprintln(os.Stderr, "Error:", err) if cerr, ok := err.(*client.ClusterError); ok { fmt.Fprintln(os.Stderr, cerr.Detail()) } diff --git a/github.com/coreos/etcd/etcdctl/ctlv3/command/lease_command.go b/github.com/coreos/etcd/etcdctl/ctlv3/command/lease_command.go index d0649775f0..0afb3d69c7 100644 --- a/github.com/coreos/etcd/etcdctl/ctlv3/command/lease_command.go +++ b/github.com/coreos/etcd/etcdctl/ctlv3/command/lease_command.go @@ -67,7 +67,7 @@ func leaseGrantCommandFunc(cmd *cobra.Command, args []string) { if err != nil { ExitWithError(ExitError, fmt.Errorf("failed to grant lease (%v)\n", err)) } - fmt.Printf("lease %016x granted with TTL(%ds)\n", resp.ID, resp.TTL) + display.Grant(*resp) } // NewLeaseRevokeCommand returns the cobra command for "lease revoke". @@ -90,12 +90,12 @@ func leaseRevokeCommandFunc(cmd *cobra.Command, args []string) { id := leaseFromArgs(args[0]) ctx, cancel := commandCtx(cmd) - _, err := mustClientFromCmd(cmd).Revoke(ctx, id) + resp, err := mustClientFromCmd(cmd).Revoke(ctx, id) cancel() if err != nil { ExitWithError(ExitError, fmt.Errorf("failed to revoke lease (%v)\n", err)) } - fmt.Printf("lease %016x revoked\n", id) + display.Revoke(id, *resp) } var timeToLiveKeys bool @@ -154,9 +154,12 @@ func leaseKeepAliveCommandFunc(cmd *cobra.Command, args []string) { } for resp := range respc { - fmt.Printf("lease %016x keepalived with TTL(%d)\n", resp.ID, resp.TTL) + display.KeepAlive(*resp) + } + + if _, ok := (display).(*simplePrinter); ok { + fmt.Printf("lease %016x expired or revoked.\n", id) } - fmt.Printf("lease %016x expired or revoked.\n", id) } func leaseFromArgs(arg string) v3.LeaseID { diff --git a/github.com/coreos/etcd/etcdctl/ctlv3/command/lock_command.go b/github.com/coreos/etcd/etcdctl/ctlv3/command/lock_command.go index 2e55c49df8..e130493f81 100644 --- a/github.com/coreos/etcd/etcdctl/ctlv3/command/lock_command.go +++ b/github.com/coreos/etcd/etcdctl/ctlv3/command/lock_command.go @@ -16,7 +16,9 @@ package command import ( "errors" + "fmt" "os" + "os/exec" "os/signal" "syscall" @@ -29,7 +31,7 @@ import ( // NewLockCommand returns the cobra command for "lock". 
func NewLockCommand() *cobra.Command { c := &cobra.Command{ - Use: "lock <lockname>", + Use: "lock <lockname> [exec-command arg1 arg2 ...]", Short: "Acquires a named lock", Run: lockCommandFunc, } @@ -37,16 +39,16 @@ } func lockCommandFunc(cmd *cobra.Command, args []string) { - if len(args) != 1 { - ExitWithError(ExitBadArgs, errors.New("lock takes one lock name argument.")) + if len(args) == 0 { + ExitWithError(ExitBadArgs, errors.New("lock takes a lock name argument and an optional command to execute.")) } c := mustClientFromCmd(cmd) - if err := lockUntilSignal(c, args[0]); err != nil { + if err := lockUntilSignal(c, args[0], args[1:]); err != nil { ExitWithError(ExitError, err) } } -func lockUntilSignal(c *clientv3.Client, lockname string) error { +func lockUntilSignal(c *clientv3.Client, lockname string, cmdArgs []string) error { s, err := concurrency.NewSession(c) if err != nil { return err @@ -69,6 +71,18 @@ func lockUntilSignal(c *clientv3.Client, lockname string) error { return err } + if len(cmdArgs) > 0 { + cmd := exec.Command(cmdArgs[0], cmdArgs[1:]...) + cmd.Env = append(environLockResponse(m), os.Environ()...) + cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr + err := cmd.Run() + unlockErr := m.Unlock(context.TODO()) + if err != nil { + return err + } + return unlockErr + } + k, kerr := c.Get(ctx, m.Key()) if kerr != nil { return kerr @@ -76,7 +90,6 @@ func lockUntilSignal(c *clientv3.Client, lockname string) error { if len(k.Kvs) == 0 { return errors.New("lock lost on init") } - display.Get(*k) select { @@ -87,3 +100,10 @@ func lockUntilSignal(c *clientv3.Client, lockname string) error { return errors.New("session expired") } + +func environLockResponse(m *concurrency.Mutex) []string { + return []string{ + "ETCD_LOCK_KEY=" + m.Key(), + fmt.Sprintf("ETCD_LOCK_REV=%d", m.Header().Revision), + } +} diff --git a/github.com/coreos/etcd/etcdctl/ctlv3/command/move_leader_command.go b/github.com/coreos/etcd/etcdctl/ctlv3/command/move_leader_command.go new file mode 100644 index 0000000000..3bd44255ef --- /dev/null +++ b/github.com/coreos/etcd/etcdctl/ctlv3/command/move_leader_command.go @@ -0,0 +1,87 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package command + +import ( + "fmt" + "strconv" + "time" + + "github.com/coreos/etcd/clientv3" + "github.com/spf13/cobra" +) + +// NewMoveLeaderCommand returns the cobra command for "move-leader". +func NewMoveLeaderCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "move-leader <transferee-member-id>", + Short: "Transfers leadership to another etcd cluster member.", + Run: transferLeadershipCommandFunc, + } + return cmd +} + +// transferLeadershipCommandFunc executes the "move-leader" command.
+func transferLeadershipCommandFunc(cmd *cobra.Command, args []string) { + if len(args) != 1 { + ExitWithError(ExitBadArgs, fmt.Errorf("move-leader command needs 1 argument")) + } + target, err := strconv.ParseUint(args[0], 16, 64) + if err != nil { + ExitWithError(ExitBadArgs, err) + } + + c := mustClientFromCmd(cmd) + eps := c.Endpoints() + c.Close() + + ctx, cancel := commandCtx(cmd) + + // find current leader + var leaderCli *clientv3.Client + var leaderID uint64 + for _, ep := range eps { + cli, err := clientv3.New(clientv3.Config{ + Endpoints: []string{ep}, + DialTimeout: 3 * time.Second, + }) + if err != nil { + ExitWithError(ExitError, err) + } + resp, err := cli.Status(ctx, ep) + if err != nil { + ExitWithError(ExitError, err) + } + + if resp.Header.GetMemberId() == resp.Leader { + leaderCli = cli + leaderID = resp.Leader + break + } + cli.Close() + } + if leaderCli == nil { + ExitWithError(ExitBadArgs, fmt.Errorf("no leader endpoint given at %v", eps)) + } + + var resp *clientv3.MoveLeaderResponse + resp, err = leaderCli.MoveLeader(ctx, target) + cancel() + if err != nil { + ExitWithError(ExitError, err) + } + + display.MoveLeader(leaderID, target, *resp) +} diff --git a/github.com/coreos/etcd/etcdctl/ctlv3/command/printer.go b/github.com/coreos/etcd/etcdctl/ctlv3/command/printer.go index 6df3bfe5c2..b84dcda74a 100644 --- a/github.com/coreos/etcd/etcdctl/ctlv3/command/printer.go +++ b/github.com/coreos/etcd/etcdctl/ctlv3/command/printer.go @@ -32,6 +32,9 @@ type printer interface { Txn(v3.TxnResponse) Watch(v3.WatchResponse) + Grant(r v3.LeaseGrantResponse) + Revoke(id v3.LeaseID, r v3.LeaseRevokeResponse) + KeepAlive(r v3.LeaseKeepAliveResponse) TimeToLive(r v3.LeaseTimeToLiveResponse, keys bool) MemberAdd(v3.MemberAddResponse) @@ -40,6 +43,7 @@ type printer interface { MemberList(v3.MemberListResponse) EndpointStatus([]epStatus) + MoveLeader(leader, target uint64, r v3.MoveLeaderResponse) Alarm(v3.AlarmResponse) DBStatus(dbstatus) @@ -81,13 +85,18 @@ type printerRPC struct { p func(interface{}) } -func (p *printerRPC) Del(r v3.DeleteResponse) { p.p((*pb.DeleteRangeResponse)(&r)) } -func (p *printerRPC) Get(r v3.GetResponse) { p.p((*pb.RangeResponse)(&r)) } -func (p *printerRPC) Put(r v3.PutResponse) { p.p((*pb.PutResponse)(&r)) } -func (p *printerRPC) Txn(r v3.TxnResponse) { p.p((*pb.TxnResponse)(&r)) } -func (p *printerRPC) Watch(r v3.WatchResponse) { p.p(&r) } +func (p *printerRPC) Del(r v3.DeleteResponse) { p.p((*pb.DeleteRangeResponse)(&r)) } +func (p *printerRPC) Get(r v3.GetResponse) { p.p((*pb.RangeResponse)(&r)) } +func (p *printerRPC) Put(r v3.PutResponse) { p.p((*pb.PutResponse)(&r)) } +func (p *printerRPC) Txn(r v3.TxnResponse) { p.p((*pb.TxnResponse)(&r)) } +func (p *printerRPC) Watch(r v3.WatchResponse) { p.p(&r) } + +func (p *printerRPC) Grant(r v3.LeaseGrantResponse) { p.p(r) } +func (p *printerRPC) Revoke(id v3.LeaseID, r v3.LeaseRevokeResponse) { p.p(r) } +func (p *printerRPC) KeepAlive(r v3.LeaseKeepAliveResponse) { p.p(r) } func (p *printerRPC) TimeToLive(r v3.LeaseTimeToLiveResponse, keys bool) { p.p(&r) } -func (p *printerRPC) MemberAdd(r v3.MemberAddResponse) { p.p((*pb.MemberAddResponse)(&r)) } + +func (p *printerRPC) MemberAdd(r v3.MemberAddResponse) { p.p((*pb.MemberAddResponse)(&r)) } func (p *printerRPC) MemberRemove(id uint64, r v3.MemberRemoveResponse) { p.p((*pb.MemberRemoveResponse)(&r)) } @@ -96,7 +105,9 @@ func (p *printerRPC) MemberUpdate(id uint64, r v3.MemberUpdateResponse) { } func (p *printerRPC) MemberList(r v3.MemberListResponse) { 
p.p((*pb.MemberListResponse)(&r)) } func (p *printerRPC) Alarm(r v3.AlarmResponse) { p.p((*pb.AlarmResponse)(&r)) } - +func (p *printerRPC) MoveLeader(leader, target uint64, r v3.MoveLeaderResponse) { + p.p((*pb.MoveLeaderResponse)(&r)) +} func (p *printerRPC) RoleAdd(_ string, r v3.AuthRoleAddResponse) { p.p((*pb.AuthRoleAddResponse)(&r)) } func (p *printerRPC) RoleGet(_ string, r v3.AuthRoleGetResponse) { p.p((*pb.AuthRoleGetResponse)(&r)) } func (p *printerRPC) RoleDelete(_ string, r v3.AuthRoleDeleteResponse) { @@ -137,6 +148,8 @@ func newPrinterUnsupported(n string) printer { func (p *printerUnsupported) EndpointStatus([]epStatus) { p.p(nil) } func (p *printerUnsupported) DBStatus(dbstatus) { p.p(nil) } +func (p *printerUnsupported) MoveLeader(leader, target uint64, r v3.MoveLeaderResponse) { p.p(nil) } + func makeMemberListTable(r v3.MemberListResponse) (hdr []string, rows [][]string) { hdr = []string{"ID", "Status", "Name", "Peer Addrs", "Client Addrs"} for _, m := range r.Members { @@ -159,10 +172,10 @@ func makeEndpointStatusTable(statusList []epStatus) (hdr []string, rows [][]stri hdr = []string{"endpoint", "ID", "version", "db size", "is leader", "raft term", "raft index"} for _, status := range statusList { rows = append(rows, []string{ - fmt.Sprint(status.Ep), + status.Ep, fmt.Sprintf("%x", status.Resp.Header.MemberId), - fmt.Sprint(status.Resp.Version), - fmt.Sprint(humanize.Bytes(uint64(status.Resp.DbSize))), + status.Resp.Version, + humanize.Bytes(uint64(status.Resp.DbSize)), fmt.Sprint(status.Resp.Leader == status.Resp.Header.MemberId), fmt.Sprint(status.Resp.RaftTerm), fmt.Sprint(status.Resp.RaftIndex), diff --git a/github.com/coreos/etcd/etcdctl/ctlv3/command/printer_fields.go b/github.com/coreos/etcd/etcdctl/ctlv3/command/printer_fields.go index 9558b9322a..2f19a5cf08 100644 --- a/github.com/coreos/etcd/etcdctl/ctlv3/command/printer_fields.go +++ b/github.com/coreos/etcd/etcdctl/ctlv3/command/printer_fields.go @@ -30,7 +30,7 @@ func (p *fieldsPrinter) kv(pfx string, kv *spb.KeyValue) { fmt.Printf("\"%sModRevision\" : %d\n", pfx, kv.ModRevision) fmt.Printf("\"%sVersion\" : %d\n", pfx, kv.Version) fmt.Printf("\"%sValue\" : %q\n", pfx, string(kv.Value)) - fmt.Printf("\"%sLease\" : %d\n", pfx, string(kv.Lease)) + fmt.Printf("\"%sLease\" : %d\n", pfx, kv.Lease) } func (p *fieldsPrinter) hdr(h *pb.ResponseHeader) { @@ -92,6 +92,22 @@ func (p *fieldsPrinter) Watch(resp v3.WatchResponse) { } } +func (p *fieldsPrinter) Grant(r v3.LeaseGrantResponse) { + p.hdr(r.ResponseHeader) + fmt.Println(`"ID" :`, r.ID) + fmt.Println(`"TTL" :`, r.TTL) +} + +func (p *fieldsPrinter) Revoke(id v3.LeaseID, r v3.LeaseRevokeResponse) { + p.hdr(r.Header) +} + +func (p *fieldsPrinter) KeepAlive(r v3.LeaseKeepAliveResponse) { + p.hdr(r.ResponseHeader) + fmt.Println(`"ID" :`, r.ID) + fmt.Println(`"TTL" :`, r.TTL) +} + func (p *fieldsPrinter) TimeToLive(r v3.LeaseTimeToLiveResponse, keys bool) { p.hdr(r.ResponseHeader) fmt.Println(`"ID" :`, r.ID) diff --git a/github.com/coreos/etcd/etcdctl/ctlv3/command/printer_simple.go b/github.com/coreos/etcd/etcdctl/ctlv3/command/printer_simple.go index 96e1032e6e..00dda47f77 100644 --- a/github.com/coreos/etcd/etcdctl/ctlv3/command/printer_simple.go +++ b/github.com/coreos/etcd/etcdctl/ctlv3/command/printer_simple.go @@ -20,6 +20,7 @@ import ( v3 "github.com/coreos/etcd/clientv3" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "github.com/coreos/etcd/pkg/types" ) type simplePrinter struct { @@ -79,6 +80,18 @@ func (s *simplePrinter) Watch(resp 
v3.WatchResponse) { } } +func (s *simplePrinter) Grant(resp v3.LeaseGrantResponse) { + fmt.Printf("lease %016x granted with TTL(%ds)\n", resp.ID, resp.TTL) +} + +func (p *simplePrinter) Revoke(id v3.LeaseID, r v3.LeaseRevokeResponse) { + fmt.Printf("lease %016x revoked\n", id) +} + +func (p *simplePrinter) KeepAlive(resp v3.LeaseKeepAliveResponse) { + fmt.Printf("lease %016x keepalived with TTL(%d)\n", resp.ID, resp.TTL) +} + func (s *simplePrinter) TimeToLive(resp v3.LeaseTimeToLiveResponse, keys bool) { txt := fmt.Sprintf("lease %016x granted with TTL(%ds), remaining(%ds)", resp.ID, resp.GrantedTTL, resp.TTL) if keys { @@ -130,6 +143,10 @@ func (s *simplePrinter) DBStatus(ds dbstatus) { } } +func (s *simplePrinter) MoveLeader(leader, target uint64, r v3.MoveLeaderResponse) { + fmt.Printf("Leadership transferred from %s to %s\n", types.ID(leader), types.ID(target)) +} + func (s *simplePrinter) RoleAdd(role string, r v3.AuthRoleAddResponse) { fmt.Printf("Role %s created\n", role) } diff --git a/github.com/coreos/etcd/etcdctl/ctlv3/command/role_command.go b/github.com/coreos/etcd/etcdctl/ctlv3/command/role_command.go index f75a8efceb..b467a56322 100644 --- a/github.com/coreos/etcd/etcdctl/ctlv3/command/role_command.go +++ b/github.com/coreos/etcd/etcdctl/ctlv3/command/role_command.go @@ -23,8 +23,8 @@ import ( ) var ( - grantPermissionPrefix bool - permFromKey bool + rolePermPrefix bool + rolePermFromKey bool ) // NewRoleCommand returns the cobra command for "role". @@ -83,8 +83,8 @@ func newRoleGrantPermissionCommand() *cobra.Command { Run: roleGrantPermissionCommandFunc, } - cmd.Flags().BoolVar(&grantPermissionPrefix, "prefix", false, "grant a prefix permission") - cmd.Flags().BoolVar(&permFromKey, "from-key", false, "grant a permission of keys that are greater than or equal to the given key using byte compare") + cmd.Flags().BoolVar(&rolePermPrefix, "prefix", false, "grant a prefix permission") + cmd.Flags().BoolVar(&rolePermFromKey, "from-key", false, "grant a permission of keys that are greater than or equal to the given key using byte compare") return cmd } @@ -96,7 +96,8 @@ func newRoleRevokePermissionCommand() *cobra.Command { Run: roleRevokePermissionCommandFunc, } - cmd.Flags().BoolVar(&permFromKey, "from-key", false, "grant a permission of keys that are greater than or equal to the given key using byte compare") + cmd.Flags().BoolVar(&rolePermPrefix, "prefix", false, "revoke a prefix permission") + cmd.Flags().BoolVar(&rolePermFromKey, "from-key", false, "revoke a permission of keys that are greater than or equal to the given key using byte compare") return cmd } @@ -169,27 +170,10 @@ func roleGrantPermissionCommandFunc(cmd *cobra.Command, args []string) { ExitWithError(ExitBadArgs, err) } - rangeEnd := "" - if 4 <= len(args) { - if grantPermissionPrefix { - ExitWithError(ExitBadArgs, fmt.Errorf("don't pass both of --prefix option and range end to grant permission command")) - } - - if permFromKey { - ExitWithError(ExitBadArgs, fmt.Errorf("don't pass both of --from-key option and range end to grant permission command")) - } - - rangeEnd = args[3] - } else if grantPermissionPrefix { - if permFromKey { - ExitWithError(ExitBadArgs, fmt.Errorf("don't pass both of --from-key option and --prefix option to grant permission command")) - } - - rangeEnd = clientv3.GetPrefixRangeEnd(args[2]) - } else if permFromKey { - rangeEnd = "\x00" + rangeEnd, rerr := rangeEndFromPermFlags(args[2:]) + if rerr != nil { + ExitWithError(ExitBadArgs, rerr) } - resp, err := 
mustClientFromCmd(cmd).Auth.RoleGrantPermission(context.TODO(), args[0], args[2], rangeEnd, perm) if err != nil { ExitWithError(ExitError, err) @@ -204,16 +188,36 @@ func roleRevokePermissionCommandFunc(cmd *cobra.Command, args []string) { ExitWithError(ExitBadArgs, fmt.Errorf("role revoke-permission command requires role name and key [endkey] as its argument.")) } - rangeEnd := "" - if 3 <= len(args) { - rangeEnd = args[2] - } else if permFromKey { - rangeEnd = "\x00" + rangeEnd, rerr := rangeEndFromPermFlags(args[1:]) + if rerr != nil { + ExitWithError(ExitBadArgs, rerr) } - resp, err := mustClientFromCmd(cmd).Auth.RoleRevokePermission(context.TODO(), args[0], args[1], rangeEnd) if err != nil { ExitWithError(ExitError, err) } display.RoleRevokePermission(args[0], args[1], rangeEnd, *resp) } + +func rangeEndFromPermFlags(args []string) (string, error) { + if len(args) == 1 { + if rolePermPrefix { + if rolePermFromKey { + return "", fmt.Errorf("--from-key and --prefix flags are mutually exclusive") + } + return clientv3.GetPrefixRangeEnd(args[0]), nil + } + if rolePermFromKey { + return "\x00", nil + } + // single key case + return "", nil + } + if rolePermPrefix { + return "", fmt.Errorf("unexpected endkey argument with --prefix flag") + } + if rolePermFromKey { + return "", fmt.Errorf("unexpected endkey argument with --from-key flag") + } + return args[1], nil +} diff --git a/github.com/coreos/etcd/etcdctl/ctlv3/command/snapshot_command.go b/github.com/coreos/etcd/etcdctl/ctlv3/command/snapshot_command.go index eb8630fd13..db12df50af 100644 --- a/github.com/coreos/etcd/etcdctl/ctlv3/command/snapshot_command.go +++ b/github.com/coreos/etcd/etcdctl/ctlv3/command/snapshot_command.go @@ -27,7 +27,7 @@ import ( "reflect" "strings" - "github.com/boltdb/bolt" + bolt "github.com/coreos/bbolt" "github.com/coreos/etcd/etcdserver" "github.com/coreos/etcd/etcdserver/etcdserverpb" "github.com/coreos/etcd/etcdserver/membership" diff --git a/github.com/coreos/etcd/etcdctl/ctlv3/command/txn_command.go b/github.com/coreos/etcd/etcdctl/ctlv3/command/txn_command.go index d5e75b05b4..aa8491be3e 100644 --- a/github.com/coreos/etcd/etcdctl/ctlv3/command/txn_command.go +++ b/github.com/coreos/etcd/etcdctl/ctlv3/command/txn_command.go @@ -52,9 +52,9 @@ func txnCommandFunc(cmd *cobra.Command, args []string) { txn := mustClientFromCmd(cmd).Txn(context.Background()) promptInteractive("compares:") txn.If(readCompares(reader)...) - promptInteractive("success requests (get, put, delete):") + promptInteractive("success requests (get, put, del):") txn.Then(readOps(reader)...) - promptInteractive("failure requests (get, put, delete):") + promptInteractive("failure requests (get, put, del):") txn.Else(readOps(reader)...) 
resp, err := txn.Commit() diff --git a/github.com/coreos/etcd/etcdctl/ctlv3/command/watch_command.go b/github.com/coreos/etcd/etcdctl/ctlv3/command/watch_command.go index c7cc4f7807..17b670e9d3 100644 --- a/github.com/coreos/etcd/etcdctl/ctlv3/command/watch_command.go +++ b/github.com/coreos/etcd/etcdctl/ctlv3/command/watch_command.go @@ -129,6 +129,9 @@ func getWatchChan(c *clientv3.Client, args []string) (clientv3.WatchChan, error) func printWatchCh(ch clientv3.WatchChan) { for resp := range ch { + if resp.Canceled { + fmt.Fprintf(os.Stderr, "watch was canceled (%v)\n", resp.Err()) + } display.Watch(resp) } } diff --git a/github.com/coreos/etcd/etcdctl/ctlv3/ctl.go b/github.com/coreos/etcd/etcdctl/ctlv3/ctl.go index 92e715d97d..2ca2b40962 100644 --- a/github.com/coreos/etcd/etcdctl/ctlv3/ctl.go +++ b/github.com/coreos/etcd/etcdctl/ctlv3/ctl.go @@ -69,6 +69,7 @@ func init() { command.NewAlarmCommand(), command.NewDefragCommand(), command.NewEndpointCommand(), + command.NewMoveLeaderCommand(), command.NewWatchCommand(), command.NewVersionCommand(), command.NewLeaseCommand(), diff --git a/github.com/coreos/etcd/etcdctl/ctlv3/ctl_cov.go b/github.com/coreos/etcd/etcdctl/ctlv3/ctl_cov.go index 79cf93cdcc..1ce1f524ae 100644 --- a/github.com/coreos/etcd/etcdctl/ctlv3/ctl_cov.go +++ b/github.com/coreos/etcd/etcdctl/ctlv3/ctl_cov.go @@ -26,7 +26,7 @@ import ( func Start() { // ETCDCTL_ARGS=etcdctl_test arg1 arg2... // SetArgs() takes arg1 arg2... - rootCmd.SetArgs(strings.Split(os.Getenv("ETCDCTL_ARGS"), "\xff")[1:]) + rootCmd.SetArgs(strings.Split(os.Getenv("ETCDCTL_ARGS"), "\xe7\xcd")[1:]) if err := rootCmd.Execute(); err != nil { command.ExitWithError(command.ExitError, err) } diff --git a/github.com/coreos/etcd/etcdmain/config.go b/github.com/coreos/etcd/etcdmain/config.go index b8732200ae..4bc900bc1e 100644 --- a/github.com/coreos/etcd/etcdmain/config.go +++ b/github.com/coreos/etcd/etcdmain/config.go @@ -20,12 +20,14 @@ import ( "flag" "fmt" "io/ioutil" + "net/url" "os" "runtime" "strings" "github.com/coreos/etcd/embed" "github.com/coreos/etcd/pkg/flags" + "github.com/coreos/etcd/pkg/types" "github.com/coreos/etcd/version" "github.com/ghodss/yaml" ) @@ -131,6 +133,7 @@ func newConfig() *config { fs.StringVar(&cfg.WalDir, "wal-dir", cfg.WalDir, "Path to the dedicated wal directory.") fs.Var(flags.NewURLsValue(embed.DefaultListenPeerURLs), "listen-peer-urls", "List of URLs to listen on for peer traffic.") fs.Var(flags.NewURLsValue(embed.DefaultListenClientURLs), "listen-client-urls", "List of URLs to listen on for client traffic.") + fs.StringVar(&cfg.ListenMetricsUrlsJSON, "listen-metrics-urls", "", "List of URLs to listen on for metrics.") fs.UintVar(&cfg.MaxSnapFiles, "max-snapshots", cfg.MaxSnapFiles, "Maximum number of snapshot files to retain (0 is unlimited).") fs.UintVar(&cfg.MaxWalFiles, "max-wals", cfg.MaxWalFiles, "Maximum number of wal files to retain (0 is unlimited).") fs.StringVar(&cfg.Name, "name", cfg.Name, "Human-readable name for this member.") @@ -138,6 +141,8 @@ func newConfig() *config { fs.UintVar(&cfg.TickMs, "heartbeat-interval", cfg.TickMs, "Time (in milliseconds) of a heartbeat interval.") fs.UintVar(&cfg.ElectionMs, "election-timeout", cfg.ElectionMs, "Time (in milliseconds) for an election to timeout.") fs.Int64Var(&cfg.QuotaBackendBytes, "quota-backend-bytes", cfg.QuotaBackendBytes, "Raise alarms when backend size exceeds the given quota. 
0 means use the default quota.") + fs.UintVar(&cfg.MaxTxnOps, "max-txn-ops", cfg.MaxTxnOps, "Maximum number of operations permitted in a transaction.") + fs.UintVar(&cfg.MaxRequestBytes, "max-request-bytes", cfg.MaxRequestBytes, "Maximum client request size in bytes the server will accept.") // clustering fs.Var(flags.NewURLsValue(embed.DefaultInitialAdvertisePeerURLs), "initial-advertise-peer-urls", "List of this member's peer URLs to advertise to the rest of the cluster.") @@ -177,6 +182,7 @@ func newConfig() *config { fs.StringVar(&cfg.ClientTLSInfo.CertFile, "cert-file", "", "Path to the client server TLS cert file.") fs.StringVar(&cfg.ClientTLSInfo.KeyFile, "key-file", "", "Path to the client server TLS key file.") fs.BoolVar(&cfg.ClientTLSInfo.ClientCertAuth, "client-cert-auth", false, "Enable client cert authentication.") + fs.StringVar(&cfg.ClientTLSInfo.CRLFile, "client-crl-file", "", "Path to the client certificate revocation list file.") fs.StringVar(&cfg.ClientTLSInfo.TrustedCAFile, "trusted-ca-file", "", "Path to the client server TLS trusted CA key file.") fs.BoolVar(&cfg.ClientAutoTLS, "auto-tls", false, "Client TLS using generated certificates") fs.StringVar(&cfg.PeerTLSInfo.CAFile, "peer-ca-file", "", "DEPRECATED: Path to the peer server TLS CA file.") @@ -185,6 +191,7 @@ func newConfig() *config { fs.BoolVar(&cfg.PeerTLSInfo.ClientCertAuth, "peer-client-cert-auth", false, "Enable peer client cert authentication.") fs.StringVar(&cfg.PeerTLSInfo.TrustedCAFile, "peer-trusted-ca-file", "", "Path to the peer server TLS trusted CA file.") fs.BoolVar(&cfg.PeerAutoTLS, "peer-auto-tls", false, "Peer TLS using generated certificates") + fs.StringVar(&cfg.PeerTLSInfo.CRLFile, "peer-crl-file", "", "Path to the peer certificate revocation list file.") // logging fs.BoolVar(&cfg.Debug, "debug", false, "Enable debug-level logging for etcd.") @@ -197,7 +204,8 @@ func newConfig() *config { // version fs.BoolVar(&cfg.printVersion, "version", false, "Print the version and exit.") - fs.IntVar(&cfg.AutoCompactionRetention, "auto-compaction-retention", 0, "Auto compaction retention for mvcc key value store in hour. 0 means disable auto compaction.") + fs.IntVar(&cfg.AutoCompactionRetention, "auto-compaction-retention", 0, "Auto compaction retention for mvcc key value store. 0 means disable auto compaction.") + fs.StringVar(&cfg.AutoCompactionMode, "auto-compaction-mode", "periodic", "Interpret 'auto-compaction-retention' as hours when 'periodic', as revision numbers when 'revision'.") // pprof profiler via HTTP fs.BoolVar(&cfg.EnablePprof, "enable-pprof", false, "Enable runtime profiling data via HTTP server. 
Address is at client URL + \"/debug/pprof/\"") @@ -257,6 +265,15 @@ func (cfg *config) configFromCmdLine() error { cfg.APUrls = flags.URLsFromFlag(cfg.FlagSet, "initial-advertise-peer-urls") cfg.LCUrls = flags.URLsFromFlag(cfg.FlagSet, "listen-client-urls") cfg.ACUrls = flags.URLsFromFlag(cfg.FlagSet, "advertise-client-urls") + + if len(cfg.ListenMetricsUrlsJSON) > 0 { + u, err := types.NewURLs(strings.Split(cfg.ListenMetricsUrlsJSON, ",")) + if err != nil { + plog.Fatalf("unexpected error setting up listen-metrics-urls: %v", err) + } + cfg.ListenMetricsUrls = []url.URL(u) + } + cfg.ClusterState = cfg.clusterState.String() cfg.Fallback = cfg.fallback.String() cfg.Proxy = cfg.proxy.String() diff --git a/github.com/coreos/etcd/etcdmain/config_test.go b/github.com/coreos/etcd/etcdmain/config_test.go index 03c0bce201..c82afba475 100644 --- a/github.com/coreos/etcd/etcdmain/config_test.go +++ b/github.com/coreos/etcd/etcdmain/config_test.go @@ -149,7 +149,7 @@ func TestConfigFileClusteringFlags(t *testing.T) { Durl string `json:"discovery"` }{ { - // Use default name and generate a default inital-cluster + // Use default name and generate a default initial-cluster }, { Name: "non-default", diff --git a/github.com/coreos/etcd/etcdmain/etcd.go b/github.com/coreos/etcd/etcdmain/etcd.go index 2f7f00d61a..e5d5bd5b89 100644 --- a/github.com/coreos/etcd/etcdmain/etcd.go +++ b/github.com/coreos/etcd/etcdmain/etcd.go @@ -199,12 +199,24 @@ func startEtcd(cfg *embed.Config) (<-chan struct{}, <-chan error, error) { func startProxy(cfg *config) error { plog.Notice("proxy: this proxy supports v2 API only!") - pt, err := transport.NewTimeoutTransport(cfg.PeerTLSInfo, time.Duration(cfg.ProxyDialTimeoutMs)*time.Millisecond, time.Duration(cfg.ProxyReadTimeoutMs)*time.Millisecond, time.Duration(cfg.ProxyWriteTimeoutMs)*time.Millisecond) + clientTLSInfo := cfg.ClientTLSInfo + if clientTLSInfo.Empty() { + // Support old proxy behavior of defaulting to PeerTLSInfo + // for both client and peer connections. 
+ clientTLSInfo = cfg.PeerTLSInfo + } + clientTLSInfo.InsecureSkipVerify = cfg.ClientAutoTLS + cfg.PeerTLSInfo.InsecureSkipVerify = cfg.PeerAutoTLS + + pt, err := transport.NewTimeoutTransport(clientTLSInfo, time.Duration(cfg.ProxyDialTimeoutMs)*time.Millisecond, time.Duration(cfg.ProxyReadTimeoutMs)*time.Millisecond, time.Duration(cfg.ProxyWriteTimeoutMs)*time.Millisecond) if err != nil { return err } pt.MaxIdleConnsPerHost = httpproxy.DefaultMaxIdleConnsPerHost + if err = cfg.PeerSelfCert(); err != nil { + plog.Fatalf("could not get certs (%v)", err) + } tr, err := transport.NewTimeoutTransport(cfg.PeerTLSInfo, time.Duration(cfg.ProxyDialTimeoutMs)*time.Millisecond, time.Duration(cfg.ProxyReadTimeoutMs)*time.Millisecond, time.Duration(cfg.ProxyWriteTimeoutMs)*time.Millisecond) if err != nil { return err @@ -302,9 +314,28 @@ func startProxy(cfg *config) error { if cfg.isReadonlyProxy() { ph = httpproxy.NewReadonlyHandler(ph) } + + // setup self signed certs when serving https + cHosts, cTLS := []string{}, false + for _, u := range cfg.LCUrls { + cHosts = append(cHosts, u.Host) + cTLS = cTLS || u.Scheme == "https" + } + for _, u := range cfg.ACUrls { + cHosts = append(cHosts, u.Host) + cTLS = cTLS || u.Scheme == "https" + } + listenerTLS := cfg.ClientTLSInfo + if cfg.ClientAutoTLS && cTLS { + listenerTLS, err = transport.SelfCert(filepath.Join(cfg.Dir, "clientCerts"), cHosts) + if err != nil { + plog.Fatalf("proxy: could not initialize self-signed client certs (%v)", err) + } + } + // Start a proxy server goroutine for each listen address for _, u := range cfg.LCUrls { - l, err := transport.NewListener(u.Host, u.Scheme, &cfg.ClientTLSInfo) + l, err := transport.NewListener(u.Host, u.Scheme, &listenerTLS) if err != nil { return err } @@ -313,7 +344,7 @@ func startProxy(cfg *config) error { go func() { plog.Info("proxy: listening for client requests on ", host) mux := http.NewServeMux() - mux.Handle("/metrics", prometheus.Handler()) + mux.Handle("/metrics", prometheus.Handler()) // v2 proxy just uses the same port mux.Handle("/", ph) plog.Fatal(http.Serve(l, mux)) }() diff --git a/github.com/coreos/etcd/etcdmain/gateway.go b/github.com/coreos/etcd/etcdmain/gateway.go index 1a72bddcf0..5487414ebd 100644 --- a/github.com/coreos/etcd/etcdmain/gateway.go +++ b/github.com/coreos/etcd/etcdmain/gateway.go @@ -91,17 +91,28 @@ func stripSchema(eps []string) []string { return endpoints } -func startGateway(cmd *cobra.Command, args []string) { - endpoints := gatewayEndpoints - if eps := discoverEndpoints(gatewayDNSCluster, gatewayCA, gatewayInsecureDiscovery); len(eps) != 0 { - endpoints = eps +func startGateway(cmd *cobra.Command, args []string) { + srvs := discoverEndpoints(gatewayDNSCluster, gatewayCA, gatewayInsecureDiscovery) + if len(srvs.Endpoints) == 0 { + // no endpoints discovered, fall back to provided endpoints + srvs.Endpoints = gatewayEndpoints } - // Strip the schema from the endpoints because we start just a TCP proxy - endpoints = stripSchema(endpoints) + srvs.Endpoints = stripSchema(srvs.Endpoints) + if len(srvs.SRVs) == 0 { + for _, ep := range srvs.Endpoints { + h, p, err := net.SplitHostPort(ep) + if err != nil { + plog.Fatalf("error parsing endpoint %q", ep) + } + var port uint16 + fmt.Sscanf(p, "%d", &port) + srvs.SRVs = append(srvs.SRVs, &net.SRV{Target: h, Port: port}) + } + } - if len(endpoints) == 0 { + if len(srvs.Endpoints) == 0 { plog.Fatalf("no endpoints found") } @@ -113,7 +124,7 @@ func startGateway(cmd *cobra.Command, args []string) { tp := tcpproxy.TCPProxy{ Listener: 
l, - Endpoints: endpoints, + Endpoints: srvs.SRVs, MonitorInterval: getewayRetryDelay, } diff --git a/github.com/coreos/etcd/etcdmain/grpc_proxy.go b/github.com/coreos/etcd/etcdmain/grpc_proxy.go index 1f701ba129..0fdf69ef51 100644 --- a/github.com/coreos/etcd/etcdmain/grpc_proxy.go +++ b/github.com/coreos/etcd/etcdmain/grpc_proxy.go @@ -15,15 +15,19 @@ package etcdmain import ( - "crypto/tls" "fmt" + "math" "net" "net/http" + "net/url" "os" + "path/filepath" "time" "github.com/coreos/etcd/clientv3" "github.com/coreos/etcd/clientv3/namespace" + "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb" + "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" "github.com/coreos/etcd/pkg/debugutil" "github.com/coreos/etcd/pkg/transport" @@ -38,12 +42,26 @@ import ( var ( grpcProxyListenAddr string + grpcProxyMetricsListenAddr string grpcProxyEndpoints []string grpcProxyDNSCluster string grpcProxyInsecureDiscovery bool - grpcProxyCert string - grpcProxyKey string - grpcProxyCA string + grpcProxyDataDir string + + // tls for connecting to etcd + + grpcProxyCA string + grpcProxyCert string + grpcProxyKey string + grpcProxyInsecureSkipTLSVerify bool + + // tls for clients connecting to proxy + + grpcProxyListenCA string + grpcProxyListenCert string + grpcProxyListenKey string + grpcProxyListenAutoTLS bool + grpcProxyListenCRL string grpcProxyAdvertiseClientURL string grpcProxyResolverPrefix string @@ -78,21 +96,80 @@ func newGRPCProxyStartCommand() *cobra.Command { cmd.Flags().StringVar(&grpcProxyListenAddr, "listen-addr", "127.0.0.1:23790", "listen address") cmd.Flags().StringVar(&grpcProxyDNSCluster, "discovery-srv", "", "DNS domain used to bootstrap initial cluster") + cmd.Flags().StringVar(&grpcProxyMetricsListenAddr, "metrics-addr", "", "listen for /metrics requests on an additional interface") cmd.Flags().BoolVar(&grpcProxyInsecureDiscovery, "insecure-discovery", false, "accept insecure SRV records") cmd.Flags().StringSliceVar(&grpcProxyEndpoints, "endpoints", []string{"127.0.0.1:2379"}, "comma separated etcd cluster endpoints") - cmd.Flags().StringVar(&grpcProxyCert, "cert", "", "identify secure connections with etcd servers using this TLS certificate file") - cmd.Flags().StringVar(&grpcProxyKey, "key", "", "identify secure connections with etcd servers using this TLS key file") - cmd.Flags().StringVar(&grpcProxyCA, "cacert", "", "verify certificates of TLS-enabled secure etcd servers using this CA bundle") cmd.Flags().StringVar(&grpcProxyAdvertiseClientURL, "advertise-client-url", "127.0.0.1:23790", "advertise address to register (must be reachable by client)") cmd.Flags().StringVar(&grpcProxyResolverPrefix, "resolver-prefix", "", "prefix to use for registering proxy (must be shared with other grpc-proxy members)") cmd.Flags().IntVar(&grpcProxyResolverTTL, "resolver-ttl", 0, "specify TTL, in seconds, when registering proxy endpoints") cmd.Flags().StringVar(&grpcProxyNamespace, "namespace", "", "string to prefix to all keys for namespacing requests") cmd.Flags().BoolVar(&grpcProxyEnablePprof, "enable-pprof", false, `Enable runtime profiling data via HTTP server. 
Address is at client URL + "/debug/pprof/"`) + cmd.Flags().StringVar(&grpcProxyDataDir, "data-dir", "default.proxy", "Data directory for persistent data") + + // client TLS for connecting to server + cmd.Flags().StringVar(&grpcProxyCert, "cert", "", "identify secure connections with etcd servers using this TLS certificate file") + cmd.Flags().StringVar(&grpcProxyKey, "key", "", "identify secure connections with etcd servers using this TLS key file") + cmd.Flags().StringVar(&grpcProxyCA, "cacert", "", "verify certificates of TLS-enabled secure etcd servers using this CA bundle") + cmd.Flags().BoolVar(&grpcProxyInsecureSkipTLSVerify, "insecure-skip-tls-verify", false, "skip authentication of etcd server TLS certificates") + + // client TLS for connecting to proxy + cmd.Flags().StringVar(&grpcProxyListenCert, "cert-file", "", "identify secure connections to the proxy using this TLS certificate file") + cmd.Flags().StringVar(&grpcProxyListenKey, "key-file", "", "identify secure connections to the proxy using this TLS key file") + cmd.Flags().StringVar(&grpcProxyListenCA, "trusted-ca-file", "", "verify certificates of TLS-enabled secure proxy using this CA bundle") + cmd.Flags().BoolVar(&grpcProxyListenAutoTLS, "auto-tls", false, "proxy TLS using generated certificates") + cmd.Flags().StringVar(&grpcProxyListenCRL, "client-crl-file", "", "proxy client certificate revocation list file.") return &cmd } func startGRPCProxy(cmd *cobra.Command, args []string) { + checkArgs() + + tlsinfo := newTLS(grpcProxyListenCA, grpcProxyListenCert, grpcProxyListenKey) + if tlsinfo == nil && grpcProxyListenAutoTLS { + host := []string{"https://" + grpcProxyListenAddr} + dir := filepath.Join(grpcProxyDataDir, "fixtures", "proxy") + autoTLS, err := transport.SelfCert(dir, host) + if err != nil { + plog.Fatal(err) + } + tlsinfo = &autoTLS + } + if tlsinfo != nil { + plog.Infof("ServerTLS: %s", tlsinfo) + } + m := mustListenCMux(tlsinfo) + + grpcl := m.Match(cmux.HTTP2()) + defer func() { + grpcl.Close() + plog.Infof("stopping listening for grpc-proxy client requests on %s", grpcProxyListenAddr) + }() + + client := mustNewClient() + + srvhttp, httpl := mustHTTPListener(m, tlsinfo) + errc := make(chan error) + go func() { errc <- newGRPCProxyServer(client).Serve(grpcl) }() + go func() { errc <- srvhttp.Serve(httpl) }() + go func() { errc <- m.Serve() }() + if len(grpcProxyMetricsListenAddr) > 0 { + mhttpl := mustMetricsListener(tlsinfo) + go func() { + mux := http.NewServeMux() + mux.Handle("/metrics", prometheus.Handler()) + plog.Fatal(http.Serve(mhttpl, mux)) + }() + } + + // grpc-proxy is initialized, ready to serve + notifySystemd() + + fmt.Fprintln(os.Stderr, <-errc) + os.Exit(1) +} + +func checkArgs() { if grpcProxyResolverPrefix != "" && grpcProxyResolverTTL < 1 { fmt.Fprintln(os.Stderr, fmt.Errorf("invalid resolver-ttl %d", grpcProxyResolverTTL)) os.Exit(1) @@ -105,39 +182,79 @@ func startGRPCProxy(cmd *cobra.Command, args []string) { fmt.Fprintln(os.Stderr, fmt.Errorf("invalid advertise-client-url %q", grpcProxyAdvertiseClientURL)) os.Exit(1) } +} - if eps := discoverEndpoints(grpcProxyDNSCluster, grpcProxyCA, grpcProxyInsecureDiscovery); len(eps) != 0 { - grpcProxyEndpoints = eps +func mustNewClient() *clientv3.Client { + srvs := discoverEndpoints(grpcProxyDNSCluster, grpcProxyCA, grpcProxyInsecureDiscovery) + eps := srvs.Endpoints + if len(eps) == 0 { + eps = grpcProxyEndpoints } - - l, err := net.Listen("tcp", grpcProxyListenAddr) + cfg, err := newClientCfg(eps) if err != nil { fmt.Fprintln(os.Stderr, err) 
os.Exit(1) } - if l, err = transport.NewKeepAliveListener(l, "tcp", nil); err != nil { + client, err := clientv3.New(*cfg) + if err != nil { fmt.Fprintln(os.Stderr, err) os.Exit(1) } - plog.Infof("listening for grpc-proxy client requests on %s", grpcProxyListenAddr) - defer func() { - l.Close() - plog.Infof("stopping listening for grpc-proxy client requests on %s", grpcProxyListenAddr) - }() - m := cmux.New(l) + return client +} - cfg, err := newClientCfg() +func newClientCfg(eps []string) (*clientv3.Config, error) { + // set tls if any one tls option set + cfg := clientv3.Config{ + Endpoints: eps, + DialTimeout: 5 * time.Second, + } + tls := newTLS(grpcProxyCA, grpcProxyCert, grpcProxyKey) + if tls == nil && grpcProxyInsecureSkipTLSVerify { + tls = &transport.TLSInfo{} + } + if tls != nil { + clientTLS, err := tls.ClientConfig() + if err != nil { + return nil, err + } + clientTLS.InsecureSkipVerify = grpcProxyInsecureSkipTLSVerify + cfg.TLS = clientTLS + plog.Infof("ClientTLS: %s", tls) + } + return &cfg, nil +} + +func newTLS(ca, cert, key string) *transport.TLSInfo { + if ca == "" && cert == "" && key == "" { + return nil + } + return &transport.TLSInfo{CAFile: ca, CertFile: cert, KeyFile: key} +} + +func mustListenCMux(tlsinfo *transport.TLSInfo) cmux.CMux { + l, err := net.Listen("tcp", grpcProxyListenAddr) if err != nil { fmt.Fprintln(os.Stderr, err) os.Exit(1) } - client, err := clientv3.New(*cfg) - if err != nil { + if l, err = transport.NewKeepAliveListener(l, "tcp", nil); err != nil { fmt.Fprintln(os.Stderr, err) os.Exit(1) } + if tlsinfo != nil { + tlsinfo.CRLFile = grpcProxyListenCRL + if l, err = transport.NewTLSListener(l, tlsinfo); err != nil { + plog.Fatal(err) + } + } + plog.Infof("listening for grpc-proxy client requests on %s", grpcProxyListenAddr) + return cmux.New(l) +} + +func newGRPCProxyServer(client *clientv3.Client) *grpc.Server { if len(grpcProxyNamespace) > 0 { client.KV = namespace.NewKV(client.KV, grpcProxyNamespace) client.Watcher = namespace.NewWatcher(client.Watcher, grpcProxyNamespace) @@ -153,23 +270,27 @@ func startGRPCProxy(cmd *cobra.Command, args []string) { leasep, _ := grpcproxy.NewLeaseProxy(client) mainp := grpcproxy.NewMaintenanceProxy(client) authp := grpcproxy.NewAuthProxy(client) + electionp := grpcproxy.NewElectionProxy(client) + lockp := grpcproxy.NewLockProxy(client) server := grpc.NewServer( grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor), grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor), + grpc.MaxConcurrentStreams(math.MaxUint32), ) + pb.RegisterKVServer(server, kvp) pb.RegisterWatchServer(server, watchp) pb.RegisterClusterServer(server, clusterp) pb.RegisterLeaseServer(server, leasep) pb.RegisterMaintenanceServer(server, mainp) pb.RegisterAuthServer(server, authp) + v3electionpb.RegisterElectionServer(server, electionp) + v3lockpb.RegisterLockServer(server, lockp) + return server +} - errc := make(chan error) - - grpcl := m.Match(cmux.HTTP2()) - go func() { errc <- server.Serve(grpcl) }() - +func mustHTTPListener(m cmux.CMux, tlsinfo *transport.TLSInfo) (*http.Server, net.Listener) { httpmux := http.NewServeMux() httpmux.HandleFunc("/", http.NotFound) httpmux.Handle("/metrics", prometheus.Handler()) @@ -179,61 +300,31 @@ func startGRPCProxy(cmd *cobra.Command, args []string) { } plog.Infof("pprof is enabled under %s", debugutil.HTTPPrefixPProf) } + srvhttp := &http.Server{Handler: httpmux} - srvhttp := &http.Server{ - Handler: httpmux, + if tlsinfo == nil { + return srvhttp, m.Match(cmux.HTTP1()) } - var 
httpl net.Listener - if cfg.TLS != nil { - srvhttp.TLSConfig = cfg.TLS - httpl = tls.NewListener(m.Match(cmux.Any()), cfg.TLS) - } else { - httpl = m.Match(cmux.HTTP1()) + srvTLS, err := tlsinfo.ServerConfig() + if err != nil { + plog.Fatalf("could not setup TLS (%v)", err) } - go func() { errc <- srvhttp.Serve(httpl) }() - - go func() { errc <- m.Serve() }() - - // grpc-proxy is initialized, ready to serve - notifySystemd() - - fmt.Fprintln(os.Stderr, <-errc) - os.Exit(1) + srvhttp.TLSConfig = srvTLS + return srvhttp, m.Match(cmux.Any()) } -func newClientCfg() (*clientv3.Config, error) { - // set tls if any one tls option set - var cfgtls *transport.TLSInfo - tlsinfo := transport.TLSInfo{} - if grpcProxyCert != "" { - tlsinfo.CertFile = grpcProxyCert - cfgtls = &tlsinfo - } - - if grpcProxyKey != "" { - tlsinfo.KeyFile = grpcProxyKey - cfgtls = &tlsinfo - } - - if grpcProxyCA != "" { - tlsinfo.CAFile = grpcProxyCA - cfgtls = &tlsinfo - } - - cfg := clientv3.Config{ - Endpoints: grpcProxyEndpoints, - DialTimeout: 5 * time.Second, +func mustMetricsListener(tlsinfo *transport.TLSInfo) net.Listener { + murl, err := url.Parse(grpcProxyMetricsListenAddr) + if err != nil { + fmt.Fprintf(os.Stderr, "cannot parse %q", grpcProxyMetricsListenAddr) + os.Exit(1) } - if cfgtls != nil { - clientTLS, err := cfgtls.ClientConfig() - if err != nil { - return nil, err - } - cfg.TLS = clientTLS + ml, err := transport.NewListener(murl.Host, murl.Scheme, tlsinfo) + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) } - - // TODO: support insecure tls - - return &cfg, nil + plog.Info("grpc-proxy: listening for metrics on ", murl.String()) + return ml } diff --git a/github.com/coreos/etcd/etcdmain/help.go b/github.com/coreos/etcd/etcdmain/help.go index cd9282a319..ad4d30240e 100644 --- a/github.com/coreos/etcd/etcdmain/help.go +++ b/github.com/coreos/etcd/etcdmain/help.go @@ -66,6 +66,10 @@ member flags: comma-separated whitelist of origins for CORS (cross-origin resource sharing). --quota-backend-bytes '0' raise alarms when backend size exceeds the given quota (0 defaults to low space quota). + --max-txn-ops '128' + maximum number of operations permitted in a transaction. + --max-request-bytes '1572864' + maximum client request size in bytes the server will accept. clustering flags: @@ -93,7 +97,9 @@ clustering flags: --strict-reconfig-check reject reconfiguration requests that would cause quorum loss. --auto-compaction-retention '0' - auto compaction retention in hour. 0 means disable auto compaction. + auto compaction retention length. 0 means disable auto compaction. + --auto-compaction-mode 'periodic' + 'periodic' means hours, 'revision' means revision numbers to retain by auto compaction --enable-v2 Accept etcd V2 client requests. @@ -124,6 +130,8 @@ security flags: path to the client server TLS key file. --client-cert-auth 'false' enable client cert authentication. + --client-crl-file '' + path to the client certificate revocation list file. --trusted-ca-file '' path to the client server TLS trusted CA key file. --auto-tls 'false' @@ -140,6 +148,8 @@ security flags: path to the peer server TLS trusted CA file. --peer-auto-tls 'false' peer TLS using self-generated certificates if --peer-key-file and --peer-cert-file are not provided. + --peer-crl-file '' + path to the peer certificate revocation list file. logging flags @@ -162,7 +172,9 @@ profiling flags: --enable-pprof 'false' Enable runtime profiling data via HTTP server. 
Address is at client URL + "/debug/pprof/" --metrics 'basic' - Set level of detail for exported metrics, specify 'extensive' to include histogram metrics. + Set level of detail for exported metrics, specify 'extensive' to include histogram metrics. + --listen-metrics-urls '' + List of URLs to listen on for metrics. auth flags: --auth-token 'simple' diff --git a/github.com/coreos/etcd/etcdmain/main.go b/github.com/coreos/etcd/etcdmain/main.go index fd4e7f6965..06bbae56b8 100644 --- a/github.com/coreos/etcd/etcdmain/main.go +++ b/github.com/coreos/etcd/etcdmain/main.go @@ -17,6 +17,7 @@ package etcdmain import ( "fmt" "os" + "strings" "github.com/coreos/go-systemd/daemon" systemdutil "github.com/coreos/go-systemd/util" @@ -26,7 +27,13 @@ func Main() { checkSupportArch() if len(os.Args) > 1 { - switch os.Args[1] { + cmd := os.Args[1] + if covArgs := os.Getenv("ETCDCOV_ARGS"); len(covArgs) > 0 { + args := strings.Split(os.Getenv("ETCDCOV_ARGS"), "\xe7\xcd")[1:] + rootCmd.SetArgs(args) + cmd = "grpc-proxy" + } + switch cmd { case "gateway", "grpc-proxy": if err := rootCmd.Execute(); err != nil { fmt.Fprint(os.Stderr, err) diff --git a/github.com/coreos/etcd/etcdmain/util.go b/github.com/coreos/etcd/etcdmain/util.go index 23e19b4405..9657271d53 100644 --- a/github.com/coreos/etcd/etcdmain/util.go +++ b/github.com/coreos/etcd/etcdmain/util.go @@ -18,22 +18,23 @@ import ( "fmt" "os" - "github.com/coreos/etcd/client" + "github.com/coreos/etcd/pkg/srv" "github.com/coreos/etcd/pkg/transport" ) -func discoverEndpoints(dns string, ca string, insecure bool) (endpoints []string) { +func discoverEndpoints(dns string, ca string, insecure bool) (s srv.SRVClients) { if dns == "" { - return nil + return s } - endpoints, err := client.NewSRVDiscover().Discover(dns) + srvs, err := srv.GetClient("etcd-client", dns) if err != nil { fmt.Fprintln(os.Stderr, err) os.Exit(1) } + endpoints := srvs.Endpoints plog.Infof("discovered the cluster %s from %s", endpoints, dns) if insecure { - return endpoints + return *srvs } // confirm TLS connections are good tlsInfo := transport.TLSInfo{ @@ -46,5 +47,19 @@ func discoverEndpoints(dns string, ca string, insecure bool) (endpoints []string plog.Warningf("%v", err) } plog.Infof("using discovered endpoints %v", endpoints) - return endpoints + + // map endpoints back to SRVClients struct with SRV data + eps := make(map[string]struct{}) + for _, ep := range endpoints { + eps[ep] = struct{}{} + } + for i := range srvs.Endpoints { + if _, ok := eps[srvs.Endpoints[i]]; !ok { + continue + } + s.Endpoints = append(s.Endpoints, srvs.Endpoints[i]) + s.SRVs = append(s.SRVs, srvs.SRVs[i]) + } + + return s } diff --git a/github.com/coreos/etcd/etcdserver/api/etcdhttp/base.go b/github.com/coreos/etcd/etcdserver/api/etcdhttp/base.go new file mode 100644 index 0000000000..98891da242 --- /dev/null +++ b/github.com/coreos/etcd/etcdserver/api/etcdhttp/base.go @@ -0,0 +1,190 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package etcdhttp + +import ( + "encoding/json" + "expvar" + "fmt" + "net/http" + "strings" + "time" + + etcdErr "github.com/coreos/etcd/error" + "github.com/coreos/etcd/etcdserver" + "github.com/coreos/etcd/etcdserver/api" + "github.com/coreos/etcd/etcdserver/api/v2http/httptypes" + "github.com/coreos/etcd/etcdserver/etcdserverpb" + "github.com/coreos/etcd/pkg/logutil" + "github.com/coreos/etcd/raft" + "github.com/coreos/etcd/version" + "github.com/coreos/pkg/capnslog" + "github.com/prometheus/client_golang/prometheus" + "golang.org/x/net/context" +) + +var ( + plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api/etcdhttp") + mlog = logutil.NewMergeLogger(plog) +) + +const ( + configPath = "/config" + metricsPath = "/metrics" + healthPath = "/health" + varsPath = "/debug/vars" + versionPath = "/version" +) + +// HandleBasic adds handlers to a mux for serving JSON etcd client requests +// that do not access the v2 store. +func HandleBasic(mux *http.ServeMux, server *etcdserver.EtcdServer) { + mux.HandleFunc(varsPath, serveVars) + mux.HandleFunc(configPath+"/local/log", logHandleFunc) + mux.Handle(metricsPath, prometheus.Handler()) + mux.Handle(healthPath, healthHandler(server)) + mux.HandleFunc(versionPath, versionHandler(server.Cluster(), serveVersion)) +} + +func healthHandler(server *etcdserver.EtcdServer) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + if !allowMethod(w, r, "GET") { + return + } + if uint64(server.Leader()) == raft.None { + http.Error(w, `{"health": "false"}`, http.StatusServiceUnavailable) + return + } + if len(server.Alarms()) > 0 { + w.Write([]byte(`{"health": "false"}`)) + return + } + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + if _, err := server.Do(ctx, etcdserverpb.Request{Method: "QGET"}); err != nil { + http.Error(w, `{"health": "false"}`, http.StatusServiceUnavailable) + return + } + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"health": "true"}`)) + } +} + +func versionHandler(c api.Cluster, fn func(http.ResponseWriter, *http.Request, string)) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + v := c.Version() + if v != nil { + fn(w, r, v.String()) + } else { + fn(w, r, "not_decided") + } + } +} + +func serveVersion(w http.ResponseWriter, r *http.Request, clusterV string) { + if !allowMethod(w, r, "GET") { + return + } + vs := version.Versions{ + Server: version.Version, + Cluster: clusterV, + } + + w.Header().Set("Content-Type", "application/json") + b, err := json.Marshal(&vs) + if err != nil { + plog.Panicf("cannot marshal versions to json (%v)", err) + } + w.Write(b) +} + +func logHandleFunc(w http.ResponseWriter, r *http.Request) { + if !allowMethod(w, r, "PUT") { + return + } + + in := struct{ Level string }{} + + d := json.NewDecoder(r.Body) + if err := d.Decode(&in); err != nil { + WriteError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid json body")) + return + } + + logl, err := capnslog.ParseLevel(strings.ToUpper(in.Level)) + if err != nil { + WriteError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid log level "+in.Level)) + return + } + + plog.Noticef("globalLogLevel set to %q", logl.String()) + capnslog.SetGlobalLogLevel(logl) + w.WriteHeader(http.StatusNoContent) +} + +func serveVars(w http.ResponseWriter, r *http.Request) { + if !allowMethod(w, r, "GET") { + return + } + + w.Header().Set("Content-Type", "application/json; charset=utf-8") + fmt.Fprintf(w, "{\n") + first := true + 
expvar.Do(func(kv expvar.KeyValue) { + if !first { + fmt.Fprintf(w, ",\n") + } + first = false + fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value) + }) + fmt.Fprintf(w, "\n}\n") +} + +func allowMethod(w http.ResponseWriter, r *http.Request, m string) bool { + if m == r.Method { + return true + } + w.Header().Set("Allow", m) + http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) + return false +} + +// WriteError logs and writes the given Error to the ResponseWriter +// If Error is an etcdErr, it is rendered to the ResponseWriter +// Otherwise, it is assumed to be a StatusInternalServerError +func WriteError(w http.ResponseWriter, r *http.Request, err error) { + if err == nil { + return + } + switch e := err.(type) { + case *etcdErr.Error: + e.WriteTo(w) + case *httptypes.HTTPError: + if et := e.WriteTo(w); et != nil { + plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr) + } + default: + switch err { + case etcdserver.ErrTimeoutDueToLeaderFail, etcdserver.ErrTimeoutDueToConnectionLost, etcdserver.ErrNotEnoughStartedMembers, etcdserver.ErrUnhealthy: + mlog.MergeError(err) + default: + mlog.MergeErrorf("got unexpected response error (%v)", err) + } + herr := httptypes.NewHTTPError(http.StatusInternalServerError, "Internal Server Error") + if et := herr.WriteTo(w); et != nil { + plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr) + } + } +} diff --git a/github.com/coreos/etcd/etcdserver/api/v2http/peer.go b/github.com/coreos/etcd/etcdserver/api/etcdhttp/peer.go similarity index 97% rename from github.com/coreos/etcd/etcdserver/api/v2http/peer.go rename to github.com/coreos/etcd/etcdserver/api/etcdhttp/peer.go index a1abadba8e..721bae3c60 100644 --- a/github.com/coreos/etcd/etcdserver/api/v2http/peer.go +++ b/github.com/coreos/etcd/etcdserver/api/etcdhttp/peer.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package v2http +package etcdhttp import ( "encoding/json" @@ -61,7 +61,7 @@ type peerMembersHandler struct { } func (h *peerMembersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - if !allowMethod(w, r.Method, "GET") { + if !allowMethod(w, r, "GET") { return } w.Header().Set("X-Etcd-Cluster-ID", h.cluster.ID().String()) diff --git a/github.com/coreos/etcd/etcdserver/api/v2http/peer_test.go b/github.com/coreos/etcd/etcdserver/api/etcdhttp/peer_test.go similarity index 82% rename from github.com/coreos/etcd/etcdserver/api/v2http/peer_test.go rename to github.com/coreos/etcd/etcdserver/api/etcdhttp/peer_test.go index db4dd5968f..c2b14f1954 100644 --- a/github.com/coreos/etcd/etcdserver/api/v2http/peer_test.go +++ b/github.com/coreos/etcd/etcdserver/api/etcdhttp/peer_test.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
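A minimal illustrative sketch, not part of the patch itself: exercising the endpoints that HandleBasic wires up, assuming a member serving client traffic on http://127.0.0.1:2379 (the address, and decoding the version payload into a plain map, are assumptions; serveVersion only guarantees an application/json body, and healthHandler answers 503 when the member has no leader or cannot complete a test QGET within one second).

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// /version is served by serveVersion via versionHandler.
	resp, err := http.Get("http://127.0.0.1:2379/version")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	v := map[string]string{}
	if err := json.NewDecoder(resp.Body).Decode(&v); err != nil {
		panic(err)
	}
	fmt.Println("version payload:", v)

	// /health is served by healthHandler; a healthy member answers 200 with
	// {"health": "true"}, a leaderless or timed-out member answers 503.
	h, err := http.Get("http://127.0.0.1:2379/health")
	if err != nil {
		panic(err)
	}
	h.Body.Close()
	fmt.Println("health status:", h.StatusCode)
}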
-package v2http +package etcdhttp import ( "encoding/json" @@ -20,13 +20,36 @@ import ( "net/http" "net/http/httptest" "path" + "sort" "testing" "github.com/coreos/etcd/etcdserver/membership" "github.com/coreos/etcd/pkg/testutil" + "github.com/coreos/etcd/pkg/types" "github.com/coreos/etcd/rafthttp" + "github.com/coreos/go-semver/semver" ) +type fakeCluster struct { + id uint64 + clientURLs []string + members map[uint64]*membership.Member +} + +func (c *fakeCluster) ID() types.ID { return types.ID(c.id) } +func (c *fakeCluster) ClientURLs() []string { return c.clientURLs } +func (c *fakeCluster) Members() []*membership.Member { + var ms membership.MembersByID + for _, m := range c.members { + ms = append(ms, m) + } + sort.Sort(ms) + return []*membership.Member(ms) +} +func (c *fakeCluster) Member(id types.ID) *membership.Member { return c.members[uint64(id)] } +func (c *fakeCluster) IsIDRemoved(id types.ID) bool { return false } +func (c *fakeCluster) Version() *semver.Version { return nil } + // TestNewPeerHandlerOnRaftPrefix tests that NewPeerHandler returns a handler that // handles raft-prefix requests well. func TestNewPeerHandlerOnRaftPrefix(t *testing.T) { diff --git a/github.com/coreos/etcd/etcdserver/api/etcdhttp/version_test.go b/github.com/coreos/etcd/etcdserver/api/etcdhttp/version_test.go new file mode 100644 index 0000000000..fea1659508 --- /dev/null +++ b/github.com/coreos/etcd/etcdserver/api/etcdhttp/version_test.go @@ -0,0 +1,66 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package etcdhttp + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + + "github.com/coreos/etcd/version" +) + +func TestServeVersion(t *testing.T) { + req, err := http.NewRequest("GET", "", nil) + if err != nil { + t.Fatalf("error creating request: %v", err) + } + rw := httptest.NewRecorder() + serveVersion(rw, req, "2.1.0") + if rw.Code != http.StatusOK { + t.Errorf("code=%d, want %d", rw.Code, http.StatusOK) + } + vs := version.Versions{ + Server: version.Version, + Cluster: "2.1.0", + } + w, err := json.Marshal(&vs) + if err != nil { + t.Fatal(err) + } + if g := rw.Body.String(); g != string(w) { + t.Fatalf("body = %q, want %q", g, string(w)) + } + if ct := rw.HeaderMap.Get("Content-Type"); ct != "application/json" { + t.Errorf("contet-type header = %s, want %s", ct, "application/json") + } +} + +func TestServeVersionFails(t *testing.T) { + for _, m := range []string{ + "CONNECT", "TRACE", "PUT", "POST", "HEAD", + } { + req, err := http.NewRequest(m, "", nil) + if err != nil { + t.Fatalf("error creating request: %v", err) + } + rw := httptest.NewRecorder() + serveVersion(rw, req, "2.1.0") + if rw.Code != http.StatusMethodNotAllowed { + t.Errorf("method %s: code=%d, want %d", m, rw.Code, http.StatusMethodNotAllowed) + } + } +} diff --git a/github.com/coreos/etcd/etcdserver/api/v2http/client.go b/github.com/coreos/etcd/etcdserver/api/v2http/client.go index 4f900e4f62..aa1e71ec32 100644 --- a/github.com/coreos/etcd/etcdserver/api/v2http/client.go +++ b/github.com/coreos/etcd/etcdserver/api/v2http/client.go @@ -17,7 +17,6 @@ package v2http import ( "encoding/json" "errors" - "expvar" "fmt" "io/ioutil" "net/http" @@ -30,38 +29,36 @@ import ( etcdErr "github.com/coreos/etcd/error" "github.com/coreos/etcd/etcdserver" "github.com/coreos/etcd/etcdserver/api" + "github.com/coreos/etcd/etcdserver/api/etcdhttp" "github.com/coreos/etcd/etcdserver/api/v2http/httptypes" "github.com/coreos/etcd/etcdserver/auth" "github.com/coreos/etcd/etcdserver/etcdserverpb" "github.com/coreos/etcd/etcdserver/membership" "github.com/coreos/etcd/etcdserver/stats" "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/raft" "github.com/coreos/etcd/store" - "github.com/coreos/etcd/version" - "github.com/coreos/pkg/capnslog" "github.com/jonboulle/clockwork" - "github.com/prometheus/client_golang/prometheus" "golang.org/x/net/context" ) const ( - authPrefix = "/v2/auth" - keysPrefix = "/v2/keys" - deprecatedMachinesPrefix = "/v2/machines" - membersPrefix = "/v2/members" - statsPrefix = "/v2/stats" - varsPath = "/debug/vars" - metricsPath = "/metrics" - healthPath = "/health" - versionPath = "/version" - configPath = "/config" + authPrefix = "/v2/auth" + keysPrefix = "/v2/keys" + machinesPrefix = "/v2/machines" + membersPrefix = "/v2/members" + statsPrefix = "/v2/stats" ) // NewClientHandler generates a muxed http.Handler with the given parameters to serve etcd client requests. 
func NewClientHandler(server *etcdserver.EtcdServer, timeout time.Duration) http.Handler { - sec := auth.NewStore(server, timeout) + mux := http.NewServeMux() + etcdhttp.HandleBasic(mux, server) + handleV2(mux, server, timeout) + return requestLogger(mux) +} +func handleV2(mux *http.ServeMux, server *etcdserver.EtcdServer, timeout time.Duration) { + sec := auth.NewStore(server, timeout) kh := &keysHandler{ sec: sec, server: server, @@ -84,34 +81,23 @@ func NewClientHandler(server *etcdserver.EtcdServer, timeout time.Duration) http clientCertAuthEnabled: server.Cfg.ClientCertAuthEnabled, } - dmh := &deprecatedMachinesHandler{ - cluster: server.Cluster(), - } + mah := &machinesHandler{cluster: server.Cluster()} sech := &authHandler{ sec: sec, cluster: server.Cluster(), clientCertAuthEnabled: server.Cfg.ClientCertAuthEnabled, } - - mux := http.NewServeMux() mux.HandleFunc("/", http.NotFound) - mux.Handle(healthPath, healthHandler(server)) - mux.HandleFunc(versionPath, versionHandler(server.Cluster(), serveVersion)) mux.Handle(keysPrefix, kh) mux.Handle(keysPrefix+"/", kh) mux.HandleFunc(statsPrefix+"/store", sh.serveStore) mux.HandleFunc(statsPrefix+"/self", sh.serveSelf) mux.HandleFunc(statsPrefix+"/leader", sh.serveLeader) - mux.HandleFunc(varsPath, serveVars) - mux.HandleFunc(configPath+"/local/log", logHandleFunc) - mux.Handle(metricsPath, prometheus.Handler()) mux.Handle(membersPrefix, mh) mux.Handle(membersPrefix+"/", mh) - mux.Handle(deprecatedMachinesPrefix, dmh) + mux.Handle(machinesPrefix, mah) handleAuth(mux, sech) - - return requestLogger(mux) } type keysHandler struct { @@ -170,11 +156,11 @@ func (h *keysHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } } -type deprecatedMachinesHandler struct { +type machinesHandler struct { cluster api.Cluster } -func (h *deprecatedMachinesHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { +func (h *machinesHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if !allowMethod(w, r.Method, "GET", "HEAD") { return } @@ -321,103 +307,13 @@ func (h *statsHandler) serveLeader(w http.ResponseWriter, r *http.Request) { } stats := h.stats.LeaderStats() if stats == nil { - writeError(w, r, httptypes.NewHTTPError(http.StatusForbidden, "not current leader")) + etcdhttp.WriteError(w, r, httptypes.NewHTTPError(http.StatusForbidden, "not current leader")) return } w.Header().Set("Content-Type", "application/json") w.Write(stats) } -func serveVars(w http.ResponseWriter, r *http.Request) { - if !allowMethod(w, r.Method, "GET") { - return - } - - w.Header().Set("Content-Type", "application/json; charset=utf-8") - fmt.Fprintf(w, "{\n") - first := true - expvar.Do(func(kv expvar.KeyValue) { - if !first { - fmt.Fprintf(w, ",\n") - } - first = false - fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value) - }) - fmt.Fprintf(w, "\n}\n") -} - -func healthHandler(server *etcdserver.EtcdServer) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if !allowMethod(w, r.Method, "GET") { - return - } - if uint64(server.Leader()) == raft.None { - http.Error(w, `{"health": "false"}`, http.StatusServiceUnavailable) - return - } - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - if _, err := server.Do(ctx, etcdserverpb.Request{Method: "QGET"}); err != nil { - http.Error(w, `{"health": "false"}`, http.StatusServiceUnavailable) - return - } - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"health": "true"}`)) - } -} - -func versionHandler(c api.Cluster, fn func(http.ResponseWriter, *http.Request, 
string)) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - v := c.Version() - if v != nil { - fn(w, r, v.String()) - } else { - fn(w, r, "not_decided") - } - } -} - -func serveVersion(w http.ResponseWriter, r *http.Request, clusterV string) { - if !allowMethod(w, r.Method, "GET") { - return - } - vs := version.Versions{ - Server: version.Version, - Cluster: clusterV, - } - - w.Header().Set("Content-Type", "application/json") - b, err := json.Marshal(&vs) - if err != nil { - plog.Panicf("cannot marshal versions to json (%v)", err) - } - w.Write(b) -} - -func logHandleFunc(w http.ResponseWriter, r *http.Request) { - if !allowMethod(w, r.Method, "PUT") { - return - } - - in := struct{ Level string }{} - - d := json.NewDecoder(r.Body) - if err := d.Decode(&in); err != nil { - writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid json body")) - return - } - - logl, err := capnslog.ParseLevel(strings.ToUpper(in.Level)) - if err != nil { - writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid log level "+in.Level)) - return - } - - plog.Noticef("globalLogLevel set to %q", logl.String()) - capnslog.SetGlobalLogLevel(logl) - w.WriteHeader(http.StatusNoContent) -} - // parseKeyRequest converts a received http.Request on keysPrefix to // a server Request, performing validation of supplied fields as appropriate. // If any validation fails, an empty Request and non-nil error is returned. diff --git a/github.com/coreos/etcd/etcdserver/api/v2http/client_test.go b/github.com/coreos/etcd/etcdserver/api/v2http/client_test.go index e94a7bd844..896021b22f 100644 --- a/github.com/coreos/etcd/etcdserver/api/v2http/client_test.go +++ b/github.com/coreos/etcd/etcdserver/api/v2http/client_test.go @@ -37,7 +37,6 @@ import ( "github.com/coreos/etcd/pkg/types" "github.com/coreos/etcd/raft/raftpb" "github.com/coreos/etcd/store" - "github.com/coreos/etcd/version" "github.com/coreos/go-semver/semver" "github.com/jonboulle/clockwork" "golang.org/x/net/context" @@ -1220,7 +1219,7 @@ func TestWriteEvent(t *testing.T) { } } -func TestV2DeprecatedMachinesEndpoint(t *testing.T) { +func TestV2DMachinesEndpoint(t *testing.T) { tests := []struct { method string wcode int @@ -1230,12 +1229,12 @@ func TestV2DeprecatedMachinesEndpoint(t *testing.T) { {"POST", http.StatusMethodNotAllowed}, } - m := &deprecatedMachinesHandler{cluster: &fakeCluster{}} + m := &machinesHandler{cluster: &fakeCluster{}} s := httptest.NewServer(m) defer s.Close() for _, tt := range tests { - req, err := http.NewRequest(tt.method, s.URL+deprecatedMachinesPrefix, nil) + req, err := http.NewRequest(tt.method, s.URL+machinesPrefix, nil) if err != nil { t.Fatal(err) } @@ -1259,7 +1258,7 @@ func TestServeMachines(t *testing.T) { if err != nil { t.Fatal(err) } - h := &deprecatedMachinesHandler{cluster: cluster} + h := &machinesHandler{cluster: cluster} h.ServeHTTP(writer, req) w := "http://localhost:8080, http://localhost:8081, http://localhost:8082" if g := writer.Body.String(); g != w { @@ -1409,48 +1408,6 @@ func TestServeStoreStats(t *testing.T) { } -func TestServeVersion(t *testing.T) { - req, err := http.NewRequest("GET", "", nil) - if err != nil { - t.Fatalf("error creating request: %v", err) - } - rw := httptest.NewRecorder() - serveVersion(rw, req, "2.1.0") - if rw.Code != http.StatusOK { - t.Errorf("code=%d, want %d", rw.Code, http.StatusOK) - } - vs := version.Versions{ - Server: version.Version, - Cluster: "2.1.0", - } - w, err := json.Marshal(&vs) - if err != nil { - t.Fatal(err) - } - if 
g := rw.Body.String(); g != string(w) { - t.Fatalf("body = %q, want %q", g, string(w)) - } - if ct := rw.HeaderMap.Get("Content-Type"); ct != "application/json" { - t.Errorf("contet-type header = %s, want %s", ct, "application/json") - } -} - -func TestServeVersionFails(t *testing.T) { - for _, m := range []string{ - "CONNECT", "TRACE", "PUT", "POST", "HEAD", - } { - req, err := http.NewRequest(m, "", nil) - if err != nil { - t.Fatalf("error creating request: %v", err) - } - rw := httptest.NewRecorder() - serveVersion(rw, req, "2.1.0") - if rw.Code != http.StatusMethodNotAllowed { - t.Errorf("method %s: code=%d, want %d", m, rw.Code, http.StatusMethodNotAllowed) - } - } -} - func TestBadServeKeys(t *testing.T) { testBadCases := []struct { req *http.Request diff --git a/github.com/coreos/etcd/etcdserver/api/v2http/http.go b/github.com/coreos/etcd/etcdserver/api/v2http/http.go index 62c99e19d4..589c172dbb 100644 --- a/github.com/coreos/etcd/etcdserver/api/v2http/http.go +++ b/github.com/coreos/etcd/etcdserver/api/v2http/http.go @@ -20,12 +20,11 @@ import ( "strings" "time" - etcdErr "github.com/coreos/etcd/error" - "github.com/coreos/etcd/etcdserver" + "github.com/coreos/etcd/etcdserver/api/etcdhttp" "github.com/coreos/etcd/etcdserver/api/v2http/httptypes" - "github.com/coreos/etcd/etcdserver/auth" "github.com/coreos/etcd/pkg/logutil" + "github.com/coreos/pkg/capnslog" ) @@ -39,37 +38,18 @@ var ( mlog = logutil.NewMergeLogger(plog) ) -// writeError logs and writes the given Error to the ResponseWriter -// If Error is an etcdErr, it is rendered to the ResponseWriter -// Otherwise, it is assumed to be a StatusInternalServerError func writeError(w http.ResponseWriter, r *http.Request, err error) { if err == nil { return } - switch e := err.(type) { - case *etcdErr.Error: - e.WriteTo(w) - case *httptypes.HTTPError: - if et := e.WriteTo(w); et != nil { - plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr) - } - case auth.Error: + if e, ok := err.(auth.Error); ok { herr := httptypes.NewHTTPError(e.HTTPStatus(), e.Error()) if et := herr.WriteTo(w); et != nil { plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr) } - default: - switch err { - case etcdserver.ErrTimeoutDueToLeaderFail, etcdserver.ErrTimeoutDueToConnectionLost, etcdserver.ErrNotEnoughStartedMembers, etcdserver.ErrUnhealthy: - mlog.MergeError(err) - default: - mlog.MergeErrorf("got unexpected response error (%v)", err) - } - herr := httptypes.NewHTTPError(http.StatusInternalServerError, "Internal Server Error") - if et := herr.WriteTo(w); et != nil { - plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr) - } + return } + etcdhttp.WriteError(w, r, err) } // allowMethod verifies that the given method is one of the allowed methods, diff --git a/github.com/coreos/etcd/etcdserver/api/v3client/v3client.go b/github.com/coreos/etcd/etcdserver/api/v3client/v3client.go index f2799e56ea..cc4147d2f0 100644 --- a/github.com/coreos/etcd/etcdserver/api/v3client/v3client.go +++ b/github.com/coreos/etcd/etcdserver/api/v3client/v3client.go @@ -15,13 +15,14 @@ package v3client import ( - "context" "time" "github.com/coreos/etcd/clientv3" "github.com/coreos/etcd/etcdserver" "github.com/coreos/etcd/etcdserver/api/v3rpc" "github.com/coreos/etcd/proxy/grpcproxy/adapter" + + "golang.org/x/net/context" ) // New creates a clientv3 client that wraps an in-process EtcdServer. 
Instead @@ -37,7 +38,7 @@ func New(s *etcdserver.EtcdServer) *clientv3.Client { c.Lease = clientv3.NewLeaseFromLeaseClient(lc, time.Second) wc := adapter.WatchServerToWatchClient(v3rpc.NewWatchServer(s)) - c.Watcher = clientv3.NewWatchFromWatchClient(wc) + c.Watcher = &watchWrapper{clientv3.NewWatchFromWatchClient(wc)} mc := adapter.MaintenanceServerToMaintenanceClient(v3rpc.NewMaintenanceServer(s)) c.Maintenance = clientv3.NewMaintenanceFromMaintenanceClient(mc) @@ -49,3 +50,18 @@ func New(s *etcdserver.EtcdServer) *clientv3.Client { return c } + +// BlankContext implements Stringer on a context so the ctx string doesn't +// depend on the context's WithValue data, which tends to be unsynchronized +// (e.g., x/net/trace), causing ctx.String() to throw data races. +type blankContext struct{ context.Context } + +func (*blankContext) String() string { return "(blankCtx)" } + +// watchWrapper wraps clientv3 watch calls to blank out the context +// to avoid races on trace data. +type watchWrapper struct{ clientv3.Watcher } + +func (ww *watchWrapper) Watch(ctx context.Context, key string, opts ...clientv3.OpOption) clientv3.WatchChan { + return ww.Watcher.Watch(&blankContext{ctx}, key, opts...) +} diff --git a/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.pb.gw.go b/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go similarity index 89% rename from github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.pb.gw.go rename to github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go index 8f0bb9d696..eb4b68c0a4 100644 --- a/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.pb.gw.go +++ b/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go @@ -7,9 +7,10 @@ Package v3electionpb is a reverse proxy. It translates gRPC into RESTful JSON APIs. 
*/ -package v3electionpb +package gw import ( + "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb" "io" "net/http" @@ -27,8 +28,8 @@ var _ io.Reader var _ = runtime.String var _ = utilities.NewDoubleArray -func request_Election_Campaign_0(ctx context.Context, marshaler runtime.Marshaler, client ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq CampaignRequest +func request_Election_Campaign_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq v3electionpb.CampaignRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { @@ -40,8 +41,8 @@ func request_Election_Campaign_0(ctx context.Context, marshaler runtime.Marshale } -func request_Election_Proclaim_0(ctx context.Context, marshaler runtime.Marshaler, client ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ProclaimRequest +func request_Election_Proclaim_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq v3electionpb.ProclaimRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { @@ -53,8 +54,8 @@ func request_Election_Proclaim_0(ctx context.Context, marshaler runtime.Marshale } -func request_Election_Leader_0(ctx context.Context, marshaler runtime.Marshaler, client ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq LeaderRequest +func request_Election_Leader_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq v3electionpb.LeaderRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { @@ -66,8 +67,8 @@ func request_Election_Leader_0(ctx context.Context, marshaler runtime.Marshaler, } -func request_Election_Observe_0(ctx context.Context, marshaler runtime.Marshaler, client ElectionClient, req *http.Request, pathParams map[string]string) (Election_ObserveClient, runtime.ServerMetadata, error) { - var protoReq LeaderRequest +func request_Election_Observe_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (v3electionpb.Election_ObserveClient, runtime.ServerMetadata, error) { + var protoReq v3electionpb.LeaderRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { @@ -87,8 +88,8 @@ func request_Election_Observe_0(ctx context.Context, marshaler runtime.Marshaler } -func request_Election_Resign_0(ctx context.Context, marshaler runtime.Marshaler, client ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ResignRequest +func request_Election_Resign_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq 
v3electionpb.ResignRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { @@ -128,7 +129,7 @@ func RegisterElectionHandlerFromEndpoint(ctx context.Context, mux *runtime.Serve // RegisterElectionHandler registers the http handlers for service Election to "mux". // The handlers forward requests to the grpc endpoint over "conn". func RegisterElectionHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - client := NewElectionClient(conn) + client := v3electionpb.NewElectionClient(conn) mux.Handle("POST", pattern_Election_Campaign_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(ctx) diff --git a/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.pb.go b/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.pb.go index 2600756e30..6ce0fd4709 100644 --- a/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.pb.go +++ b/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.pb.go @@ -67,6 +67,27 @@ func (m *CampaignRequest) String() string { return proto.CompactTextS func (*CampaignRequest) ProtoMessage() {} func (*CampaignRequest) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{0} } +func (m *CampaignRequest) GetName() []byte { + if m != nil { + return m.Name + } + return nil +} + +func (m *CampaignRequest) GetLease() int64 { + if m != nil { + return m.Lease + } + return 0 +} + +func (m *CampaignRequest) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + type CampaignResponse struct { Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // leader describes the resources used for holding leadereship of the election. @@ -111,6 +132,34 @@ func (m *LeaderKey) String() string { return proto.CompactTextString( func (*LeaderKey) ProtoMessage() {} func (*LeaderKey) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{2} } +func (m *LeaderKey) GetName() []byte { + if m != nil { + return m.Name + } + return nil +} + +func (m *LeaderKey) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *LeaderKey) GetRev() int64 { + if m != nil { + return m.Rev + } + return 0 +} + +func (m *LeaderKey) GetLease() int64 { + if m != nil { + return m.Lease + } + return 0 +} + type LeaderRequest struct { // name is the election identifier for the leadership information. Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` @@ -121,6 +170,13 @@ func (m *LeaderRequest) String() string { return proto.CompactTextStr func (*LeaderRequest) ProtoMessage() {} func (*LeaderRequest) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{3} } +func (m *LeaderRequest) GetName() []byte { + if m != nil { + return m.Name + } + return nil +} + type LeaderResponse struct { Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // kv is the key-value pair representing the latest leader update. 
@@ -198,6 +254,13 @@ func (m *ProclaimRequest) GetLeader() *LeaderKey { return nil } +func (m *ProclaimRequest) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + type ProclaimResponse struct { Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } diff --git a/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.pb.gw.go b/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go similarity index 90% rename from github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.pb.gw.go rename to github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go index ca506902b2..f31cc40140 100644 --- a/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.pb.gw.go +++ b/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go @@ -7,9 +7,10 @@ Package v3lockpb is a reverse proxy. It translates gRPC into RESTful JSON APIs. */ -package v3lockpb +package gw import ( + "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb" "io" "net/http" @@ -27,8 +28,8 @@ var _ io.Reader var _ = runtime.String var _ = utilities.NewDoubleArray -func request_Lock_Lock_0(ctx context.Context, marshaler runtime.Marshaler, client LockClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq LockRequest +func request_Lock_Lock_0(ctx context.Context, marshaler runtime.Marshaler, client v3lockpb.LockClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq v3lockpb.LockRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { @@ -40,8 +41,8 @@ func request_Lock_Lock_0(ctx context.Context, marshaler runtime.Marshaler, clien } -func request_Lock_Unlock_0(ctx context.Context, marshaler runtime.Marshaler, client LockClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq UnlockRequest +func request_Lock_Unlock_0(ctx context.Context, marshaler runtime.Marshaler, client v3lockpb.LockClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq v3lockpb.UnlockRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { @@ -81,7 +82,7 @@ func RegisterLockHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, // RegisterLockHandler registers the http handlers for service Lock to "mux". // The handlers forward requests to the grpc endpoint over "conn". 
func RegisterLockHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - client := NewLockClient(conn) + client := v3lockpb.NewLockClient(conn) mux.Handle("POST", pattern_Lock_Lock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(ctx) diff --git a/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go b/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go index 44bde286b2..20bd357525 100644 --- a/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go +++ b/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go @@ -59,6 +59,20 @@ func (m *LockRequest) String() string { return proto.CompactTextStrin func (*LockRequest) ProtoMessage() {} func (*LockRequest) Descriptor() ([]byte, []int) { return fileDescriptorV3Lock, []int{0} } +func (m *LockRequest) GetName() []byte { + if m != nil { + return m.Name + } + return nil +} + +func (m *LockRequest) GetLease() int64 { + if m != nil { + return m.Lease + } + return 0 +} + type LockResponse struct { Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // key is a key that will exist on etcd for the duration that the Lock caller @@ -79,6 +93,13 @@ func (m *LockResponse) GetHeader() *etcdserverpb.ResponseHeader { return nil } +func (m *LockResponse) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + type UnlockRequest struct { // key is the lock ownership key granted by Lock. Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` @@ -89,6 +110,13 @@ func (m *UnlockRequest) String() string { return proto.CompactTextStr func (*UnlockRequest) ProtoMessage() {} func (*UnlockRequest) Descriptor() ([]byte, []int) { return fileDescriptorV3Lock, []int{2} } +func (m *UnlockRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + type UnlockResponse struct { Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } diff --git a/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go b/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go index 88174e3bac..19943ff52d 100644 --- a/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go +++ b/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go @@ -16,12 +16,21 @@ package v3rpc import ( "crypto/tls" + "math" "github.com/coreos/etcd/etcdserver" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "google.golang.org/grpc" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/health" + healthpb "google.golang.org/grpc/health/grpc_health_v1" +) + +const ( + grpcOverheadBytes = 512 * 1024 + maxStreams = math.MaxUint32 ) func init() { @@ -36,8 +45,10 @@ func Server(s *etcdserver.EtcdServer, tls *tls.Config) *grpc.Server { } opts = append(opts, grpc.UnaryInterceptor(newUnaryInterceptor(s))) opts = append(opts, grpc.StreamInterceptor(newStreamInterceptor(s))) - + opts = append(opts, grpc.MaxMsgSize(int(s.Cfg.MaxRequestBytes+grpcOverheadBytes))) + opts = append(opts, grpc.MaxConcurrentStreams(maxStreams)) grpcServer := grpc.NewServer(opts...) 
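A minimal illustrative sketch, not part of the patch itself, of how the two server options above compose: the receive limit is the configured request cap plus a fixed 512 KiB of gRPC framing headroom, and the per-connection stream cap is effectively removed. The 10 MiB request limit below is an assumed example value, not an etcd default.

package main

import (
	"math"

	"google.golang.org/grpc"
)

const (
	exampleMaxRequestBytes = 10 * 1024 * 1024 // assumed stand-in for s.Cfg.MaxRequestBytes
	exampleOverheadBytes   = 512 * 1024       // same headroom as grpcOverheadBytes above
)

func newExampleServer() *grpc.Server {
	return grpc.NewServer(
		grpc.MaxMsgSize(exampleMaxRequestBytes+exampleOverheadBytes), // payload limit plus gRPC overhead
		grpc.MaxConcurrentStreams(math.MaxUint32),                    // effectively uncapped streams
	)
}

func main() { _ = newExampleServer() }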
+ pb.RegisterKVServer(grpcServer, NewQuotaKVServer(s)) pb.RegisterWatchServer(grpcServer, NewWatchServer(s)) pb.RegisterLeaseServer(grpcServer, NewQuotaLeaseServer(s)) @@ -45,5 +56,12 @@ func Server(s *etcdserver.EtcdServer, tls *tls.Config) *grpc.Server { pb.RegisterAuthServer(grpcServer, NewAuthServer(s)) pb.RegisterMaintenanceServer(grpcServer, NewMaintenanceServer(s)) + // server should register all the services manually + // use empty service name for all etcd services' health status, + // see https://github.com/grpc/grpc/blob/master/doc/health-checking.md for more + hsrv := health.NewServer() + hsrv.SetServingStatus("", healthpb.HealthCheckResponse_SERVING) + healthpb.RegisterHealthServer(grpcServer, hsrv) + return grpcServer } diff --git a/github.com/coreos/etcd/etcdserver/api/v3rpc/header.go b/github.com/coreos/etcd/etcdserver/api/v3rpc/header.go index d6d7f35d50..75da52fb8d 100644 --- a/github.com/coreos/etcd/etcdserver/api/v3rpc/header.go +++ b/github.com/coreos/etcd/etcdserver/api/v3rpc/header.go @@ -37,6 +37,9 @@ func newHeader(s *etcdserver.EtcdServer) header { // fill populates pb.ResponseHeader using etcdserver information func (h *header) fill(rh *pb.ResponseHeader) { + if rh == nil { + plog.Panic("unexpected nil resp.Header") + } rh.ClusterId = uint64(h.clusterID) rh.MemberId = uint64(h.memberID) rh.RaftTerm = h.raftTimer.Term() diff --git a/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go b/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go index 29aef2914a..de9470a890 100644 --- a/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go +++ b/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go @@ -45,7 +45,7 @@ func newUnaryInterceptor(s *etcdserver.EtcdServer) grpc.UnaryServerInterceptor { return nil, rpctypes.ErrGRPCNotCapable } - md, ok := metadata.FromContext(ctx) + md, ok := metadata.FromIncomingContext(ctx) if ok { if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader { if s.Leader() == types.ID(raft.None) { @@ -66,7 +66,7 @@ func newStreamInterceptor(s *etcdserver.EtcdServer) grpc.StreamServerInterceptor return rpctypes.ErrGRPCNotCapable } - md, ok := metadata.FromContext(ss.Context()) + md, ok := metadata.FromIncomingContext(ss.Context()) if ok { if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader { if s.Leader() == types.ID(raft.None) { diff --git a/github.com/coreos/etcd/etcdserver/api/v3rpc/key.go b/github.com/coreos/etcd/etcdserver/api/v3rpc/key.go index 8acae5983c..973346592d 100644 --- a/github.com/coreos/etcd/etcdserver/api/v3rpc/key.go +++ b/github.com/coreos/etcd/etcdserver/api/v3rpc/key.go @@ -16,30 +16,30 @@ package v3rpc import ( - "sort" - "github.com/coreos/etcd/etcdserver" "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "github.com/coreos/etcd/pkg/adt" "github.com/coreos/pkg/capnslog" "golang.org/x/net/context" ) var ( plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api/v3rpc") - - // Max operations per txn list. For example, Txn.Success can have at most 128 operations, - // and Txn.Failure can have at most 128 operations. - MaxOpsPerTxn = 128 ) type kvServer struct { hdr header kv etcdserver.RaftKV + // maxTxnOps is the max operations per txn. + // e.g suppose maxTxnOps = 128. + // Txn.Success can have at most 128 operations, + // and Txn.Failure can have at most 128 operations. 
+ maxTxnOps uint } func NewKVServer(s *etcdserver.EtcdServer) pb.KVServer { - return &kvServer{hdr: newHeader(s), kv: s} + return &kvServer{hdr: newHeader(s), kv: s, maxTxnOps: s.Cfg.MaxTxnOps} } func (s *kvServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) { @@ -52,9 +52,6 @@ func (s *kvServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResp return nil, togRPCError(err) } - if resp.Header == nil { - plog.Panic("unexpected nil resp.Header") - } s.hdr.fill(resp.Header) return resp, nil } @@ -69,9 +66,6 @@ func (s *kvServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, return nil, togRPCError(err) } - if resp.Header == nil { - plog.Panic("unexpected nil resp.Header") - } s.hdr.fill(resp.Header) return resp, nil } @@ -86,15 +80,19 @@ func (s *kvServer) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (* return nil, togRPCError(err) } - if resp.Header == nil { - plog.Panic("unexpected nil resp.Header") - } s.hdr.fill(resp.Header) return resp, nil } func (s *kvServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) { - if err := checkTxnRequest(r); err != nil { + if err := checkTxnRequest(r, int(s.maxTxnOps)); err != nil { + return nil, err + } + // check for forbidden put/del overlaps after checking request to avoid quadratic blowup + if _, _, err := checkIntervals(r.Success); err != nil { + return nil, err + } + if _, _, err := checkIntervals(r.Failure); err != nil { return nil, err } @@ -103,9 +101,6 @@ func (s *kvServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, return nil, togRPCError(err) } - if resp.Header == nil { - plog.Panic("unexpected nil resp.Header") - } s.hdr.fill(resp.Header) return resp, nil } @@ -116,9 +111,6 @@ func (s *kvServer) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.Co return nil, togRPCError(err) } - if resp.Header == nil { - plog.Panic("unexpected nil resp.Header") - } s.hdr.fill(resp.Header) return resp, nil } @@ -150,8 +142,15 @@ func checkDeleteRequest(r *pb.DeleteRangeRequest) error { return nil } -func checkTxnRequest(r *pb.TxnRequest) error { - if len(r.Compare) > MaxOpsPerTxn || len(r.Success) > MaxOpsPerTxn || len(r.Failure) > MaxOpsPerTxn { +func checkTxnRequest(r *pb.TxnRequest, maxTxnOps int) error { + opc := len(r.Compare) + if opc < len(r.Success) { + opc = len(r.Success) + } + if opc < len(r.Failure) { + opc = len(r.Failure) + } + if opc > maxTxnOps { return rpctypes.ErrGRPCTooManyOps } @@ -160,100 +159,117 @@ func checkTxnRequest(r *pb.TxnRequest) error { return rpctypes.ErrGRPCEmptyKey } } - for _, u := range r.Success { - if err := checkRequestOp(u); err != nil { + if err := checkRequestOp(u, maxTxnOps-opc); err != nil { return err } } - if err := checkRequestDupKeys(r.Success); err != nil { - return err - } - for _, u := range r.Failure { - if err := checkRequestOp(u); err != nil { + if err := checkRequestOp(u, maxTxnOps-opc); err != nil { return err } } - return checkRequestDupKeys(r.Failure) + + return nil } -// checkRequestDupKeys gives rpctypes.ErrGRPCDuplicateKey if the same key is modified twice -func checkRequestDupKeys(reqs []*pb.RequestOp) error { - // check put overlap - keys := make(map[string]struct{}) - for _, requ := range reqs { - tv, ok := requ.Request.(*pb.RequestOp_RequestPut) +// checkIntervals tests whether puts and deletes overlap for a list of ops. If +// there is an overlap, returns an error. If no overlap, return put and delete +// sets for recursive evaluation. 
+func checkIntervals(reqs []*pb.RequestOp) (map[string]struct{}, adt.IntervalTree, error) { + var dels adt.IntervalTree + + // collect deletes from this level; build first to check lower level overlapped puts + for _, req := range reqs { + tv, ok := req.Request.(*pb.RequestOp_RequestDeleteRange) if !ok { continue } - preq := tv.RequestPut - if preq == nil { + dreq := tv.RequestDeleteRange + if dreq == nil { continue } - if _, ok := keys[string(preq.Key)]; ok { - return rpctypes.ErrGRPCDuplicateKey + var iv adt.Interval + if len(dreq.RangeEnd) != 0 { + iv = adt.NewStringAffineInterval(string(dreq.Key), string(dreq.RangeEnd)) + } else { + iv = adt.NewStringAffinePoint(string(dreq.Key)) } - keys[string(preq.Key)] = struct{}{} - } - - // no need to check deletes if no puts; delete overlaps are permitted - if len(keys) == 0 { - return nil + dels.Insert(iv, struct{}{}) } - // sort keys for range checking - sortedKeys := []string{} - for k := range keys { - sortedKeys = append(sortedKeys, k) - } - sort.Strings(sortedKeys) - - // check put overlap with deletes - for _, requ := range reqs { - tv, ok := requ.Request.(*pb.RequestOp_RequestDeleteRange) + // collect children puts/deletes + puts := make(map[string]struct{}) + for _, req := range reqs { + tv, ok := req.Request.(*pb.RequestOp_RequestTxn) if !ok { continue } - dreq := tv.RequestDeleteRange - if dreq == nil { - continue + putsThen, delsThen, err := checkIntervals(tv.RequestTxn.Success) + if err != nil { + return nil, dels, err + } + putsElse, delsElse, err := checkIntervals(tv.RequestTxn.Failure) + if err != nil { + return nil, dels, err } - if dreq.RangeEnd == nil { - if _, found := keys[string(dreq.Key)]; found { - return rpctypes.ErrGRPCDuplicateKey + for k := range putsThen { + if _, ok := puts[k]; ok { + return nil, dels, rpctypes.ErrGRPCDuplicateKey } - } else { - lo := sort.SearchStrings(sortedKeys, string(dreq.Key)) - hi := sort.SearchStrings(sortedKeys, string(dreq.RangeEnd)) - if lo != hi { - // element between lo and hi => overlap - return rpctypes.ErrGRPCDuplicateKey + if dels.Intersects(adt.NewStringAffinePoint(k)) { + return nil, dels, rpctypes.ErrGRPCDuplicateKey } + puts[k] = struct{}{} } + for k := range putsElse { + if _, ok := puts[k]; ok { + // if key is from putsThen, overlap is OK since + // either then/else are mutually exclusive + if _, isSafe := putsThen[k]; !isSafe { + return nil, dels, rpctypes.ErrGRPCDuplicateKey + } + } + if dels.Intersects(adt.NewStringAffinePoint(k)) { + return nil, dels, rpctypes.ErrGRPCDuplicateKey + } + puts[k] = struct{}{} + } + dels.Union(delsThen, adt.NewStringAffineInterval("\x00", "")) + dels.Union(delsElse, adt.NewStringAffineInterval("\x00", "")) } - return nil + // collect and check this level's puts + for _, req := range reqs { + tv, ok := req.Request.(*pb.RequestOp_RequestPut) + if !ok || tv.RequestPut == nil { + continue + } + k := string(tv.RequestPut.Key) + if _, ok := puts[k]; ok { + return nil, dels, rpctypes.ErrGRPCDuplicateKey + } + if dels.Intersects(adt.NewStringAffinePoint(k)) { + return nil, dels, rpctypes.ErrGRPCDuplicateKey + } + puts[k] = struct{}{} + } + return puts, dels, nil } -func checkRequestOp(u *pb.RequestOp) error { +func checkRequestOp(u *pb.RequestOp, maxTxnOps int) error { // TODO: ensure only one of the field is set. 
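A minimal illustrative sketch, not part of the patch itself: the duplicate-key rule above reduces to interval arithmetic, with delete ranges inserted into an interval tree and every put key tested for intersection. The keys below are arbitrary example values; the pkg/adt calls are the same ones checkIntervals uses.

package main

import (
	"fmt"

	"github.com/coreos/etcd/pkg/adt"
)

func main() {
	var dels adt.IntervalTree

	// A range delete over ["a", "c") and a single-key delete of "x", as
	// checkIntervals builds from RequestDeleteRange ops.
	dels.Insert(adt.NewStringAffineInterval("a", "c"), struct{}{})
	dels.Insert(adt.NewStringAffinePoint("x"), struct{}{})

	for _, putKey := range []string{"b", "x", "zzz"} {
		if dels.Intersects(adt.NewStringAffinePoint(putKey)) {
			fmt.Printf("put %q overlaps a delete; the txn would be rejected with ErrGRPCDuplicateKey\n", putKey)
		} else {
			fmt.Printf("put %q does not overlap any delete\n", putKey)
		}
	}
}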
switch uv := u.Request.(type) { case *pb.RequestOp_RequestRange: - if uv.RequestRange != nil { - return checkRangeRequest(uv.RequestRange) - } + return checkRangeRequest(uv.RequestRange) case *pb.RequestOp_RequestPut: - if uv.RequestPut != nil { - return checkPutRequest(uv.RequestPut) - } + return checkPutRequest(uv.RequestPut) case *pb.RequestOp_RequestDeleteRange: - if uv.RequestDeleteRange != nil { - return checkDeleteRequest(uv.RequestDeleteRange) - } + return checkDeleteRequest(uv.RequestDeleteRange) + case *pb.RequestOp_RequestTxn: + return checkTxnRequest(uv.RequestTxn, maxTxnOps) default: - // empty op - return nil + // empty op / nil entry + return rpctypes.ErrGRPCKeyNotFound } - return nil } diff --git a/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go b/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go index 3657d03608..d0a73551d7 100644 --- a/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go +++ b/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go @@ -20,6 +20,7 @@ import ( "github.com/coreos/etcd/auth" "github.com/coreos/etcd/etcdserver" + "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" "github.com/coreos/etcd/mvcc" "github.com/coreos/etcd/mvcc/backend" @@ -40,9 +41,14 @@ type Alarmer interface { Alarm(ctx context.Context, ar *pb.AlarmRequest) (*pb.AlarmResponse, error) } +type LeaderTransferrer interface { + MoveLeader(ctx context.Context, lead, target uint64) error +} + type RaftStatusGetter interface { Index() uint64 Term() uint64 + ID() types.ID Leader() types.ID } @@ -56,11 +62,12 @@ type maintenanceServer struct { kg KVGetter bg BackendGetter a Alarmer + lt LeaderTransferrer hdr header } func NewMaintenanceServer(s *etcdserver.EtcdServer) pb.MaintenanceServer { - srv := &maintenanceServer{rg: s, kg: s, bg: s, a: s, hdr: newHeader(s)} + srv := &maintenanceServer{rg: s, kg: s, bg: s, a: s, lt: s, hdr: newHeader(s)} return &authMaintenanceServer{srv, s} } @@ -147,6 +154,17 @@ func (ms *maintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) ( return resp, nil } +func (ms *maintenanceServer) MoveLeader(ctx context.Context, tr *pb.MoveLeaderRequest) (*pb.MoveLeaderResponse, error) { + if ms.rg.ID() != ms.rg.Leader() { + return nil, rpctypes.ErrGRPCNotLeader + } + + if err := ms.lt.MoveLeader(ctx, uint64(ms.rg.Leader()), tr.TargetID); err != nil { + return nil, togRPCError(err) + } + return &pb.MoveLeaderResponse{}, nil +} + type authMaintenanceServer struct { *maintenanceServer ag AuthGetter @@ -188,3 +206,7 @@ func (ams *authMaintenanceServer) Hash(ctx context.Context, r *pb.HashRequest) ( func (ams *authMaintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) (*pb.StatusResponse, error) { return ams.maintenanceServer.Status(ctx, ar) } + +func (ams *authMaintenanceServer) MoveLeader(ctx context.Context, tr *pb.MoveLeaderRequest) (*pb.MoveLeaderResponse, error) { + return ams.maintenanceServer.MoveLeader(ctx, tr) +} diff --git a/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go b/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go index 0907e902c6..719ad8157a 100644 --- a/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go +++ b/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go @@ -59,6 +59,7 @@ var ( ErrGRPCInvalidAuthMgmt = grpc.Errorf(codes.InvalidArgument, "etcdserver: invalid auth management") ErrGRPCNoLeader = grpc.Errorf(codes.Unavailable, "etcdserver: no leader") + ErrGRPCNotLeader = 
grpc.Errorf(codes.Unavailable, "etcdserver: not leader") ErrGRPCNotCapable = grpc.Errorf(codes.Unavailable, "etcdserver: not capable") ErrGRPCStopped = grpc.Errorf(codes.Unavailable, "etcdserver: server stopped") ErrGRPCTimeout = grpc.Errorf(codes.Unavailable, "etcdserver: request timed out") @@ -106,6 +107,7 @@ var ( grpc.ErrorDesc(ErrGRPCInvalidAuthMgmt): ErrGRPCInvalidAuthMgmt, grpc.ErrorDesc(ErrGRPCNoLeader): ErrGRPCNoLeader, + grpc.ErrorDesc(ErrGRPCNotLeader): ErrGRPCNotLeader, grpc.ErrorDesc(ErrGRPCNotCapable): ErrGRPCNotCapable, grpc.ErrorDesc(ErrGRPCStopped): ErrGRPCStopped, grpc.ErrorDesc(ErrGRPCTimeout): ErrGRPCTimeout, @@ -153,6 +155,7 @@ var ( ErrInvalidAuthMgmt = Error(ErrGRPCInvalidAuthMgmt) ErrNoLeader = Error(ErrGRPCNoLeader) + ErrNotLeader = Error(ErrGRPCNotLeader) ErrNotCapable = Error(ErrGRPCNotCapable) ErrStopped = Error(ErrGRPCStopped) ErrTimeout = Error(ErrGRPCTimeout) diff --git a/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go b/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go index cb514a2dc1..ecbd176237 100644 --- a/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go +++ b/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go @@ -25,81 +25,51 @@ import ( "google.golang.org/grpc/codes" ) -func togRPCError(err error) error { - switch err { - case membership.ErrIDRemoved: - return rpctypes.ErrGRPCMemberNotFound - case membership.ErrIDNotFound: - return rpctypes.ErrGRPCMemberNotFound - case membership.ErrIDExists: - return rpctypes.ErrGRPCMemberExist - case membership.ErrPeerURLexists: - return rpctypes.ErrGRPCPeerURLExist - case etcdserver.ErrNotEnoughStartedMembers: - return rpctypes.ErrMemberNotEnoughStarted +var toGRPCErrorMap = map[error]error{ + membership.ErrIDRemoved: rpctypes.ErrGRPCMemberNotFound, + membership.ErrIDNotFound: rpctypes.ErrGRPCMemberNotFound, + membership.ErrIDExists: rpctypes.ErrGRPCMemberExist, + membership.ErrPeerURLexists: rpctypes.ErrGRPCPeerURLExist, + etcdserver.ErrNotEnoughStartedMembers: rpctypes.ErrMemberNotEnoughStarted, + + mvcc.ErrCompacted: rpctypes.ErrGRPCCompacted, + mvcc.ErrFutureRev: rpctypes.ErrGRPCFutureRev, + etcdserver.ErrRequestTooLarge: rpctypes.ErrGRPCRequestTooLarge, + etcdserver.ErrNoSpace: rpctypes.ErrGRPCNoSpace, + etcdserver.ErrTooManyRequests: rpctypes.ErrTooManyRequests, - case mvcc.ErrCompacted: - return rpctypes.ErrGRPCCompacted - case mvcc.ErrFutureRev: - return rpctypes.ErrGRPCFutureRev - case lease.ErrLeaseNotFound: - return rpctypes.ErrGRPCLeaseNotFound - case etcdserver.ErrRequestTooLarge: - return rpctypes.ErrGRPCRequestTooLarge - case etcdserver.ErrNoSpace: - return rpctypes.ErrGRPCNoSpace - case etcdserver.ErrTooManyRequests: - return rpctypes.ErrTooManyRequests + etcdserver.ErrNoLeader: rpctypes.ErrGRPCNoLeader, + etcdserver.ErrNotLeader: rpctypes.ErrGRPCNotLeader, + etcdserver.ErrStopped: rpctypes.ErrGRPCStopped, + etcdserver.ErrTimeout: rpctypes.ErrGRPCTimeout, + etcdserver.ErrTimeoutDueToLeaderFail: rpctypes.ErrGRPCTimeoutDueToLeaderFail, + etcdserver.ErrTimeoutDueToConnectionLost: rpctypes.ErrGRPCTimeoutDueToConnectionLost, + etcdserver.ErrUnhealthy: rpctypes.ErrGRPCUnhealthy, + etcdserver.ErrKeyNotFound: rpctypes.ErrGRPCKeyNotFound, - case etcdserver.ErrNoLeader: - return rpctypes.ErrGRPCNoLeader - case etcdserver.ErrStopped: - return rpctypes.ErrGRPCStopped - case etcdserver.ErrTimeout: - return rpctypes.ErrGRPCTimeout - case etcdserver.ErrTimeoutDueToLeaderFail: - return rpctypes.ErrGRPCTimeoutDueToLeaderFail - case etcdserver.ErrTimeoutDueToConnectionLost: - return 
rpctypes.ErrGRPCTimeoutDueToConnectionLost - case etcdserver.ErrUnhealthy: - return rpctypes.ErrGRPCUnhealthy - case etcdserver.ErrKeyNotFound: - return rpctypes.ErrGRPCKeyNotFound + lease.ErrLeaseNotFound: rpctypes.ErrGRPCLeaseNotFound, + lease.ErrLeaseExists: rpctypes.ErrGRPCLeaseExist, - case lease.ErrLeaseNotFound: - return rpctypes.ErrGRPCLeaseNotFound - case lease.ErrLeaseExists: - return rpctypes.ErrGRPCLeaseExist + auth.ErrRootUserNotExist: rpctypes.ErrGRPCRootUserNotExist, + auth.ErrRootRoleNotExist: rpctypes.ErrGRPCRootRoleNotExist, + auth.ErrUserAlreadyExist: rpctypes.ErrGRPCUserAlreadyExist, + auth.ErrUserEmpty: rpctypes.ErrGRPCUserEmpty, + auth.ErrUserNotFound: rpctypes.ErrGRPCUserNotFound, + auth.ErrRoleAlreadyExist: rpctypes.ErrGRPCRoleAlreadyExist, + auth.ErrRoleNotFound: rpctypes.ErrGRPCRoleNotFound, + auth.ErrAuthFailed: rpctypes.ErrGRPCAuthFailed, + auth.ErrPermissionDenied: rpctypes.ErrGRPCPermissionDenied, + auth.ErrRoleNotGranted: rpctypes.ErrGRPCRoleNotGranted, + auth.ErrPermissionNotGranted: rpctypes.ErrGRPCPermissionNotGranted, + auth.ErrAuthNotEnabled: rpctypes.ErrGRPCAuthNotEnabled, + auth.ErrInvalidAuthToken: rpctypes.ErrGRPCInvalidAuthToken, + auth.ErrInvalidAuthMgmt: rpctypes.ErrGRPCInvalidAuthMgmt, +} - case auth.ErrRootUserNotExist: - return rpctypes.ErrGRPCRootUserNotExist - case auth.ErrRootRoleNotExist: - return rpctypes.ErrGRPCRootRoleNotExist - case auth.ErrUserAlreadyExist: - return rpctypes.ErrGRPCUserAlreadyExist - case auth.ErrUserEmpty: - return rpctypes.ErrGRPCUserEmpty - case auth.ErrUserNotFound: - return rpctypes.ErrGRPCUserNotFound - case auth.ErrRoleAlreadyExist: - return rpctypes.ErrGRPCRoleAlreadyExist - case auth.ErrRoleNotFound: - return rpctypes.ErrGRPCRoleNotFound - case auth.ErrAuthFailed: - return rpctypes.ErrGRPCAuthFailed - case auth.ErrPermissionDenied: - return rpctypes.ErrGRPCPermissionDenied - case auth.ErrRoleNotGranted: - return rpctypes.ErrGRPCRoleNotGranted - case auth.ErrPermissionNotGranted: - return rpctypes.ErrGRPCPermissionNotGranted - case auth.ErrAuthNotEnabled: - return rpctypes.ErrGRPCAuthNotEnabled - case auth.ErrInvalidAuthToken: - return rpctypes.ErrGRPCInvalidAuthToken - case auth.ErrInvalidAuthMgmt: - return rpctypes.ErrGRPCInvalidAuthMgmt - default: +func togRPCError(err error) error { + grpcErr, ok := toGRPCErrorMap[err] + if !ok { return grpc.Errorf(codes.Unknown, err.Error()) } + return grpcErr } diff --git a/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go b/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go index e27c1f3748..84c0a5eac8 100644 --- a/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go +++ b/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go @@ -21,6 +21,7 @@ import ( "golang.org/x/net/context" + "github.com/coreos/etcd/auth" "github.com/coreos/etcd/etcdserver" "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" @@ -33,6 +34,8 @@ type watchServer struct { memberID int64 raftTimer etcdserver.RaftTimer watchable mvcc.WatchableKV + + ag AuthGetter } func NewWatchServer(s *etcdserver.EtcdServer) pb.WatchServer { @@ -41,6 +44,7 @@ func NewWatchServer(s *etcdserver.EtcdServer) pb.WatchServer { memberID: int64(s.ID()), raftTimer: s, watchable: s.Watchable(), + ag: s, } } @@ -101,6 +105,8 @@ type serverWatchStream struct { // wg waits for the send loop to complete wg sync.WaitGroup + + ag AuthGetter } func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) { @@ -118,6 +124,8 @@ func (ws *watchServer) Watch(stream 
pb.Watch_WatchServer) (err error) { progress: make(map[mvcc.WatchID]bool), prevKV: make(map[mvcc.WatchID]bool), closec: make(chan struct{}), + + ag: ws.ag, } sws.wg.Add(1) @@ -150,6 +158,19 @@ func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) { return err } +func (sws *serverWatchStream) isWatchPermitted(wcr *pb.WatchCreateRequest) bool { + authInfo, err := sws.ag.AuthInfoFromCtx(sws.gRPCStream.Context()) + if err != nil { + return false + } + if authInfo == nil { + // if auth is enabled, IsRangePermitted() can cause an error + authInfo = &auth.AuthInfo{} + } + + return sws.ag.AuthStore().IsRangePermitted(authInfo, wcr.Key, wcr.RangeEnd) == nil +} + func (sws *serverWatchStream) recvLoop() error { for { req, err := sws.gRPCStream.Recv() @@ -180,6 +201,23 @@ func (sws *serverWatchStream) recvLoop() error { // support >= key queries creq.RangeEnd = []byte{} } + + if !sws.isWatchPermitted(creq) { + wr := &pb.WatchResponse{ + Header: sws.newResponseHeader(sws.watchStream.Rev()), + WatchId: -1, + Canceled: true, + Created: true, + CancelReason: rpctypes.ErrGRPCPermissionDenied.Error(), + } + + select { + case sws.ctrlStream <- wr: + case <-sws.closec: + } + return nil + } + filters := FiltersFromRequest(creq) wsrev := sws.watchStream.Rev() diff --git a/github.com/coreos/etcd/etcdserver/apply.go b/github.com/coreos/etcd/etcdserver/apply.go index 426f801953..3aa8d71e68 100644 --- a/github.com/coreos/etcd/etcdserver/apply.go +++ b/github.com/coreos/etcd/etcdserver/apply.go @@ -76,14 +76,31 @@ type applierV3 interface { RoleList(ua *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) } +type checkReqFunc func(mvcc.ReadView, *pb.RequestOp) error + type applierV3backend struct { s *EtcdServer + + checkPut checkReqFunc + checkRange checkReqFunc +} + +func (s *EtcdServer) newApplierV3Backend() applierV3 { + base := &applierV3backend{s: s} + base.checkPut = func(rv mvcc.ReadView, req *pb.RequestOp) error { + return base.checkRequestPut(rv, req) + } + base.checkRange = func(rv mvcc.ReadView, req *pb.RequestOp) error { + return base.checkRequestRange(rv, req) + } + return base } func (s *EtcdServer) newApplierV3() applierV3 { return newAuthApplierV3( s.AuthStore(), - newQuotaApplierV3(s, &applierV3backend{s}), + newQuotaApplierV3(s, s.newApplierV3Backend()), + s.lessor, ) } @@ -193,18 +210,15 @@ func (a *applierV3backend) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (resp *pb.Pu func (a *applierV3backend) DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { resp := &pb.DeleteRangeResponse{} resp.Header = &pb.ResponseHeader{} + end := mkGteRange(dr.RangeEnd) if txn == nil { txn = a.s.kv.Write() defer txn.End() } - if isGteRange(dr.RangeEnd) { - dr.RangeEnd = []byte{} - } - if dr.PrevKv { - rr, err := txn.Range(dr.Key, dr.RangeEnd, mvcc.RangeOptions{}) + rr, err := txn.Range(dr.Key, end, mvcc.RangeOptions{}) if err != nil { return nil, err } @@ -215,7 +229,7 @@ func (a *applierV3backend) DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequ } } - resp.Deleted, resp.Header.Revision = txn.DeleteRange(dr.Key, dr.RangeEnd) + resp.Deleted, resp.Header.Revision = txn.DeleteRange(dr.Key, end) return resp, nil } @@ -228,10 +242,6 @@ func (a *applierV3backend) Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.Rang defer txn.End() } - if isGteRange(r.RangeEnd) { - r.RangeEnd = []byte{} - } - limit := r.Limit if r.SortOrder != pb.RangeRequest_NONE || r.MinModRevision != 0 || r.MaxModRevision != 0 || @@ -250,7 +260,7 @@ func (a *applierV3backend) Range(txn 
mvcc.TxnRead, r *pb.RangeRequest) (*pb.Rang Count: r.CountOnly, } - rr, err := txn.Range(r.Key, r.RangeEnd, ro) + rr, err := txn.Range(r.Key, mkGteRange(r.RangeEnd), ro) if err != nil { return nil, err } @@ -318,153 +328,201 @@ func (a *applierV3backend) Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.Rang } func (a *applierV3backend) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) { - ok := true - for _, c := range rt.Compare { - if _, ok = a.applyCompare(c); !ok { - break - } - } - - var reqs []*pb.RequestOp - if ok { - reqs = rt.Success - } else { - reqs = rt.Failure - } + isWrite := !isTxnReadonly(rt) + txn := mvcc.NewReadOnlyTxnWrite(a.s.KV().Read()) - if err := a.checkRequestPut(reqs); err != nil { - return nil, err + txnPath := compareToPath(txn, rt) + if isWrite { + if _, err := checkRequests(txn, rt, txnPath, a.checkPut); err != nil { + txn.End() + return nil, err + } } - if err := a.checkRequestRange(reqs); err != nil { + if _, err := checkRequests(txn, rt, txnPath, a.checkRange); err != nil { + txn.End() return nil, err } - resps := make([]*pb.ResponseOp, len(reqs)) + txnResp, _ := newTxnResp(rt, txnPath) - // When executing the operations of txn, etcd must hold the txn lock so - // readers do not see any intermediate results. - // TODO: use Read txn if only Ranges - txn := a.s.KV().Write() - for i := range reqs { - resps[i] = a.applyUnion(txn, reqs[i]) + // When executing mutable txn ops, etcd must hold the txn lock so + // readers do not see any intermediate results. Since writes are + // serialized on the raft loop, the revision in the read view will + // be the revision of the write txn. + if isWrite { + txn.End() + txn = a.s.KV().Write() } + a.applyTxn(txn, rt, txnPath, txnResp) rev := txn.Rev() if len(txn.Changes()) != 0 { rev++ } txn.End() - txnResp := &pb.TxnResponse{} - txnResp.Header = &pb.ResponseHeader{} txnResp.Header.Revision = rev - txnResp.Responses = resps - txnResp.Succeeded = ok return txnResp, nil } -// applyCompare applies the compare request. -// It returns the revision at which the comparison happens. If the comparison -// succeeds, the it returns true. Otherwise it returns false. -func (a *applierV3backend) applyCompare(c *pb.Compare) (int64, bool) { - rr, err := a.s.KV().Range(c.Key, nil, mvcc.RangeOptions{}) - rev := rr.Rev +// newTxnResp allocates a txn response for a txn request given a path. 
+func newTxnResp(rt *pb.TxnRequest, txnPath []bool) (txnResp *pb.TxnResponse, txnCount int) { + reqs := rt.Success + if !txnPath[0] { + reqs = rt.Failure + } + resps := make([]*pb.ResponseOp, len(reqs)) + txnResp = &pb.TxnResponse{ + Responses: resps, + Succeeded: txnPath[0], + Header: &pb.ResponseHeader{}, + } + for i, req := range reqs { + switch tv := req.Request.(type) { + case *pb.RequestOp_RequestRange: + resps[i] = &pb.ResponseOp{Response: &pb.ResponseOp_ResponseRange{}} + case *pb.RequestOp_RequestPut: + resps[i] = &pb.ResponseOp{Response: &pb.ResponseOp_ResponsePut{}} + case *pb.RequestOp_RequestDeleteRange: + resps[i] = &pb.ResponseOp{Response: &pb.ResponseOp_ResponseDeleteRange{}} + case *pb.RequestOp_RequestTxn: + resp, txns := newTxnResp(tv.RequestTxn, txnPath[1:]) + resps[i] = &pb.ResponseOp{Response: &pb.ResponseOp_ResponseTxn{ResponseTxn: resp}} + txnPath = txnPath[1+txns:] + txnCount += txns + 1 + default: + } + } + return txnResp, txnCount +} + +func compareToPath(rv mvcc.ReadView, rt *pb.TxnRequest) []bool { + txnPath := make([]bool, 1) + ops := rt.Success + if txnPath[0] = applyCompares(rv, rt.Compare); !txnPath[0] { + ops = rt.Failure + } + for _, op := range ops { + tv, ok := op.Request.(*pb.RequestOp_RequestTxn) + if !ok || tv.RequestTxn == nil { + continue + } + txnPath = append(txnPath, compareToPath(rv, tv.RequestTxn)...) + } + return txnPath +} + +func applyCompares(rv mvcc.ReadView, cmps []*pb.Compare) bool { + for _, c := range cmps { + if !applyCompare(rv, c) { + return false + } + } + return true +} +// applyCompare applies the compare request. +// If the comparison succeeds, it returns true. Otherwise, returns false. +func applyCompare(rv mvcc.ReadView, c *pb.Compare) bool { + // TOOD: possible optimizations + // * chunk reads for large ranges to conserve memory + // * rewrite rules for common patterns: + // ex. "[a, b) createrev > 0" => "limit 1 /\ kvs > 0" + // * caching + rr, err := rv.Range(c.Key, mkGteRange(c.RangeEnd), mvcc.RangeOptions{}) if err != nil { - return rev, false + return false } - var ckv mvccpb.KeyValue - if len(rr.KVs) != 0 { - ckv = rr.KVs[0] - } else { - // Use the zero value of ckv normally. However... + if len(rr.KVs) == 0 { if c.Target == pb.Compare_VALUE { - // Always fail if we're comparing a value on a key that doesn't exist. 
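The Txn rewrite above flattens the branch decisions of a (possibly nested) transaction into a []bool (the txnPath) before anything is applied. A minimal standalone sketch of that bookkeeping only, using hypothetical names (fakeTxn, evalPath) rather than etcd's types:

package main

import "fmt"

type fakeTxn struct {
	compareOK bool       // stand-in for evaluating the txn's compares against a read view
	success   []*fakeTxn // ops taken when the compares succeed; nil entries model non-txn ops
	failure   []*fakeTxn // ops taken when the compares fail
}

// evalPath mirrors the shape of compareToPath: one bool per transaction,
// parent first, followed by the nested transactions of whichever branch
// the parent's compares selected, in order.
func evalPath(t *fakeTxn) []bool {
	path := []bool{t.compareOK}
	ops := t.success
	if !t.compareOK {
		ops = t.failure
	}
	for _, op := range ops {
		if op != nil { // only nested txns contribute entries to the path
			path = append(path, evalPath(op)...)
		}
	}
	return path
}

func main() {
	inner := &fakeTxn{compareOK: false}
	outer := &fakeTxn{compareOK: true, success: []*fakeTxn{nil, inner}}
	fmt.Println(evalPath(outer)) // [true false]
}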
- // We can treat non-existence as the empty set explicitly, such that - // even a key with a value of length 0 bytes is still a real key - // that was written that way - return rev, false + // Always fail if comparing a value on a key/keys that doesn't exist; + // nil == empty string in grpc; no way to represent missing value + return false + } + return compareKV(c, mvccpb.KeyValue{}) + } + for _, kv := range rr.KVs { + if !compareKV(c, kv) { + return false } } + return true +} - // -1 is less, 0 is equal, 1 is greater +func compareKV(c *pb.Compare, ckv mvccpb.KeyValue) bool { var result int + rev := int64(0) switch c.Target { case pb.Compare_VALUE: - tv, _ := c.TargetUnion.(*pb.Compare_Value) - if tv != nil { - result = bytes.Compare(ckv.Value, tv.Value) + v := []byte{} + if tv, _ := c.TargetUnion.(*pb.Compare_Value); tv != nil { + v = tv.Value } + result = bytes.Compare(ckv.Value, v) case pb.Compare_CREATE: - tv, _ := c.TargetUnion.(*pb.Compare_CreateRevision) - if tv != nil { - result = compareInt64(ckv.CreateRevision, tv.CreateRevision) + if tv, _ := c.TargetUnion.(*pb.Compare_CreateRevision); tv != nil { + rev = tv.CreateRevision } - + result = compareInt64(ckv.CreateRevision, rev) case pb.Compare_MOD: - tv, _ := c.TargetUnion.(*pb.Compare_ModRevision) - if tv != nil { - result = compareInt64(ckv.ModRevision, tv.ModRevision) + if tv, _ := c.TargetUnion.(*pb.Compare_ModRevision); tv != nil { + rev = tv.ModRevision } + result = compareInt64(ckv.ModRevision, rev) case pb.Compare_VERSION: - tv, _ := c.TargetUnion.(*pb.Compare_Version) - if tv != nil { - result = compareInt64(ckv.Version, tv.Version) + if tv, _ := c.TargetUnion.(*pb.Compare_Version); tv != nil { + rev = tv.Version } + result = compareInt64(ckv.Version, rev) } - switch c.Result { case pb.Compare_EQUAL: - if result != 0 { - return rev, false - } + return result == 0 case pb.Compare_NOT_EQUAL: - if result == 0 { - return rev, false - } + return result != 0 case pb.Compare_GREATER: - if result != 1 { - return rev, false - } + return result > 0 case pb.Compare_LESS: - if result != -1 { - return rev, false - } + return result < 0 } - return rev, true + return true } -func (a *applierV3backend) applyUnion(txn mvcc.TxnWrite, union *pb.RequestOp) *pb.ResponseOp { - switch tv := union.Request.(type) { - case *pb.RequestOp_RequestRange: - if tv.RequestRange != nil { +func (a *applierV3backend) applyTxn(txn mvcc.TxnWrite, rt *pb.TxnRequest, txnPath []bool, tresp *pb.TxnResponse) (txns int) { + reqs := rt.Success + if !txnPath[0] { + reqs = rt.Failure + } + for i, req := range reqs { + respi := tresp.Responses[i].Response + switch tv := req.Request.(type) { + case *pb.RequestOp_RequestRange: resp, err := a.Range(txn, tv.RequestRange) if err != nil { plog.Panicf("unexpected error during txn: %v", err) } - return &pb.ResponseOp{Response: &pb.ResponseOp_ResponseRange{ResponseRange: resp}} - } - case *pb.RequestOp_RequestPut: - if tv.RequestPut != nil { + respi.(*pb.ResponseOp_ResponseRange).ResponseRange = resp + case *pb.RequestOp_RequestPut: resp, err := a.Put(txn, tv.RequestPut) if err != nil { plog.Panicf("unexpected error during txn: %v", err) } - return &pb.ResponseOp{Response: &pb.ResponseOp_ResponsePut{ResponsePut: resp}} - } - case *pb.RequestOp_RequestDeleteRange: - if tv.RequestDeleteRange != nil { + respi.(*pb.ResponseOp_ResponsePut).ResponsePut = resp + case *pb.RequestOp_RequestDeleteRange: resp, err := a.DeleteRange(txn, tv.RequestDeleteRange) if err != nil { plog.Panicf("unexpected error during txn: %v", err) } - return 
&pb.ResponseOp{Response: &pb.ResponseOp_ResponseDeleteRange{ResponseDeleteRange: resp}} + respi.(*pb.ResponseOp_ResponseDeleteRange).ResponseDeleteRange = resp + case *pb.RequestOp_RequestTxn: + resp := respi.(*pb.ResponseOp_ResponseTxn).ResponseTxn + applyTxns := a.applyTxn(txn, tv.RequestTxn, txnPath[1:], resp) + txns += applyTxns + 1 + txnPath = txnPath[applyTxns+1:] + default: + // empty union } - default: - // empty union - return nil } - return nil - + return txns } func (a *applierV3backend) Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, error) { @@ -770,57 +828,70 @@ func (s *kvSortByValue) Less(i, j int) bool { return bytes.Compare(s.kvs[i].Value, s.kvs[j].Value) < 0 } -func (a *applierV3backend) checkRequestPut(reqs []*pb.RequestOp) error { - for _, requ := range reqs { - tv, ok := requ.Request.(*pb.RequestOp_RequestPut) - if !ok { - continue - } - preq := tv.RequestPut - if preq == nil { - continue - } - if preq.IgnoreValue || preq.IgnoreLease { - // expects previous key-value, error if not exist - rr, err := a.s.KV().Range(preq.Key, nil, mvcc.RangeOptions{}) +func checkRequests(rv mvcc.ReadView, rt *pb.TxnRequest, txnPath []bool, f checkReqFunc) (int, error) { + txnCount := 0 + reqs := rt.Success + if !txnPath[0] { + reqs = rt.Failure + } + for _, req := range reqs { + if tv, ok := req.Request.(*pb.RequestOp_RequestTxn); ok && tv.RequestTxn != nil { + txns, err := checkRequests(rv, tv.RequestTxn, txnPath[1:], f) if err != nil { - return err + return 0, err } - if rr == nil || len(rr.KVs) == 0 { - return ErrKeyNotFound - } - } - if lease.LeaseID(preq.Lease) == lease.NoLease { + txnCount += txns + 1 + txnPath = txnPath[txns+1:] continue } - if l := a.s.lessor.Lookup(lease.LeaseID(preq.Lease)); l == nil { - return lease.ErrLeaseNotFound + if err := f(rv, req); err != nil { + return 0, err } } - return nil + return txnCount, nil } -func (a *applierV3backend) checkRequestRange(reqs []*pb.RequestOp) error { - for _, requ := range reqs { - tv, ok := requ.Request.(*pb.RequestOp_RequestRange) - if !ok { - continue - } - greq := tv.RequestRange - if greq == nil || greq.Revision == 0 { - continue +func (a *applierV3backend) checkRequestPut(rv mvcc.ReadView, reqOp *pb.RequestOp) error { + tv, ok := reqOp.Request.(*pb.RequestOp_RequestPut) + if !ok || tv.RequestPut == nil { + return nil + } + req := tv.RequestPut + if req.IgnoreValue || req.IgnoreLease { + // expects previous key-value, error if not exist + rr, err := rv.Range(req.Key, nil, mvcc.RangeOptions{}) + if err != nil { + return err } - - if greq.Revision > a.s.KV().Rev() { - return mvcc.ErrFutureRev + if rr == nil || len(rr.KVs) == 0 { + return ErrKeyNotFound } - if greq.Revision < a.s.KV().FirstRev() { - return mvcc.ErrCompacted + } + if lease.LeaseID(req.Lease) != lease.NoLease { + if l := a.s.lessor.Lookup(lease.LeaseID(req.Lease)); l == nil { + return lease.ErrLeaseNotFound } } return nil } +func (a *applierV3backend) checkRequestRange(rv mvcc.ReadView, reqOp *pb.RequestOp) error { + tv, ok := reqOp.Request.(*pb.RequestOp_RequestRange) + if !ok || tv.RequestRange == nil { + return nil + } + req := tv.RequestRange + switch { + case req.Revision == 0: + return nil + case req.Revision > rv.Rev(): + return mvcc.ErrFutureRev + case req.Revision < rv.FirstRev(): + return mvcc.ErrCompacted + } + return nil +} + func compareInt64(a, b int64) int { switch { case a < b: @@ -832,10 +903,15 @@ func compareInt64(a, b int64) int { } } -// isGteRange determines if the range end is a >= range. 
This works around grpc +// mkGteRange determines if the range end is a >= range. This works around grpc // sending empty byte strings as nil; >= is encoded in the range end as '\0'. -func isGteRange(rangeEnd []byte) bool { - return len(rangeEnd) == 1 && rangeEnd[0] == 0 +// If it is a GTE range, then []byte{} is returned to indicate the empty byte +// string (vs nil being no byte string). +func mkGteRange(rangeEnd []byte) []byte { + if len(rangeEnd) == 1 && rangeEnd[0] == 0 { + return []byte{} + } + return rangeEnd } func noSideEffect(r *pb.InternalRaftRequest) bool { diff --git a/github.com/coreos/etcd/etcdserver/apply_auth.go b/github.com/coreos/etcd/etcdserver/apply_auth.go index 7da4ae45df..ec9391435d 100644 --- a/github.com/coreos/etcd/etcdserver/apply_auth.go +++ b/github.com/coreos/etcd/etcdserver/apply_auth.go @@ -19,12 +19,14 @@ import ( "github.com/coreos/etcd/auth" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "github.com/coreos/etcd/lease" "github.com/coreos/etcd/mvcc" ) type authApplierV3 struct { applierV3 - as auth.AuthStore + as auth.AuthStore + lessor lease.Lessor // mu serializes Apply so that user isn't corrupted and so that // serialized requests don't leak data from TOCTOU errors @@ -33,8 +35,8 @@ type authApplierV3 struct { authInfo auth.AuthInfo } -func newAuthApplierV3(as auth.AuthStore, base applierV3) *authApplierV3 { - return &authApplierV3{applierV3: base, as: as} +func newAuthApplierV3(as auth.AuthStore, base applierV3, lessor lease.Lessor) *authApplierV3 { + return &authApplierV3{applierV3: base, as: as, lessor: lessor} } func (aa *authApplierV3) Apply(r *pb.InternalRaftRequest) *applyResult { @@ -63,6 +65,15 @@ func (aa *authApplierV3) Put(txn mvcc.TxnWrite, r *pb.PutRequest) (*pb.PutRespon if err := aa.as.IsPutPermitted(&aa.authInfo, r.Key); err != nil { return nil, err } + + if err := aa.checkLeasePuts(lease.LeaseID(r.Lease)); err != nil { + // The specified lease is already attached with a key that cannot + // be written by this user. It means the user cannot revoke the + // lease so attaching the lease to the newly written key should + // be forbidden. 
+ return nil, err + } + if r.PrevKv { err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, nil) if err != nil { @@ -138,7 +149,7 @@ func checkTxnReqsPermission(as auth.AuthStore, ai *auth.AuthInfo, reqs []*pb.Req func checkTxnAuth(as auth.AuthStore, ai *auth.AuthInfo, rt *pb.TxnRequest) error { for _, c := range rt.Compare { - if err := as.IsRangePermitted(ai, c.Key, nil); err != nil { + if err := as.IsRangePermitted(ai, c.Key, c.RangeEnd); err != nil { return err } } @@ -158,6 +169,48 @@ func (aa *authApplierV3) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) { return aa.applierV3.Txn(rt) } +func (aa *authApplierV3) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) { + if err := aa.checkLeasePuts(lease.LeaseID(lc.ID)); err != nil { + return nil, err + } + return aa.applierV3.LeaseRevoke(lc) +} + +func (aa *authApplierV3) checkLeasePuts(leaseID lease.LeaseID) error { + lease := aa.lessor.Lookup(leaseID) + if lease != nil { + for _, key := range lease.Keys() { + if err := aa.as.IsPutPermitted(&aa.authInfo, []byte(key)); err != nil { + return err + } + } + } + + return nil +} + +func (aa *authApplierV3) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) { + err := aa.as.IsAdminPermitted(&aa.authInfo) + if err != nil && r.Name != aa.authInfo.Username { + aa.authInfo.Username = "" + aa.authInfo.Revision = 0 + return &pb.AuthUserGetResponse{}, err + } + + return aa.applierV3.UserGet(r) +} + +func (aa *authApplierV3) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) { + err := aa.as.IsAdminPermitted(&aa.authInfo) + if err != nil && !aa.as.HasRole(aa.authInfo.Username, r.Role) { + aa.authInfo.Username = "" + aa.authInfo.Revision = 0 + return &pb.AuthRoleGetResponse{}, err + } + + return aa.applierV3.RoleGet(r) +} + func needAdminPermission(r *pb.InternalRaftRequest) bool { switch { case r.AuthEnable != nil: @@ -172,16 +225,12 @@ func needAdminPermission(r *pb.InternalRaftRequest) bool { return true case r.AuthUserGrantRole != nil: return true - case r.AuthUserGet != nil: - return true case r.AuthUserRevokeRole != nil: return true case r.AuthRoleAdd != nil: return true case r.AuthRoleGrantPermission != nil: return true - case r.AuthRoleGet != nil: - return true case r.AuthRoleRevokePermission != nil: return true case r.AuthRoleDelete != nil: diff --git a/github.com/coreos/etcd/etcdserver/backend.go b/github.com/coreos/etcd/etcdserver/backend.go new file mode 100644 index 0000000000..01a84d04d4 --- /dev/null +++ b/github.com/coreos/etcd/etcdserver/backend.go @@ -0,0 +1,81 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
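The apply_auth.go changes above gate both LeaseRevoke and lease-attaching Puts on the same rule: the caller must be allowed to write every key currently attached to the lease, since revoking the lease would delete those keys. A minimal sketch of that check, assuming hypothetical stand-ins (leaseKeys for lessor.Lookup(id).Keys(), canPut for IsPutPermitted):

package main

import (
	"errors"
	"fmt"
)

var errPermissionDenied = errors.New("permission denied")

// checkLeasePuts permits a lease operation only if every key attached to the
// lease is writable by the caller.
func checkLeasePuts(leaseKeys []string, canPut func(key string) bool) error {
	for _, k := range leaseKeys {
		if !canPut(k) {
			return errPermissionDenied
		}
	}
	return nil
}

func main() {
	attached := []string{"/app/a", "/secret/b"}
	canPut := func(key string) bool { return key == "/app/a" } // user may not write /secret/b
	fmt.Println(checkLeasePuts(attached, canPut))              // permission denied
}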
+ +package etcdserver + +import ( + "fmt" + "os" + "time" + + "github.com/coreos/etcd/lease" + "github.com/coreos/etcd/mvcc" + "github.com/coreos/etcd/mvcc/backend" + "github.com/coreos/etcd/raft/raftpb" + "github.com/coreos/etcd/snap" +) + +func newBackend(cfg ServerConfig) backend.Backend { + bcfg := backend.DefaultBackendConfig() + bcfg.Path = cfg.backendPath() + if cfg.QuotaBackendBytes > 0 && cfg.QuotaBackendBytes != DefaultQuotaBytes { + // permit 10% excess over quota for disarm + bcfg.MmapSize = uint64(cfg.QuotaBackendBytes + cfg.QuotaBackendBytes/10) + } + return backend.New(bcfg) +} + +// openSnapshotBackend renames a snapshot db to the current etcd db and opens it. +func openSnapshotBackend(cfg ServerConfig, ss *snap.Snapshotter, snapshot raftpb.Snapshot) (backend.Backend, error) { + snapPath, err := ss.DBFilePath(snapshot.Metadata.Index) + if err != nil { + return nil, fmt.Errorf("database snapshot file path error: %v", err) + } + if err := os.Rename(snapPath, cfg.backendPath()); err != nil { + return nil, fmt.Errorf("rename snapshot file error: %v", err) + } + return openBackend(cfg), nil +} + +// openBackend returns a backend using the current etcd db. +func openBackend(cfg ServerConfig) backend.Backend { + fn := cfg.backendPath() + beOpened := make(chan backend.Backend) + go func() { + beOpened <- newBackend(cfg) + }() + select { + case be := <-beOpened: + return be + case <-time.After(time.Second): + plog.Warningf("another etcd process is using %q and holds the file lock.", fn) + plog.Warningf("waiting for it to exit before starting...") + } + return <-beOpened +} + +// recoverBackendSnapshot recovers the DB from a snapshot in case etcd crashes +// before updating the backend db after persisting raft snapshot to disk, +// violating the invariant snapshot.Metadata.Index < db.consistentIndex. In this +// case, replace the db with the snapshot db sent by the leader. +func recoverSnapshotBackend(cfg ServerConfig, oldbe backend.Backend, snapshot raftpb.Snapshot) (backend.Backend, error) { + var cIndex consistentIndex + kv := mvcc.New(oldbe, &lease.FakeLessor{}, &cIndex) + defer kv.Close() + if snapshot.Metadata.Index <= kv.ConsistentIndex() { + return oldbe, nil + } + oldbe.Close() + return openSnapshotBackend(cfg, snap.New(cfg.SnapDir()), snapshot) +} diff --git a/github.com/coreos/etcd/etcdserver/cluster_util.go b/github.com/coreos/etcd/etcdserver/cluster_util.go index fa84ffae63..f44862a463 100644 --- a/github.com/coreos/etcd/etcdserver/cluster_util.go +++ b/github.com/coreos/etcd/etcdserver/cluster_util.go @@ -23,7 +23,6 @@ import ( "time" "github.com/coreos/etcd/etcdserver/membership" - "github.com/coreos/etcd/pkg/httputil" "github.com/coreos/etcd/pkg/types" "github.com/coreos/etcd/version" "github.com/coreos/go-semver/semver" @@ -241,15 +240,6 @@ func getVersion(m *membership.Member, rt http.RoundTripper) (*version.Versions, plog.Warningf("failed to reach the peerURL(%s) of member %s (%v)", u, m.ID, err) continue } - // etcd 2.0 does not have version endpoint on peer url. 
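openBackend above hands the potentially blocking boltdb open to a goroutine and, if it has not returned within a second, warns that another etcd process probably holds the file lock before continuing to wait. A standalone sketch of that pattern, with slowOpen as a hypothetical stand-in for backend.New:

package main

import (
	"fmt"
	"time"
)

// openWithWarning runs a possibly blocking open in a goroutine; if it has not
// finished after a second, it logs a warning and then keeps waiting.
func openWithWarning(path string, slowOpen func(string) string) string {
	opened := make(chan string)
	go func() { opened <- slowOpen(path) }()
	select {
	case be := <-opened:
		return be
	case <-time.After(time.Second):
		fmt.Printf("another process is using %q and holds the file lock, waiting...\n", path)
	}
	return <-opened
}

func main() {
	slowOpen := func(p string) string {
		time.Sleep(1500 * time.Millisecond) // simulate waiting on the boltdb file lock
		return "backend(" + p + ")"
	}
	fmt.Println(openWithWarning("member/snap/db", slowOpen))
}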
- if resp.StatusCode == http.StatusNotFound { - httputil.GracefulClose(resp) - return &version.Versions{ - Server: "2.0.0", - Cluster: "2.0.0", - }, nil - } - var b []byte b, err = ioutil.ReadAll(resp.Body) resp.Body.Close() diff --git a/github.com/coreos/etcd/etcdserver/config.go b/github.com/coreos/etcd/etcdserver/config.go index 50bc212928..f6ed1f1bae 100644 --- a/github.com/coreos/etcd/etcdserver/config.go +++ b/github.com/coreos/etcd/etcdserver/config.go @@ -53,7 +53,12 @@ type ServerConfig struct { BootstrapTimeout time.Duration AutoCompactionRetention int + AutoCompactionMode string QuotaBackendBytes int64 + MaxTxnOps uint + + // MaxRequestBytes is the maximum request size to send over raft. + MaxRequestBytes uint StrictReconfigCheck bool @@ -113,11 +118,41 @@ func (c *ServerConfig) advertiseMatchesCluster() error { sort.Strings(apurls) ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second) defer cancel() - if !netutil.URLStringsEqual(ctx, apurls, urls.StringSlice()) { - umap := map[string]types.URLs{c.Name: c.PeerURLs} - return fmt.Errorf("--initial-cluster must include %s given --initial-advertise-peer-urls=%s", types.URLsMap(umap).String(), strings.Join(apurls, ",")) + if netutil.URLStringsEqual(ctx, apurls, urls.StringSlice()) { + return nil } - return nil + + initMap, apMap := make(map[string]struct{}), make(map[string]struct{}) + for _, url := range c.PeerURLs { + apMap[url.String()] = struct{}{} + } + for _, url := range c.InitialPeerURLsMap[c.Name] { + initMap[url.String()] = struct{}{} + } + + missing := []string{} + for url := range initMap { + if _, ok := apMap[url]; !ok { + missing = append(missing, url) + } + } + if len(missing) > 0 { + for i := range missing { + missing[i] = c.Name + "=" + missing[i] + } + mstr := strings.Join(missing, ",") + apStr := strings.Join(apurls, ",") + return fmt.Errorf("--initial-cluster has %s but missing from --initial-advertise-peer-urls=%s ", mstr, apStr) + } + + for url := range apMap { + if _, ok := initMap[url]; !ok { + missing = append(missing, url) + } + } + mstr := strings.Join(missing, ",") + umap := types.URLsMap(map[string]types.URLs{c.Name: c.PeerURLs}) + return fmt.Errorf("--initial-advertise-peer-urls has %s but missing from --initial-cluster=%s", mstr, umap.String()) } func (c *ServerConfig) MemberDir() string { return filepath.Join(c.DataDir, "member") } @@ -200,3 +235,5 @@ func (c *ServerConfig) bootstrapTimeout() time.Duration { } return time.Second } + +func (c *ServerConfig) backendPath() string { return filepath.Join(c.SnapDir(), "db") } diff --git a/github.com/coreos/etcd/etcdserver/config_test.go b/github.com/coreos/etcd/etcdserver/config_test.go index bf0cd7f75d..e574ab10dc 100644 --- a/github.com/coreos/etcd/etcdserver/config_test.go +++ b/github.com/coreos/etcd/etcdserver/config_test.go @@ -107,6 +107,14 @@ func TestConfigVerifyLocalMember(t *testing.T) { true, }, + { + // Advertised peer URLs must match those in cluster-state + "node1=http://localhost:12345", + []string{"http://localhost:2380", "http://localhost:12345"}, + true, + + true, + }, { // Advertised peer URLs must match those in cluster-state "node1=http://localhost:2380", diff --git a/github.com/coreos/etcd/etcdserver/errors.go b/github.com/coreos/etcd/etcdserver/errors.go index ed749dbe8d..09571e56e3 100644 --- a/github.com/coreos/etcd/etcdserver/errors.go +++ b/github.com/coreos/etcd/etcdserver/errors.go @@ -29,6 +29,7 @@ var ( ErrTimeoutLeaderTransfer = errors.New("etcdserver: request timed out, leader transfer took too long") 
ErrNotEnoughStartedMembers = errors.New("etcdserver: re-configuration failed due to not enough started members") ErrNoLeader = errors.New("etcdserver: no leader") + ErrNotLeader = errors.New("etcdserver: not leader") ErrRequestTooLarge = errors.New("etcdserver: request is too large") ErrNoSpace = errors.New("etcdserver: no space") ErrTooManyRequests = errors.New("etcdserver: too many requests") diff --git a/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go b/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go index aabf90061f..febf62cf41 100644 --- a/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go +++ b/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go @@ -58,6 +58,8 @@ MemberListResponse DefragmentRequest DefragmentResponse + MoveLeaderRequest + MoveLeaderResponse AlarmRequest AlarmMember AlarmResponse diff --git a/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.gw.go b/github.com/coreos/etcd/etcdserver/etcdserverpb/gw/rpc.pb.gw.go similarity index 88% rename from github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.gw.go rename to github.com/coreos/etcd/etcdserver/etcdserverpb/gw/rpc.pb.gw.go index 473ad582ef..2421181b85 100644 --- a/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.gw.go +++ b/github.com/coreos/etcd/etcdserver/etcdserverpb/gw/rpc.pb.gw.go @@ -7,9 +7,10 @@ Package etcdserverpb is a reverse proxy. It translates gRPC into RESTful JSON APIs. */ -package etcdserverpb +package gw import ( + "github.com/coreos/etcd/etcdserver/etcdserverpb" "io" "net/http" @@ -27,8 +28,8 @@ var _ io.Reader var _ = runtime.String var _ = utilities.NewDoubleArray -func request_KV_Range_0(ctx context.Context, marshaler runtime.Marshaler, client KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq RangeRequest +func request_KV_Range_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.RangeRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { @@ -40,8 +41,8 @@ func request_KV_Range_0(ctx context.Context, marshaler runtime.Marshaler, client } -func request_KV_Put_0(ctx context.Context, marshaler runtime.Marshaler, client KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq PutRequest +func request_KV_Put_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.PutRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { @@ -53,8 +54,8 @@ func request_KV_Put_0(ctx context.Context, marshaler runtime.Marshaler, client K } -func request_KV_DeleteRange_0(ctx context.Context, marshaler runtime.Marshaler, client KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq DeleteRangeRequest +func request_KV_DeleteRange_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.DeleteRangeRequest var metadata runtime.ServerMetadata if err := 
marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { @@ -66,8 +67,8 @@ func request_KV_DeleteRange_0(ctx context.Context, marshaler runtime.Marshaler, } -func request_KV_Txn_0(ctx context.Context, marshaler runtime.Marshaler, client KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq TxnRequest +func request_KV_Txn_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.TxnRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { @@ -79,8 +80,8 @@ func request_KV_Txn_0(ctx context.Context, marshaler runtime.Marshaler, client K } -func request_KV_Compact_0(ctx context.Context, marshaler runtime.Marshaler, client KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq CompactionRequest +func request_KV_Compact_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.CompactionRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { @@ -92,7 +93,7 @@ func request_KV_Compact_0(ctx context.Context, marshaler runtime.Marshaler, clie } -func request_Watch_Watch_0(ctx context.Context, marshaler runtime.Marshaler, client WatchClient, req *http.Request, pathParams map[string]string) (Watch_WatchClient, runtime.ServerMetadata, error) { +func request_Watch_Watch_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.WatchClient, req *http.Request, pathParams map[string]string) (etcdserverpb.Watch_WatchClient, runtime.ServerMetadata, error) { var metadata runtime.ServerMetadata stream, err := client.Watch(ctx) if err != nil { @@ -101,7 +102,7 @@ func request_Watch_Watch_0(ctx context.Context, marshaler runtime.Marshaler, cli } dec := marshaler.NewDecoder(req.Body) handleSend := func() error { - var protoReq WatchRequest + var protoReq etcdserverpb.WatchRequest err = dec.Decode(&protoReq) if err == io.EOF { return err @@ -144,8 +145,8 @@ func request_Watch_Watch_0(ctx context.Context, marshaler runtime.Marshaler, cli return stream, metadata, nil } -func request_Lease_LeaseGrant_0(ctx context.Context, marshaler runtime.Marshaler, client LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq LeaseGrantRequest +func request_Lease_LeaseGrant_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.LeaseGrantRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { @@ -157,8 +158,8 @@ func request_Lease_LeaseGrant_0(ctx context.Context, marshaler runtime.Marshaler } -func request_Lease_LeaseRevoke_0(ctx context.Context, marshaler runtime.Marshaler, client LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq LeaseRevokeRequest +func request_Lease_LeaseRevoke_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams 
map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.LeaseRevokeRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { @@ -170,7 +171,7 @@ func request_Lease_LeaseRevoke_0(ctx context.Context, marshaler runtime.Marshale } -func request_Lease_LeaseKeepAlive_0(ctx context.Context, marshaler runtime.Marshaler, client LeaseClient, req *http.Request, pathParams map[string]string) (Lease_LeaseKeepAliveClient, runtime.ServerMetadata, error) { +func request_Lease_LeaseKeepAlive_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (etcdserverpb.Lease_LeaseKeepAliveClient, runtime.ServerMetadata, error) { var metadata runtime.ServerMetadata stream, err := client.LeaseKeepAlive(ctx) if err != nil { @@ -179,7 +180,7 @@ func request_Lease_LeaseKeepAlive_0(ctx context.Context, marshaler runtime.Marsh } dec := marshaler.NewDecoder(req.Body) handleSend := func() error { - var protoReq LeaseKeepAliveRequest + var protoReq etcdserverpb.LeaseKeepAliveRequest err = dec.Decode(&protoReq) if err == io.EOF { return err @@ -222,8 +223,8 @@ func request_Lease_LeaseKeepAlive_0(ctx context.Context, marshaler runtime.Marsh return stream, metadata, nil } -func request_Lease_LeaseTimeToLive_0(ctx context.Context, marshaler runtime.Marshaler, client LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq LeaseTimeToLiveRequest +func request_Lease_LeaseTimeToLive_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.LeaseTimeToLiveRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { @@ -235,8 +236,8 @@ func request_Lease_LeaseTimeToLive_0(ctx context.Context, marshaler runtime.Mars } -func request_Cluster_MemberAdd_0(ctx context.Context, marshaler runtime.Marshaler, client ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq MemberAddRequest +func request_Cluster_MemberAdd_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.MemberAddRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { @@ -248,8 +249,8 @@ func request_Cluster_MemberAdd_0(ctx context.Context, marshaler runtime.Marshale } -func request_Cluster_MemberRemove_0(ctx context.Context, marshaler runtime.Marshaler, client ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq MemberRemoveRequest +func request_Cluster_MemberRemove_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.MemberRemoveRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { @@ -261,8 +262,8 @@ func request_Cluster_MemberRemove_0(ctx context.Context, marshaler runtime.Marsh } -func request_Cluster_MemberUpdate_0(ctx context.Context, 
marshaler runtime.Marshaler, client ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq MemberUpdateRequest +func request_Cluster_MemberUpdate_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.MemberUpdateRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { @@ -274,8 +275,8 @@ func request_Cluster_MemberUpdate_0(ctx context.Context, marshaler runtime.Marsh } -func request_Cluster_MemberList_0(ctx context.Context, marshaler runtime.Marshaler, client ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq MemberListRequest +func request_Cluster_MemberList_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.MemberListRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { @@ -287,8 +288,8 @@ func request_Cluster_MemberList_0(ctx context.Context, marshaler runtime.Marshal } -func request_Maintenance_Alarm_0(ctx context.Context, marshaler runtime.Marshaler, client MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AlarmRequest +func request_Maintenance_Alarm_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AlarmRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { @@ -300,8 +301,8 @@ func request_Maintenance_Alarm_0(ctx context.Context, marshaler runtime.Marshale } -func request_Maintenance_Status_0(ctx context.Context, marshaler runtime.Marshaler, client MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq StatusRequest +func request_Maintenance_Status_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.StatusRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { @@ -313,8 +314,8 @@ func request_Maintenance_Status_0(ctx context.Context, marshaler runtime.Marshal } -func request_Maintenance_Defragment_0(ctx context.Context, marshaler runtime.Marshaler, client MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq DefragmentRequest +func request_Maintenance_Defragment_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.DefragmentRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { @@ -326,8 +327,8 @@ func request_Maintenance_Defragment_0(ctx context.Context, marshaler runtime.Mar } -func 
request_Maintenance_Hash_0(ctx context.Context, marshaler runtime.Marshaler, client MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq HashRequest +func request_Maintenance_Hash_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.HashRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { @@ -339,8 +340,8 @@ func request_Maintenance_Hash_0(ctx context.Context, marshaler runtime.Marshaler } -func request_Maintenance_Snapshot_0(ctx context.Context, marshaler runtime.Marshaler, client MaintenanceClient, req *http.Request, pathParams map[string]string) (Maintenance_SnapshotClient, runtime.ServerMetadata, error) { - var protoReq SnapshotRequest +func request_Maintenance_Snapshot_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (etcdserverpb.Maintenance_SnapshotClient, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.SnapshotRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { @@ -360,8 +361,21 @@ func request_Maintenance_Snapshot_0(ctx context.Context, marshaler runtime.Marsh } -func request_Auth_AuthEnable_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthEnableRequest +func request_Maintenance_MoveLeader_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.MoveLeaderRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.MoveLeader(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Auth_AuthEnable_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthEnableRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { @@ -373,8 +387,8 @@ func request_Auth_AuthEnable_0(ctx context.Context, marshaler runtime.Marshaler, } -func request_Auth_AuthDisable_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthDisableRequest +func request_Auth_AuthDisable_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthDisableRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { @@ -386,8 +400,8 @@ func request_Auth_AuthDisable_0(ctx context.Context, marshaler runtime.Marshaler } -func request_Auth_Authenticate_0(ctx context.Context, marshaler 
runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthenticateRequest +func request_Auth_Authenticate_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthenticateRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { @@ -399,8 +413,8 @@ func request_Auth_Authenticate_0(ctx context.Context, marshaler runtime.Marshale } -func request_Auth_UserAdd_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthUserAddRequest +func request_Auth_UserAdd_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthUserAddRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { @@ -412,8 +426,8 @@ func request_Auth_UserAdd_0(ctx context.Context, marshaler runtime.Marshaler, cl } -func request_Auth_UserGet_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthUserGetRequest +func request_Auth_UserGet_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthUserGetRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { @@ -425,8 +439,8 @@ func request_Auth_UserGet_0(ctx context.Context, marshaler runtime.Marshaler, cl } -func request_Auth_UserList_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthUserListRequest +func request_Auth_UserList_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthUserListRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { @@ -438,8 +452,8 @@ func request_Auth_UserList_0(ctx context.Context, marshaler runtime.Marshaler, c } -func request_Auth_UserDelete_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthUserDeleteRequest +func request_Auth_UserDelete_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthUserDeleteRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { @@ -451,8 +465,8 @@ func request_Auth_UserDelete_0(ctx context.Context, marshaler runtime.Marshaler, } -func request_Auth_UserChangePassword_0(ctx context.Context, marshaler runtime.Marshaler, client 
AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthUserChangePasswordRequest +func request_Auth_UserChangePassword_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthUserChangePasswordRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { @@ -464,8 +478,8 @@ func request_Auth_UserChangePassword_0(ctx context.Context, marshaler runtime.Ma } -func request_Auth_UserGrantRole_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthUserGrantRoleRequest +func request_Auth_UserGrantRole_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthUserGrantRoleRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { @@ -477,8 +491,8 @@ func request_Auth_UserGrantRole_0(ctx context.Context, marshaler runtime.Marshal } -func request_Auth_UserRevokeRole_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthUserRevokeRoleRequest +func request_Auth_UserRevokeRole_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthUserRevokeRoleRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { @@ -490,8 +504,8 @@ func request_Auth_UserRevokeRole_0(ctx context.Context, marshaler runtime.Marsha } -func request_Auth_RoleAdd_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthRoleAddRequest +func request_Auth_RoleAdd_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthRoleAddRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { @@ -503,8 +517,8 @@ func request_Auth_RoleAdd_0(ctx context.Context, marshaler runtime.Marshaler, cl } -func request_Auth_RoleGet_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthRoleGetRequest +func request_Auth_RoleGet_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthRoleGetRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { @@ -516,8 +530,8 @@ func request_Auth_RoleGet_0(ctx context.Context, marshaler runtime.Marshaler, cl } -func request_Auth_RoleList_0(ctx context.Context, marshaler 
runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthRoleListRequest +func request_Auth_RoleList_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthRoleListRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { @@ -529,8 +543,8 @@ func request_Auth_RoleList_0(ctx context.Context, marshaler runtime.Marshaler, c } -func request_Auth_RoleDelete_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthRoleDeleteRequest +func request_Auth_RoleDelete_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthRoleDeleteRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { @@ -542,8 +556,8 @@ func request_Auth_RoleDelete_0(ctx context.Context, marshaler runtime.Marshaler, } -func request_Auth_RoleGrantPermission_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthRoleGrantPermissionRequest +func request_Auth_RoleGrantPermission_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthRoleGrantPermissionRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { @@ -555,8 +569,8 @@ func request_Auth_RoleGrantPermission_0(ctx context.Context, marshaler runtime.M } -func request_Auth_RoleRevokePermission_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthRoleRevokePermissionRequest +func request_Auth_RoleRevokePermission_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthRoleRevokePermissionRequest var metadata runtime.ServerMetadata if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { @@ -596,7 +610,7 @@ func RegisterKVHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, e // RegisterKVHandler registers the http handlers for service KV to "mux". // The handlers forward requests to the grpc endpoint over "conn". func RegisterKVHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - client := NewKVClient(conn) + client := etcdserverpb.NewKVClient(conn) mux.Handle("POST", pattern_KV_Range_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(ctx) @@ -793,7 +807,7 @@ func RegisterWatchHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux // RegisterWatchHandler registers the http handlers for service Watch to "mux". 
// The handlers forward requests to the grpc endpoint over "conn". func RegisterWatchHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - client := NewWatchClient(conn) + client := etcdserverpb.NewWatchClient(conn) mux.Handle("POST", pattern_Watch_Watch_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(ctx) @@ -862,7 +876,7 @@ func RegisterLeaseHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux // RegisterLeaseHandler registers the http handlers for service Lease to "mux". // The handlers forward requests to the grpc endpoint over "conn". func RegisterLeaseHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - client := NewLeaseClient(conn) + client := etcdserverpb.NewLeaseClient(conn) mux.Handle("POST", pattern_Lease_LeaseGrant_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(ctx) @@ -1027,7 +1041,7 @@ func RegisterClusterHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeM // RegisterClusterHandler registers the http handlers for service Cluster to "mux". // The handlers forward requests to the grpc endpoint over "conn". func RegisterClusterHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - client := NewClusterClient(conn) + client := etcdserverpb.NewClusterClient(conn) mux.Handle("POST", pattern_Cluster_MemberAdd_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(ctx) @@ -1192,7 +1206,7 @@ func RegisterMaintenanceHandlerFromEndpoint(ctx context.Context, mux *runtime.Se // RegisterMaintenanceHandler registers the http handlers for service Maintenance to "mux". // The handlers forward requests to the grpc endpoint over "conn". func RegisterMaintenanceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - client := NewMaintenanceClient(conn) + client := etcdserverpb.NewMaintenanceClient(conn) mux.Handle("POST", pattern_Maintenance_Alarm_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(ctx) @@ -1334,6 +1348,34 @@ func RegisterMaintenanceHandler(ctx context.Context, mux *runtime.ServeMux, conn }) + mux.Handle("POST", pattern_Maintenance_MoveLeader_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, req) + if err != nil { + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + } + resp, md, err := request_Maintenance_MoveLeader_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, outboundMarshaler, w, req, err) + return + } + + forward_Maintenance_MoveLeader_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + return nil } @@ -1347,6 +1389,8 @@ var ( pattern_Maintenance_Hash_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "maintenance", "hash"}, "")) pattern_Maintenance_Snapshot_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "maintenance", "snapshot"}, "")) + + pattern_Maintenance_MoveLeader_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "maintenance", "transfer-leadership"}, "")) ) var ( @@ -1359,6 +1403,8 @@ var ( forward_Maintenance_Hash_0 = runtime.ForwardResponseMessage forward_Maintenance_Snapshot_0 = runtime.ForwardResponseStream + + forward_Maintenance_MoveLeader_0 = runtime.ForwardResponseMessage ) // RegisterAuthHandlerFromEndpoint is same as RegisterAuthHandler but @@ -1389,7 +1435,7 @@ func RegisterAuthHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, // RegisterAuthHandler registers the http handlers for service Auth to "mux". // The handlers forward requests to the grpc endpoint over "conn". func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - client := NewAuthClient(conn) + client := etcdserverpb.NewAuthClient(conn) mux.Handle("POST", pattern_Auth_AuthEnable_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(ctx) diff --git a/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go b/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go index f99db6fbb5..1cef5dc9e2 100644 --- a/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go +++ b/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go @@ -204,7 +204,7 @@ func (x AlarmRequest_AlarmAction) String() string { return proto.EnumName(AlarmRequest_AlarmAction_name, int32(x)) } func (AlarmRequest_AlarmAction) EnumDescriptor() ([]byte, []int) { - return fileDescriptorRpc, []int{41, 0} + return fileDescriptorRpc, []int{43, 0} } type ResponseHeader struct { @@ -223,6 +223,34 @@ func (m *ResponseHeader) String() string { return proto.CompactTextSt func (*ResponseHeader) ProtoMessage() {} func (*ResponseHeader) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{0} } +func (m *ResponseHeader) GetClusterId() uint64 { + if m != nil { + return m.ClusterId + } + return 0 +} + +func (m *ResponseHeader) GetMemberId() uint64 { + if m != nil { + return m.MemberId + } + return 0 +} + +func (m *ResponseHeader) GetRevision() int64 { + if m != nil { + return m.Revision + } + return 0 +} + +func (m *ResponseHeader) GetRaftTerm() uint64 { + if m != nil { + return m.RaftTerm + } + return 0 +} + type RangeRequest struct { // key is the first key for the range. If range_end is not given, the request only looks up key. 
Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` @@ -273,6 +301,97 @@ func (m *RangeRequest) String() string { return proto.CompactTextStri func (*RangeRequest) ProtoMessage() {} func (*RangeRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{1} } +func (m *RangeRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *RangeRequest) GetRangeEnd() []byte { + if m != nil { + return m.RangeEnd + } + return nil +} + +func (m *RangeRequest) GetLimit() int64 { + if m != nil { + return m.Limit + } + return 0 +} + +func (m *RangeRequest) GetRevision() int64 { + if m != nil { + return m.Revision + } + return 0 +} + +func (m *RangeRequest) GetSortOrder() RangeRequest_SortOrder { + if m != nil { + return m.SortOrder + } + return RangeRequest_NONE +} + +func (m *RangeRequest) GetSortTarget() RangeRequest_SortTarget { + if m != nil { + return m.SortTarget + } + return RangeRequest_KEY +} + +func (m *RangeRequest) GetSerializable() bool { + if m != nil { + return m.Serializable + } + return false +} + +func (m *RangeRequest) GetKeysOnly() bool { + if m != nil { + return m.KeysOnly + } + return false +} + +func (m *RangeRequest) GetCountOnly() bool { + if m != nil { + return m.CountOnly + } + return false +} + +func (m *RangeRequest) GetMinModRevision() int64 { + if m != nil { + return m.MinModRevision + } + return 0 +} + +func (m *RangeRequest) GetMaxModRevision() int64 { + if m != nil { + return m.MaxModRevision + } + return 0 +} + +func (m *RangeRequest) GetMinCreateRevision() int64 { + if m != nil { + return m.MinCreateRevision + } + return 0 +} + +func (m *RangeRequest) GetMaxCreateRevision() int64 { + if m != nil { + return m.MaxCreateRevision + } + return 0 +} + type RangeResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // kvs is the list of key-value pairs matched by the range request. @@ -303,6 +422,20 @@ func (m *RangeResponse) GetKvs() []*mvccpb.KeyValue { return nil } +func (m *RangeResponse) GetMore() bool { + if m != nil { + return m.More + } + return false +} + +func (m *RangeResponse) GetCount() int64 { + if m != nil { + return m.Count + } + return 0 +} + type PutRequest struct { // key is the key, in bytes, to put into the key-value store. Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` @@ -327,6 +460,48 @@ func (m *PutRequest) String() string { return proto.CompactTextString func (*PutRequest) ProtoMessage() {} func (*PutRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{3} } +func (m *PutRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *PutRequest) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *PutRequest) GetLease() int64 { + if m != nil { + return m.Lease + } + return 0 +} + +func (m *PutRequest) GetPrevKv() bool { + if m != nil { + return m.PrevKv + } + return false +} + +func (m *PutRequest) GetIgnoreValue() bool { + if m != nil { + return m.IgnoreValue + } + return false +} + +func (m *PutRequest) GetIgnoreLease() bool { + if m != nil { + return m.IgnoreLease + } + return false +} + type PutResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // if prev_kv is set in the request, the previous key-value pair will be returned. 
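The Get* accessors being added throughout this file follow the usual gogo/protobuf pattern: they tolerate a nil receiver and return the field's zero value, so callers can chain through optional sub-messages without explicit nil checks. A small sketch of that usage; clusterIDOf is a hypothetical helper, not part of this change:

package example

import pb "github.com/coreos/etcd/etcdserver/etcdserverpb"

// clusterIDOf is safe even when resp or resp.Header is nil; it then returns 0.
func clusterIDOf(resp *pb.RangeResponse) uint64 {
	return resp.GetHeader().GetClusterId()
}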
@@ -371,6 +546,27 @@ func (m *DeleteRangeRequest) String() string { return proto.CompactTe func (*DeleteRangeRequest) ProtoMessage() {} func (*DeleteRangeRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{5} } +func (m *DeleteRangeRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *DeleteRangeRequest) GetRangeEnd() []byte { + if m != nil { + return m.RangeEnd + } + return nil +} + +func (m *DeleteRangeRequest) GetPrevKv() bool { + if m != nil { + return m.PrevKv + } + return false +} + type DeleteRangeResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // deleted is the number of keys deleted by the delete range request. @@ -391,6 +587,13 @@ func (m *DeleteRangeResponse) GetHeader() *ResponseHeader { return nil } +func (m *DeleteRangeResponse) GetDeleted() int64 { + if m != nil { + return m.Deleted + } + return 0 +} + func (m *DeleteRangeResponse) GetPrevKvs() []*mvccpb.KeyValue { if m != nil { return m.PrevKvs @@ -405,6 +608,7 @@ type RequestOp struct { // *RequestOp_RequestRange // *RequestOp_RequestPut // *RequestOp_RequestDeleteRange + // *RequestOp_RequestTxn Request isRequestOp_Request `protobuf_oneof:"request"` } @@ -428,10 +632,14 @@ type RequestOp_RequestPut struct { type RequestOp_RequestDeleteRange struct { RequestDeleteRange *DeleteRangeRequest `protobuf:"bytes,3,opt,name=request_delete_range,json=requestDeleteRange,oneof"` } +type RequestOp_RequestTxn struct { + RequestTxn *TxnRequest `protobuf:"bytes,4,opt,name=request_txn,json=requestTxn,oneof"` +} func (*RequestOp_RequestRange) isRequestOp_Request() {} func (*RequestOp_RequestPut) isRequestOp_Request() {} func (*RequestOp_RequestDeleteRange) isRequestOp_Request() {} +func (*RequestOp_RequestTxn) isRequestOp_Request() {} func (m *RequestOp) GetRequest() isRequestOp_Request { if m != nil { @@ -461,12 +669,20 @@ func (m *RequestOp) GetRequestDeleteRange() *DeleteRangeRequest { return nil } +func (m *RequestOp) GetRequestTxn() *TxnRequest { + if x, ok := m.GetRequest().(*RequestOp_RequestTxn); ok { + return x.RequestTxn + } + return nil +} + // XXX_OneofFuncs is for the internal use of the proto package. 
func (*RequestOp) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { return _RequestOp_OneofMarshaler, _RequestOp_OneofUnmarshaler, _RequestOp_OneofSizer, []interface{}{ (*RequestOp_RequestRange)(nil), (*RequestOp_RequestPut)(nil), (*RequestOp_RequestDeleteRange)(nil), + (*RequestOp_RequestTxn)(nil), } } @@ -489,6 +705,11 @@ func _RequestOp_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { if err := b.EncodeMessage(x.RequestDeleteRange); err != nil { return err } + case *RequestOp_RequestTxn: + _ = b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.RequestTxn); err != nil { + return err + } case nil: default: return fmt.Errorf("RequestOp.Request has unexpected type %T", x) @@ -523,6 +744,14 @@ func _RequestOp_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buff err := b.DecodeMessage(msg) m.Request = &RequestOp_RequestDeleteRange{msg} return true, err + case 4: // request.request_txn + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TxnRequest) + err := b.DecodeMessage(msg) + m.Request = &RequestOp_RequestTxn{msg} + return true, err default: return false, nil } @@ -547,6 +776,11 @@ func _RequestOp_OneofSizer(msg proto.Message) (n int) { n += proto.SizeVarint(3<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(s)) n += s + case *RequestOp_RequestTxn: + s := proto.Size(x.RequestTxn) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s case nil: default: panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) @@ -561,6 +795,7 @@ type ResponseOp struct { // *ResponseOp_ResponseRange // *ResponseOp_ResponsePut // *ResponseOp_ResponseDeleteRange + // *ResponseOp_ResponseTxn Response isResponseOp_Response `protobuf_oneof:"response"` } @@ -584,10 +819,14 @@ type ResponseOp_ResponsePut struct { type ResponseOp_ResponseDeleteRange struct { ResponseDeleteRange *DeleteRangeResponse `protobuf:"bytes,3,opt,name=response_delete_range,json=responseDeleteRange,oneof"` } +type ResponseOp_ResponseTxn struct { + ResponseTxn *TxnResponse `protobuf:"bytes,4,opt,name=response_txn,json=responseTxn,oneof"` +} func (*ResponseOp_ResponseRange) isResponseOp_Response() {} func (*ResponseOp_ResponsePut) isResponseOp_Response() {} func (*ResponseOp_ResponseDeleteRange) isResponseOp_Response() {} +func (*ResponseOp_ResponseTxn) isResponseOp_Response() {} func (m *ResponseOp) GetResponse() isResponseOp_Response { if m != nil { @@ -617,12 +856,20 @@ func (m *ResponseOp) GetResponseDeleteRange() *DeleteRangeResponse { return nil } +func (m *ResponseOp) GetResponseTxn() *TxnResponse { + if x, ok := m.GetResponse().(*ResponseOp_ResponseTxn); ok { + return x.ResponseTxn + } + return nil +} + // XXX_OneofFuncs is for the internal use of the proto package. 
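The new RequestOp_RequestTxn and ResponseOp_ResponseTxn oneof cases let a transaction carry another transaction as one of its operations. A rough sketch of composing such a nested request with the generated types; it assumes the conventional TxnRequest fields (Compare/Success/Failure) and uses placeholder keys:

package example

import pb "github.com/coreos/etcd/etcdserver/etcdserverpb"

// nestedTxn wraps an inner transaction as a single op of an outer one.
func nestedTxn() *pb.TxnRequest {
	inner := &pb.TxnRequest{
		Success: []*pb.RequestOp{
			&pb.RequestOp{Request: &pb.RequestOp_RequestPut{
				RequestPut: &pb.PutRequest{Key: []byte("a"), Value: []byte("1")},
			}},
		},
	}
	return &pb.TxnRequest{
		Success: []*pb.RequestOp{
			&pb.RequestOp{Request: &pb.RequestOp_RequestTxn{RequestTxn: inner}},
		},
	}
}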
func (*ResponseOp) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { return _ResponseOp_OneofMarshaler, _ResponseOp_OneofUnmarshaler, _ResponseOp_OneofSizer, []interface{}{ (*ResponseOp_ResponseRange)(nil), (*ResponseOp_ResponsePut)(nil), (*ResponseOp_ResponseDeleteRange)(nil), + (*ResponseOp_ResponseTxn)(nil), } } @@ -645,6 +892,11 @@ func _ResponseOp_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { if err := b.EncodeMessage(x.ResponseDeleteRange); err != nil { return err } + case *ResponseOp_ResponseTxn: + _ = b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ResponseTxn); err != nil { + return err + } case nil: default: return fmt.Errorf("ResponseOp.Response has unexpected type %T", x) @@ -679,6 +931,14 @@ func _ResponseOp_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buf err := b.DecodeMessage(msg) m.Response = &ResponseOp_ResponseDeleteRange{msg} return true, err + case 4: // response.response_txn + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(TxnResponse) + err := b.DecodeMessage(msg) + m.Response = &ResponseOp_ResponseTxn{msg} + return true, err default: return false, nil } @@ -703,6 +963,11 @@ func _ResponseOp_OneofSizer(msg proto.Message) (n int) { n += proto.SizeVarint(3<<3 | proto.WireBytes) n += proto.SizeVarint(uint64(s)) n += s + case *ResponseOp_ResponseTxn: + s := proto.Size(x.ResponseTxn) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s case nil: default: panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) @@ -723,6 +988,9 @@ type Compare struct { // *Compare_ModRevision // *Compare_Value TargetUnion isCompare_TargetUnion `protobuf_oneof:"target_union"` + // range_end compares the given target to all keys in the range [key, range_end). + // See RangeRequest for more details on key ranges. + RangeEnd []byte `protobuf:"bytes,8,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` } func (m *Compare) Reset() { *m = Compare{} } @@ -761,6 +1029,27 @@ func (m *Compare) GetTargetUnion() isCompare_TargetUnion { return nil } +func (m *Compare) GetResult() Compare_CompareResult { + if m != nil { + return m.Result + } + return Compare_EQUAL +} + +func (m *Compare) GetTarget() Compare_CompareTarget { + if m != nil { + return m.Target + } + return Compare_VERSION +} + +func (m *Compare) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + func (m *Compare) GetVersion() int64 { if x, ok := m.GetTargetUnion().(*Compare_Version); ok { return x.Version @@ -789,6 +1078,13 @@ func (m *Compare) GetValue() []byte { return nil } +func (m *Compare) GetRangeEnd() []byte { + if m != nil { + return m.RangeEnd + } + return nil +} + // XXX_OneofFuncs is for the internal use of the proto package. 
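Compare now carries a range_end, so a single transaction guard can apply to every key in [key, range_end) rather than to one key. A sketch of building such a range comparison; the enum choices and key bounds here are illustrative only:

package example

import pb "github.com/coreos/etcd/etcdserver/etcdserverpb"

// rangeCompare builds a guard over all keys in ["a", "b") with mod_revision > 10.
func rangeCompare() *pb.Compare {
	return &pb.Compare{
		Result:      pb.Compare_GREATER,
		Target:      pb.Compare_MOD,
		Key:         []byte("a"),
		RangeEnd:    []byte("b"),
		TargetUnion: &pb.Compare_ModRevision{ModRevision: 10},
	}
}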
func (*Compare) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { return _Compare_OneofMarshaler, _Compare_OneofUnmarshaler, _Compare_OneofSizer, []interface{}{ @@ -957,6 +1253,13 @@ func (m *TxnResponse) GetHeader() *ResponseHeader { return nil } +func (m *TxnResponse) GetSucceeded() bool { + if m != nil { + return m.Succeeded + } + return false +} + func (m *TxnResponse) GetResponses() []*ResponseOp { if m != nil { return m.Responses @@ -980,6 +1283,20 @@ func (m *CompactionRequest) String() string { return proto.CompactTex func (*CompactionRequest) ProtoMessage() {} func (*CompactionRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{12} } +func (m *CompactionRequest) GetRevision() int64 { + if m != nil { + return m.Revision + } + return 0 +} + +func (m *CompactionRequest) GetPhysical() bool { + if m != nil { + return m.Physical + } + return false +} + type CompactionResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } @@ -1022,6 +1339,13 @@ func (m *HashResponse) GetHeader() *ResponseHeader { return nil } +func (m *HashResponse) GetHash() uint32 { + if m != nil { + return m.Hash + } + return 0 +} + type SnapshotRequest struct { } @@ -1052,6 +1376,20 @@ func (m *SnapshotResponse) GetHeader() *ResponseHeader { return nil } +func (m *SnapshotResponse) GetRemainingBytes() uint64 { + if m != nil { + return m.RemainingBytes + } + return 0 +} + +func (m *SnapshotResponse) GetBlob() []byte { + if m != nil { + return m.Blob + } + return nil +} + type WatchRequest struct { // request_union is a request to either create a new watcher or cancel an existing watcher. // @@ -1205,6 +1543,48 @@ func (m *WatchCreateRequest) String() string { return proto.CompactTe func (*WatchCreateRequest) ProtoMessage() {} func (*WatchCreateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{19} } +func (m *WatchCreateRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *WatchCreateRequest) GetRangeEnd() []byte { + if m != nil { + return m.RangeEnd + } + return nil +} + +func (m *WatchCreateRequest) GetStartRevision() int64 { + if m != nil { + return m.StartRevision + } + return 0 +} + +func (m *WatchCreateRequest) GetProgressNotify() bool { + if m != nil { + return m.ProgressNotify + } + return false +} + +func (m *WatchCreateRequest) GetFilters() []WatchCreateRequest_FilterType { + if m != nil { + return m.Filters + } + return nil +} + +func (m *WatchCreateRequest) GetPrevKv() bool { + if m != nil { + return m.PrevKv + } + return false +} + type WatchCancelRequest struct { // watch_id is the watcher id to cancel so that no more events are transmitted. WatchId int64 `protobuf:"varint,1,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"` @@ -1215,6 +1595,13 @@ func (m *WatchCancelRequest) String() string { return proto.CompactTe func (*WatchCancelRequest) ProtoMessage() {} func (*WatchCancelRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{20} } +func (m *WatchCancelRequest) GetWatchId() int64 { + if m != nil { + return m.WatchId + } + return 0 +} + type WatchResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // watch_id is the ID of the watcher that corresponds to the response. 
@@ -1235,8 +1622,10 @@ type WatchResponse struct { // // The client should treat the watcher as canceled and should not try to create any // watcher with the same start_revision again. - CompactRevision int64 `protobuf:"varint,5,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"` - Events []*mvccpb.Event `protobuf:"bytes,11,rep,name=events" json:"events,omitempty"` + CompactRevision int64 `protobuf:"varint,5,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"` + // cancel_reason indicates the reason for canceling the watcher. + CancelReason string `protobuf:"bytes,6,opt,name=cancel_reason,json=cancelReason,proto3" json:"cancel_reason,omitempty"` + Events []*mvccpb.Event `protobuf:"bytes,11,rep,name=events" json:"events,omitempty"` } func (m *WatchResponse) Reset() { *m = WatchResponse{} } @@ -1251,6 +1640,41 @@ func (m *WatchResponse) GetHeader() *ResponseHeader { return nil } +func (m *WatchResponse) GetWatchId() int64 { + if m != nil { + return m.WatchId + } + return 0 +} + +func (m *WatchResponse) GetCreated() bool { + if m != nil { + return m.Created + } + return false +} + +func (m *WatchResponse) GetCanceled() bool { + if m != nil { + return m.Canceled + } + return false +} + +func (m *WatchResponse) GetCompactRevision() int64 { + if m != nil { + return m.CompactRevision + } + return 0 +} + +func (m *WatchResponse) GetCancelReason() string { + if m != nil { + return m.CancelReason + } + return "" +} + func (m *WatchResponse) GetEvents() []*mvccpb.Event { if m != nil { return m.Events @@ -1270,6 +1694,20 @@ func (m *LeaseGrantRequest) String() string { return proto.CompactTex func (*LeaseGrantRequest) ProtoMessage() {} func (*LeaseGrantRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{22} } +func (m *LeaseGrantRequest) GetTTL() int64 { + if m != nil { + return m.TTL + } + return 0 +} + +func (m *LeaseGrantRequest) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + type LeaseGrantResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // ID is the lease ID for the granted lease. @@ -1291,6 +1729,27 @@ func (m *LeaseGrantResponse) GetHeader() *ResponseHeader { return nil } +func (m *LeaseGrantResponse) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *LeaseGrantResponse) GetTTL() int64 { + if m != nil { + return m.TTL + } + return 0 +} + +func (m *LeaseGrantResponse) GetError() string { + if m != nil { + return m.Error + } + return "" +} + type LeaseRevokeRequest struct { // ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted. 
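WatchResponse gains cancel_reason, giving clients a human-readable explanation when the server cancels a watcher. A small sketch of a response handler that surfaces it; handleWatchResponse is a hypothetical helper around whatever receive loop feeds it:

package example

import (
	"fmt"

	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)

// handleWatchResponse logs cancellations with the new reason, otherwise
// summarizes the delivered events.
func handleWatchResponse(resp *pb.WatchResponse) {
	if resp.GetCanceled() {
		fmt.Printf("watch %d canceled: %s\n", resp.GetWatchId(), resp.GetCancelReason())
		return
	}
	fmt.Printf("watch %d: %d events at revision %d\n",
		resp.GetWatchId(), len(resp.Events), resp.GetHeader().GetRevision())
}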
ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` @@ -1301,6 +1760,13 @@ func (m *LeaseRevokeRequest) String() string { return proto.CompactTe func (*LeaseRevokeRequest) ProtoMessage() {} func (*LeaseRevokeRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{24} } +func (m *LeaseRevokeRequest) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + type LeaseRevokeResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } @@ -1327,6 +1793,13 @@ func (m *LeaseKeepAliveRequest) String() string { return proto.Compac func (*LeaseKeepAliveRequest) ProtoMessage() {} func (*LeaseKeepAliveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{26} } +func (m *LeaseKeepAliveRequest) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + type LeaseKeepAliveResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // ID is the lease ID from the keep alive request. @@ -1347,6 +1820,20 @@ func (m *LeaseKeepAliveResponse) GetHeader() *ResponseHeader { return nil } +func (m *LeaseKeepAliveResponse) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *LeaseKeepAliveResponse) GetTTL() int64 { + if m != nil { + return m.TTL + } + return 0 +} + type LeaseTimeToLiveRequest struct { // ID is the lease ID for the lease. ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` @@ -1359,6 +1846,20 @@ func (m *LeaseTimeToLiveRequest) String() string { return proto.Compa func (*LeaseTimeToLiveRequest) ProtoMessage() {} func (*LeaseTimeToLiveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{28} } +func (m *LeaseTimeToLiveRequest) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *LeaseTimeToLiveRequest) GetKeys() bool { + if m != nil { + return m.Keys + } + return false +} + type LeaseTimeToLiveResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // ID is the lease ID from the keep alive request. 
@@ -1371,14 +1872,42 @@ type LeaseTimeToLiveResponse struct { Keys [][]byte `protobuf:"bytes,5,rep,name=keys" json:"keys,omitempty"` } -func (m *LeaseTimeToLiveResponse) Reset() { *m = LeaseTimeToLiveResponse{} } -func (m *LeaseTimeToLiveResponse) String() string { return proto.CompactTextString(m) } -func (*LeaseTimeToLiveResponse) ProtoMessage() {} -func (*LeaseTimeToLiveResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{29} } +func (m *LeaseTimeToLiveResponse) Reset() { *m = LeaseTimeToLiveResponse{} } +func (m *LeaseTimeToLiveResponse) String() string { return proto.CompactTextString(m) } +func (*LeaseTimeToLiveResponse) ProtoMessage() {} +func (*LeaseTimeToLiveResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{29} } + +func (m *LeaseTimeToLiveResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *LeaseTimeToLiveResponse) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *LeaseTimeToLiveResponse) GetTTL() int64 { + if m != nil { + return m.TTL + } + return 0 +} + +func (m *LeaseTimeToLiveResponse) GetGrantedTTL() int64 { + if m != nil { + return m.GrantedTTL + } + return 0 +} -func (m *LeaseTimeToLiveResponse) GetHeader() *ResponseHeader { +func (m *LeaseTimeToLiveResponse) GetKeys() [][]byte { if m != nil { - return m.Header + return m.Keys } return nil } @@ -1399,6 +1928,34 @@ func (m *Member) String() string { return proto.CompactTextString(m) func (*Member) ProtoMessage() {} func (*Member) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{30} } +func (m *Member) GetID() uint64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *Member) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Member) GetPeerURLs() []string { + if m != nil { + return m.PeerURLs + } + return nil +} + +func (m *Member) GetClientURLs() []string { + if m != nil { + return m.ClientURLs + } + return nil +} + type MemberAddRequest struct { // peerURLs is the list of URLs the added member will use to communicate with the cluster. PeerURLs []string `protobuf:"bytes,1,rep,name=peerURLs" json:"peerURLs,omitempty"` @@ -1409,6 +1966,13 @@ func (m *MemberAddRequest) String() string { return proto.CompactText func (*MemberAddRequest) ProtoMessage() {} func (*MemberAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{31} } +func (m *MemberAddRequest) GetPeerURLs() []string { + if m != nil { + return m.PeerURLs + } + return nil +} + type MemberAddResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // member is the member information for the added member. @@ -1453,6 +2017,13 @@ func (m *MemberRemoveRequest) String() string { return proto.CompactT func (*MemberRemoveRequest) ProtoMessage() {} func (*MemberRemoveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{33} } +func (m *MemberRemoveRequest) GetID() uint64 { + if m != nil { + return m.ID + } + return 0 +} + type MemberRemoveResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // members is a list of all members after removing the member. 
@@ -1490,6 +2061,20 @@ func (m *MemberUpdateRequest) String() string { return proto.CompactT func (*MemberUpdateRequest) ProtoMessage() {} func (*MemberUpdateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{35} } +func (m *MemberUpdateRequest) GetID() uint64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *MemberUpdateRequest) GetPeerURLs() []string { + if m != nil { + return m.PeerURLs + } + return nil +} + type MemberUpdateResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // members is a list of all members after updating the member. @@ -1572,6 +2157,39 @@ func (m *DefragmentResponse) GetHeader() *ResponseHeader { return nil } +type MoveLeaderRequest struct { + // targetID is the node ID for the new leader. + TargetID uint64 `protobuf:"varint,1,opt,name=targetID,proto3" json:"targetID,omitempty"` +} + +func (m *MoveLeaderRequest) Reset() { *m = MoveLeaderRequest{} } +func (m *MoveLeaderRequest) String() string { return proto.CompactTextString(m) } +func (*MoveLeaderRequest) ProtoMessage() {} +func (*MoveLeaderRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{41} } + +func (m *MoveLeaderRequest) GetTargetID() uint64 { + if m != nil { + return m.TargetID + } + return 0 +} + +type MoveLeaderResponse struct { + Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *MoveLeaderResponse) Reset() { *m = MoveLeaderResponse{} } +func (m *MoveLeaderResponse) String() string { return proto.CompactTextString(m) } +func (*MoveLeaderResponse) ProtoMessage() {} +func (*MoveLeaderResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{42} } + +func (m *MoveLeaderResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + type AlarmRequest struct { // action is the kind of alarm request to issue. The action // may GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a @@ -1587,7 +2205,28 @@ type AlarmRequest struct { func (m *AlarmRequest) Reset() { *m = AlarmRequest{} } func (m *AlarmRequest) String() string { return proto.CompactTextString(m) } func (*AlarmRequest) ProtoMessage() {} -func (*AlarmRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{41} } +func (*AlarmRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{43} } + +func (m *AlarmRequest) GetAction() AlarmRequest_AlarmAction { + if m != nil { + return m.Action + } + return AlarmRequest_GET +} + +func (m *AlarmRequest) GetMemberID() uint64 { + if m != nil { + return m.MemberID + } + return 0 +} + +func (m *AlarmRequest) GetAlarm() AlarmType { + if m != nil { + return m.Alarm + } + return AlarmType_NONE +} type AlarmMember struct { // memberID is the ID of the member associated with the raised alarm. 
@@ -1599,7 +2238,21 @@ type AlarmMember struct { func (m *AlarmMember) Reset() { *m = AlarmMember{} } func (m *AlarmMember) String() string { return proto.CompactTextString(m) } func (*AlarmMember) ProtoMessage() {} -func (*AlarmMember) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{42} } +func (*AlarmMember) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{44} } + +func (m *AlarmMember) GetMemberID() uint64 { + if m != nil { + return m.MemberID + } + return 0 +} + +func (m *AlarmMember) GetAlarm() AlarmType { + if m != nil { + return m.Alarm + } + return AlarmType_NONE +} type AlarmResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` @@ -1610,7 +2263,7 @@ type AlarmResponse struct { func (m *AlarmResponse) Reset() { *m = AlarmResponse{} } func (m *AlarmResponse) String() string { return proto.CompactTextString(m) } func (*AlarmResponse) ProtoMessage() {} -func (*AlarmResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{43} } +func (*AlarmResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{45} } func (m *AlarmResponse) GetHeader() *ResponseHeader { if m != nil { @@ -1632,7 +2285,7 @@ type StatusRequest struct { func (m *StatusRequest) Reset() { *m = StatusRequest{} } func (m *StatusRequest) String() string { return proto.CompactTextString(m) } func (*StatusRequest) ProtoMessage() {} -func (*StatusRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{44} } +func (*StatusRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{46} } type StatusResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` @@ -1651,7 +2304,7 @@ type StatusResponse struct { func (m *StatusResponse) Reset() { *m = StatusResponse{} } func (m *StatusResponse) String() string { return proto.CompactTextString(m) } func (*StatusResponse) ProtoMessage() {} -func (*StatusResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{45} } +func (*StatusResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{47} } func (m *StatusResponse) GetHeader() *ResponseHeader { if m != nil { @@ -1660,13 +2313,48 @@ func (m *StatusResponse) GetHeader() *ResponseHeader { return nil } +func (m *StatusResponse) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *StatusResponse) GetDbSize() int64 { + if m != nil { + return m.DbSize + } + return 0 +} + +func (m *StatusResponse) GetLeader() uint64 { + if m != nil { + return m.Leader + } + return 0 +} + +func (m *StatusResponse) GetRaftIndex() uint64 { + if m != nil { + return m.RaftIndex + } + return 0 +} + +func (m *StatusResponse) GetRaftTerm() uint64 { + if m != nil { + return m.RaftTerm + } + return 0 +} + type AuthEnableRequest struct { } func (m *AuthEnableRequest) Reset() { *m = AuthEnableRequest{} } func (m *AuthEnableRequest) String() string { return proto.CompactTextString(m) } func (*AuthEnableRequest) ProtoMessage() {} -func (*AuthEnableRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{46} } +func (*AuthEnableRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{48} } type AuthDisableRequest struct { } @@ -1674,7 +2362,7 @@ type AuthDisableRequest struct { func (m *AuthDisableRequest) Reset() { *m = AuthDisableRequest{} } func (m *AuthDisableRequest) String() string { return proto.CompactTextString(m) } func (*AuthDisableRequest) ProtoMessage() {} -func (*AuthDisableRequest) 
Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{47} } +func (*AuthDisableRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{49} } type AuthenticateRequest struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` @@ -1684,7 +2372,21 @@ type AuthenticateRequest struct { func (m *AuthenticateRequest) Reset() { *m = AuthenticateRequest{} } func (m *AuthenticateRequest) String() string { return proto.CompactTextString(m) } func (*AuthenticateRequest) ProtoMessage() {} -func (*AuthenticateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{48} } +func (*AuthenticateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{50} } + +func (m *AuthenticateRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *AuthenticateRequest) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} type AuthUserAddRequest struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` @@ -1694,7 +2396,21 @@ type AuthUserAddRequest struct { func (m *AuthUserAddRequest) Reset() { *m = AuthUserAddRequest{} } func (m *AuthUserAddRequest) String() string { return proto.CompactTextString(m) } func (*AuthUserAddRequest) ProtoMessage() {} -func (*AuthUserAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{49} } +func (*AuthUserAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{51} } + +func (m *AuthUserAddRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *AuthUserAddRequest) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} type AuthUserGetRequest struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` @@ -1703,7 +2419,14 @@ type AuthUserGetRequest struct { func (m *AuthUserGetRequest) Reset() { *m = AuthUserGetRequest{} } func (m *AuthUserGetRequest) String() string { return proto.CompactTextString(m) } func (*AuthUserGetRequest) ProtoMessage() {} -func (*AuthUserGetRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{50} } +func (*AuthUserGetRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{52} } + +func (m *AuthUserGetRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} type AuthUserDeleteRequest struct { // name is the name of the user to delete. @@ -1713,7 +2436,14 @@ type AuthUserDeleteRequest struct { func (m *AuthUserDeleteRequest) Reset() { *m = AuthUserDeleteRequest{} } func (m *AuthUserDeleteRequest) String() string { return proto.CompactTextString(m) } func (*AuthUserDeleteRequest) ProtoMessage() {} -func (*AuthUserDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{51} } +func (*AuthUserDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{53} } + +func (m *AuthUserDeleteRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} type AuthUserChangePasswordRequest struct { // name is the name of the user whose password is being changed. 
@@ -1726,7 +2456,21 @@ func (m *AuthUserChangePasswordRequest) Reset() { *m = AuthUserChangePas func (m *AuthUserChangePasswordRequest) String() string { return proto.CompactTextString(m) } func (*AuthUserChangePasswordRequest) ProtoMessage() {} func (*AuthUserChangePasswordRequest) Descriptor() ([]byte, []int) { - return fileDescriptorRpc, []int{52} + return fileDescriptorRpc, []int{54} +} + +func (m *AuthUserChangePasswordRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *AuthUserChangePasswordRequest) GetPassword() string { + if m != nil { + return m.Password + } + return "" } type AuthUserGrantRoleRequest struct { @@ -1739,7 +2483,21 @@ type AuthUserGrantRoleRequest struct { func (m *AuthUserGrantRoleRequest) Reset() { *m = AuthUserGrantRoleRequest{} } func (m *AuthUserGrantRoleRequest) String() string { return proto.CompactTextString(m) } func (*AuthUserGrantRoleRequest) ProtoMessage() {} -func (*AuthUserGrantRoleRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{53} } +func (*AuthUserGrantRoleRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{55} } + +func (m *AuthUserGrantRoleRequest) GetUser() string { + if m != nil { + return m.User + } + return "" +} + +func (m *AuthUserGrantRoleRequest) GetRole() string { + if m != nil { + return m.Role + } + return "" +} type AuthUserRevokeRoleRequest struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` @@ -1749,7 +2507,21 @@ type AuthUserRevokeRoleRequest struct { func (m *AuthUserRevokeRoleRequest) Reset() { *m = AuthUserRevokeRoleRequest{} } func (m *AuthUserRevokeRoleRequest) String() string { return proto.CompactTextString(m) } func (*AuthUserRevokeRoleRequest) ProtoMessage() {} -func (*AuthUserRevokeRoleRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{54} } +func (*AuthUserRevokeRoleRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{56} } + +func (m *AuthUserRevokeRoleRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *AuthUserRevokeRoleRequest) GetRole() string { + if m != nil { + return m.Role + } + return "" +} type AuthRoleAddRequest struct { // name is the name of the role to add to the authentication system. 
@@ -1759,7 +2531,14 @@ type AuthRoleAddRequest struct { func (m *AuthRoleAddRequest) Reset() { *m = AuthRoleAddRequest{} } func (m *AuthRoleAddRequest) String() string { return proto.CompactTextString(m) } func (*AuthRoleAddRequest) ProtoMessage() {} -func (*AuthRoleAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{55} } +func (*AuthRoleAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{57} } + +func (m *AuthRoleAddRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} type AuthRoleGetRequest struct { Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` @@ -1768,7 +2547,14 @@ type AuthRoleGetRequest struct { func (m *AuthRoleGetRequest) Reset() { *m = AuthRoleGetRequest{} } func (m *AuthRoleGetRequest) String() string { return proto.CompactTextString(m) } func (*AuthRoleGetRequest) ProtoMessage() {} -func (*AuthRoleGetRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{56} } +func (*AuthRoleGetRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{58} } + +func (m *AuthRoleGetRequest) GetRole() string { + if m != nil { + return m.Role + } + return "" +} type AuthUserListRequest struct { } @@ -1776,7 +2562,7 @@ type AuthUserListRequest struct { func (m *AuthUserListRequest) Reset() { *m = AuthUserListRequest{} } func (m *AuthUserListRequest) String() string { return proto.CompactTextString(m) } func (*AuthUserListRequest) ProtoMessage() {} -func (*AuthUserListRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{57} } +func (*AuthUserListRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{59} } type AuthRoleListRequest struct { } @@ -1784,7 +2570,7 @@ type AuthRoleListRequest struct { func (m *AuthRoleListRequest) Reset() { *m = AuthRoleListRequest{} } func (m *AuthRoleListRequest) String() string { return proto.CompactTextString(m) } func (*AuthRoleListRequest) ProtoMessage() {} -func (*AuthRoleListRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{58} } +func (*AuthRoleListRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{60} } type AuthRoleDeleteRequest struct { Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` @@ -1793,7 +2579,14 @@ type AuthRoleDeleteRequest struct { func (m *AuthRoleDeleteRequest) Reset() { *m = AuthRoleDeleteRequest{} } func (m *AuthRoleDeleteRequest) String() string { return proto.CompactTextString(m) } func (*AuthRoleDeleteRequest) ProtoMessage() {} -func (*AuthRoleDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{59} } +func (*AuthRoleDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{61} } + +func (m *AuthRoleDeleteRequest) GetRole() string { + if m != nil { + return m.Role + } + return "" +} type AuthRoleGrantPermissionRequest struct { // name is the name of the role which will be granted the permission. 
@@ -1806,7 +2599,14 @@ func (m *AuthRoleGrantPermissionRequest) Reset() { *m = AuthRoleGrantPer func (m *AuthRoleGrantPermissionRequest) String() string { return proto.CompactTextString(m) } func (*AuthRoleGrantPermissionRequest) ProtoMessage() {} func (*AuthRoleGrantPermissionRequest) Descriptor() ([]byte, []int) { - return fileDescriptorRpc, []int{60} + return fileDescriptorRpc, []int{62} +} + +func (m *AuthRoleGrantPermissionRequest) GetName() string { + if m != nil { + return m.Name + } + return "" } func (m *AuthRoleGrantPermissionRequest) GetPerm() *authpb.Permission { @@ -1826,7 +2626,28 @@ func (m *AuthRoleRevokePermissionRequest) Reset() { *m = AuthRoleRevokeP func (m *AuthRoleRevokePermissionRequest) String() string { return proto.CompactTextString(m) } func (*AuthRoleRevokePermissionRequest) ProtoMessage() {} func (*AuthRoleRevokePermissionRequest) Descriptor() ([]byte, []int) { - return fileDescriptorRpc, []int{61} + return fileDescriptorRpc, []int{63} +} + +func (m *AuthRoleRevokePermissionRequest) GetRole() string { + if m != nil { + return m.Role + } + return "" +} + +func (m *AuthRoleRevokePermissionRequest) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *AuthRoleRevokePermissionRequest) GetRangeEnd() string { + if m != nil { + return m.RangeEnd + } + return "" } type AuthEnableResponse struct { @@ -1836,7 +2657,7 @@ type AuthEnableResponse struct { func (m *AuthEnableResponse) Reset() { *m = AuthEnableResponse{} } func (m *AuthEnableResponse) String() string { return proto.CompactTextString(m) } func (*AuthEnableResponse) ProtoMessage() {} -func (*AuthEnableResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{62} } +func (*AuthEnableResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{64} } func (m *AuthEnableResponse) GetHeader() *ResponseHeader { if m != nil { @@ -1852,7 +2673,7 @@ type AuthDisableResponse struct { func (m *AuthDisableResponse) Reset() { *m = AuthDisableResponse{} } func (m *AuthDisableResponse) String() string { return proto.CompactTextString(m) } func (*AuthDisableResponse) ProtoMessage() {} -func (*AuthDisableResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{63} } +func (*AuthDisableResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{65} } func (m *AuthDisableResponse) GetHeader() *ResponseHeader { if m != nil { @@ -1870,7 +2691,7 @@ type AuthenticateResponse struct { func (m *AuthenticateResponse) Reset() { *m = AuthenticateResponse{} } func (m *AuthenticateResponse) String() string { return proto.CompactTextString(m) } func (*AuthenticateResponse) ProtoMessage() {} -func (*AuthenticateResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{64} } +func (*AuthenticateResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{66} } func (m *AuthenticateResponse) GetHeader() *ResponseHeader { if m != nil { @@ -1879,6 +2700,13 @@ func (m *AuthenticateResponse) GetHeader() *ResponseHeader { return nil } +func (m *AuthenticateResponse) GetToken() string { + if m != nil { + return m.Token + } + return "" +} + type AuthUserAddResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } @@ -1886,7 +2714,7 @@ type AuthUserAddResponse struct { func (m *AuthUserAddResponse) Reset() { *m = AuthUserAddResponse{} } func (m *AuthUserAddResponse) String() string { return proto.CompactTextString(m) } func (*AuthUserAddResponse) ProtoMessage() {} -func (*AuthUserAddResponse) 
Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{65} } +func (*AuthUserAddResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{67} } func (m *AuthUserAddResponse) GetHeader() *ResponseHeader { if m != nil { @@ -1903,7 +2731,7 @@ type AuthUserGetResponse struct { func (m *AuthUserGetResponse) Reset() { *m = AuthUserGetResponse{} } func (m *AuthUserGetResponse) String() string { return proto.CompactTextString(m) } func (*AuthUserGetResponse) ProtoMessage() {} -func (*AuthUserGetResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{66} } +func (*AuthUserGetResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{68} } func (m *AuthUserGetResponse) GetHeader() *ResponseHeader { if m != nil { @@ -1912,6 +2740,13 @@ func (m *AuthUserGetResponse) GetHeader() *ResponseHeader { return nil } +func (m *AuthUserGetResponse) GetRoles() []string { + if m != nil { + return m.Roles + } + return nil +} + type AuthUserDeleteResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } @@ -1919,7 +2754,7 @@ type AuthUserDeleteResponse struct { func (m *AuthUserDeleteResponse) Reset() { *m = AuthUserDeleteResponse{} } func (m *AuthUserDeleteResponse) String() string { return proto.CompactTextString(m) } func (*AuthUserDeleteResponse) ProtoMessage() {} -func (*AuthUserDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{67} } +func (*AuthUserDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{69} } func (m *AuthUserDeleteResponse) GetHeader() *ResponseHeader { if m != nil { @@ -1936,7 +2771,7 @@ func (m *AuthUserChangePasswordResponse) Reset() { *m = AuthUserChangePa func (m *AuthUserChangePasswordResponse) String() string { return proto.CompactTextString(m) } func (*AuthUserChangePasswordResponse) ProtoMessage() {} func (*AuthUserChangePasswordResponse) Descriptor() ([]byte, []int) { - return fileDescriptorRpc, []int{68} + return fileDescriptorRpc, []int{70} } func (m *AuthUserChangePasswordResponse) GetHeader() *ResponseHeader { @@ -1953,7 +2788,7 @@ type AuthUserGrantRoleResponse struct { func (m *AuthUserGrantRoleResponse) Reset() { *m = AuthUserGrantRoleResponse{} } func (m *AuthUserGrantRoleResponse) String() string { return proto.CompactTextString(m) } func (*AuthUserGrantRoleResponse) ProtoMessage() {} -func (*AuthUserGrantRoleResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{69} } +func (*AuthUserGrantRoleResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{71} } func (m *AuthUserGrantRoleResponse) GetHeader() *ResponseHeader { if m != nil { @@ -1969,7 +2804,7 @@ type AuthUserRevokeRoleResponse struct { func (m *AuthUserRevokeRoleResponse) Reset() { *m = AuthUserRevokeRoleResponse{} } func (m *AuthUserRevokeRoleResponse) String() string { return proto.CompactTextString(m) } func (*AuthUserRevokeRoleResponse) ProtoMessage() {} -func (*AuthUserRevokeRoleResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{70} } +func (*AuthUserRevokeRoleResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{72} } func (m *AuthUserRevokeRoleResponse) GetHeader() *ResponseHeader { if m != nil { @@ -1985,7 +2820,7 @@ type AuthRoleAddResponse struct { func (m *AuthRoleAddResponse) Reset() { *m = AuthRoleAddResponse{} } func (m *AuthRoleAddResponse) String() string { return proto.CompactTextString(m) } func (*AuthRoleAddResponse) ProtoMessage() {} -func (*AuthRoleAddResponse) 
Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{71} } +func (*AuthRoleAddResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{73} } func (m *AuthRoleAddResponse) GetHeader() *ResponseHeader { if m != nil { @@ -2002,7 +2837,7 @@ type AuthRoleGetResponse struct { func (m *AuthRoleGetResponse) Reset() { *m = AuthRoleGetResponse{} } func (m *AuthRoleGetResponse) String() string { return proto.CompactTextString(m) } func (*AuthRoleGetResponse) ProtoMessage() {} -func (*AuthRoleGetResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{72} } +func (*AuthRoleGetResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{74} } func (m *AuthRoleGetResponse) GetHeader() *ResponseHeader { if m != nil { @@ -2026,7 +2861,7 @@ type AuthRoleListResponse struct { func (m *AuthRoleListResponse) Reset() { *m = AuthRoleListResponse{} } func (m *AuthRoleListResponse) String() string { return proto.CompactTextString(m) } func (*AuthRoleListResponse) ProtoMessage() {} -func (*AuthRoleListResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{73} } +func (*AuthRoleListResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{75} } func (m *AuthRoleListResponse) GetHeader() *ResponseHeader { if m != nil { @@ -2035,6 +2870,13 @@ func (m *AuthRoleListResponse) GetHeader() *ResponseHeader { return nil } +func (m *AuthRoleListResponse) GetRoles() []string { + if m != nil { + return m.Roles + } + return nil +} + type AuthUserListResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` Users []string `protobuf:"bytes,2,rep,name=users" json:"users,omitempty"` @@ -2043,7 +2885,7 @@ type AuthUserListResponse struct { func (m *AuthUserListResponse) Reset() { *m = AuthUserListResponse{} } func (m *AuthUserListResponse) String() string { return proto.CompactTextString(m) } func (*AuthUserListResponse) ProtoMessage() {} -func (*AuthUserListResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{74} } +func (*AuthUserListResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{76} } func (m *AuthUserListResponse) GetHeader() *ResponseHeader { if m != nil { @@ -2052,6 +2894,13 @@ func (m *AuthUserListResponse) GetHeader() *ResponseHeader { return nil } +func (m *AuthUserListResponse) GetUsers() []string { + if m != nil { + return m.Users + } + return nil +} + type AuthRoleDeleteResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } @@ -2059,7 +2908,7 @@ type AuthRoleDeleteResponse struct { func (m *AuthRoleDeleteResponse) Reset() { *m = AuthRoleDeleteResponse{} } func (m *AuthRoleDeleteResponse) String() string { return proto.CompactTextString(m) } func (*AuthRoleDeleteResponse) ProtoMessage() {} -func (*AuthRoleDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{75} } +func (*AuthRoleDeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{77} } func (m *AuthRoleDeleteResponse) GetHeader() *ResponseHeader { if m != nil { @@ -2076,7 +2925,7 @@ func (m *AuthRoleGrantPermissionResponse) Reset() { *m = AuthRoleGrantPe func (m *AuthRoleGrantPermissionResponse) String() string { return proto.CompactTextString(m) } func (*AuthRoleGrantPermissionResponse) ProtoMessage() {} func (*AuthRoleGrantPermissionResponse) Descriptor() ([]byte, []int) { - return fileDescriptorRpc, []int{76} + return fileDescriptorRpc, []int{78} } func (m *AuthRoleGrantPermissionResponse) 
GetHeader() *ResponseHeader { @@ -2094,7 +2943,7 @@ func (m *AuthRoleRevokePermissionResponse) Reset() { *m = AuthRoleRevoke func (m *AuthRoleRevokePermissionResponse) String() string { return proto.CompactTextString(m) } func (*AuthRoleRevokePermissionResponse) ProtoMessage() {} func (*AuthRoleRevokePermissionResponse) Descriptor() ([]byte, []int) { - return fileDescriptorRpc, []int{77} + return fileDescriptorRpc, []int{79} } func (m *AuthRoleRevokePermissionResponse) GetHeader() *ResponseHeader { @@ -2146,6 +2995,8 @@ func init() { proto.RegisterType((*MemberListResponse)(nil), "etcdserverpb.MemberListResponse") proto.RegisterType((*DefragmentRequest)(nil), "etcdserverpb.DefragmentRequest") proto.RegisterType((*DefragmentResponse)(nil), "etcdserverpb.DefragmentResponse") + proto.RegisterType((*MoveLeaderRequest)(nil), "etcdserverpb.MoveLeaderRequest") + proto.RegisterType((*MoveLeaderResponse)(nil), "etcdserverpb.MoveLeaderResponse") proto.RegisterType((*AlarmRequest)(nil), "etcdserverpb.AlarmRequest") proto.RegisterType((*AlarmMember)(nil), "etcdserverpb.AlarmMember") proto.RegisterType((*AlarmResponse)(nil), "etcdserverpb.AlarmResponse") @@ -2926,6 +3777,8 @@ type MaintenanceClient interface { Hash(ctx context.Context, in *HashRequest, opts ...grpc.CallOption) (*HashResponse, error) // Snapshot sends a snapshot of the entire backend from a member over a stream to a client. Snapshot(ctx context.Context, in *SnapshotRequest, opts ...grpc.CallOption) (Maintenance_SnapshotClient, error) + // MoveLeader requests current leader node to transfer its leadership to transferee. + MoveLeader(ctx context.Context, in *MoveLeaderRequest, opts ...grpc.CallOption) (*MoveLeaderResponse, error) } type maintenanceClient struct { @@ -3004,6 +3857,15 @@ func (x *maintenanceSnapshotClient) Recv() (*SnapshotResponse, error) { return m, nil } +func (c *maintenanceClient) MoveLeader(ctx context.Context, in *MoveLeaderRequest, opts ...grpc.CallOption) (*MoveLeaderResponse, error) { + out := new(MoveLeaderResponse) + err := grpc.Invoke(ctx, "/etcdserverpb.Maintenance/MoveLeader", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // Server API for Maintenance service type MaintenanceServer interface { @@ -3019,6 +3881,8 @@ type MaintenanceServer interface { Hash(context.Context, *HashRequest) (*HashResponse, error) // Snapshot sends a snapshot of the entire backend from a member over a stream to a client. Snapshot(*SnapshotRequest, Maintenance_SnapshotServer) error + // MoveLeader requests current leader node to transfer its leadership to transferee. 
+ MoveLeader(context.Context, *MoveLeaderRequest) (*MoveLeaderResponse, error) } func RegisterMaintenanceServer(s *grpc.Server, srv MaintenanceServer) { @@ -3118,6 +3982,24 @@ func (x *maintenanceSnapshotServer) Send(m *SnapshotResponse) error { return x.ServerStream.SendMsg(m) } +func _Maintenance_MoveLeader_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MoveLeaderRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MaintenanceServer).MoveLeader(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/etcdserverpb.Maintenance/MoveLeader", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MaintenanceServer).MoveLeader(ctx, req.(*MoveLeaderRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _Maintenance_serviceDesc = grpc.ServiceDesc{ ServiceName: "etcdserverpb.Maintenance", HandlerType: (*MaintenanceServer)(nil), @@ -3138,6 +4020,10 @@ var _Maintenance_serviceDesc = grpc.ServiceDesc{ MethodName: "Hash", Handler: _Maintenance_Hash_Handler, }, + { + MethodName: "MoveLeader", + Handler: _Maintenance_MoveLeader_Handler, + }, }, Streams: []grpc.StreamDesc{ { @@ -4188,6 +5074,20 @@ func (m *RequestOp_RequestDeleteRange) MarshalTo(dAtA []byte) (int, error) { } return i, nil } +func (m *RequestOp_RequestTxn) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.RequestTxn != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.RequestTxn.Size())) + n9, err := m.RequestTxn.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + return i, nil +} func (m *ResponseOp) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -4204,11 +5104,11 @@ func (m *ResponseOp) MarshalTo(dAtA []byte) (int, error) { var l int _ = l if m.Response != nil { - nn9, err := m.Response.MarshalTo(dAtA[i:]) + nn10, err := m.Response.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += nn9 + i += nn10 } return i, nil } @@ -4219,11 +5119,11 @@ func (m *ResponseOp_ResponseRange) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.ResponseRange.Size())) - n10, err := m.ResponseRange.MarshalTo(dAtA[i:]) + n11, err := m.ResponseRange.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n10 + i += n11 } return i, nil } @@ -4233,11 +5133,11 @@ func (m *ResponseOp_ResponsePut) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintRpc(dAtA, i, uint64(m.ResponsePut.Size())) - n11, err := m.ResponsePut.MarshalTo(dAtA[i:]) + n12, err := m.ResponsePut.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n11 + i += n12 } return i, nil } @@ -4247,11 +5147,25 @@ func (m *ResponseOp_ResponseDeleteRange) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintRpc(dAtA, i, uint64(m.ResponseDeleteRange.Size())) - n12, err := m.ResponseDeleteRange.MarshalTo(dAtA[i:]) + n13, err := m.ResponseDeleteRange.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n12 + i += n13 + } + return i, nil +} +func (m *ResponseOp_ResponseTxn) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.ResponseTxn != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.ResponseTxn.Size())) + n14, err := m.ResponseTxn.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 } return i, nil } @@ -4287,11 +5201,17 @@ func (m *Compare) MarshalTo(dAtA 
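With the Maintenance service now exposing MoveLeader on both the client and server interfaces, leadership transfer can be requested through the generated stub. A sketch, assuming conn is a *grpc.ClientConn already dialed to the current leader and targetID is the intended new leader's member ID:

package example

import (
	"golang.org/x/net/context"

	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"google.golang.org/grpc"
)

// transferLeadership asks the connected member to hand leadership to targetID.
func transferLeadership(conn *grpc.ClientConn, targetID uint64) error {
	mc := pb.NewMaintenanceClient(conn)
	_, err := mc.MoveLeader(context.Background(), &pb.MoveLeaderRequest{TargetID: targetID})
	return err
}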
[]byte) (int, error) { i += copy(dAtA[i:], m.Key) } if m.TargetUnion != nil { - nn13, err := m.TargetUnion.MarshalTo(dAtA[i:]) + nn15, err := m.TargetUnion.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += nn13 + i += nn15 + } + if len(m.RangeEnd) > 0 { + dAtA[i] = 0x42 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd))) + i += copy(dAtA[i:], m.RangeEnd) } return i, nil } @@ -4400,11 +5320,11 @@ func (m *TxnResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n14, err := m.Header.MarshalTo(dAtA[i:]) + n16, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n14 + i += n16 } if m.Succeeded { dAtA[i] = 0x10 @@ -4483,11 +5403,11 @@ func (m *CompactionResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n15, err := m.Header.MarshalTo(dAtA[i:]) + n17, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n15 + i += n17 } return i, nil } @@ -4529,11 +5449,11 @@ func (m *HashResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n16, err := m.Header.MarshalTo(dAtA[i:]) + n18, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n16 + i += n18 } if m.Hash != 0 { dAtA[i] = 0x10 @@ -4580,11 +5500,11 @@ func (m *SnapshotResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n17, err := m.Header.MarshalTo(dAtA[i:]) + n19, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n17 + i += n19 } if m.RemainingBytes != 0 { dAtA[i] = 0x10 @@ -4616,11 +5536,11 @@ func (m *WatchRequest) MarshalTo(dAtA []byte) (int, error) { var l int _ = l if m.RequestUnion != nil { - nn18, err := m.RequestUnion.MarshalTo(dAtA[i:]) + nn20, err := m.RequestUnion.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += nn18 + i += nn20 } return i, nil } @@ -4631,11 +5551,11 @@ func (m *WatchRequest_CreateRequest) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.CreateRequest.Size())) - n19, err := m.CreateRequest.MarshalTo(dAtA[i:]) + n21, err := m.CreateRequest.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n19 + i += n21 } return i, nil } @@ -4645,11 +5565,11 @@ func (m *WatchRequest_CancelRequest) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintRpc(dAtA, i, uint64(m.CancelRequest.Size())) - n20, err := m.CancelRequest.MarshalTo(dAtA[i:]) + n22, err := m.CancelRequest.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n20 + i += n22 } return i, nil } @@ -4696,21 +5616,21 @@ func (m *WatchCreateRequest) MarshalTo(dAtA []byte) (int, error) { i++ } if len(m.Filters) > 0 { - dAtA22 := make([]byte, len(m.Filters)*10) - var j21 int + dAtA24 := make([]byte, len(m.Filters)*10) + var j23 int for _, num := range m.Filters { for num >= 1<<7 { - dAtA22[j21] = uint8(uint64(num)&0x7f | 0x80) + dAtA24[j23] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 - j21++ + j23++ } - dAtA22[j21] = uint8(num) - j21++ + dAtA24[j23] = uint8(num) + j23++ } dAtA[i] = 0x2a i++ - i = encodeVarintRpc(dAtA, i, uint64(j21)) - i += copy(dAtA[i:], dAtA22[:j21]) + i = encodeVarintRpc(dAtA, i, uint64(j23)) + i += copy(dAtA[i:], dAtA24[:j23]) } if m.PrevKv { dAtA[i] = 0x30 @@ -4767,11 +5687,11 @@ func (m *WatchResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, 
uint64(m.Header.Size())) - n23, err := m.Header.MarshalTo(dAtA[i:]) + n25, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n23 + i += n25 } if m.WatchId != 0 { dAtA[i] = 0x10 @@ -4803,6 +5723,12 @@ func (m *WatchResponse) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintRpc(dAtA, i, uint64(m.CompactRevision)) } + if len(m.CancelReason) > 0 { + dAtA[i] = 0x32 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.CancelReason))) + i += copy(dAtA[i:], m.CancelReason) + } if len(m.Events) > 0 { for _, msg := range m.Events { dAtA[i] = 0x5a @@ -4865,11 +5791,11 @@ func (m *LeaseGrantResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n24, err := m.Header.MarshalTo(dAtA[i:]) + n26, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n24 + i += n26 } if m.ID != 0 { dAtA[i] = 0x10 @@ -4932,11 +5858,11 @@ func (m *LeaseRevokeResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n25, err := m.Header.MarshalTo(dAtA[i:]) + n27, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n25 + i += n27 } return i, nil } @@ -4983,11 +5909,11 @@ func (m *LeaseKeepAliveResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n26, err := m.Header.MarshalTo(dAtA[i:]) + n28, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n26 + i += n28 } if m.ID != 0 { dAtA[i] = 0x10 @@ -5054,11 +5980,11 @@ func (m *LeaseTimeToLiveResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n27, err := m.Header.MarshalTo(dAtA[i:]) + n29, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n27 + i += n29 } if m.ID != 0 { dAtA[i] = 0x10 @@ -5197,21 +6123,21 @@ func (m *MemberAddResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n28, err := m.Header.MarshalTo(dAtA[i:]) + n30, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n28 + i += n30 } if m.Member != nil { dAtA[i] = 0x12 i++ i = encodeVarintRpc(dAtA, i, uint64(m.Member.Size())) - n29, err := m.Member.MarshalTo(dAtA[i:]) + n31, err := m.Member.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n29 + i += n31 } if len(m.Members) > 0 { for _, msg := range m.Members { @@ -5270,11 +6196,11 @@ func (m *MemberRemoveResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n30, err := m.Header.MarshalTo(dAtA[i:]) + n32, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n30 + i += n32 } if len(m.Members) > 0 { for _, msg := range m.Members { @@ -5348,11 +6274,11 @@ func (m *MemberUpdateResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n31, err := m.Header.MarshalTo(dAtA[i:]) + n33, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n31 + i += n33 } if len(m.Members) > 0 { for _, msg := range m.Members { @@ -5406,11 +6332,11 @@ func (m *MemberListResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n32, err := m.Header.MarshalTo(dAtA[i:]) + n34, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n32 + i += n34 } if len(m.Members) 
> 0 { for _, msg := range m.Members { @@ -5464,11 +6390,62 @@ func (m *DefragmentResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n33, err := m.Header.MarshalTo(dAtA[i:]) + n35, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n33 + i += n35 + } + return i, nil +} + +func (m *MoveLeaderRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MoveLeaderRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.TargetID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.TargetID)) + } + return i, nil +} + +func (m *MoveLeaderResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MoveLeaderResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) + n36, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n36 } return i, nil } @@ -5553,11 +6530,11 @@ func (m *AlarmResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n34, err := m.Header.MarshalTo(dAtA[i:]) + n37, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n34 + i += n37 } if len(m.Alarms) > 0 { for _, msg := range m.Alarms { @@ -5611,11 +6588,11 @@ func (m *StatusResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n35, err := m.Header.MarshalTo(dAtA[i:]) + n38, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n35 + i += n38 } if len(m.Version) > 0 { dAtA[i] = 0x12 @@ -6013,11 +6990,11 @@ func (m *AuthRoleGrantPermissionRequest) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintRpc(dAtA, i, uint64(m.Perm.Size())) - n36, err := m.Perm.MarshalTo(dAtA[i:]) + n39, err := m.Perm.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n36 + i += n39 } return i, nil } @@ -6077,11 +7054,11 @@ func (m *AuthEnableResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n37, err := m.Header.MarshalTo(dAtA[i:]) + n40, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n37 + i += n40 } return i, nil } @@ -6105,11 +7082,11 @@ func (m *AuthDisableResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n38, err := m.Header.MarshalTo(dAtA[i:]) + n41, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n38 + i += n41 } return i, nil } @@ -6133,11 +7110,11 @@ func (m *AuthenticateResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n39, err := m.Header.MarshalTo(dAtA[i:]) + n42, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n39 + i += n42 } if len(m.Token) > 0 { dAtA[i] = 0x12 @@ -6167,11 +7144,11 @@ func (m *AuthUserAddResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n40, err := m.Header.MarshalTo(dAtA[i:]) + 
n43, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n40 + i += n43 } return i, nil } @@ -6195,11 +7172,11 @@ func (m *AuthUserGetResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n41, err := m.Header.MarshalTo(dAtA[i:]) + n44, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n41 + i += n44 } if len(m.Roles) > 0 { for _, s := range m.Roles { @@ -6238,11 +7215,11 @@ func (m *AuthUserDeleteResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n42, err := m.Header.MarshalTo(dAtA[i:]) + n45, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n42 + i += n45 } return i, nil } @@ -6266,11 +7243,11 @@ func (m *AuthUserChangePasswordResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n43, err := m.Header.MarshalTo(dAtA[i:]) + n46, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n43 + i += n46 } return i, nil } @@ -6294,11 +7271,11 @@ func (m *AuthUserGrantRoleResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n44, err := m.Header.MarshalTo(dAtA[i:]) + n47, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n44 + i += n47 } return i, nil } @@ -6322,11 +7299,11 @@ func (m *AuthUserRevokeRoleResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n45, err := m.Header.MarshalTo(dAtA[i:]) + n48, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n45 + i += n48 } return i, nil } @@ -6350,11 +7327,11 @@ func (m *AuthRoleAddResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n46, err := m.Header.MarshalTo(dAtA[i:]) + n49, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n46 + i += n49 } return i, nil } @@ -6378,11 +7355,11 @@ func (m *AuthRoleGetResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n47, err := m.Header.MarshalTo(dAtA[i:]) + n50, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n47 + i += n50 } if len(m.Perm) > 0 { for _, msg := range m.Perm { @@ -6418,11 +7395,11 @@ func (m *AuthRoleListResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n48, err := m.Header.MarshalTo(dAtA[i:]) + n51, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n48 + i += n51 } if len(m.Roles) > 0 { for _, s := range m.Roles { @@ -6461,11 +7438,11 @@ func (m *AuthUserListResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n49, err := m.Header.MarshalTo(dAtA[i:]) + n52, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n49 + i += n52 } if len(m.Users) > 0 { for _, s := range m.Users { @@ -6504,11 +7481,11 @@ func (m *AuthRoleDeleteResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n50, err := m.Header.MarshalTo(dAtA[i:]) + n53, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n50 + i += n53 } return i, nil } @@ -6532,11 +7509,11 @@ func (m 
*AuthRoleGrantPermissionResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n51, err := m.Header.MarshalTo(dAtA[i:]) + n54, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n51 + i += n54 } return i, nil } @@ -6560,11 +7537,11 @@ func (m *AuthRoleRevokePermissionResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintRpc(dAtA, i, uint64(m.Header.Size())) - n52, err := m.Header.MarshalTo(dAtA[i:]) + n55, err := m.Header.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n52 + i += n55 } return i, nil } @@ -6795,6 +7772,15 @@ func (m *RequestOp_RequestDeleteRange) Size() (n int) { } return n } +func (m *RequestOp_RequestTxn) Size() (n int) { + var l int + _ = l + if m.RequestTxn != nil { + l = m.RequestTxn.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} func (m *ResponseOp) Size() (n int) { var l int _ = l @@ -6831,6 +7817,15 @@ func (m *ResponseOp_ResponseDeleteRange) Size() (n int) { } return n } +func (m *ResponseOp_ResponseTxn) Size() (n int) { + var l int + _ = l + if m.ResponseTxn != nil { + l = m.ResponseTxn.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} func (m *Compare) Size() (n int) { var l int _ = l @@ -6847,6 +7842,10 @@ func (m *Compare) Size() (n int) { if m.TargetUnion != nil { n += m.TargetUnion.Size() } + l = len(m.RangeEnd) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } return n } @@ -7069,6 +8068,10 @@ func (m *WatchResponse) Size() (n int) { if m.CompactRevision != 0 { n += 1 + sovRpc(uint64(m.CompactRevision)) } + l = len(m.CancelReason) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } if len(m.Events) > 0 { for _, e := range m.Events { l = e.Size() @@ -7342,6 +8345,25 @@ func (m *DefragmentResponse) Size() (n int) { return n } +func (m *MoveLeaderRequest) Size() (n int) { + var l int + _ = l + if m.TargetID != 0 { + n += 1 + sovRpc(uint64(m.TargetID)) + } + return n +} + +func (m *MoveLeaderResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} + func (m *AlarmRequest) Size() (n int) { var l int _ = l @@ -9094,6 +10116,38 @@ func (m *RequestOp) Unmarshal(dAtA []byte) error { } m.Request = &RequestOp_RequestDeleteRange{v} iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestTxn", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &TxnRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Request = &RequestOp_RequestTxn{v} + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -9146,7 +10200,39 @@ func (m *ResponseOp) Unmarshal(dAtA []byte) error { switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResponseRange", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResponseRange", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen 
|= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RangeResponse{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Response = &ResponseOp_ResponseRange{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponsePut", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9170,15 +10256,15 @@ func (m *ResponseOp) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &RangeResponse{} + v := &PutResponse{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Response = &ResponseOp_ResponseRange{v} + m.Response = &ResponseOp_ResponsePut{v} iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResponsePut", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResponseDeleteRange", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9202,15 +10288,15 @@ func (m *ResponseOp) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &PutResponse{} + v := &DeleteRangeResponse{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Response = &ResponseOp_ResponsePut{v} + m.Response = &ResponseOp_ResponseDeleteRange{v} iNdEx = postIndex - case 3: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResponseDeleteRange", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResponseTxn", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9234,11 +10320,11 @@ func (m *ResponseOp) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &DeleteRangeResponse{} + v := &TxnResponse{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Response = &ResponseOp_ResponseDeleteRange{v} + m.Response = &ResponseOp_ResponseTxn{v} iNdEx = postIndex default: iNdEx = preIndex @@ -9449,6 +10535,37 @@ func (m *Compare) Unmarshal(dAtA []byte) error { copy(v, dAtA[iNdEx:postIndex]) m.TargetUnion = &Compare_Value{v} iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...) 
+ if m.RangeEnd == nil { + m.RangeEnd = []byte{} + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -10499,7 +11616,24 @@ func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error { } m.ProgressNotify = bool(v != 0) case 5: - if wireType == 2 { + if wireType == 0 { + var v WatchCreateRequest_FilterType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (WatchCreateRequest_FilterType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Filters = append(m.Filters, v) + } else if wireType == 2 { var packedLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { @@ -10540,23 +11674,6 @@ func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error { } m.Filters = append(m.Filters, v) } - } else if wireType == 0 { - var v WatchCreateRequest_FilterType - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (WatchCreateRequest_FilterType(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Filters = append(m.Filters, v) } else { return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) } @@ -10810,6 +11927,35 @@ func (m *WatchResponse) Unmarshal(dAtA []byte) error { break } } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CancelReason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CancelReason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex case 11: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) @@ -12774,6 +13920,158 @@ func (m *DefragmentResponse) Unmarshal(dAtA []byte) error { } return nil } +func (m *MoveLeaderRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MoveLeaderRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MoveLeaderRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetID", wireType) + } + m.TargetID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TargetID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if 
iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MoveLeaderResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MoveLeaderResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MoveLeaderResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *AlarmRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -16288,220 +17586,227 @@ var ( func init() { proto.RegisterFile("rpc.proto", fileDescriptorRpc) } var fileDescriptorRpc = []byte{ - // 3436 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x3b, 0x5b, 0x6f, 0x1b, 0xc7, - 0xb9, 0x5a, 0x5e, 0xc5, 0x8f, 0x17, 0xd1, 0x23, 0xd9, 0xa6, 0x68, 0x5b, 0x96, 0xc7, 0x37, 0xd9, - 0x4e, 0xa4, 0x44, 0xc9, 0x39, 0x0f, 0x3e, 0x41, 0x70, 0x64, 0x89, 0xb1, 0x74, 0x24, 0x4b, 0xce, - 0x4a, 0x76, 0x72, 0x80, 0xa0, 0xc4, 0x8a, 0x1c, 0x53, 0x0b, 0x91, 0xbb, 0xcc, 0xee, 0x92, 0x96, - 0xd2, 0x14, 0x28, 0xd2, 0x04, 0x45, 0xfb, 0xd8, 0x3c, 0xf4, 0xf6, 0x58, 0x14, 0x45, 0x7e, 0x40, - 0xdf, 0xfa, 0x03, 0x8a, 0xbe, 0xb4, 0x40, 0xff, 0x40, 0x91, 0xf6, 0xa1, 0x0f, 0x7d, 0xef, 0x53, - 0xd1, 0x62, 0x6e, 0xbb, 0xb3, 0xcb, 0x5d, 0x4a, 0x29, 0x9b, 0xbc, 0x58, 0x3b, 0xdf, 0x7c, 0xf3, - 0xdd, 0x66, 0xbe, 0xcb, 0x7c, 0x43, 0x43, 0xc1, 0xe9, 0xb7, 0x96, 0xfb, 0x8e, 0xed, 0xd9, 0xa8, - 0x44, 0xbc, 0x56, 0xdb, 0x25, 0xce, 0x90, 0x38, 0xfd, 0xc3, 0xfa, 0x5c, 0xc7, 0xee, 0xd8, 0x6c, - 0x62, 0x85, 0x7e, 0x71, 0x9c, 0xfa, 0x3c, 0xc5, 0x59, 0xe9, 0x0d, 0x5b, 0x2d, 0xf6, 0x4f, 0xff, - 0x70, 0xe5, 0x78, 0x28, 0xa6, 0xae, 0xb0, 0x29, 0x63, 0xe0, 0x1d, 0xb1, 0x7f, 0xfa, 0x87, 0xec, - 0x8f, 0x98, 0xbc, 0xda, 0xb1, 0xed, 0x4e, 0x97, 0xac, 0x18, 0x7d, 0x73, 0xc5, 0xb0, 0x2c, 0xdb, - 0x33, 0x3c, 0xd3, 0xb6, 0x5c, 0x3e, 0x8b, 0x3f, 0xd3, 0xa0, 0xa2, 0x13, 0xb7, 0x6f, 0x5b, 0x2e, - 0xd9, 0x24, 0x46, 0x9b, 0x38, 0xe8, 0x1a, 0x40, 0xab, 0x3b, 0x70, 0x3d, 0xe2, 0x34, 0xcd, 0x76, - 0x4d, 0x5b, 0xd4, 0x96, 0x32, 0x7a, 0x41, 0x40, 0xb6, 0xda, 0xe8, 0x0a, 0x14, 0x7a, 0xa4, 0x77, - 0xc8, 0x67, 0x53, 0x6c, 0x76, 0x9a, 0x03, 0xb6, 0xda, 0xa8, 0x0e, 0xd3, 0x0e, 0x19, 
0x9a, 0xae, - 0x69, 0x5b, 0xb5, 0xf4, 0xa2, 0xb6, 0x94, 0xd6, 0xfd, 0x31, 0x5d, 0xe8, 0x18, 0x2f, 0xbc, 0xa6, - 0x47, 0x9c, 0x5e, 0x2d, 0xc3, 0x17, 0x52, 0xc0, 0x01, 0x71, 0x7a, 0xf8, 0xd3, 0x2c, 0x94, 0x74, - 0xc3, 0xea, 0x10, 0x9d, 0x7c, 0x38, 0x20, 0xae, 0x87, 0xaa, 0x90, 0x3e, 0x26, 0xa7, 0x8c, 0x7d, - 0x49, 0xa7, 0x9f, 0x7c, 0xbd, 0xd5, 0x21, 0x4d, 0x62, 0x71, 0xc6, 0x25, 0xba, 0xde, 0xea, 0x90, - 0x86, 0xd5, 0x46, 0x73, 0x90, 0xed, 0x9a, 0x3d, 0xd3, 0x13, 0x5c, 0xf9, 0x20, 0x24, 0x4e, 0x26, - 0x22, 0xce, 0x3a, 0x80, 0x6b, 0x3b, 0x5e, 0xd3, 0x76, 0xda, 0xc4, 0xa9, 0x65, 0x17, 0xb5, 0xa5, - 0xca, 0xea, 0xad, 0x65, 0x75, 0x23, 0x96, 0x55, 0x81, 0x96, 0xf7, 0x6d, 0xc7, 0xdb, 0xa3, 0xb8, - 0x7a, 0xc1, 0x95, 0x9f, 0xe8, 0x1d, 0x28, 0x32, 0x22, 0x9e, 0xe1, 0x74, 0x88, 0x57, 0xcb, 0x31, - 0x2a, 0xb7, 0xcf, 0xa0, 0x72, 0xc0, 0x90, 0x75, 0xc6, 0x9e, 0x7f, 0x23, 0x0c, 0x25, 0x97, 0x38, - 0xa6, 0xd1, 0x35, 0x3f, 0x32, 0x0e, 0xbb, 0xa4, 0x96, 0x5f, 0xd4, 0x96, 0xa6, 0xf5, 0x10, 0x8c, - 0xea, 0x7f, 0x4c, 0x4e, 0xdd, 0xa6, 0x6d, 0x75, 0x4f, 0x6b, 0xd3, 0x0c, 0x61, 0x9a, 0x02, 0xf6, - 0xac, 0xee, 0x29, 0xdb, 0x34, 0x7b, 0x60, 0x79, 0x7c, 0xb6, 0xc0, 0x66, 0x0b, 0x0c, 0xc2, 0xa6, - 0x97, 0xa0, 0xda, 0x33, 0xad, 0x66, 0xcf, 0x6e, 0x37, 0x7d, 0x83, 0x00, 0x33, 0x48, 0xa5, 0x67, - 0x5a, 0x4f, 0xec, 0xb6, 0x2e, 0xcd, 0x42, 0x31, 0x8d, 0x93, 0x30, 0x66, 0x51, 0x60, 0x1a, 0x27, - 0x2a, 0xe6, 0x32, 0xcc, 0x52, 0x9a, 0x2d, 0x87, 0x18, 0x1e, 0x09, 0x90, 0x4b, 0x0c, 0xf9, 0x42, - 0xcf, 0xb4, 0xd6, 0xd9, 0x4c, 0x08, 0xdf, 0x38, 0x19, 0xc1, 0x2f, 0x0b, 0x7c, 0xe3, 0x24, 0x8c, - 0x8f, 0x97, 0xa1, 0xe0, 0xdb, 0x1c, 0x4d, 0x43, 0x66, 0x77, 0x6f, 0xb7, 0x51, 0x9d, 0x42, 0x00, - 0xb9, 0xb5, 0xfd, 0xf5, 0xc6, 0xee, 0x46, 0x55, 0x43, 0x45, 0xc8, 0x6f, 0x34, 0xf8, 0x20, 0x85, - 0x1f, 0x01, 0x04, 0xd6, 0x45, 0x79, 0x48, 0x6f, 0x37, 0xfe, 0xbf, 0x3a, 0x45, 0x71, 0x9e, 0x37, - 0xf4, 0xfd, 0xad, 0xbd, 0xdd, 0xaa, 0x46, 0x17, 0xaf, 0xeb, 0x8d, 0xb5, 0x83, 0x46, 0x35, 0x45, - 0x31, 0x9e, 0xec, 0x6d, 0x54, 0xd3, 0xa8, 0x00, 0xd9, 0xe7, 0x6b, 0x3b, 0xcf, 0x1a, 0xd5, 0x0c, - 0xfe, 0x5c, 0x83, 0xb2, 0xd8, 0x2f, 0xee, 0x13, 0xe8, 0x4d, 0xc8, 0x1d, 0x31, 0xbf, 0x60, 0x47, - 0xb1, 0xb8, 0x7a, 0x35, 0xb2, 0xb9, 0x21, 0xdf, 0xd1, 0x05, 0x2e, 0xc2, 0x90, 0x3e, 0x1e, 0xba, - 0xb5, 0xd4, 0x62, 0x7a, 0xa9, 0xb8, 0x5a, 0x5d, 0xe6, 0x0e, 0xbb, 0xbc, 0x4d, 0x4e, 0x9f, 0x1b, - 0xdd, 0x01, 0xd1, 0xe9, 0x24, 0x42, 0x90, 0xe9, 0xd9, 0x0e, 0x61, 0x27, 0x76, 0x5a, 0x67, 0xdf, - 0xf4, 0x18, 0xb3, 0x4d, 0x13, 0xa7, 0x95, 0x0f, 0xf0, 0x17, 0x1a, 0xc0, 0xd3, 0x81, 0x97, 0xec, - 0x1a, 0x73, 0x90, 0x1d, 0x52, 0xc2, 0xc2, 0x2d, 0xf8, 0x80, 0xf9, 0x04, 0x31, 0x5c, 0xe2, 0xfb, - 0x04, 0x1d, 0xa0, 0xcb, 0x90, 0xef, 0x3b, 0x64, 0xd8, 0x3c, 0x1e, 0x32, 0x26, 0xd3, 0x7a, 0x8e, - 0x0e, 0xb7, 0x87, 0xe8, 0x06, 0x94, 0xcc, 0x8e, 0x65, 0x3b, 0xa4, 0xc9, 0x69, 0x65, 0xd9, 0x6c, - 0x91, 0xc3, 0x98, 0xdc, 0x0a, 0x0a, 0x27, 0x9c, 0x53, 0x51, 0x76, 0x28, 0x08, 0x5b, 0x50, 0x64, - 0xa2, 0x4e, 0x64, 0xbe, 0x7b, 0x81, 0x8c, 0x29, 0xb6, 0x6c, 0xd4, 0x84, 0x42, 0x6a, 0xfc, 0x01, - 0xa0, 0x0d, 0xd2, 0x25, 0x1e, 0x99, 0x24, 0x7a, 0x28, 0x36, 0x49, 0xab, 0x36, 0xc1, 0x3f, 0xd2, - 0x60, 0x36, 0x44, 0x7e, 0x22, 0xb5, 0x6a, 0x90, 0x6f, 0x33, 0x62, 0x5c, 0x82, 0xb4, 0x2e, 0x87, - 0xe8, 0x01, 0x4c, 0x0b, 0x01, 0xdc, 0x5a, 0x3a, 0xe1, 0xd0, 0xe4, 0xb9, 0x4c, 0x2e, 0xfe, 0x9b, - 0x06, 0x05, 0xa1, 0xe8, 0x5e, 0x1f, 0xad, 0x41, 0xd9, 0xe1, 0x83, 0x26, 0xd3, 0x47, 0x48, 0x54, - 0x4f, 0x0e, 0x42, 0x9b, 0x53, 0x7a, 0x49, 0x2c, 0x61, 0x60, 0xf4, 0x3f, 0x50, 0x94, 0x24, 0xfa, - 0x03, 0x4f, 
0x98, 0xbc, 0x16, 0x26, 0x10, 0x9c, 0xbf, 0xcd, 0x29, 0x1d, 0x04, 0xfa, 0xd3, 0x81, - 0x87, 0x0e, 0x60, 0x4e, 0x2e, 0xe6, 0xda, 0x08, 0x31, 0xd2, 0x8c, 0xca, 0x62, 0x98, 0xca, 0xe8, - 0x56, 0x6d, 0x4e, 0xe9, 0x48, 0xac, 0x57, 0x26, 0x1f, 0x15, 0x20, 0x2f, 0xa0, 0xf8, 0xef, 0x1a, - 0x80, 0x34, 0xe8, 0x5e, 0x1f, 0x6d, 0x40, 0xc5, 0x11, 0xa3, 0x90, 0xc2, 0x57, 0x62, 0x15, 0x16, - 0xfb, 0x30, 0xa5, 0x97, 0xe5, 0x22, 0xae, 0xf2, 0xdb, 0x50, 0xf2, 0xa9, 0x04, 0x3a, 0xcf, 0xc7, - 0xe8, 0xec, 0x53, 0x28, 0xca, 0x05, 0x54, 0xeb, 0xf7, 0xe0, 0xa2, 0xbf, 0x3e, 0x46, 0xed, 0x1b, - 0x63, 0xd4, 0xf6, 0x09, 0xce, 0x4a, 0x0a, 0xaa, 0xe2, 0x40, 0x53, 0x16, 0x07, 0xe3, 0x2f, 0xd2, - 0x90, 0x5f, 0xb7, 0x7b, 0x7d, 0xc3, 0xa1, 0x7b, 0x94, 0x73, 0x88, 0x3b, 0xe8, 0x7a, 0x4c, 0xdd, - 0xca, 0xea, 0xcd, 0x30, 0x07, 0x81, 0x26, 0xff, 0xea, 0x0c, 0x55, 0x17, 0x4b, 0xe8, 0x62, 0x91, - 0xa1, 0x52, 0xe7, 0x58, 0x2c, 0xf2, 0x93, 0x58, 0x22, 0x7d, 0x29, 0x1d, 0xf8, 0x52, 0x1d, 0xf2, - 0x43, 0xe2, 0x04, 0x59, 0x75, 0x73, 0x4a, 0x97, 0x00, 0x74, 0x0f, 0x66, 0xa2, 0x11, 0x3e, 0x2b, - 0x70, 0x2a, 0xad, 0x70, 0x42, 0xb8, 0x09, 0xa5, 0x50, 0x9a, 0xc9, 0x09, 0xbc, 0x62, 0x4f, 0xc9, - 0x32, 0x97, 0x64, 0x68, 0xa3, 0x29, 0xb1, 0xb4, 0x39, 0x25, 0x82, 0x1b, 0xfe, 0x5f, 0x28, 0x87, - 0x74, 0xa5, 0x51, 0xbc, 0xf1, 0xee, 0xb3, 0xb5, 0x1d, 0x1e, 0xf2, 0x1f, 0xb3, 0x28, 0xaf, 0x57, - 0x35, 0x9a, 0x39, 0x76, 0x1a, 0xfb, 0xfb, 0xd5, 0x14, 0x2a, 0x43, 0x61, 0x77, 0xef, 0xa0, 0xc9, - 0xb1, 0xd2, 0xf8, 0x2d, 0x9f, 0x82, 0x48, 0x19, 0x4a, 0xa6, 0x98, 0x52, 0x32, 0x85, 0x26, 0x33, - 0x45, 0x2a, 0xc8, 0x14, 0xe9, 0x47, 0x15, 0x28, 0x71, 0xfb, 0x34, 0x07, 0x16, 0xcd, 0x56, 0xbf, - 0xd0, 0x00, 0x0e, 0x4e, 0x2c, 0x19, 0x80, 0x56, 0x20, 0xdf, 0xe2, 0xc4, 0x6b, 0x1a, 0xf3, 0xe7, - 0x8b, 0xb1, 0x26, 0xd7, 0x25, 0x16, 0x7a, 0x1d, 0xf2, 0xee, 0xa0, 0xd5, 0x22, 0xae, 0xcc, 0x1a, - 0x97, 0xa3, 0x21, 0x45, 0x38, 0xbc, 0x2e, 0xf1, 0xe8, 0x92, 0x17, 0x86, 0xd9, 0x1d, 0xb0, 0x1c, - 0x32, 0x7e, 0x89, 0xc0, 0xc3, 0x3f, 0xd5, 0xa0, 0xc8, 0xa4, 0x9c, 0x28, 0x8e, 0x5d, 0x85, 0x02, - 0x93, 0x81, 0xb4, 0x45, 0x24, 0x9b, 0xd6, 0x03, 0x00, 0xfa, 0x6f, 0x28, 0xc8, 0x13, 0x2c, 0x83, - 0x59, 0x2d, 0x9e, 0xec, 0x5e, 0x5f, 0x0f, 0x50, 0xf1, 0x36, 0x5c, 0x60, 0x56, 0x69, 0xd1, 0xfa, - 0x54, 0xda, 0x51, 0xad, 0xe0, 0xb4, 0x48, 0x05, 0x57, 0x87, 0xe9, 0xfe, 0xd1, 0xa9, 0x6b, 0xb6, - 0x8c, 0xae, 0x90, 0xc2, 0x1f, 0xe3, 0xff, 0x03, 0xa4, 0x12, 0x9b, 0x44, 0x5d, 0x5c, 0x86, 0xe2, - 0xa6, 0xe1, 0x1e, 0x09, 0x91, 0xf0, 0xfb, 0x50, 0xe2, 0xc3, 0x89, 0x6c, 0x88, 0x20, 0x73, 0x64, - 0xb8, 0x47, 0x4c, 0xf0, 0xb2, 0xce, 0xbe, 0xf1, 0x05, 0x98, 0xd9, 0xb7, 0x8c, 0xbe, 0x7b, 0x64, - 0xcb, 0x58, 0x4b, 0xeb, 0xf3, 0x6a, 0x00, 0x9b, 0x88, 0xe3, 0x5d, 0x98, 0x71, 0x48, 0xcf, 0x30, - 0x2d, 0xd3, 0xea, 0x34, 0x0f, 0x4f, 0x3d, 0xe2, 0x8a, 0xf2, 0xbd, 0xe2, 0x83, 0x1f, 0x51, 0x28, - 0x15, 0xed, 0xb0, 0x6b, 0x1f, 0x0a, 0x8f, 0x67, 0xdf, 0xf8, 0xd7, 0x1a, 0x94, 0xde, 0x33, 0xbc, - 0x96, 0xb4, 0x02, 0xda, 0x82, 0x8a, 0xef, 0xe7, 0x0c, 0x22, 0x64, 0x89, 0x04, 0x7c, 0xb6, 0x46, - 0x16, 0x76, 0x32, 0xe0, 0x97, 0x5b, 0x2a, 0x80, 0x91, 0x32, 0xac, 0x16, 0xe9, 0xfa, 0xa4, 0x52, - 0xc9, 0xa4, 0x18, 0xa2, 0x4a, 0x4a, 0x05, 0x3c, 0x9a, 0x09, 0x92, 0x21, 0x77, 0xcb, 0x9f, 0xa5, - 0x00, 0x8d, 0xca, 0xf0, 0x55, 0xeb, 0x83, 0xdb, 0x50, 0x71, 0x3d, 0xc3, 0xf1, 0x9a, 0x91, 0xcb, - 0x4d, 0x99, 0x41, 0xfd, 0x58, 0x75, 0x17, 0x66, 0xfa, 0x8e, 0xdd, 0x71, 0x88, 0xeb, 0x36, 0x2d, - 0xdb, 0x33, 0x5f, 0x9c, 0x8a, 0x12, 0xab, 0x22, 0xc1, 0xbb, 0x0c, 0x8a, 0x1a, 0x90, 0x7f, 0x61, - 0x76, 0x3d, 0xe2, 0xb8, 0xb5, 0xec, 
0x62, 0x7a, 0xa9, 0xb2, 0xfa, 0xe0, 0x2c, 0xab, 0x2d, 0xbf, - 0xc3, 0xf0, 0x0f, 0x4e, 0xfb, 0x44, 0x97, 0x6b, 0xd5, 0xb2, 0x25, 0x17, 0x2a, 0x5b, 0x6e, 0x03, - 0x04, 0xf8, 0x34, 0x6a, 0xed, 0xee, 0x3d, 0x7d, 0x76, 0x50, 0x9d, 0x42, 0x25, 0x98, 0xde, 0xdd, - 0xdb, 0x68, 0xec, 0x34, 0x68, 0x5c, 0xc3, 0x2b, 0xd2, 0x36, 0xaa, 0x0d, 0xd1, 0x3c, 0x4c, 0xbf, - 0xa4, 0x50, 0x79, 0xfb, 0x4b, 0xeb, 0x79, 0x36, 0xde, 0x6a, 0xe3, 0xbf, 0x6a, 0x50, 0x16, 0xa7, - 0x60, 0xa2, 0xa3, 0xa8, 0xb2, 0x48, 0x85, 0x58, 0xd0, 0x1a, 0x89, 0x9f, 0x8e, 0xb6, 0x28, 0xc5, - 0xe4, 0x90, 0xba, 0x3b, 0xdf, 0x6c, 0xd2, 0x16, 0x66, 0xf5, 0xc7, 0xe8, 0x1e, 0x54, 0x5b, 0xdc, - 0xdd, 0x23, 0x69, 0x47, 0x9f, 0x11, 0x70, 0x7f, 0x93, 0x6e, 0x43, 0x8e, 0x0c, 0x89, 0xe5, 0xb9, - 0xb5, 0x22, 0x8b, 0x4d, 0x65, 0x59, 0x68, 0x35, 0x28, 0x54, 0x17, 0x93, 0xf8, 0xbf, 0xe0, 0x02, - 0x2b, 0x68, 0x1f, 0x3b, 0x86, 0xa5, 0x56, 0xde, 0x07, 0x07, 0x3b, 0xc2, 0x2a, 0xf4, 0x13, 0x55, - 0x20, 0xb5, 0xb5, 0x21, 0x74, 0x48, 0x6d, 0x6d, 0xe0, 0x4f, 0x34, 0x40, 0xea, 0xba, 0x89, 0xcc, - 0x14, 0x21, 0x2e, 0xd9, 0xa7, 0x03, 0xf6, 0x73, 0x90, 0x25, 0x8e, 0x63, 0x3b, 0xcc, 0x20, 0x05, - 0x9d, 0x0f, 0xf0, 0x2d, 0x21, 0x83, 0x4e, 0x86, 0xf6, 0xb1, 0x7f, 0xe6, 0x39, 0x35, 0xcd, 0x17, - 0x75, 0x1b, 0x66, 0x43, 0x58, 0x13, 0xc5, 0xc8, 0xbb, 0x70, 0x91, 0x11, 0xdb, 0x26, 0xa4, 0xbf, - 0xd6, 0x35, 0x87, 0x89, 0x5c, 0xfb, 0x70, 0x29, 0x8a, 0xf8, 0xf5, 0xda, 0x08, 0xbf, 0x25, 0x38, - 0x1e, 0x98, 0x3d, 0x72, 0x60, 0xef, 0x24, 0xcb, 0x46, 0x03, 0x1f, 0xbd, 0x50, 0x8b, 0x64, 0xc2, - 0xbe, 0xf1, 0x2f, 0x35, 0xb8, 0x3c, 0xb2, 0xfc, 0x6b, 0xde, 0xd5, 0x05, 0x80, 0x0e, 0x3d, 0x3e, - 0xa4, 0x4d, 0x27, 0xf8, 0x55, 0x50, 0x81, 0xf8, 0x72, 0xd2, 0xd8, 0x51, 0x12, 0x72, 0x1e, 0x41, - 0xee, 0x09, 0xeb, 0xc2, 0x28, 0x5a, 0x65, 0xa4, 0x56, 0x96, 0xd1, 0xe3, 0x77, 0xc3, 0x82, 0xce, - 0xbe, 0x59, 0xea, 0x24, 0xc4, 0x79, 0xa6, 0xef, 0xf0, 0x14, 0x5d, 0xd0, 0xfd, 0x31, 0xe5, 0xde, - 0xea, 0x9a, 0xc4, 0xf2, 0xd8, 0x6c, 0x86, 0xcd, 0x2a, 0x10, 0xbc, 0x0c, 0x55, 0xce, 0x69, 0xad, - 0xdd, 0x56, 0xd2, 0xb4, 0x4f, 0x4f, 0x0b, 0xd3, 0xc3, 0xbf, 0xd2, 0xe0, 0x82, 0xb2, 0x60, 0x22, - 0xdb, 0xbd, 0x02, 0x39, 0xde, 0x6b, 0x12, 0x29, 0x62, 0x2e, 0xbc, 0x8a, 0xb3, 0xd1, 0x05, 0x0e, - 0x5a, 0x86, 0x3c, 0xff, 0x92, 0x75, 0x48, 0x3c, 0xba, 0x44, 0xc2, 0xb7, 0x61, 0x56, 0x80, 0x48, - 0xcf, 0x8e, 0x3b, 0x26, 0xcc, 0xa0, 0xf8, 0x63, 0x98, 0x0b, 0xa3, 0x4d, 0xa4, 0x92, 0x22, 0x64, - 0xea, 0x3c, 0x42, 0xae, 0x49, 0x21, 0x9f, 0xf5, 0xdb, 0x4a, 0x46, 0x8b, 0xee, 0xba, 0xba, 0x23, - 0xa9, 0xc8, 0x8e, 0xf8, 0x0a, 0x48, 0x12, 0xdf, 0xa8, 0x02, 0xb3, 0xf2, 0x38, 0xec, 0x98, 0xae, - 0x5f, 0xe7, 0x7c, 0x04, 0x48, 0x05, 0x7e, 0xd3, 0x02, 0x6d, 0x90, 0x17, 0x8e, 0xd1, 0xe9, 0x11, - 0x3f, 0xd4, 0xd3, 0x02, 0x52, 0x05, 0x4e, 0x14, 0x1c, 0x7f, 0xaf, 0x41, 0x69, 0xad, 0x6b, 0x38, - 0x3d, 0xb9, 0x59, 0x6f, 0x43, 0x8e, 0x57, 0xa6, 0xe2, 0x32, 0x77, 0x27, 0x4c, 0x46, 0xc5, 0xe5, - 0x83, 0x35, 0x5e, 0xc7, 0x8a, 0x55, 0x74, 0x73, 0x45, 0xcb, 0x75, 0x23, 0xd2, 0x82, 0xdd, 0x40, - 0xaf, 0x42, 0xd6, 0xa0, 0x4b, 0x58, 0x40, 0xa9, 0x44, 0xef, 0x04, 0x8c, 0x1a, 0xab, 0x22, 0x38, - 0x16, 0x7e, 0x13, 0x8a, 0x0a, 0x07, 0x7a, 0xd5, 0x79, 0xdc, 0x10, 0x95, 0xc2, 0xda, 0xfa, 0xc1, - 0xd6, 0x73, 0x7e, 0x03, 0xaa, 0x00, 0x6c, 0x34, 0xfc, 0x71, 0x0a, 0xbf, 0x2f, 0x56, 0x89, 0x90, - 0xa3, 0xca, 0xa3, 0x25, 0xc9, 0x93, 0x3a, 0x97, 0x3c, 0x27, 0x50, 0x16, 0xea, 0x4f, 0x74, 0x06, - 0x5e, 0x87, 0x1c, 0xa3, 0x27, 0x8f, 0xc0, 0x7c, 0x0c, 0x5b, 0x19, 0x2d, 0x38, 0x22, 0x9e, 0x81, - 0xf2, 0xbe, 0x67, 0x78, 0x03, 0x57, 0x1e, 0x81, 0xdf, 0x69, 
0x50, 0x91, 0x90, 0x49, 0xfb, 0x3e, - 0xf2, 0xbe, 0xcc, 0x83, 0xb0, 0x7f, 0x5b, 0xbe, 0x04, 0xb9, 0xf6, 0xe1, 0xbe, 0xf9, 0x91, 0xec, - 0xd1, 0x89, 0x11, 0x85, 0x77, 0x39, 0x1f, 0xde, 0x28, 0x17, 0x23, 0x7a, 0xf3, 0x72, 0x8c, 0x17, - 0xde, 0x96, 0xd5, 0x26, 0x27, 0xac, 0xc0, 0xc9, 0xe8, 0x01, 0x80, 0x5d, 0x96, 0x44, 0x43, 0x9d, - 0x15, 0x84, 0x6a, 0x83, 0x7d, 0x16, 0x2e, 0xac, 0x0d, 0xbc, 0xa3, 0x86, 0x65, 0x1c, 0x76, 0x65, - 0xd0, 0xc0, 0x73, 0x80, 0x28, 0x70, 0xc3, 0x74, 0x55, 0x68, 0x03, 0x66, 0x29, 0x94, 0x58, 0x9e, - 0xd9, 0x52, 0x22, 0x8c, 0xcc, 0x23, 0x5a, 0x24, 0x8f, 0x18, 0xae, 0xfb, 0xd2, 0x76, 0xda, 0x42, - 0x35, 0x7f, 0x8c, 0x37, 0x38, 0xf1, 0x67, 0x6e, 0x28, 0x53, 0x7c, 0x55, 0x2a, 0x4b, 0x01, 0x95, - 0xc7, 0xc4, 0x1b, 0x43, 0x05, 0x3f, 0x80, 0x8b, 0x12, 0x53, 0x34, 0x54, 0xc6, 0x20, 0xef, 0xc1, - 0x35, 0x89, 0xbc, 0x7e, 0x44, 0xcb, 0xfc, 0xa7, 0x82, 0xe1, 0xbf, 0x2b, 0xe7, 0x23, 0xa8, 0xf9, - 0x72, 0xb2, 0xd2, 0xcf, 0xee, 0xaa, 0x02, 0x0c, 0x5c, 0x71, 0x66, 0x0a, 0x3a, 0xfb, 0xa6, 0x30, - 0xc7, 0xee, 0xfa, 0x59, 0x99, 0x7e, 0xe3, 0x75, 0x98, 0x97, 0x34, 0x44, 0x51, 0x16, 0x26, 0x32, - 0x22, 0x50, 0x1c, 0x11, 0x61, 0x30, 0xba, 0x74, 0xbc, 0xd9, 0x55, 0xcc, 0xb0, 0x69, 0x19, 0x4d, - 0x4d, 0xa1, 0x79, 0x91, 0x9f, 0x08, 0x2a, 0x98, 0x1a, 0xb4, 0x05, 0x98, 0x12, 0x50, 0xc1, 0x62, - 0x23, 0x28, 0x78, 0x64, 0x23, 0x46, 0x48, 0x7f, 0x00, 0x0b, 0xbe, 0x10, 0xd4, 0x6e, 0x4f, 0x89, - 0xd3, 0x33, 0x5d, 0x57, 0x69, 0x01, 0xc4, 0x29, 0x7e, 0x07, 0x32, 0x7d, 0x22, 0x62, 0x4a, 0x71, - 0x15, 0x2d, 0xf3, 0x67, 0xaf, 0x65, 0x65, 0x31, 0x9b, 0xc7, 0x6d, 0xb8, 0x2e, 0xa9, 0x73, 0x8b, - 0xc6, 0x92, 0x8f, 0x0a, 0x25, 0xaf, 0x87, 0xdc, 0xac, 0xa3, 0xd7, 0xc3, 0x34, 0xdf, 0x7b, 0x79, - 0x3d, 0xa4, 0xb9, 0x42, 0xf5, 0xad, 0x89, 0x72, 0xc5, 0x36, 0xb7, 0xa9, 0xef, 0x92, 0x13, 0x11, - 0x3b, 0x84, 0xb9, 0xb0, 0x27, 0x4f, 0x14, 0xc6, 0xe6, 0x20, 0xeb, 0xd9, 0xc7, 0x44, 0x06, 0x31, - 0x3e, 0x90, 0x02, 0xfb, 0x6e, 0x3e, 0x91, 0xc0, 0x46, 0x40, 0x8c, 0x1d, 0xc9, 0x49, 0xe5, 0xa5, - 0xbb, 0x29, 0xeb, 0x1f, 0x3e, 0xc0, 0xbb, 0x70, 0x29, 0x1a, 0x26, 0x26, 0x12, 0xf9, 0x39, 0x3f, - 0xc0, 0x71, 0x91, 0x64, 0x22, 0xba, 0xef, 0x06, 0xc1, 0x40, 0x09, 0x28, 0x13, 0x91, 0xd4, 0xa1, - 0x1e, 0x17, 0x5f, 0xfe, 0x13, 0xe7, 0xd5, 0x0f, 0x37, 0x13, 0x11, 0x73, 0x03, 0x62, 0x93, 0x6f, - 0x7f, 0x10, 0x23, 0xd2, 0x63, 0x63, 0x84, 0x70, 0x92, 0x20, 0x8a, 0x7d, 0x0d, 0x87, 0x4e, 0xf0, - 0x08, 0x02, 0xe8, 0xa4, 0x3c, 0x68, 0x0e, 0xf1, 0x79, 0xb0, 0x81, 0x3c, 0xd8, 0x6a, 0xd8, 0x9d, - 0x68, 0x33, 0xde, 0x0b, 0x62, 0xe7, 0x48, 0x64, 0x9e, 0x88, 0xf0, 0xfb, 0xb0, 0x98, 0x1c, 0x94, - 0x27, 0xa1, 0x7c, 0x1f, 0x43, 0xc1, 0x2f, 0x28, 0x95, 0x27, 0xe3, 0x22, 0xe4, 0x77, 0xf7, 0xf6, - 0x9f, 0xae, 0xad, 0x37, 0xaa, 0xda, 0xea, 0x3f, 0xd2, 0x90, 0xda, 0x7e, 0x8e, 0xbe, 0x05, 0x59, - 0xfe, 0x12, 0x34, 0xe6, 0xa1, 0xac, 0x3e, 0xee, 0x4d, 0x09, 0x5f, 0xfd, 0xe4, 0x8f, 0x7f, 0xf9, - 0x3c, 0x75, 0x09, 0x5f, 0x58, 0x19, 0xbe, 0x61, 0x74, 0xfb, 0x47, 0xc6, 0xca, 0xf1, 0x70, 0x85, - 0xe5, 0x84, 0x87, 0xda, 0x7d, 0xf4, 0x1c, 0xd2, 0x4f, 0x07, 0x1e, 0x4a, 0x7c, 0x45, 0xab, 0x27, - 0xbf, 0x35, 0xe1, 0x3a, 0xa3, 0x3c, 0x87, 0x67, 0x54, 0xca, 0xfd, 0x81, 0x47, 0xe9, 0x0e, 0xa1, - 0xa8, 0x3c, 0x17, 0xa1, 0x33, 0xdf, 0xd7, 0xea, 0x67, 0x3f, 0x45, 0x61, 0xcc, 0xf8, 0x5d, 0xc5, - 0x97, 0x55, 0x7e, 0xfc, 0x55, 0x4b, 0xd5, 0xe7, 0xe0, 0xc4, 0x8a, 0xea, 0x13, 0xbc, 0x78, 0x44, - 0xf5, 0x51, 0x5e, 0x19, 0xe2, 0xf5, 0xf1, 0x4e, 0x2c, 0x4a, 0xd7, 0x16, 0x4f, 0x5c, 0x2d, 0x0f, - 0x5d, 0x8f, 0x79, 0x22, 0x51, 0x1f, 0x03, 0xea, 0x8b, 0xc9, 0x08, 0x82, 0xd3, 0x0d, 
0xc6, 0xe9, - 0x0a, 0xbe, 0xa4, 0x72, 0x6a, 0xf9, 0x78, 0x0f, 0xb5, 0xfb, 0xab, 0x47, 0x90, 0x65, 0x2d, 0x4c, - 0xd4, 0x94, 0x1f, 0xf5, 0x98, 0xe6, 0x6b, 0xc2, 0x09, 0x08, 0x35, 0x3f, 0xf1, 0x3c, 0xe3, 0x36, - 0x8b, 0x2b, 0x3e, 0x37, 0xd6, 0xc5, 0x7c, 0xa8, 0xdd, 0x5f, 0xd2, 0x5e, 0xd3, 0x56, 0xbf, 0x97, - 0x81, 0x2c, 0x6b, 0x1d, 0xa1, 0x3e, 0x40, 0xd0, 0x14, 0x8c, 0xea, 0x39, 0xd2, 0x66, 0x8c, 0xea, - 0x39, 0xda, 0x4f, 0xc4, 0xd7, 0x19, 0xe7, 0x79, 0x3c, 0xe7, 0x73, 0x66, 0x0f, 0xf2, 0x2b, 0xac, - 0x49, 0x44, 0xcd, 0xfa, 0x12, 0x8a, 0x4a, 0x73, 0x0f, 0xc5, 0x51, 0x0c, 0x75, 0x07, 0xa3, 0xc7, - 0x24, 0xa6, 0x33, 0x88, 0x6f, 0x32, 0xa6, 0xd7, 0x70, 0x4d, 0x35, 0x2e, 0xe7, 0xeb, 0x30, 0x4c, - 0xca, 0xf8, 0x53, 0x0d, 0x2a, 0xe1, 0x06, 0x1f, 0xba, 0x19, 0x43, 0x3a, 0xda, 0x27, 0xac, 0xdf, - 0x1a, 0x8f, 0x94, 0x28, 0x02, 0xe7, 0x7f, 0x4c, 0x48, 0xdf, 0xa0, 0x98, 0xc2, 0xf6, 0xe8, 0xfb, - 0x1a, 0xcc, 0x44, 0xda, 0x76, 0x28, 0x8e, 0xc5, 0x48, 0x53, 0xb0, 0x7e, 0xfb, 0x0c, 0x2c, 0x21, - 0xc9, 0x5d, 0x26, 0xc9, 0x0d, 0x7c, 0x75, 0xd4, 0x18, 0x9e, 0xd9, 0x23, 0x9e, 0x2d, 0xa4, 0x59, - 0xfd, 0x67, 0x1a, 0xf2, 0xeb, 0xfc, 0xd7, 0x53, 0xc8, 0x83, 0x82, 0xdf, 0x09, 0x43, 0x0b, 0x71, - 0x5d, 0x89, 0xa0, 0x64, 0xaf, 0x5f, 0x4f, 0x9c, 0x17, 0x22, 0xdc, 0x61, 0x22, 0x2c, 0xe2, 0x2b, - 0xbe, 0x08, 0xe2, 0x57, 0x5a, 0x2b, 0xfc, 0xf2, 0xbd, 0x62, 0xb4, 0xdb, 0x74, 0x4b, 0xbe, 0xab, - 0x41, 0x49, 0x6d, 0x58, 0xa1, 0x1b, 0xb1, 0xfd, 0x10, 0xb5, 0xe7, 0x55, 0xc7, 0xe3, 0x50, 0x04, - 0xff, 0x7b, 0x8c, 0xff, 0x4d, 0xbc, 0x90, 0xc4, 0xdf, 0x61, 0xf8, 0x61, 0x11, 0x78, 0xcb, 0x29, - 0x5e, 0x84, 0x50, 0x47, 0x2b, 0x5e, 0x84, 0x70, 0xc7, 0xea, 0x6c, 0x11, 0x06, 0x0c, 0x9f, 0x8a, - 0x70, 0x02, 0x10, 0x74, 0x98, 0x50, 0xac, 0x71, 0x95, 0x4b, 0x4c, 0xd4, 0x07, 0x47, 0x9b, 0x53, - 0x31, 0x27, 0x20, 0xc2, 0xbb, 0x6b, 0xba, 0xd4, 0x17, 0x57, 0x7f, 0x93, 0x81, 0xe2, 0x13, 0xc3, - 0xb4, 0x3c, 0x62, 0x19, 0x56, 0x8b, 0xa0, 0x0e, 0x64, 0x59, 0x96, 0x8a, 0x06, 0x1e, 0xb5, 0xed, - 0x13, 0x0d, 0x3c, 0xa1, 0x9e, 0x08, 0xbe, 0xcd, 0x58, 0x5f, 0xc7, 0x75, 0x9f, 0x75, 0x2f, 0xa0, - 0xbf, 0xc2, 0xfa, 0x19, 0x54, 0xe5, 0x63, 0xc8, 0xf1, 0xfe, 0x05, 0x8a, 0x50, 0x0b, 0xf5, 0x39, - 0xea, 0x57, 0xe3, 0x27, 0x13, 0x4f, 0x99, 0xca, 0xcb, 0x65, 0xc8, 0x94, 0xd9, 0xb7, 0x01, 0x82, - 0x86, 0x59, 0xd4, 0xbe, 0x23, 0xfd, 0xb5, 0xfa, 0x62, 0x32, 0x82, 0x60, 0x7c, 0x9f, 0x31, 0xbe, - 0x85, 0xaf, 0xc7, 0x32, 0x6e, 0xfb, 0x0b, 0x28, 0xf3, 0x16, 0x64, 0x36, 0x0d, 0xf7, 0x08, 0x45, - 0x92, 0x90, 0xf2, 0x6c, 0x5b, 0xaf, 0xc7, 0x4d, 0x09, 0x56, 0xb7, 0x18, 0xab, 0x05, 0x3c, 0x1f, - 0xcb, 0xea, 0xc8, 0x70, 0x69, 0x4c, 0x47, 0x03, 0x98, 0x96, 0x4f, 0xb1, 0xe8, 0x5a, 0xc4, 0x66, - 0xe1, 0x67, 0xdb, 0xfa, 0x42, 0xd2, 0xb4, 0x60, 0xb8, 0xc4, 0x18, 0x62, 0x7c, 0x2d, 0xde, 0xa8, - 0x02, 0xfd, 0xa1, 0x76, 0xff, 0x35, 0x6d, 0xf5, 0x87, 0x55, 0xc8, 0xd0, 0x7a, 0x89, 0x66, 0x91, - 0xe0, 0x9a, 0x19, 0xb5, 0xf0, 0x48, 0x73, 0x27, 0x6a, 0xe1, 0xd1, 0x1b, 0x6a, 0x4c, 0x16, 0x61, - 0xbf, 0x21, 0x25, 0x0c, 0x8b, 0x6a, 0xec, 0x41, 0x51, 0xb9, 0x8c, 0xa2, 0x18, 0x8a, 0xe1, 0xd6, - 0x51, 0x34, 0x8b, 0xc4, 0xdc, 0x64, 0xf1, 0x22, 0x63, 0x5a, 0xc7, 0x17, 0xc3, 0x4c, 0xdb, 0x1c, - 0x8d, 0x72, 0xfd, 0x18, 0x4a, 0xea, 0xad, 0x15, 0xc5, 0x10, 0x8d, 0xf4, 0xa6, 0xa2, 0xb1, 0x22, - 0xee, 0xd2, 0x1b, 0xe3, 0x34, 0xfe, 0x2f, 0x66, 0x25, 0x2e, 0xe5, 0xfe, 0x21, 0xe4, 0xc5, 0x5d, - 0x36, 0x4e, 0xdf, 0x70, 0x37, 0x2b, 0x4e, 0xdf, 0xc8, 0x45, 0x38, 0xa6, 0x24, 0x61, 0x6c, 0x69, - 0xcd, 0x2e, 0x03, 0xb4, 0x60, 0xf9, 0x98, 0x78, 0x49, 0x2c, 0x83, 0xfe, 0x4c, 0x12, 0x4b, 0xe5, - 0xbe, 0x34, 
0x96, 0x65, 0x87, 0x78, 0xe2, 0x2c, 0xcb, 0xcb, 0x08, 0x4a, 0xa0, 0xa8, 0x46, 0x43, - 0x3c, 0x0e, 0x25, 0xb1, 0x8a, 0x0c, 0xb8, 0x8a, 0x50, 0x88, 0xbe, 0x03, 0x10, 0x5c, 0xbc, 0xa3, - 0x85, 0x41, 0x6c, 0xf7, 0x2e, 0x5a, 0x18, 0xc4, 0xdf, 0xdd, 0x63, 0x3c, 0x38, 0x60, 0xce, 0x2b, - 0x59, 0xca, 0xfe, 0xc7, 0x1a, 0xa0, 0xd1, 0x8b, 0x3a, 0x7a, 0x10, 0xcf, 0x22, 0xb6, 0x31, 0x58, - 0x7f, 0xe5, 0x7c, 0xc8, 0x89, 0xd1, 0x33, 0x90, 0xab, 0xc5, 0x96, 0xf4, 0x5f, 0x52, 0xc9, 0x3e, - 0xd3, 0xa0, 0x1c, 0xba, 0xea, 0xa3, 0x3b, 0x09, 0xfb, 0x1c, 0x69, 0x2e, 0xd6, 0xef, 0x9e, 0x89, - 0x97, 0x58, 0x3b, 0x29, 0xa7, 0x42, 0xd6, 0x8d, 0x3f, 0xd0, 0xa0, 0x12, 0xee, 0x0f, 0xa0, 0x04, - 0x06, 0x23, 0x1d, 0xca, 0xfa, 0xd2, 0xd9, 0x88, 0xe7, 0xd8, 0xad, 0xa0, 0x94, 0xfc, 0x10, 0xf2, - 0xa2, 0xad, 0x10, 0xe7, 0x16, 0xe1, 0x06, 0x67, 0x9c, 0x5b, 0x44, 0x7a, 0x12, 0x49, 0x6e, 0x41, - 0x6f, 0xe8, 0x8a, 0x27, 0x8a, 0xe6, 0x43, 0x12, 0xcb, 0xf1, 0x9e, 0x18, 0xe9, 0x5c, 0x8c, 0x65, - 0x19, 0x78, 0xa2, 0x6c, 0x3d, 0xa0, 0x04, 0x8a, 0x67, 0x78, 0x62, 0xb4, 0x73, 0x91, 0xe4, 0x89, - 0x8c, 0xab, 0xe2, 0x89, 0x41, 0xa7, 0x20, 0xce, 0x13, 0x47, 0xda, 0xb7, 0x71, 0x9e, 0x38, 0xda, - 0x6c, 0x48, 0xda, 0x5b, 0xc6, 0x3c, 0xe4, 0x89, 0xb3, 0x31, 0x9d, 0x05, 0xf4, 0x4a, 0x82, 0x4d, - 0x63, 0x5b, 0xc3, 0xf5, 0x57, 0xcf, 0x89, 0x3d, 0xde, 0x03, 0xf8, 0x6e, 0x48, 0x0f, 0xf8, 0xb9, - 0x06, 0x73, 0x71, 0xad, 0x09, 0x94, 0xc0, 0x2c, 0xa1, 0xaf, 0x5c, 0x5f, 0x3e, 0x2f, 0xfa, 0x39, - 0xec, 0xe6, 0xfb, 0xc4, 0xa3, 0xea, 0x6f, 0xbf, 0x5c, 0xd0, 0xfe, 0xf0, 0xe5, 0x82, 0xf6, 0xa7, - 0x2f, 0x17, 0xb4, 0x9f, 0xfc, 0x79, 0x61, 0xea, 0x30, 0xc7, 0xfe, 0x23, 0xc7, 0x1b, 0xff, 0x0a, - 0x00, 0x00, 0xff, 0xff, 0x71, 0x5f, 0x47, 0x71, 0x4f, 0x32, 0x00, 0x00, + // 3549 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x5b, 0x5f, 0x6f, 0x1b, 0xc7, + 0xb5, 0xd7, 0x92, 0x22, 0x29, 0x1e, 0xfe, 0x11, 0x35, 0x92, 0x6d, 0x6a, 0x6d, 0xcb, 0xf2, 0xf8, + 0x9f, 0x6c, 0xc7, 0x52, 0xa2, 0xe4, 0xde, 0x07, 0xdf, 0x20, 0xb8, 0xb2, 0xc4, 0x58, 0x8a, 0x64, + 0xc9, 0x59, 0xc9, 0x4e, 0x2e, 0x10, 0x5c, 0x62, 0x45, 0x8e, 0xa5, 0x85, 0xc8, 0x5d, 0x66, 0x77, + 0x49, 0x4b, 0x69, 0x0a, 0x14, 0x69, 0x82, 0xa2, 0x05, 0xfa, 0xd2, 0x3c, 0xf4, 0xdf, 0x63, 0x51, + 0x14, 0xf9, 0x00, 0x45, 0x3f, 0x40, 0x81, 0xa2, 0xe8, 0x4b, 0x0b, 0xf4, 0x0b, 0x14, 0x69, 0xbf, + 0x46, 0xd1, 0x62, 0xfe, 0xed, 0xce, 0x2e, 0x77, 0x25, 0x25, 0x6c, 0xf2, 0x62, 0xed, 0x9c, 0x39, + 0x73, 0x7e, 0x67, 0xce, 0xcc, 0x39, 0x67, 0xe6, 0x0c, 0x0d, 0x45, 0xb7, 0xd7, 0x5a, 0xec, 0xb9, + 0x8e, 0xef, 0xa0, 0x32, 0xf1, 0x5b, 0x6d, 0x8f, 0xb8, 0x03, 0xe2, 0xf6, 0xf6, 0xf5, 0x99, 0x03, + 0xe7, 0xc0, 0x61, 0x1d, 0x4b, 0xf4, 0x8b, 0xf3, 0xe8, 0xb3, 0x94, 0x67, 0xa9, 0x3b, 0x68, 0xb5, + 0xd8, 0x3f, 0xbd, 0xfd, 0xa5, 0xa3, 0x81, 0xe8, 0xba, 0xcc, 0xba, 0xcc, 0xbe, 0x7f, 0xc8, 0xfe, + 0xe9, 0xed, 0xb3, 0x3f, 0xa2, 0xf3, 0xca, 0x81, 0xe3, 0x1c, 0x74, 0xc8, 0x92, 0xd9, 0xb3, 0x96, + 0x4c, 0xdb, 0x76, 0x7c, 0xd3, 0xb7, 0x1c, 0xdb, 0xe3, 0xbd, 0xf8, 0x33, 0x0d, 0xaa, 0x06, 0xf1, + 0x7a, 0x8e, 0xed, 0x91, 0x75, 0x62, 0xb6, 0x89, 0x8b, 0xae, 0x02, 0xb4, 0x3a, 0x7d, 0xcf, 0x27, + 0x6e, 0xd3, 0x6a, 0xd7, 0xb5, 0x79, 0x6d, 0x61, 0xdc, 0x28, 0x0a, 0xca, 0x46, 0x1b, 0x5d, 0x86, + 0x62, 0x97, 0x74, 0xf7, 0x79, 0x6f, 0x86, 0xf5, 0x4e, 0x70, 0xc2, 0x46, 0x1b, 0xe9, 0x30, 0xe1, + 0x92, 0x81, 0xe5, 0x59, 0x8e, 0x5d, 0xcf, 0xce, 0x6b, 0x0b, 0x59, 0x23, 0x68, 0xd3, 0x81, 0xae, + 0xf9, 0xc2, 0x6f, 0xfa, 0xc4, 0xed, 0xd6, 0xc7, 0xf9, 0x40, 0x4a, 0xd8, 0x23, 0x6e, 0x17, 0x7f, + 0x9a, 0x83, 
0xb2, 0x61, 0xda, 0x07, 0xc4, 0x20, 0x1f, 0xf6, 0x89, 0xe7, 0xa3, 0x1a, 0x64, 0x8f, + 0xc8, 0x09, 0x83, 0x2f, 0x1b, 0xf4, 0x93, 0x8f, 0xb7, 0x0f, 0x48, 0x93, 0xd8, 0x1c, 0xb8, 0x4c, + 0xc7, 0xdb, 0x07, 0xa4, 0x61, 0xb7, 0xd1, 0x0c, 0xe4, 0x3a, 0x56, 0xd7, 0xf2, 0x05, 0x2a, 0x6f, + 0x44, 0xd4, 0x19, 0x8f, 0xa9, 0xb3, 0x0a, 0xe0, 0x39, 0xae, 0xdf, 0x74, 0xdc, 0x36, 0x71, 0xeb, + 0xb9, 0x79, 0x6d, 0xa1, 0xba, 0x7c, 0x73, 0x51, 0x5d, 0x88, 0x45, 0x55, 0xa1, 0xc5, 0x5d, 0xc7, + 0xf5, 0x77, 0x28, 0xaf, 0x51, 0xf4, 0xe4, 0x27, 0x7a, 0x1b, 0x4a, 0x4c, 0x88, 0x6f, 0xba, 0x07, + 0xc4, 0xaf, 0xe7, 0x99, 0x94, 0x5b, 0x67, 0x48, 0xd9, 0x63, 0xcc, 0x06, 0x83, 0xe7, 0xdf, 0x08, + 0x43, 0xd9, 0x23, 0xae, 0x65, 0x76, 0xac, 0x8f, 0xcc, 0xfd, 0x0e, 0xa9, 0x17, 0xe6, 0xb5, 0x85, + 0x09, 0x23, 0x42, 0xa3, 0xf3, 0x3f, 0x22, 0x27, 0x5e, 0xd3, 0xb1, 0x3b, 0x27, 0xf5, 0x09, 0xc6, + 0x30, 0x41, 0x09, 0x3b, 0x76, 0xe7, 0x84, 0x2d, 0x9a, 0xd3, 0xb7, 0x7d, 0xde, 0x5b, 0x64, 0xbd, + 0x45, 0x46, 0x61, 0xdd, 0x0b, 0x50, 0xeb, 0x5a, 0x76, 0xb3, 0xeb, 0xb4, 0x9b, 0x81, 0x41, 0x80, + 0x19, 0xa4, 0xda, 0xb5, 0xec, 0x27, 0x4e, 0xdb, 0x90, 0x66, 0xa1, 0x9c, 0xe6, 0x71, 0x94, 0xb3, + 0x24, 0x38, 0xcd, 0x63, 0x95, 0x73, 0x11, 0xa6, 0xa9, 0xcc, 0x96, 0x4b, 0x4c, 0x9f, 0x84, 0xcc, + 0x65, 0xc6, 0x3c, 0xd5, 0xb5, 0xec, 0x55, 0xd6, 0x13, 0xe1, 0x37, 0x8f, 0x87, 0xf8, 0x2b, 0x82, + 0xdf, 0x3c, 0x8e, 0xf2, 0xe3, 0x45, 0x28, 0x06, 0x36, 0x47, 0x13, 0x30, 0xbe, 0xbd, 0xb3, 0xdd, + 0xa8, 0x8d, 0x21, 0x80, 0xfc, 0xca, 0xee, 0x6a, 0x63, 0x7b, 0xad, 0xa6, 0xa1, 0x12, 0x14, 0xd6, + 0x1a, 0xbc, 0x91, 0xc1, 0x8f, 0x00, 0x42, 0xeb, 0xa2, 0x02, 0x64, 0x37, 0x1b, 0xff, 0x57, 0x1b, + 0xa3, 0x3c, 0xcf, 0x1b, 0xc6, 0xee, 0xc6, 0xce, 0x76, 0x4d, 0xa3, 0x83, 0x57, 0x8d, 0xc6, 0xca, + 0x5e, 0xa3, 0x96, 0xa1, 0x1c, 0x4f, 0x76, 0xd6, 0x6a, 0x59, 0x54, 0x84, 0xdc, 0xf3, 0x95, 0xad, + 0x67, 0x8d, 0xda, 0x38, 0xfe, 0x5c, 0x83, 0x8a, 0x58, 0x2f, 0xee, 0x13, 0xe8, 0x0d, 0xc8, 0x1f, + 0x32, 0xbf, 0x60, 0x5b, 0xb1, 0xb4, 0x7c, 0x25, 0xb6, 0xb8, 0x11, 0xdf, 0x31, 0x04, 0x2f, 0xc2, + 0x90, 0x3d, 0x1a, 0x78, 0xf5, 0xcc, 0x7c, 0x76, 0xa1, 0xb4, 0x5c, 0x5b, 0xe4, 0x0e, 0xbb, 0xb8, + 0x49, 0x4e, 0x9e, 0x9b, 0x9d, 0x3e, 0x31, 0x68, 0x27, 0x42, 0x30, 0xde, 0x75, 0x5c, 0xc2, 0x76, + 0xec, 0x84, 0xc1, 0xbe, 0xe9, 0x36, 0x66, 0x8b, 0x26, 0x76, 0x2b, 0x6f, 0xe0, 0x2f, 0x34, 0x80, + 0xa7, 0x7d, 0x3f, 0xdd, 0x35, 0x66, 0x20, 0x37, 0xa0, 0x82, 0x85, 0x5b, 0xf0, 0x06, 0xf3, 0x09, + 0x62, 0x7a, 0x24, 0xf0, 0x09, 0xda, 0x40, 0x97, 0xa0, 0xd0, 0x73, 0xc9, 0xa0, 0x79, 0x34, 0x60, + 0x20, 0x13, 0x46, 0x9e, 0x36, 0x37, 0x07, 0xe8, 0x3a, 0x94, 0xad, 0x03, 0xdb, 0x71, 0x49, 0x93, + 0xcb, 0xca, 0xb1, 0xde, 0x12, 0xa7, 0x31, 0xbd, 0x15, 0x16, 0x2e, 0x38, 0xaf, 0xb2, 0x6c, 0x51, + 0x12, 0xb6, 0xa1, 0xc4, 0x54, 0x1d, 0xc9, 0x7c, 0x77, 0x43, 0x1d, 0x33, 0x6c, 0xd8, 0xb0, 0x09, + 0x85, 0xd6, 0xf8, 0x03, 0x40, 0x6b, 0xa4, 0x43, 0x7c, 0x32, 0x4a, 0xf4, 0x50, 0x6c, 0x92, 0x55, + 0x6d, 0x82, 0x7f, 0xa2, 0xc1, 0x74, 0x44, 0xfc, 0x48, 0xd3, 0xaa, 0x43, 0xa1, 0xcd, 0x84, 0x71, + 0x0d, 0xb2, 0x86, 0x6c, 0xa2, 0xfb, 0x30, 0x21, 0x14, 0xf0, 0xea, 0xd9, 0x94, 0x4d, 0x53, 0xe0, + 0x3a, 0x79, 0xf8, 0x8b, 0x0c, 0x14, 0xc5, 0x44, 0x77, 0x7a, 0x68, 0x05, 0x2a, 0x2e, 0x6f, 0x34, + 0xd9, 0x7c, 0x84, 0x46, 0x7a, 0x7a, 0x10, 0x5a, 0x1f, 0x33, 0xca, 0x62, 0x08, 0x23, 0xa3, 0xff, + 0x81, 0x92, 0x14, 0xd1, 0xeb, 0xfb, 0xc2, 0xe4, 0xf5, 0xa8, 0x80, 0x70, 0xff, 0xad, 0x8f, 0x19, + 0x20, 0xd8, 0x9f, 0xf6, 0x7d, 0xb4, 0x07, 0x33, 0x72, 0x30, 0x9f, 0x8d, 0x50, 0x23, 0xcb, 0xa4, + 0xcc, 0x47, 0xa5, 0x0c, 0x2f, 0xd5, 
0xfa, 0x98, 0x81, 0xc4, 0x78, 0xa5, 0x53, 0x55, 0xc9, 0x3f, + 0xe6, 0xc1, 0x7b, 0x48, 0xa5, 0xbd, 0x63, 0x7b, 0x58, 0xa5, 0xbd, 0x63, 0xfb, 0x51, 0x11, 0x0a, + 0xa2, 0x85, 0x7f, 0x97, 0x01, 0x90, 0xab, 0xb1, 0xd3, 0x43, 0x6b, 0x50, 0x75, 0x45, 0x2b, 0x62, + 0xad, 0xcb, 0x89, 0xd6, 0x12, 0x8b, 0x38, 0x66, 0x54, 0xe4, 0x20, 0xae, 0xdc, 0x5b, 0x50, 0x0e, + 0xa4, 0x84, 0x06, 0x9b, 0x4d, 0x30, 0x58, 0x20, 0xa1, 0x24, 0x07, 0x50, 0x93, 0xbd, 0x07, 0x17, + 0x82, 0xf1, 0x09, 0x36, 0xbb, 0x7e, 0x8a, 0xcd, 0x02, 0x81, 0xd3, 0x52, 0x82, 0x6a, 0x35, 0x55, + 0xb1, 0xd0, 0x6c, 0xb3, 0x09, 0x66, 0x1b, 0x56, 0x8c, 0x1a, 0x0e, 0x68, 0xbe, 0xe4, 0x4d, 0xfc, + 0x87, 0x2c, 0x14, 0x56, 0x9d, 0x6e, 0xcf, 0x74, 0xe9, 0x6a, 0xe4, 0x5d, 0xe2, 0xf5, 0x3b, 0x3e, + 0x33, 0x57, 0x75, 0xf9, 0x46, 0x54, 0xa2, 0x60, 0x93, 0x7f, 0x0d, 0xc6, 0x6a, 0x88, 0x21, 0x74, + 0xb0, 0x48, 0x8f, 0x99, 0x73, 0x0c, 0x16, 0xc9, 0x51, 0x0c, 0x91, 0x8e, 0x9c, 0x0d, 0x1d, 0x59, + 0x87, 0xc2, 0x80, 0xb8, 0x61, 0x4a, 0x5f, 0x1f, 0x33, 0x24, 0x01, 0xdd, 0x85, 0xc9, 0x78, 0x7a, + 0xc9, 0x09, 0x9e, 0x6a, 0x2b, 0x9a, 0x8d, 0x6e, 0x40, 0x39, 0x92, 0xe3, 0xf2, 0x82, 0xaf, 0xd4, + 0x55, 0x52, 0xdc, 0x45, 0x19, 0x57, 0x69, 0x3e, 0x2e, 0xaf, 0x8f, 0xc9, 0xc8, 0x1a, 0x09, 0x26, + 0x13, 0xd1, 0x60, 0x82, 0xff, 0x17, 0x2a, 0x11, 0x43, 0xd0, 0xfc, 0xd2, 0x78, 0xf7, 0xd9, 0xca, + 0x16, 0x4f, 0x46, 0x8f, 0x59, 0xfe, 0x31, 0x6a, 0x1a, 0xcd, 0x69, 0x5b, 0x8d, 0xdd, 0xdd, 0x5a, + 0x06, 0x55, 0xa0, 0xb8, 0xbd, 0xb3, 0xd7, 0xe4, 0x5c, 0x59, 0xfc, 0x66, 0x20, 0x41, 0x24, 0x33, + 0x25, 0x87, 0x8d, 0x29, 0x39, 0x4c, 0x93, 0x39, 0x2c, 0x13, 0xe6, 0xb0, 0xec, 0xa3, 0x2a, 0x94, + 0xb9, 0xf1, 0x9a, 0x7d, 0x9b, 0xe6, 0xd1, 0x5f, 0x69, 0x00, 0xa1, 0xab, 0xa0, 0x25, 0x28, 0xb4, + 0xb8, 0xf0, 0xba, 0xc6, 0x22, 0xcd, 0x85, 0xc4, 0xf5, 0x30, 0x24, 0x17, 0x7a, 0x0d, 0x0a, 0x5e, + 0xbf, 0xd5, 0x22, 0x9e, 0xcc, 0x67, 0x97, 0xe2, 0xc1, 0x4e, 0x84, 0x22, 0x43, 0xf2, 0xd1, 0x21, + 0x2f, 0x4c, 0xab, 0xd3, 0x67, 0xd9, 0xed, 0xf4, 0x21, 0x82, 0x0f, 0xff, 0x5c, 0x83, 0x92, 0xb2, + 0x33, 0xbf, 0x66, 0x84, 0xbd, 0x02, 0x45, 0xa6, 0x03, 0x69, 0x8b, 0x18, 0x3b, 0x61, 0x84, 0x04, + 0xf4, 0xdf, 0x50, 0x94, 0xdb, 0x5b, 0x86, 0xd9, 0x7a, 0xb2, 0xd8, 0x9d, 0x9e, 0x11, 0xb2, 0xe2, + 0x4d, 0x98, 0x62, 0x56, 0x69, 0xd1, 0x93, 0xb3, 0xb4, 0xa3, 0x7a, 0xb6, 0xd4, 0x62, 0x67, 0x4b, + 0x1d, 0x26, 0x7a, 0x87, 0x27, 0x9e, 0xd5, 0x32, 0x3b, 0x42, 0x8b, 0xa0, 0x8d, 0xdf, 0x01, 0xa4, + 0x0a, 0x1b, 0x65, 0xba, 0xb8, 0x02, 0xa5, 0x75, 0xd3, 0x3b, 0x14, 0x2a, 0xe1, 0xf7, 0xa1, 0xcc, + 0x9b, 0x23, 0xd9, 0x10, 0xc1, 0xf8, 0xa1, 0xe9, 0x1d, 0x32, 0xc5, 0x2b, 0x06, 0xfb, 0xc6, 0x53, + 0x30, 0xb9, 0x6b, 0x9b, 0x3d, 0xef, 0xd0, 0x91, 0x59, 0x80, 0xde, 0x1c, 0x6a, 0x21, 0x6d, 0x24, + 0xc4, 0x3b, 0x30, 0xe9, 0x92, 0xae, 0x69, 0xd9, 0x96, 0x7d, 0xd0, 0xdc, 0x3f, 0xf1, 0x89, 0x27, + 0x2e, 0x16, 0xd5, 0x80, 0xfc, 0x88, 0x52, 0xa9, 0x6a, 0xfb, 0x1d, 0x67, 0x5f, 0x84, 0x03, 0xf6, + 0x8d, 0x7f, 0xab, 0x41, 0xf9, 0x3d, 0xd3, 0x6f, 0x49, 0x2b, 0xa0, 0x0d, 0xa8, 0x06, 0x41, 0x80, + 0x51, 0x84, 0x2e, 0xb1, 0x54, 0xc4, 0xc6, 0xc8, 0x23, 0xa7, 0xcc, 0x22, 0x95, 0x96, 0x4a, 0x60, + 0xa2, 0x4c, 0xbb, 0x45, 0x3a, 0x81, 0xa8, 0x4c, 0xba, 0x28, 0xc6, 0xa8, 0x8a, 0x52, 0x09, 0x8f, + 0x26, 0xc3, 0x34, 0xcd, 0xdd, 0xf2, 0x17, 0x19, 0x40, 0xc3, 0x3a, 0x7c, 0xd5, 0x93, 0xcb, 0x2d, + 0xa8, 0x7a, 0xbe, 0xe9, 0xfa, 0xcd, 0xd8, 0xb5, 0xab, 0xc2, 0xa8, 0x41, 0x20, 0xbb, 0x03, 0x93, + 0x3d, 0xd7, 0x39, 0x70, 0x89, 0xe7, 0x35, 0x6d, 0xc7, 0xb7, 0x5e, 0x9c, 0x88, 0xc3, 0x5f, 0x55, + 0x92, 0xb7, 0x19, 0x15, 0x35, 0xa0, 0xf0, 0xc2, 0xea, 0xf8, 
0xc4, 0xf5, 0xea, 0xb9, 0xf9, 0xec, + 0x42, 0x75, 0xf9, 0xfe, 0x59, 0x56, 0x5b, 0x7c, 0x9b, 0xf1, 0xef, 0x9d, 0xf4, 0x88, 0x21, 0xc7, + 0xaa, 0x07, 0xaa, 0x7c, 0xe4, 0x40, 0x75, 0x0b, 0x20, 0xe4, 0xa7, 0x51, 0x6b, 0x7b, 0xe7, 0xe9, + 0xb3, 0xbd, 0xda, 0x18, 0x2a, 0xc3, 0xc4, 0xf6, 0xce, 0x5a, 0x63, 0xab, 0x41, 0xe3, 0x1a, 0x5e, + 0x92, 0xb6, 0x51, 0x6d, 0x88, 0x66, 0x61, 0xe2, 0x25, 0xa5, 0xca, 0x7b, 0x69, 0xd6, 0x28, 0xb0, + 0xf6, 0x46, 0x1b, 0xff, 0x38, 0x03, 0x15, 0xb1, 0x0b, 0x46, 0xda, 0x8a, 0x2a, 0x44, 0x26, 0x02, + 0x41, 0x4f, 0x6f, 0x7c, 0x77, 0xb4, 0xc5, 0x21, 0x51, 0x36, 0xa9, 0xbb, 0xf3, 0xc5, 0x26, 0x6d, + 0x61, 0xd6, 0xa0, 0x8d, 0xee, 0x42, 0xad, 0xc5, 0xdd, 0x3d, 0x96, 0x93, 0x8c, 0x49, 0x41, 0x57, + 0x52, 0x52, 0x25, 0xd8, 0x6d, 0xa6, 0x27, 0x72, 0x52, 0xd1, 0x28, 0xcb, 0x8d, 0x44, 0x69, 0xe8, + 0x16, 0xe4, 0xc9, 0x80, 0xd8, 0xbe, 0x57, 0x2f, 0xb1, 0x00, 0x56, 0x91, 0xe7, 0xc4, 0x06, 0xa5, + 0x1a, 0xa2, 0x13, 0xff, 0x17, 0x4c, 0xb1, 0xf3, 0xf8, 0x63, 0xd7, 0xb4, 0xd5, 0x8b, 0xc3, 0xde, + 0xde, 0x96, 0x30, 0x1d, 0xfd, 0x44, 0x55, 0xc8, 0x6c, 0xac, 0x89, 0x89, 0x66, 0x36, 0xd6, 0xf0, + 0x27, 0x1a, 0x20, 0x75, 0xdc, 0x48, 0xb6, 0x8c, 0x09, 0x97, 0xf0, 0xd9, 0x10, 0x7e, 0x06, 0x72, + 0xc4, 0x75, 0x1d, 0x97, 0x59, 0xad, 0x68, 0xf0, 0x06, 0xbe, 0x29, 0x74, 0x30, 0xc8, 0xc0, 0x39, + 0x0a, 0x1c, 0x83, 0x4b, 0xd3, 0x02, 0x55, 0x37, 0x61, 0x3a, 0xc2, 0x35, 0x52, 0x20, 0xbd, 0x03, + 0x17, 0x98, 0xb0, 0x4d, 0x42, 0x7a, 0x2b, 0x1d, 0x6b, 0x90, 0x8a, 0xda, 0x83, 0x8b, 0x71, 0xc6, + 0x6f, 0xd6, 0x46, 0xf8, 0x4d, 0x81, 0xb8, 0x67, 0x75, 0xc9, 0x9e, 0xb3, 0x95, 0xae, 0x1b, 0x8d, + 0x8e, 0x47, 0xe4, 0xc4, 0x13, 0x19, 0x87, 0x7d, 0xe3, 0x5f, 0x6b, 0x70, 0x69, 0x68, 0xf8, 0x37, + 0xbc, 0xaa, 0x73, 0x00, 0x07, 0x74, 0xfb, 0x90, 0x36, 0xed, 0xe0, 0x37, 0x59, 0x85, 0x12, 0xe8, + 0x49, 0x03, 0x4c, 0x59, 0xe8, 0x79, 0x08, 0xf9, 0x27, 0xac, 0x88, 0xa4, 0xcc, 0x6a, 0x5c, 0xce, + 0xca, 0x36, 0xbb, 0xfc, 0x6a, 0x5b, 0x34, 0xd8, 0x37, 0xcb, 0xaf, 0x84, 0xb8, 0xcf, 0x8c, 0x2d, + 0x9e, 0xc7, 0x8b, 0x46, 0xd0, 0xa6, 0xe8, 0xad, 0x8e, 0x45, 0x6c, 0x9f, 0xf5, 0x8e, 0xb3, 0x5e, + 0x85, 0x82, 0x17, 0xa1, 0xc6, 0x91, 0x56, 0xda, 0x6d, 0x25, 0x97, 0x07, 0xf2, 0xb4, 0xa8, 0x3c, + 0xfc, 0x1b, 0x0d, 0xa6, 0x94, 0x01, 0x23, 0xd9, 0xee, 0x15, 0xc8, 0xf3, 0x52, 0x99, 0xc8, 0x23, + 0x33, 0xd1, 0x51, 0x1c, 0xc6, 0x10, 0x3c, 0x68, 0x11, 0x0a, 0xfc, 0x4b, 0x1e, 0x56, 0x92, 0xd9, + 0x25, 0x13, 0xbe, 0x05, 0xd3, 0x82, 0x44, 0xba, 0x4e, 0xd2, 0x36, 0x61, 0x06, 0xc5, 0x1f, 0xc3, + 0x4c, 0x94, 0x6d, 0xa4, 0x29, 0x29, 0x4a, 0x66, 0xce, 0xa3, 0xe4, 0x8a, 0x54, 0xf2, 0x59, 0xaf, + 0xad, 0xa4, 0xbd, 0xf8, 0xaa, 0xab, 0x2b, 0x92, 0x89, 0xad, 0x48, 0x30, 0x01, 0x29, 0xe2, 0x5b, + 0x9d, 0xc0, 0xb4, 0xdc, 0x0e, 0x5b, 0x96, 0x17, 0x1c, 0x86, 0x3e, 0x02, 0xa4, 0x12, 0xbf, 0x6d, + 0x85, 0xd6, 0xc8, 0x0b, 0xd7, 0x3c, 0xe8, 0x92, 0x20, 0xd4, 0xd3, 0x53, 0xa6, 0x4a, 0x1c, 0x29, + 0x38, 0x2e, 0xc1, 0xd4, 0x13, 0x67, 0x40, 0xb6, 0x38, 0x35, 0x74, 0x19, 0x7e, 0xcb, 0x08, 0x96, + 0x2d, 0x68, 0x53, 0x70, 0x75, 0xc0, 0x48, 0xe0, 0x7f, 0xd6, 0xa0, 0xbc, 0xd2, 0x31, 0xdd, 0xae, + 0x04, 0x7e, 0x0b, 0xf2, 0xfc, 0xec, 0x2c, 0xee, 0xa2, 0xb7, 0xa3, 0x62, 0x54, 0x5e, 0xde, 0x58, + 0xe1, 0x27, 0x6d, 0x31, 0x8a, 0x2a, 0x2e, 0xca, 0xd5, 0x6b, 0xb1, 0xf2, 0xf5, 0x1a, 0x7a, 0x00, + 0x39, 0x93, 0x0e, 0x61, 0xd1, 0xac, 0x1a, 0xbf, 0xb5, 0x30, 0x69, 0xec, 0x9c, 0xc3, 0xb9, 0xf0, + 0x1b, 0x50, 0x52, 0x10, 0xe8, 0x65, 0xec, 0x71, 0x43, 0x9c, 0x65, 0x56, 0x56, 0xf7, 0x36, 0x9e, + 0xf3, 0x3b, 0x5a, 0x15, 0x60, 0xad, 0x11, 0xb4, 0x33, 0xf8, 0x7d, 0x31, 0x4a, 0xc4, 
0x3b, 0x55, + 0x1f, 0x2d, 0x4d, 0x9f, 0xcc, 0xb9, 0xf4, 0x39, 0x86, 0x8a, 0x98, 0xfe, 0x48, 0x1b, 0xf0, 0x35, + 0xc8, 0x33, 0x79, 0x72, 0xff, 0xcd, 0x26, 0xc0, 0xca, 0x50, 0xc5, 0x19, 0xf1, 0x24, 0x54, 0x76, + 0x7d, 0xd3, 0xef, 0x7b, 0x72, 0xff, 0xfd, 0x49, 0x83, 0xaa, 0xa4, 0x8c, 0x5a, 0x33, 0x93, 0xd7, + 0x7d, 0x9e, 0x01, 0x82, 0xcb, 0xfe, 0x45, 0xc8, 0xb7, 0xf7, 0x77, 0xad, 0x8f, 0x64, 0x7d, 0x53, + 0xb4, 0x28, 0xbd, 0xc3, 0x71, 0xf8, 0x23, 0x83, 0x68, 0xd1, 0xbb, 0xa1, 0x6b, 0xbe, 0xf0, 0x37, + 0xec, 0x36, 0x39, 0x66, 0x47, 0xb0, 0x71, 0x23, 0x24, 0xb0, 0xeb, 0x9c, 0x78, 0x8c, 0x60, 0xe7, + 0x2e, 0xf5, 0x71, 0x62, 0x1a, 0xa6, 0x56, 0xfa, 0xfe, 0x61, 0xc3, 0x36, 0xf7, 0x3b, 0x32, 0x62, + 0xe1, 0x19, 0x40, 0x94, 0xb8, 0x66, 0x79, 0x2a, 0xb5, 0x01, 0xd3, 0x94, 0x4a, 0x6c, 0xdf, 0x6a, + 0x29, 0xe1, 0x4d, 0x26, 0x31, 0x2d, 0x96, 0xc4, 0x4c, 0xcf, 0x7b, 0xe9, 0xb8, 0x6d, 0x31, 0xb5, + 0xa0, 0x8d, 0xd7, 0xb8, 0xf0, 0x67, 0x5e, 0x24, 0x4d, 0x7d, 0x55, 0x29, 0x0b, 0xa1, 0x94, 0xc7, + 0xc4, 0x3f, 0x45, 0x0a, 0xbe, 0x0f, 0x17, 0x24, 0xa7, 0xa8, 0x27, 0x9d, 0xc2, 0xbc, 0x03, 0x57, + 0x25, 0xf3, 0xea, 0x21, 0xbd, 0x88, 0x3c, 0x15, 0x80, 0x5f, 0x57, 0xcf, 0x47, 0x50, 0x0f, 0xf4, + 0x64, 0xe7, 0x4e, 0xa7, 0xa3, 0x2a, 0xd0, 0xf7, 0xc4, 0x9e, 0x29, 0x1a, 0xec, 0x9b, 0xd2, 0x5c, + 0xa7, 0x13, 0x1c, 0x09, 0xe8, 0x37, 0x5e, 0x85, 0x59, 0x29, 0x43, 0x9c, 0x08, 0xa3, 0x42, 0x86, + 0x14, 0x4a, 0x12, 0x22, 0x0c, 0x46, 0x87, 0x9e, 0x6e, 0x76, 0x95, 0x33, 0x6a, 0x5a, 0x26, 0x53, + 0x53, 0x64, 0x5e, 0xe0, 0x3b, 0x82, 0x2a, 0xa6, 0x66, 0x0c, 0x41, 0xa6, 0x02, 0x54, 0xb2, 0x58, + 0x08, 0x4a, 0x1e, 0x5a, 0x88, 0x21, 0xd1, 0x1f, 0xc0, 0x5c, 0xa0, 0x04, 0xb5, 0xdb, 0x53, 0xe2, + 0x76, 0x2d, 0xcf, 0x53, 0x8a, 0x14, 0x49, 0x13, 0xbf, 0x0d, 0xe3, 0x3d, 0x22, 0x62, 0x4a, 0x69, + 0x19, 0x2d, 0xf2, 0x27, 0xc3, 0x45, 0x65, 0x30, 0xeb, 0xc7, 0x6d, 0xb8, 0x26, 0xa5, 0x73, 0x8b, + 0x26, 0x8a, 0x8f, 0x2b, 0x25, 0x2f, 0xb0, 0xdc, 0xac, 0xc3, 0x17, 0xd8, 0x2c, 0x5f, 0xfb, 0xa0, + 0x5a, 0xf6, 0x0e, 0x37, 0xa4, 0xf4, 0xad, 0x91, 0x72, 0xc5, 0x26, 0xb7, 0x69, 0xe0, 0x92, 0x23, + 0x09, 0xdb, 0x87, 0x99, 0xa8, 0x27, 0x8f, 0x14, 0xc6, 0x66, 0x20, 0xe7, 0x3b, 0x47, 0x44, 0x06, + 0x31, 0xde, 0x90, 0x0a, 0x07, 0x6e, 0x3e, 0x92, 0xc2, 0x66, 0x28, 0x8c, 0x6d, 0xc9, 0x51, 0xf5, + 0xa5, 0xab, 0x29, 0x0f, 0x5f, 0xbc, 0x81, 0xb7, 0xe1, 0x62, 0x3c, 0x4c, 0x8c, 0xa4, 0xf2, 0x73, + 0xbe, 0x81, 0x93, 0x22, 0xc9, 0x48, 0x72, 0xdf, 0x0d, 0x83, 0x81, 0x12, 0x50, 0x46, 0x12, 0x69, + 0x80, 0x9e, 0x14, 0x5f, 0xfe, 0x13, 0xfb, 0x35, 0x08, 0x37, 0x23, 0x09, 0xf3, 0x42, 0x61, 0xa3, + 0x2f, 0x7f, 0x18, 0x23, 0xb2, 0xa7, 0xc6, 0x08, 0xe1, 0x24, 0x61, 0x14, 0xfb, 0x06, 0x36, 0x9d, + 0xc0, 0x08, 0x03, 0xe8, 0xa8, 0x18, 0x34, 0x87, 0x04, 0x18, 0xac, 0x21, 0x37, 0xb6, 0x1a, 0x76, + 0x47, 0x5a, 0x8c, 0xf7, 0xc2, 0xd8, 0x39, 0x14, 0x99, 0x47, 0x12, 0xfc, 0x3e, 0xcc, 0xa7, 0x07, + 0xe5, 0x51, 0x24, 0xdf, 0xc3, 0x50, 0x0c, 0x0e, 0x94, 0xca, 0x73, 0x7b, 0x09, 0x0a, 0xdb, 0x3b, + 0xbb, 0x4f, 0x57, 0x56, 0x1b, 0x35, 0x6d, 0xf9, 0x9f, 0x59, 0xc8, 0x6c, 0x3e, 0x47, 0xff, 0x0f, + 0x39, 0xfe, 0xde, 0x74, 0xca, 0x23, 0xa3, 0x7e, 0xda, 0x93, 0x1a, 0xbe, 0xf2, 0xc9, 0x5f, 0xff, + 0xf1, 0x79, 0xe6, 0x22, 0x9e, 0x5a, 0x1a, 0xbc, 0x6e, 0x76, 0x7a, 0x87, 0xe6, 0xd2, 0xd1, 0x60, + 0x89, 0xe5, 0x84, 0x87, 0xda, 0x3d, 0xf4, 0x1c, 0xb2, 0x4f, 0xfb, 0x3e, 0x4a, 0x7d, 0x81, 0xd4, + 0xd3, 0x9f, 0xda, 0xb0, 0xce, 0x24, 0xcf, 0xe0, 0x49, 0x55, 0x72, 0xaf, 0xef, 0x53, 0xb9, 0x03, + 0x28, 0xa9, 0xaf, 0x65, 0x67, 0xbe, 0x4d, 0xea, 0x67, 0xbf, 0xc4, 0x61, 0xcc, 0xf0, 0xae, 0xe0, + 0x4b, 0x2a, 
0x1e, 0x7f, 0xd4, 0x53, 0xe7, 0xb3, 0x77, 0x6c, 0xa3, 0xd4, 0xe7, 0x4b, 0x3d, 0xfd, + 0x85, 0x2e, 0x79, 0x3e, 0xfe, 0xb1, 0x4d, 0xe5, 0x3a, 0xe2, 0x85, 0xae, 0xe5, 0xa3, 0x6b, 0x09, + 0x8f, 0x38, 0xea, 0x73, 0x85, 0x3e, 0x9f, 0xce, 0x20, 0x90, 0xae, 0x33, 0xa4, 0xcb, 0xf8, 0xa2, + 0x8a, 0xd4, 0x0a, 0xf8, 0x1e, 0x6a, 0xf7, 0x96, 0x0f, 0x21, 0xc7, 0x8a, 0xac, 0xa8, 0x29, 0x3f, + 0xf4, 0x84, 0xf2, 0x70, 0xca, 0x0e, 0x88, 0x94, 0x67, 0xf1, 0x2c, 0x43, 0x9b, 0xc6, 0xd5, 0x00, + 0x8d, 0xd5, 0x59, 0x1f, 0x6a, 0xf7, 0x16, 0xb4, 0x57, 0xb5, 0xe5, 0xef, 0x8f, 0x43, 0x8e, 0xd5, + 0xad, 0x50, 0x0f, 0x20, 0xac, 0x48, 0xc6, 0xe7, 0x39, 0x54, 0xe3, 0x8c, 0xcf, 0x73, 0xb8, 0x98, + 0x89, 0xaf, 0x31, 0xe4, 0x59, 0x3c, 0x13, 0x20, 0xb3, 0x1f, 0x33, 0x2c, 0xb1, 0x0a, 0x15, 0x35, + 0xeb, 0x4b, 0x28, 0x29, 0x95, 0x45, 0x94, 0x24, 0x31, 0x52, 0x9a, 0x8c, 0x6f, 0x93, 0x84, 0xb2, + 0x24, 0xbe, 0xc1, 0x40, 0xaf, 0xe2, 0xba, 0x6a, 0x5c, 0x8e, 0xeb, 0x32, 0x4e, 0x0a, 0xfc, 0xa9, + 0x06, 0xd5, 0x68, 0x75, 0x11, 0xdd, 0x48, 0x10, 0x1d, 0x2f, 0x52, 0xea, 0x37, 0x4f, 0x67, 0x4a, + 0x55, 0x81, 0xe3, 0x1f, 0x11, 0xd2, 0x33, 0x29, 0xa7, 0xb0, 0x3d, 0xfa, 0x81, 0x06, 0x93, 0xb1, + 0x9a, 0x21, 0x4a, 0x82, 0x18, 0xaa, 0x48, 0xea, 0xb7, 0xce, 0xe0, 0x12, 0x9a, 0xdc, 0x61, 0x9a, + 0x5c, 0xc7, 0x57, 0x86, 0x8d, 0xe1, 0x5b, 0x5d, 0xe2, 0x3b, 0x42, 0x9b, 0xe5, 0x7f, 0x65, 0xa1, + 0xb0, 0xca, 0x7f, 0x79, 0x86, 0x7c, 0x28, 0x06, 0x65, 0x38, 0x34, 0x97, 0x54, 0x12, 0x09, 0x8f, + 0xec, 0xfa, 0xb5, 0xd4, 0x7e, 0xa1, 0xc2, 0x6d, 0xa6, 0xc2, 0x3c, 0xbe, 0x1c, 0xa8, 0x20, 0x7e, + 0xe1, 0xb6, 0xc4, 0x2f, 0xdf, 0x4b, 0x66, 0xbb, 0x4d, 0x97, 0xe4, 0x7b, 0x1a, 0x94, 0xd5, 0x6a, + 0x19, 0xba, 0x9e, 0x58, 0x8c, 0x51, 0x0b, 0x6e, 0x3a, 0x3e, 0x8d, 0x45, 0xe0, 0xdf, 0x65, 0xf8, + 0x37, 0xf0, 0x5c, 0x1a, 0xbe, 0xcb, 0xf8, 0xa3, 0x2a, 0xf0, 0x7a, 0x57, 0xb2, 0x0a, 0x91, 0x72, + 0x5a, 0xb2, 0x0a, 0xd1, 0x72, 0xd9, 0xd9, 0x2a, 0xf4, 0x19, 0x3f, 0x55, 0xe1, 0x18, 0x20, 0x2c, + 0x6f, 0xa1, 0x44, 0xe3, 0x2a, 0x97, 0x98, 0xb8, 0x0f, 0x0e, 0x57, 0xc6, 0x12, 0x76, 0x40, 0x0c, + 0xbb, 0x63, 0x79, 0xd4, 0x17, 0x97, 0x7f, 0x9f, 0x83, 0xd2, 0x13, 0xd3, 0xb2, 0x7d, 0x62, 0x9b, + 0x76, 0x8b, 0xa0, 0x03, 0xc8, 0xb1, 0x2c, 0x15, 0x0f, 0x3c, 0x6a, 0xd9, 0x27, 0x1e, 0x78, 0x22, + 0x35, 0x11, 0x7c, 0x8b, 0x41, 0x5f, 0xc3, 0x7a, 0x00, 0xdd, 0x0d, 0xe5, 0x2f, 0xb1, 0x7a, 0x06, + 0x9d, 0xf2, 0x11, 0xe4, 0x79, 0xfd, 0x02, 0xc5, 0xa4, 0x45, 0xea, 0x1c, 0xfa, 0x95, 0xe4, 0xce, + 0xd4, 0x5d, 0xa6, 0x62, 0x79, 0x8c, 0x99, 0x82, 0x7d, 0x07, 0x20, 0xac, 0xd6, 0xc5, 0xed, 0x3b, + 0x54, 0xdc, 0xd3, 0xe7, 0xd3, 0x19, 0x04, 0xf0, 0x3d, 0x06, 0x7c, 0x13, 0x5f, 0x4b, 0x04, 0x6e, + 0x07, 0x03, 0x28, 0x78, 0x0b, 0xc6, 0xd7, 0x4d, 0xef, 0x10, 0xc5, 0x92, 0x90, 0xf2, 0xb0, 0xac, + 0xeb, 0x49, 0x5d, 0x02, 0xea, 0x26, 0x83, 0x9a, 0xc3, 0xb3, 0x89, 0x50, 0x87, 0xa6, 0x47, 0x63, + 0x3a, 0xea, 0xc3, 0x84, 0x7c, 0x2c, 0x46, 0x57, 0x63, 0x36, 0x8b, 0x3e, 0x2c, 0xeb, 0x73, 0x69, + 0xdd, 0x02, 0x70, 0x81, 0x01, 0x62, 0x7c, 0x35, 0xd9, 0xa8, 0x82, 0xfd, 0xa1, 0x76, 0xef, 0x55, + 0x8d, 0x46, 0x54, 0x08, 0x4b, 0x91, 0x43, 0x3b, 0x37, 0x5e, 0xd5, 0x1c, 0xda, 0xb9, 0x43, 0x55, + 0x4c, 0xfc, 0x3a, 0x43, 0x7f, 0x80, 0x17, 0x12, 0xd1, 0x7d, 0xd7, 0xb4, 0xbd, 0x17, 0xc4, 0x7d, + 0xc0, 0x6b, 0x4e, 0xde, 0xa1, 0xd5, 0xa3, 0xbb, 0xf8, 0x47, 0x35, 0x18, 0xa7, 0xc7, 0x36, 0x9a, + 0xcc, 0xc2, 0xdb, 0x6e, 0x5c, 0x9d, 0xa1, 0x1a, 0x53, 0x5c, 0x9d, 0xe1, 0x8b, 0x72, 0x42, 0x32, + 0x63, 0x3f, 0x03, 0x26, 0x8c, 0x8b, 0x1a, 0xde, 0x87, 0x92, 0x72, 0x27, 0x46, 0x09, 0x12, 0xa3, + 0x15, 0xac, 0x78, 0x32, 0x4b, 0xb8, 
0x50, 0xe3, 0x79, 0x06, 0xaa, 0xe3, 0x0b, 0x51, 0xd0, 0x36, + 0x67, 0xa3, 0xa8, 0x1f, 0x43, 0x59, 0xbd, 0x3c, 0xa3, 0x04, 0xa1, 0xb1, 0x12, 0x59, 0x3c, 0x64, + 0x25, 0xdd, 0xbd, 0x13, 0x7c, 0x37, 0xf8, 0xd1, 0xb3, 0xe4, 0xa5, 0xe8, 0x1f, 0x42, 0x41, 0x5c, + 0xa9, 0x93, 0xe6, 0x1b, 0x2d, 0xaa, 0x25, 0xcd, 0x37, 0x76, 0x1f, 0x4f, 0x38, 0x19, 0x31, 0x58, + 0x7a, 0x75, 0x90, 0x79, 0x42, 0x40, 0x3e, 0x26, 0x7e, 0x1a, 0x64, 0x58, 0x26, 0x4a, 0x83, 0x54, + 0xae, 0x6d, 0xa7, 0x42, 0x1e, 0x10, 0x5f, 0xb8, 0x94, 0xbc, 0x13, 0xa1, 0x14, 0x89, 0x6a, 0x50, + 0xc6, 0xa7, 0xb1, 0xa4, 0x1e, 0x66, 0x43, 0x54, 0x11, 0x91, 0xd1, 0x77, 0x01, 0xc2, 0xfb, 0x7f, + 0xfc, 0x7c, 0x92, 0x58, 0x44, 0x8c, 0x9f, 0x4f, 0x92, 0x4b, 0x08, 0x09, 0x81, 0x24, 0x04, 0xe7, + 0x07, 0x6a, 0x0a, 0xff, 0x53, 0x0d, 0xd0, 0x70, 0xbd, 0x00, 0xdd, 0x4f, 0x86, 0x48, 0xac, 0x4f, + 0xea, 0xaf, 0x9c, 0x8f, 0x39, 0x35, 0x88, 0x87, 0x7a, 0xb5, 0xd8, 0x90, 0xde, 0x4b, 0xaa, 0xd9, + 0x67, 0x1a, 0x54, 0x22, 0x15, 0x07, 0x74, 0x3b, 0x65, 0x9d, 0x63, 0x35, 0x4e, 0xfd, 0xce, 0x99, + 0x7c, 0xa9, 0x47, 0x38, 0x65, 0x57, 0xc8, 0xe3, 0xeb, 0x0f, 0x35, 0xa8, 0x46, 0xcb, 0x14, 0x28, + 0x05, 0x60, 0xa8, 0x50, 0xaa, 0x2f, 0x9c, 0xcd, 0x78, 0x8e, 0xd5, 0x0a, 0x4f, 0xb4, 0x1f, 0x42, + 0x41, 0x54, 0x37, 0x92, 0xdc, 0x22, 0x5a, 0x67, 0x4d, 0x72, 0x8b, 0x58, 0x69, 0x24, 0xcd, 0x2d, + 0x5c, 0xa7, 0x43, 0x14, 0x4f, 0x14, 0x35, 0x90, 0x34, 0xc8, 0xd3, 0x3d, 0x31, 0x56, 0x40, 0x39, + 0x15, 0x32, 0xf4, 0x44, 0x59, 0x01, 0x41, 0x29, 0x12, 0xcf, 0xf0, 0xc4, 0x78, 0x01, 0x25, 0xcd, + 0x13, 0x19, 0xaa, 0xe2, 0x89, 0x61, 0xc1, 0x22, 0xc9, 0x13, 0x87, 0xaa, 0xc8, 0x49, 0x9e, 0x38, + 0x5c, 0xf3, 0x48, 0x5b, 0x5b, 0x06, 0x1e, 0xf1, 0xc4, 0xe9, 0x84, 0x02, 0x07, 0x7a, 0x25, 0xc5, + 0xa6, 0x89, 0x15, 0x6a, 0xfd, 0xc1, 0x39, 0xb9, 0x4f, 0xf7, 0x00, 0xbe, 0x1a, 0xd2, 0x03, 0x7e, + 0xa9, 0xc1, 0x4c, 0x52, 0x85, 0x04, 0xa5, 0x80, 0xa5, 0x94, 0xb7, 0xf5, 0xc5, 0xf3, 0xb2, 0x9f, + 0xc3, 0x6e, 0x81, 0x4f, 0x3c, 0xaa, 0xfd, 0xf1, 0xcb, 0x39, 0xed, 0x2f, 0x5f, 0xce, 0x69, 0x7f, + 0xfb, 0x72, 0x4e, 0xfb, 0xd9, 0xdf, 0xe7, 0xc6, 0xf6, 0xf3, 0xec, 0xff, 0xe2, 0xbc, 0xfe, 0xef, + 0x00, 0x00, 0x00, 0xff, 0xff, 0x2b, 0x2a, 0x70, 0xa1, 0x12, 0x34, 0x00, 0x00, } diff --git a/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto b/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto index 26d8fa8ada..37916c03f1 100644 --- a/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto +++ b/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto @@ -191,6 +191,14 @@ service Maintenance { body: "*" }; } + + // MoveLeader requests current leader node to transfer its leadership to transferee. + rpc MoveLeader(MoveLeaderRequest) returns (MoveLeaderResponse) { + option (google.api.http) = { + post: "/v3alpha/maintenance/transfer-leadership" + body: "*" + }; + } } service Auth { @@ -380,7 +388,7 @@ message RangeRequest { // keys_only when set returns only the keys and not the values. bool keys_only = 8; - + // count_only when set returns only the count of the keys in the range. bool count_only = 9; @@ -469,6 +477,7 @@ message RequestOp { RangeRequest request_range = 1; PutRequest request_put = 2; DeleteRangeRequest request_delete_range = 3; + TxnRequest request_txn = 4; } } @@ -478,6 +487,7 @@ message ResponseOp { RangeResponse response_range = 1; PutResponse response_put = 2; DeleteRangeResponse response_delete_range = 3; + TxnResponse response_txn = 4; } } @@ -510,6 +520,10 @@ message Compare { // value is the value of the given key, in bytes. 
bytes value = 7; } + // range_end compares the given target to all keys in the range [key, range_end). + // See RangeRequest for more details on key ranges. + bytes range_end = 8; + // TODO: fill out with most of the rest of RangeRequest fields when needed. } // From google paxosdb paper: @@ -552,7 +566,7 @@ message TxnResponse { // CompactionRequest compacts the key-value store up to a given revision. All superseded keys // with a revision less than the compaction revision will be removed. message CompactionRequest { - // revision is the key-value store revision for the compaction operation. + // revision is the key-value store revision for the compaction operation. int64 revision = 1; // physical is set so the RPC will wait until the compaction is physically // applied to the local database such that compacted entries are totally @@ -648,12 +662,15 @@ message WatchResponse { // at a compacted index. // // This happens when creating a watcher at a compacted revision or the watcher cannot - // catch up with the progress of the key-value store. + // catch up with the progress of the key-value store. // // The client should treat the watcher as canceled and should not try to create any // watcher with the same start_revision again. int64 compact_revision = 5; + // cancel_reason indicates the reason for canceling the watcher. + string cancel_reason = 6; + repeated mvccpb.Event events = 11; } @@ -778,6 +795,15 @@ message DefragmentResponse { ResponseHeader header = 1; } +message MoveLeaderRequest { + // targetID is the node ID for the new leader. + uint64 targetID = 1; +} + +message MoveLeaderResponse { + ResponseHeader header = 1; +} + enum AlarmType { NONE = 0; // default, used to query if any alarm is active NOSPACE = 1; // space quota is exhausted diff --git a/github.com/coreos/etcd/etcdserver/membership/doc.go b/github.com/coreos/etcd/etcdserver/membership/doc.go new file mode 100644 index 0000000000..b07fb2d928 --- /dev/null +++ b/github.com/coreos/etcd/etcdserver/membership/doc.go @@ -0,0 +1,16 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package membership describes individual etcd members and clusters of members. 
+package membership diff --git a/github.com/coreos/etcd/etcdserver/metrics.go b/github.com/coreos/etcd/etcdserver/metrics.go index 2b549f738f..90bbd3632a 100644 --- a/github.com/coreos/etcd/etcdserver/metrics.go +++ b/github.com/coreos/etcd/etcdserver/metrics.go @@ -58,6 +58,12 @@ var ( Name: "proposals_failed_total", Help: "The total number of failed proposals seen.", }) + leaseExpired = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: "etcd_debugging", + Subsystem: "server", + Name: "lease_expired_total", + Help: "The total number of expired leases.", + }) ) func init() { @@ -67,6 +73,7 @@ func init() { prometheus.MustRegister(proposalsApplied) prometheus.MustRegister(proposalsPending) prometheus.MustRegister(proposalsFailed) + prometheus.MustRegister(leaseExpired) } func monitorFileDescriptor(done <-chan struct{}) { diff --git a/github.com/coreos/etcd/etcdserver/raft.go b/github.com/coreos/etcd/etcdserver/raft.go index a9825d0a54..8e9070149b 100644 --- a/github.com/coreos/etcd/etcdserver/raft.go +++ b/github.com/coreos/etcd/etcdserver/raft.go @@ -83,7 +83,8 @@ type RaftTimer interface { type apply struct { entries []raftpb.Entry snapshot raftpb.Snapshot - raftDone <-chan struct{} // rx {} after raft has persisted messages + // notifyc synchronizes etcd server applies with the raft node + notifyc chan struct{} } type raftNode struct { @@ -94,14 +95,7 @@ type raftNode struct { term uint64 lead uint64 - mu sync.Mutex - // last lead elected time - lt time.Time - - // to check if msg receiver is removed from cluster - isIDRemoved func(id uint64) bool - - raft.Node + raftNodeConfig // a chan to send/receive snapshot msgSnapC chan raftpb.Message @@ -115,26 +109,49 @@ type raftNode struct { // utility ticker *time.Ticker // contention detectors for raft heartbeat message - td *contention.TimeoutDetector - heartbeat time.Duration // for logging + td *contention.TimeoutDetector + + stopped chan struct{} + done chan struct{} +} + +type raftNodeConfig struct { + // to check if msg receiver is removed from cluster + isIDRemoved func(id uint64) bool + raft.Node raftStorage *raft.MemoryStorage storage Storage + heartbeat time.Duration // for logging // transport specifies the transport to send and receive msgs to members. // Sending messages MUST NOT block. It is okay to drop messages, since // clients should timeout and reissue their messages. // If transport is nil, server will panic. transport rafthttp.Transporter +} - stopped chan struct{} - done chan struct{} +func newRaftNode(cfg raftNodeConfig) *raftNode { + r := &raftNode{ + raftNodeConfig: cfg, + // set up contention detectors for raft heartbeat message. + // expect to send a heartbeat within 2 heartbeat intervals. + td: contention.NewTimeoutDetector(2 * cfg.heartbeat), + readStateC: make(chan raft.ReadState, 1), + msgSnapC: make(chan raftpb.Message, maxInFlightMsgSnap), + applyc: make(chan apply), + stopped: make(chan struct{}), + done: make(chan struct{}), + } + if r.heartbeat == 0 { + r.ticker = &time.Ticker{} + } else { + r.ticker = time.NewTicker(r.heartbeat) + } + return r } // start prepares and starts raftNode in a new goroutine. It is no longer safe // to modify the fields after it has been started. 
func (r *raftNode) start(rh *raftReadyHandler) { - r.applyc = make(chan apply) - r.stopped = make(chan struct{}) - r.done = make(chan struct{}) internalTimeout := time.Second go func() { @@ -147,10 +164,8 @@ func (r *raftNode) start(rh *raftReadyHandler) { r.Tick() case rd := <-r.Ready(): if rd.SoftState != nil { - if lead := atomic.LoadUint64(&r.lead); rd.SoftState.Lead != raft.None && lead != rd.SoftState.Lead { - r.mu.Lock() - r.lt = time.Now() - r.mu.Unlock() + newLeader := rd.SoftState.Lead != raft.None && atomic.LoadUint64(&r.lead) != rd.SoftState.Lead + if newLeader { leaderChanges.Inc() } @@ -162,7 +177,8 @@ func (r *raftNode) start(rh *raftReadyHandler) { atomic.StoreUint64(&r.lead, rd.SoftState.Lead) islead = rd.RaftState == raft.StateLeader - rh.updateLeadership() + rh.updateLeadership(newLeader) + r.td.Reset() } if len(rd.ReadStates) != 0 { @@ -175,11 +191,11 @@ func (r *raftNode) start(rh *raftReadyHandler) { } } - raftDone := make(chan struct{}, 1) + notifyc := make(chan struct{}, 1) ap := apply{ entries: rd.CommittedEntries, snapshot: rd.Snapshot, - raftDone: raftDone, + notifyc: notifyc, } updateCommittedIndex(&ap, rh) @@ -212,6 +228,9 @@ func (r *raftNode) start(rh *raftReadyHandler) { if err := r.storage.SaveSnap(rd.Snapshot); err != nil { plog.Fatalf("raft save snapshot error: %v", err) } + // etcdserver now claim the snapshot has been persisted onto the disk + notifyc <- struct{}{} + // gofail: var raftAfterSaveSnap struct{} r.raftStorage.ApplySnapshot(rd.Snapshot) plog.Infof("raft applied incoming snapshot at index %d", rd.Snapshot.Metadata.Index) @@ -225,7 +244,7 @@ func (r *raftNode) start(rh *raftReadyHandler) { msgs := r.processMessages(rd.Messages) // now unblocks 'applyAll' that waits on Raft log disk writes before triggering snapshots - raftDone <- struct{}{} + notifyc <- struct{}{} // Candidate or follower needs to wait for all pending configuration // changes to be applied before sending messages. 
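
The raftDone → notifyc change in the hunks above is the heart of this refactor: the raft goroutine hands an apply batch to the server together with a capacity-1 channel, finishes its WAL/snapshot disk writes, and only then signals, so the apply loop can safely trigger snapshots. Below is a minimal, self-contained sketch of that handshake; the channel shape (applyc, a buffered notifyc per batch) mirrors the diff, while the entry type and the fake disk write are illustrative stand-ins, not etcd code.

package main

import (
	"fmt"
	"time"
)

type apply struct {
	entries []string      // stand-in for []raftpb.Entry
	notifyc chan struct{} // raft goroutine signals "persisted" here
}

func main() {
	applyc := make(chan apply)

	// raft goroutine: hand committed entries to the applier, then signal on
	// notifyc once the (pretend) WAL/snapshot write has hit disk. Capacity 1
	// means the send never blocks the raft loop.
	go func() {
		notifyc := make(chan struct{}, 1)
		applyc <- apply{entries: []string{"put foo bar"}, notifyc: notifyc}
		time.Sleep(10 * time.Millisecond) // stand-in for the disk write
		notifyc <- struct{}{}
	}()

	// apply goroutine: apply entries, but wait for the persistence signal
	// before doing anything (like triggering a snapshot) that assumes the
	// raft log is already on disk.
	ap := <-applyc
	fmt.Println("applying", ap.entries)
	<-ap.notifyc
	fmt.Println("raft log persisted; safe to trigger snapshot")
}
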
@@ -242,14 +261,21 @@ func (r *raftNode) start(rh *raftReadyHandler) { } } if waitApply { - rh.waitForApply() + // blocks until 'applyAll' calls 'applyWait.Trigger' + // to be in sync with scheduled config-change job + // (assume notifyc has cap of 1) + select { + case notifyc <- struct{}{}: + case <-r.stopped: + return + } } // gofail: var raftBeforeFollowerSend struct{} r.transport.Send(msgs) } else { // leader already processed 'MsgSnap' and signaled - raftDone <- struct{}{} + notifyc <- struct{}{} } r.Advance() @@ -316,12 +342,6 @@ func (r *raftNode) apply() chan apply { return r.applyc } -func (r *raftNode) leadElectedTime() time.Time { - r.mu.Lock() - defer r.mu.Unlock() - return r.lt -} - func (r *raftNode) stop() { r.stopped <- struct{}{} <-r.done @@ -358,7 +378,7 @@ func advanceTicksForElection(n raft.Node, electionTicks int) { } } -func startNode(cfg *ServerConfig, cl *membership.RaftCluster, ids []types.ID) (id types.ID, n raft.Node, s *raft.MemoryStorage, w *wal.WAL) { +func startNode(cfg ServerConfig, cl *membership.RaftCluster, ids []types.ID) (id types.ID, n raft.Node, s *raft.MemoryStorage, w *wal.WAL) { var err error member := cl.MemberByName(cfg.Name) metadata := pbutil.MustMarshal( @@ -399,7 +419,7 @@ func startNode(cfg *ServerConfig, cl *membership.RaftCluster, ids []types.ID) (i return } -func restartNode(cfg *ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) { +func restartNode(cfg ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) { var walsnap walpb.Snapshot if snapshot != nil { walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term @@ -433,7 +453,7 @@ func restartNode(cfg *ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membe return id, cl, n, s, w } -func restartAsStandaloneNode(cfg *ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) { +func restartAsStandaloneNode(cfg ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) { var walsnap walpb.Snapshot if snapshot != nil { walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term diff --git a/github.com/coreos/etcd/etcdserver/raft_test.go b/github.com/coreos/etcd/etcdserver/raft_test.go index 945f63ce2c..757826cc9e 100644 --- a/github.com/coreos/etcd/etcdserver/raft_test.go +++ b/github.com/coreos/etcd/etcdserver/raft_test.go @@ -153,13 +153,13 @@ func TestCreateConfigChangeEnts(t *testing.T) { func TestStopRaftWhenWaitingForApplyDone(t *testing.T) { n := newNopReadyNode() - srv := &EtcdServer{r: raftNode{ + r := newRaftNode(raftNodeConfig{ Node: n, storage: mockstorage.NewStorageRecorder(""), raftStorage: raft.NewMemoryStorage(), transport: rafthttp.NewNopTransporter(), - ticker: &time.Ticker{}, - }} + }) + srv := &EtcdServer{r: *r} srv.r.start(nil) n.readyc <- raft.Ready{} select { @@ -180,31 +180,22 @@ func TestStopRaftWhenWaitingForApplyDone(t *testing.T) { func TestConfgChangeBlocksApply(t *testing.T) { n := newNopReadyNode() - waitApplyc := make(chan struct{}) - - srv := &EtcdServer{r: raftNode{ + r := newRaftNode(raftNodeConfig{ Node: n, storage: mockstorage.NewStorageRecorder(""), raftStorage: raft.NewMemoryStorage(), transport: rafthttp.NewNopTransporter(), - ticker: &time.Ticker{}, - }} - - rh := &raftReadyHandler{ - updateLeadership: func() {}, - waitForApply: func() { - <-waitApplyc - }, - } 
+ }) + srv := &EtcdServer{r: *r} - srv.r.start(rh) + srv.r.start(&raftReadyHandler{updateLeadership: func(bool) {}}) defer srv.r.Stop() n.readyc <- raft.Ready{ SoftState: &raft.SoftState{RaftState: raft.StateFollower}, CommittedEntries: []raftpb.Entry{{Type: raftpb.EntryConfChange}}, } - <-srv.r.applyc + ap := <-srv.r.applyc continueC := make(chan struct{}) go func() { @@ -220,7 +211,7 @@ func TestConfgChangeBlocksApply(t *testing.T) { } // finish apply, unblock raft routine - close(waitApplyc) + <-ap.notifyc select { case <-continueC: diff --git a/github.com/coreos/etcd/etcdserver/server.go b/github.com/coreos/etcd/etcdserver/server.go index cd95339454..fa38f8e08a 100644 --- a/github.com/coreos/etcd/etcdserver/server.go +++ b/github.com/coreos/etcd/etcdserver/server.go @@ -23,7 +23,6 @@ import ( "net/http" "os" "path" - "path/filepath" "regexp" "sync" "sync/atomic" @@ -41,7 +40,6 @@ import ( "github.com/coreos/etcd/lease" "github.com/coreos/etcd/mvcc" "github.com/coreos/etcd/mvcc/backend" - "github.com/coreos/etcd/pkg/contention" "github.com/coreos/etcd/pkg/fileutil" "github.com/coreos/etcd/pkg/idutil" "github.com/coreos/etcd/pkg/pbutil" @@ -77,7 +75,6 @@ const ( // (since it will timeout). monitorVersionInterval = rafthttp.ConnWriteTimeout - time.Second - databaseFilename = "db" // max number of in-flight snapshot messages etcdserver allows to have // This number is more than enough for most clusters with 5 machines. maxInFlightMsgSnap = 16 @@ -86,6 +83,8 @@ const ( // maxPendingRevokes is the maximum number of outstanding expired lease revocations. maxPendingRevokes = 16 + + recommendedMaxRequestBytes = 10 * 1024 * 1024 ) var ( @@ -171,12 +170,10 @@ type EtcdServer struct { // consistIndex used to hold the offset of current executing entry // It is initialized to 0 before executing any entry. consistIndex consistentIndex // must use atomic operations to access; keep 64-bit aligned. - Cfg *ServerConfig + r raftNode // uses 64-bit atomics; keep 64-bit aligned. readych chan struct{} - r raftNode - - snapCount uint64 + Cfg ServerConfig w wait.Wait @@ -201,7 +198,8 @@ type EtcdServer struct { cluster *membership.RaftCluster - store store.Store + store store.Store + snapshotter *snap.Snapshotter applyV2 ApplierV2 @@ -223,7 +221,7 @@ type EtcdServer struct { SyncTicker *time.Ticker // compactor is used to auto-compact the KV. - compactor *compactor.Periodic + compactor compactor.Compactor // peerRt used to send requests (version, lease) to peers. peerRt http.RoundTripper @@ -243,11 +241,14 @@ type EtcdServer struct { // on etcd server shutdown. ctx context.Context cancel context.CancelFunc + + leadTimeMu sync.RWMutex + leadElectedTime time.Time } // NewServer creates a new EtcdServer from the supplied configuration. The // configuration is considered static for the lifetime of the EtcdServer. 
-func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) { +func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) { st := store.New(StoreClusterPrefix, StoreKeysPrefix) var ( @@ -258,6 +259,10 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) { cl *membership.RaftCluster ) + if cfg.MaxRequestBytes > recommendedMaxRequestBytes { + plog.Warningf("MaxRequestBytes %v exceeds maximum recommended size %v", cfg.MaxRequestBytes, recommendedMaxRequestBytes) + } + if terr := fileutil.TouchDirAll(cfg.DataDir); terr != nil { return nil, fmt.Errorf("cannot access data directory: %v", terr) } @@ -269,23 +274,9 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) { } ss := snap.New(cfg.SnapDir()) - bepath := filepath.Join(cfg.SnapDir(), databaseFilename) + bepath := cfg.backendPath() beExist := fileutil.Exist(bepath) - - var be backend.Backend - beOpened := make(chan struct{}) - go func() { - be = newBackend(bepath, cfg.QuotaBackendBytes) - beOpened <- struct{}{} - }() - - select { - case <-beOpened: - case <-time.After(time.Second): - plog.Warningf("another etcd process is running with the same data dir and holding the file lock.") - plog.Warningf("waiting for it to exit before starting...") - <-beOpened - } + be := openBackend(cfg) defer func() { if err != nil { @@ -383,6 +374,9 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) { plog.Panicf("recovered store from snapshot error: %v", err) } plog.Infof("recovered store from snapshot at index %d", snapshot.Metadata.Index) + if be, err = recoverSnapshotBackend(cfg, be, *snapshot); err != nil { + plog.Panicf("recovering backend from snapshot error: %v", err) + } } cfg.Print() if !cfg.ForceNewCluster { @@ -405,33 +399,25 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) { return nil, fmt.Errorf("cannot access member directory: %v", terr) } - sstats := &stats.ServerStats{ - Name: cfg.Name, - ID: id.String(), - } - sstats.Initialize() + sstats := stats.NewServerStats(cfg.Name, id.String()) lstats := stats.NewLeaderStats(id.String()) heartbeat := time.Duration(cfg.TickMs) * time.Millisecond srv = &EtcdServer{ - readych: make(chan struct{}), - Cfg: cfg, - snapCount: cfg.SnapCount, - errorc: make(chan error, 1), - store: st, - r: raftNode{ - isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) }, - Node: n, - ticker: time.NewTicker(heartbeat), - // set up contention detectors for raft heartbeat message. - // expect to send a heartbeat within 2 heartbeat intervals. 
- td: contention.NewTimeoutDetector(2 * heartbeat), - heartbeat: heartbeat, - raftStorage: s, - storage: NewStorage(w, ss), - msgSnapC: make(chan raftpb.Message, maxInFlightMsgSnap), - readStateC: make(chan raft.ReadState, 1), - }, + readych: make(chan struct{}), + Cfg: cfg, + errorc: make(chan error, 1), + store: st, + snapshotter: ss, + r: *newRaftNode( + raftNodeConfig{ + isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) }, + Node: n, + heartbeat: heartbeat, + raftStorage: s, + storage: NewStorage(w, ss), + }, + ), id: id, attributes: membership.Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()}, cluster: cl, @@ -463,6 +449,15 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) { plog.Warningf("consistent index never saved (snapshot index=%d)", snapshot.Metadata.Index) } } + newSrv := srv // since srv == nil in defer if srv is returned as nil + defer func() { + // closing backend without first closing kv can cause + // resumed compactions to fail with closed tx errors + if err != nil { + newSrv.kv.Close() + } + }() + srv.consistIndex.setConsistentIndex(srv.kv.ConsistentIndex()) tp, err := auth.NewTokenProvider(cfg.AuthToken, func(index uint64) <-chan struct{} { @@ -474,12 +469,15 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) { return nil, err } srv.authStore = auth.NewAuthStore(srv.be, tp) - if h := cfg.AutoCompactionRetention; h != 0 { - srv.compactor = compactor.NewPeriodic(h, srv.kv, srv) + if num := cfg.AutoCompactionRetention; num != 0 { + srv.compactor, err = compactor.New(cfg.AutoCompactionMode, num, srv.kv, srv) + if err != nil { + return nil, err + } srv.compactor.Run() } - srv.applyV3Base = &applierV3backend{srv} + srv.applyV3Base = srv.newApplierV3Backend() if err = srv.restoreAlarms(); err != nil { return nil, err } @@ -532,9 +530,9 @@ func (s *EtcdServer) Start() { // modify a server's fields after it has been sent to Start. // This function is just used for testing. func (s *EtcdServer) start() { - if s.snapCount == 0 { + if s.Cfg.SnapCount == 0 { plog.Infof("set snapshot count to default %d", DefaultSnapCount) - s.snapCount = DefaultSnapCount + s.Cfg.SnapCount = DefaultSnapCount } s.w = wait.New() s.applyWait = wait.NewTimeList() @@ -555,18 +553,21 @@ func (s *EtcdServer) start() { } func (s *EtcdServer) purgeFile() { - var serrc, werrc <-chan error + var dberrc, serrc, werrc <-chan error if s.Cfg.MaxSnapFiles > 0 { + dberrc = fileutil.PurgeFile(s.Cfg.SnapDir(), "snap.db", s.Cfg.MaxSnapFiles, purgeFileInterval, s.done) serrc = fileutil.PurgeFile(s.Cfg.SnapDir(), "snap", s.Cfg.MaxSnapFiles, purgeFileInterval, s.done) } if s.Cfg.MaxWALFiles > 0 { werrc = fileutil.PurgeFile(s.Cfg.WALDir(), "wal", s.Cfg.MaxWALFiles, purgeFileInterval, s.done) } select { - case e := <-werrc: - plog.Fatalf("failed to purge wal file %v", e) + case e := <-dberrc: + plog.Fatalf("failed to purge snap db file %v", e) case e := <-serrc: plog.Fatalf("failed to purge snap file %v", e) + case e := <-werrc: + plog.Fatalf("failed to purge wal file %v", e) case <-s.stopping: return } @@ -614,9 +615,8 @@ type etcdProgress struct { // and helps decouple state machine logic from Raft algorithms. 
// TODO: add a state machine interface to apply the commit entries and do snapshot/recover type raftReadyHandler struct { - updateLeadership func() + updateLeadership func(newLeader bool) updateCommittedIndex func(uint64) - waitForApply func() } func (s *EtcdServer) run() { @@ -644,7 +644,7 @@ func (s *EtcdServer) run() { return } rh := &raftReadyHandler{ - updateLeadership: func() { + updateLeadership: func(newLeader bool) { if !s.isLeader() { if s.lessor != nil { s.lessor.Demote() @@ -654,6 +654,12 @@ func (s *EtcdServer) run() { } setSyncC(nil) } else { + if newLeader { + t := time.Now() + s.leadTimeMu.Lock() + s.leadElectedTime = t + s.leadTimeMu.Unlock() + } setSyncC(s.SyncTicker.C) if s.compactor != nil { s.compactor.Resume() @@ -665,9 +671,6 @@ func (s *EtcdServer) run() { if s.stats != nil { s.stats.BecomeLeader() } - if s.r.td != nil { - s.r.td.Reset() - } }, updateCommittedIndex: func(ci uint64) { cci := s.getCommittedIndex() @@ -675,9 +678,6 @@ func (s *EtcdServer) run() { s.setCommittedIndex(ci) } }, - waitForApply: func() { - sched.WaitFinish(0) - }, } s.r.start(rh) @@ -747,7 +747,9 @@ func (s *EtcdServer) run() { } lid := lease.ID s.goAttach(func() { - s.LeaseRevoke(s.ctx, &pb.LeaseRevokeRequest{ID: int64(lid)}) + ctx := s.authStore.WithRoot(s.ctx) + s.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: int64(lid)}) + leaseExpired.Inc() <-c }) } @@ -781,7 +783,7 @@ func (s *EtcdServer) applyAll(ep *etcdProgress, apply *apply) { // wait for the raft routine to finish the disk writes before triggering a // snapshot. or applied index might be greater than the last index in raft // storage, since the raft routine might be slower than apply routine. - <-apply.raftDone + <-apply.notifyc s.triggerSnapshot(ep) select { @@ -806,18 +808,14 @@ func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) { apply.snapshot.Metadata.Index, ep.appliedi) } - snapfn, err := s.r.storage.DBFilePath(apply.snapshot.Metadata.Index) - if err != nil { - plog.Panicf("get database snapshot file path error: %v", err) - } + // wait for raftNode to persist snapshot onto the disk + <-apply.notifyc - fn := filepath.Join(s.Cfg.SnapDir(), databaseFilename) - if err := os.Rename(snapfn, fn); err != nil { - plog.Panicf("rename snapshot file error: %v", err) + newbe, err := openSnapshotBackend(s.Cfg, s.snapshotter, apply.snapshot) + if err != nil { + plog.Panic(err) } - newbe := newBackend(fn, s.Cfg.QuotaBackendBytes) - // always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases. // If we recover mvcc.KV first, it will attach the keys to the wrong lessor before it recovers. if s.lessor != nil { @@ -917,7 +915,7 @@ func (s *EtcdServer) applyEntries(ep *etcdProgress, apply *apply) { } func (s *EtcdServer) triggerSnapshot(ep *etcdProgress) { - if ep.appliedi-ep.snapi <= s.snapCount { + if ep.appliedi-ep.snapi <= s.Cfg.SnapCount { return } @@ -934,9 +932,8 @@ func (s *EtcdServer) isLeader() bool { return uint64(s.ID()) == s.Lead() } -// transferLeadership transfers the leader to the given transferee. -// TODO: maybe expose to client? -func (s *EtcdServer) transferLeadership(ctx context.Context, lead, transferee uint64) error { +// MoveLeader transfers the leader to the given transferee. 
+func (s *EtcdServer) MoveLeader(ctx context.Context, lead, transferee uint64) error { now := time.Now() interval := time.Duration(s.Cfg.TickMs) * time.Millisecond @@ -975,7 +972,7 @@ func (s *EtcdServer) TransferLeadership() error { tm := s.Cfg.ReqTimeout() ctx, cancel := context.WithTimeout(s.ctx, tm) - err := s.transferLeadership(ctx, s.Lead(), uint64(transferee)) + err := s.MoveLeader(ctx, s.Lead(), uint64(transferee)) cancel() return err } @@ -1275,9 +1272,14 @@ func (s *EtcdServer) apply(es []raftpb.Entry, confState *raftpb.ConfState) (appl case raftpb.EntryNormal: s.applyEntryNormal(&e) case raftpb.EntryConfChange: + // set the consistent index of current executing entry + if e.Index > s.consistIndex.ConsistentIndex() { + s.consistIndex.setConsistentIndex(e.Index) + } var cc raftpb.ConfChange pbutil.MustUnmarshal(&cc, e.Data) removedSelf, err := s.applyConfChange(cc, confState) + s.setAppliedIndex(e.Index) shouldStop = shouldStop || removedSelf s.w.Trigger(cc.ID, &confChangeResponse{s.cluster.Members(), err}) default: @@ -1580,7 +1582,9 @@ func (s *EtcdServer) parseProposeCtxErr(err error, start time.Time) error { case context.Canceled: return ErrCanceled case context.DeadlineExceeded: - curLeadElected := s.r.leadElectedTime() + s.leadTimeMu.RLock() + curLeadElected := s.leadElectedTime + s.leadTimeMu.RUnlock() prevLeadLost := curLeadElected.Add(-2 * time.Duration(s.Cfg.ElectionTicks) * time.Duration(s.Cfg.TickMs) * time.Millisecond) if start.After(prevLeadLost) && start.Before(curLeadElected) { return ErrTimeoutDueToLeaderFail @@ -1664,12 +1668,6 @@ func (s *EtcdServer) goAttach(f func()) { }() } -func newBackend(path string, quotaBytes int64) backend.Backend { - bcfg := backend.DefaultBackendConfig() - bcfg.Path = path - if quotaBytes > 0 && quotaBytes != DefaultQuotaBytes { - // permit 10% excess over quota for disarm - bcfg.MmapSize = uint64(quotaBytes + quotaBytes/10) - } - return backend.New(bcfg) +func (s *EtcdServer) Alarms() []*pb.AlarmMember { + return s.alarmStore.Get(pb.AlarmType_NONE) } diff --git a/github.com/coreos/etcd/etcdserver/server_test.go b/github.com/coreos/etcd/etcdserver/server_test.go index 60aabc00e2..66c99247bb 100644 --- a/github.com/coreos/etcd/etcdserver/server_test.go +++ b/github.com/coreos/etcd/etcdserver/server_test.go @@ -20,6 +20,7 @@ import ( "io/ioutil" "os" "path" + "path/filepath" "reflect" "testing" "time" @@ -29,6 +30,7 @@ import ( "github.com/coreos/etcd/lease" "github.com/coreos/etcd/mvcc" "github.com/coreos/etcd/mvcc/backend" + "github.com/coreos/etcd/pkg/fileutil" "github.com/coreos/etcd/pkg/idutil" "github.com/coreos/etcd/pkg/mock/mockstorage" "github.com/coreos/etcd/pkg/mock/mockstore" @@ -40,6 +42,7 @@ import ( "github.com/coreos/etcd/raft" "github.com/coreos/etcd/raft/raftpb" "github.com/coreos/etcd/rafthttp" + "github.com/coreos/etcd/snap" "github.com/coreos/etcd/store" "golang.org/x/net/context" ) @@ -167,15 +170,14 @@ func TestApplyRepeat(t *testing.T) { st := store.New() cl.SetStore(store.New()) cl.AddMember(&membership.Member{ID: 1234}) + r := newRaftNode(raftNodeConfig{ + Node: n, + raftStorage: raft.NewMemoryStorage(), + storage: mockstorage.NewStorageRecorder(""), + transport: rafthttp.NewNopTransporter(), + }) s := &EtcdServer{ - r: raftNode{ - Node: n, - raftStorage: raft.NewMemoryStorage(), - storage: mockstorage.NewStorageRecorder(""), - transport: rafthttp.NewNopTransporter(), - ticker: &time.Ticker{}, - }, - Cfg: &ServerConfig{}, + r: *r, store: st, cluster: cl, reqIDGen: idutil.NewGenerator(0, time.Time{}), @@ -525,9 
+527,8 @@ func TestApplyConfChangeError(t *testing.T) { for i, tt := range tests { n := newNodeRecorder() srv := &EtcdServer{ - r: raftNode{Node: n}, + r: *newRaftNode(raftNodeConfig{Node: n}), cluster: cl, - Cfg: &ServerConfig{}, } _, err := srv.applyConfChange(tt.cc, nil) if err != tt.werr { @@ -552,12 +553,13 @@ func TestApplyConfChangeShouldStop(t *testing.T) { for i := 1; i <= 3; i++ { cl.AddMember(&membership.Member{ID: types.ID(i)}) } + r := newRaftNode(raftNodeConfig{ + Node: newNodeNop(), + transport: rafthttp.NewNopTransporter(), + }) srv := &EtcdServer{ - id: 1, - r: raftNode{ - Node: newNodeNop(), - transport: rafthttp.NewNopTransporter(), - }, + id: 1, + r: *r, cluster: cl, } cc := raftpb.ConfChange{ @@ -584,6 +586,49 @@ func TestApplyConfChangeShouldStop(t *testing.T) { } } +// TestApplyConfigChangeUpdatesConsistIndex ensures a config change also updates the consistIndex +// where consistIndex equals to applied index. +func TestApplyConfigChangeUpdatesConsistIndex(t *testing.T) { + cl := membership.NewCluster("") + cl.SetStore(store.New()) + cl.AddMember(&membership.Member{ID: types.ID(1)}) + r := newRaftNode(raftNodeConfig{ + Node: newNodeNop(), + transport: rafthttp.NewNopTransporter(), + }) + srv := &EtcdServer{ + id: 1, + r: *r, + cluster: cl, + w: wait.New(), + } + + // create EntryConfChange entry + now := time.Now() + urls, err := types.NewURLs([]string{"http://whatever:123"}) + if err != nil { + t.Fatal(err) + } + m := membership.NewMember("", urls, "", &now) + m.ID = types.ID(2) + b, err := json.Marshal(m) + if err != nil { + t.Fatal(err) + } + cc := &raftpb.ConfChange{Type: raftpb.ConfChangeAddNode, NodeID: 2, Context: b} + ents := []raftpb.Entry{{ + Index: 2, + Type: raftpb.EntryConfChange, + Data: pbutil.MustMarshal(cc), + }} + + _, appliedi, _ := srv.apply(ents, &raftpb.ConfState{}) + consistIndex := srv.consistIndex.ConsistentIndex() + if consistIndex != appliedi { + t.Fatalf("consistIndex = %v, want %v", consistIndex, appliedi) + } +} + // TestApplyMultiConfChangeShouldStop ensures that apply will return shouldStop // if the local member is removed along with other conf updates. 
func TestApplyMultiConfChangeShouldStop(t *testing.T) { @@ -592,12 +637,13 @@ func TestApplyMultiConfChangeShouldStop(t *testing.T) { for i := 1; i <= 5; i++ { cl.AddMember(&membership.Member{ID: types.ID(i)}) } + r := newRaftNode(raftNodeConfig{ + Node: newNodeNop(), + transport: rafthttp.NewNopTransporter(), + }) srv := &EtcdServer{ - id: 2, - r: raftNode{ - Node: newNodeNop(), - transport: rafthttp.NewNopTransporter(), - }, + id: 2, + r: *r, cluster: cl, w: wait.New(), } @@ -630,15 +676,15 @@ func TestDoProposal(t *testing.T) { } for i, tt := range tests { st := mockstore.NewRecorder() + r := newRaftNode(raftNodeConfig{ + Node: newNodeCommitter(), + storage: mockstorage.NewStorageRecorder(""), + raftStorage: raft.NewMemoryStorage(), + transport: rafthttp.NewNopTransporter(), + }) srv := &EtcdServer{ - Cfg: &ServerConfig{TickMs: 1}, - r: raftNode{ - Node: newNodeCommitter(), - storage: mockstorage.NewStorageRecorder(""), - raftStorage: raft.NewMemoryStorage(), - transport: rafthttp.NewNopTransporter(), - ticker: &time.Ticker{}, - }, + Cfg: ServerConfig{TickMs: 1}, + r: *r, store: st, reqIDGen: idutil.NewGenerator(0, time.Time{}), SyncTicker: &time.Ticker{}, @@ -665,8 +711,8 @@ func TestDoProposal(t *testing.T) { func TestDoProposalCancelled(t *testing.T) { wt := mockwait.NewRecorder() srv := &EtcdServer{ - Cfg: &ServerConfig{TickMs: 1}, - r: raftNode{Node: newNodeNop()}, + Cfg: ServerConfig{TickMs: 1}, + r: *newRaftNode(raftNodeConfig{Node: newNodeNop()}), w: wt, reqIDGen: idutil.NewGenerator(0, time.Time{}), } @@ -687,8 +733,8 @@ func TestDoProposalCancelled(t *testing.T) { func TestDoProposalTimeout(t *testing.T) { srv := &EtcdServer{ - Cfg: &ServerConfig{TickMs: 1}, - r: raftNode{Node: newNodeNop()}, + Cfg: ServerConfig{TickMs: 1}, + r: *newRaftNode(raftNodeConfig{Node: newNodeNop()}), w: mockwait.NewNop(), reqIDGen: idutil.NewGenerator(0, time.Time{}), } @@ -703,8 +749,8 @@ func TestDoProposalTimeout(t *testing.T) { func TestDoProposalStopped(t *testing.T) { srv := &EtcdServer{ - Cfg: &ServerConfig{TickMs: 1}, - r: raftNode{Node: newNodeNop()}, + Cfg: ServerConfig{TickMs: 1}, + r: *newRaftNode(raftNodeConfig{Node: newNodeNop()}), w: mockwait.NewNop(), reqIDGen: idutil.NewGenerator(0, time.Time{}), } @@ -723,7 +769,7 @@ func TestSync(t *testing.T) { n := newNodeRecorder() ctx, cancel := context.WithCancel(context.TODO()) srv := &EtcdServer{ - r: raftNode{Node: n}, + r: *newRaftNode(raftNodeConfig{Node: n}), reqIDGen: idutil.NewGenerator(0, time.Time{}), ctx: ctx, cancel: cancel, @@ -766,7 +812,7 @@ func TestSyncTimeout(t *testing.T) { n := newProposalBlockerRecorder() ctx, cancel := context.WithCancel(context.TODO()) srv := &EtcdServer{ - r: raftNode{Node: n}, + r: *newRaftNode(raftNodeConfig{Node: n}), reqIDGen: idutil.NewGenerator(0, time.Time{}), ctx: ctx, cancel: cancel, @@ -799,15 +845,16 @@ func TestSyncTrigger(t *testing.T) { n := newReadyNode() st := make(chan time.Time, 1) tk := &time.Ticker{C: st} + r := newRaftNode(raftNodeConfig{ + Node: n, + raftStorage: raft.NewMemoryStorage(), + transport: rafthttp.NewNopTransporter(), + storage: mockstorage.NewStorageRecorder(""), + }) + srv := &EtcdServer{ - Cfg: &ServerConfig{TickMs: 1}, - r: raftNode{ - Node: n, - raftStorage: raft.NewMemoryStorage(), - transport: rafthttp.NewNopTransporter(), - storage: mockstorage.NewStorageRecorder(""), - ticker: &time.Ticker{}, - }, + Cfg: ServerConfig{TickMs: 1}, + r: *r, store: mockstore.NewNop(), SyncTicker: tk, reqIDGen: idutil.NewGenerator(0, time.Time{}), @@ -858,13 +905,13 @@ func 
TestSnapshot(t *testing.T) { s.Append([]raftpb.Entry{{Index: 1}}) st := mockstore.NewRecorderStream() p := mockstorage.NewStorageRecorderStream("") + r := newRaftNode(raftNodeConfig{ + Node: newNodeNop(), + raftStorage: s, + storage: p, + }) srv := &EtcdServer{ - Cfg: &ServerConfig{}, - r: raftNode{ - Node: newNodeNop(), - raftStorage: s, - storage: p, - }, + r: *r, store: st, } srv.kv = mvcc.New(be, &lease.FakeLessor{}, &srv.consistIndex) @@ -904,6 +951,80 @@ func TestSnapshot(t *testing.T) { <-ch } +// TestSnapshotOrdering ensures raft persists snapshot onto disk before +// snapshot db is applied. +func TestSnapshotOrdering(t *testing.T) { + n := newNopReadyNode() + st := store.New() + cl := membership.NewCluster("abc") + cl.SetStore(st) + + testdir, err := ioutil.TempDir(os.TempDir(), "testsnapdir") + if err != nil { + t.Fatalf("couldn't open tempdir (%v)", err) + } + defer os.RemoveAll(testdir) + + snapdir := filepath.Join(testdir, "member", "snap") + if err := os.MkdirAll(snapdir, 0755); err != nil { + t.Fatalf("couldn't make snap dir (%v)", err) + } + + rs := raft.NewMemoryStorage() + p := mockstorage.NewStorageRecorderStream(testdir) + tr, snapDoneC := rafthttp.NewSnapTransporter(snapdir) + r := newRaftNode(raftNodeConfig{ + isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) }, + Node: n, + transport: tr, + storage: p, + raftStorage: rs, + }) + s := &EtcdServer{ + Cfg: ServerConfig{DataDir: testdir}, + r: *r, + store: st, + snapshotter: snap.New(snapdir), + cluster: cl, + SyncTicker: &time.Ticker{}, + } + s.applyV2 = &applierV2store{store: s.store, cluster: s.cluster} + + be, tmpPath := backend.NewDefaultTmpBackend() + defer os.RemoveAll(tmpPath) + s.kv = mvcc.New(be, &lease.FakeLessor{}, &s.consistIndex) + s.be = be + + s.start() + defer s.Stop() + + n.readyc <- raft.Ready{Messages: []raftpb.Message{{Type: raftpb.MsgSnap}}} + go func() { + // get the snapshot sent by the transport + snapMsg := <-snapDoneC + // Snapshot first triggers raftnode to persists the snapshot onto disk + // before renaming db snapshot file to db + snapMsg.Snapshot.Metadata.Index = 1 + n.readyc <- raft.Ready{Snapshot: snapMsg.Snapshot} + }() + + if ac := <-p.Chan(); ac.Name != "Save" { + t.Fatalf("expected Save, got %+v", ac) + } + if ac := <-p.Chan(); ac.Name != "Save" { + t.Fatalf("expected Save, got %+v", ac) + } + // confirm snapshot file still present before calling SaveSnap + snapPath := filepath.Join(snapdir, fmt.Sprintf("%016x.snap.db", 1)) + if !fileutil.Exist(snapPath) { + t.Fatalf("expected file %q, got missing", snapPath) + } + // unblock SaveSnapshot, etcdserver now permitted to move snapshot file + if ac := <-p.Chan(); ac.Name != "SaveSnap" { + t.Fatalf("expected SaveSnap, got %+v", ac) + } +} + // Applied > SnapCount should trigger a SaveSnap event func TestTriggerSnap(t *testing.T) { be, tmpPath := backend.NewDefaultTmpBackend() @@ -914,16 +1035,15 @@ func TestTriggerSnap(t *testing.T) { snapc := 10 st := mockstore.NewRecorder() p := mockstorage.NewStorageRecorderStream("") + r := newRaftNode(raftNodeConfig{ + Node: newNodeCommitter(), + raftStorage: raft.NewMemoryStorage(), + storage: p, + transport: rafthttp.NewNopTransporter(), + }) srv := &EtcdServer{ - Cfg: &ServerConfig{TickMs: 1}, - snapCount: uint64(snapc), - r: raftNode{ - Node: newNodeCommitter(), - raftStorage: raft.NewMemoryStorage(), - storage: p, - transport: rafthttp.NewNopTransporter(), - ticker: &time.Ticker{}, - }, + Cfg: ServerConfig{TickMs: 1, SnapCount: uint64(snapc)}, + r: *r, store: st, reqIDGen: 
idutil.NewGenerator(0, time.Time{}), SyncTicker: &time.Ticker{}, @@ -962,10 +1082,6 @@ func TestTriggerSnap(t *testing.T) { // TestConcurrentApplyAndSnapshotV3 will send out snapshots concurrently with // proposals. func TestConcurrentApplyAndSnapshotV3(t *testing.T) { - const ( - // snapshots that may queue up at once without dropping - maxInFlightMsgSnap = 16 - ) n := newNopReadyNode() st := store.New() cl := membership.NewCluster("abc") @@ -982,22 +1098,20 @@ func TestConcurrentApplyAndSnapshotV3(t *testing.T) { rs := raft.NewMemoryStorage() tr, snapDoneC := rafthttp.NewSnapTransporter(testdir) + r := newRaftNode(raftNodeConfig{ + isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) }, + Node: n, + transport: tr, + storage: mockstorage.NewStorageRecorder(testdir), + raftStorage: rs, + }) s := &EtcdServer{ - Cfg: &ServerConfig{ - DataDir: testdir, - }, - r: raftNode{ - isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) }, - Node: n, - transport: tr, - storage: mockstorage.NewStorageRecorder(testdir), - raftStorage: rs, - msgSnapC: make(chan raftpb.Message, maxInFlightMsgSnap), - ticker: &time.Ticker{}, - }, - store: st, - cluster: cl, - SyncTicker: &time.Ticker{}, + Cfg: ServerConfig{DataDir: testdir}, + r: *r, + store: st, + snapshotter: snap.New(testdir), + cluster: cl, + SyncTicker: &time.Ticker{}, } s.applyV2 = &applierV2store{store: s.store, cluster: s.cluster} @@ -1069,15 +1183,14 @@ func TestAddMember(t *testing.T) { cl := newTestCluster(nil) st := store.New() cl.SetStore(st) + r := newRaftNode(raftNodeConfig{ + Node: n, + raftStorage: raft.NewMemoryStorage(), + storage: mockstorage.NewStorageRecorder(""), + transport: rafthttp.NewNopTransporter(), + }) s := &EtcdServer{ - r: raftNode{ - Node: n, - raftStorage: raft.NewMemoryStorage(), - storage: mockstorage.NewStorageRecorder(""), - transport: rafthttp.NewNopTransporter(), - ticker: &time.Ticker{}, - }, - Cfg: &ServerConfig{}, + r: *r, store: st, cluster: cl, reqIDGen: idutil.NewGenerator(0, time.Time{}), @@ -1111,15 +1224,14 @@ func TestRemoveMember(t *testing.T) { st := store.New() cl.SetStore(store.New()) cl.AddMember(&membership.Member{ID: 1234}) + r := newRaftNode(raftNodeConfig{ + Node: n, + raftStorage: raft.NewMemoryStorage(), + storage: mockstorage.NewStorageRecorder(""), + transport: rafthttp.NewNopTransporter(), + }) s := &EtcdServer{ - r: raftNode{ - Node: n, - raftStorage: raft.NewMemoryStorage(), - storage: mockstorage.NewStorageRecorder(""), - transport: rafthttp.NewNopTransporter(), - ticker: &time.Ticker{}, - }, - Cfg: &ServerConfig{}, + r: *r, store: st, cluster: cl, reqIDGen: idutil.NewGenerator(0, time.Time{}), @@ -1152,14 +1264,14 @@ func TestUpdateMember(t *testing.T) { st := store.New() cl.SetStore(st) cl.AddMember(&membership.Member{ID: 1234}) + r := newRaftNode(raftNodeConfig{ + Node: n, + raftStorage: raft.NewMemoryStorage(), + storage: mockstorage.NewStorageRecorder(""), + transport: rafthttp.NewNopTransporter(), + }) s := &EtcdServer{ - r: raftNode{ - Node: n, - raftStorage: raft.NewMemoryStorage(), - storage: mockstorage.NewStorageRecorder(""), - transport: rafthttp.NewNopTransporter(), - ticker: &time.Ticker{}, - }, + r: *r, store: st, cluster: cl, reqIDGen: idutil.NewGenerator(0, time.Time{}), @@ -1194,9 +1306,9 @@ func TestPublish(t *testing.T) { ctx, cancel := context.WithCancel(context.TODO()) srv := &EtcdServer{ readych: make(chan struct{}), - Cfg: &ServerConfig{TickMs: 1}, + Cfg: ServerConfig{TickMs: 1}, id: 1, - r: raftNode{Node: n, ticker: 
&time.Ticker{}}, + r: *newRaftNode(raftNodeConfig{Node: n}), attributes: membership.Attributes{Name: "node1", ClientURLs: []string{"http://a", "http://b"}}, cluster: &membership.RaftCluster{}, w: w, @@ -1239,13 +1351,13 @@ func TestPublish(t *testing.T) { // TestPublishStopped tests that publish will be stopped if server is stopped. func TestPublishStopped(t *testing.T) { ctx, cancel := context.WithCancel(context.TODO()) + r := newRaftNode(raftNodeConfig{ + Node: newNodeNop(), + transport: rafthttp.NewNopTransporter(), + }) srv := &EtcdServer{ - Cfg: &ServerConfig{TickMs: 1}, - r: raftNode{ - Node: newNodeNop(), - transport: rafthttp.NewNopTransporter(), - ticker: &time.Ticker{}, - }, + Cfg: ServerConfig{TickMs: 1}, + r: *r, cluster: &membership.RaftCluster{}, w: mockwait.NewNop(), done: make(chan struct{}), @@ -1266,8 +1378,8 @@ func TestPublishRetry(t *testing.T) { ctx, cancel := context.WithCancel(context.TODO()) n := newNodeRecorderStream() srv := &EtcdServer{ - Cfg: &ServerConfig{TickMs: 1}, - r: raftNode{Node: n, ticker: &time.Ticker{}}, + Cfg: ServerConfig{TickMs: 1}, + r: *newRaftNode(raftNodeConfig{Node: n}), w: mockwait.NewNop(), stopping: make(chan struct{}), reqIDGen: idutil.NewGenerator(0, time.Time{}), @@ -1307,8 +1419,8 @@ func TestUpdateVersion(t *testing.T) { ctx, cancel := context.WithCancel(context.TODO()) srv := &EtcdServer{ id: 1, - Cfg: &ServerConfig{TickMs: 1}, - r: raftNode{Node: n, ticker: &time.Ticker{}}, + Cfg: ServerConfig{TickMs: 1}, + r: *newRaftNode(raftNodeConfig{Node: n}), attributes: membership.Attributes{Name: "node1", ClientURLs: []string{"http://node1.com"}}, cluster: &membership.RaftCluster{}, w: w, @@ -1455,7 +1567,7 @@ type nodeProposalBlockerRecorder struct { } func newProposalBlockerRecorder() *nodeProposalBlockerRecorder { - return &nodeProposalBlockerRecorder{*newNodeRecorder()} + return &nodeProposalBlockerRecorder{*newNodeRecorderStream()} } func (n *nodeProposalBlockerRecorder) Propose(ctx context.Context, data []byte) error { diff --git a/github.com/coreos/etcd/etcdserver/snapshot_merge.go b/github.com/coreos/etcd/etcdserver/snapshot_merge.go index 9cfc852168..928aa95b6b 100644 --- a/github.com/coreos/etcd/etcdserver/snapshot_merge.go +++ b/github.com/coreos/etcd/etcdserver/snapshot_merge.go @@ -60,9 +60,14 @@ func newSnapshotReaderCloser(snapshot backend.Snapshot) io.ReadCloser { n, err := snapshot.WriteTo(pw) if err == nil { plog.Infof("wrote database snapshot out [total bytes: %d]", n) + } else { + plog.Warningf("failed to write database snapshot out [written bytes: %d]: %v", n, err) } pw.CloseWithError(err) - snapshot.Close() + err = snapshot.Close() + if err != nil { + plog.Panicf("failed to close database snapshot: %v", err) + } }() return pr } diff --git a/github.com/coreos/etcd/etcdserver/stats/leader.go b/github.com/coreos/etcd/etcdserver/stats/leader.go index 1bed85474e..8f6a54ff75 100644 --- a/github.com/coreos/etcd/etcdserver/stats/leader.go +++ b/github.com/coreos/etcd/etcdserver/stats/leader.go @@ -24,25 +24,30 @@ import ( // LeaderStats is used by the leader in an etcd cluster, and encapsulates // statistics about communication with its followers type LeaderStats struct { + leaderStats + sync.Mutex +} + +type leaderStats struct { // Leader is the ID of the leader in the etcd cluster. 
// TODO(jonboulle): clarify that these are IDs, not names Leader string `json:"leader"` Followers map[string]*FollowerStats `json:"followers"` - - sync.Mutex } // NewLeaderStats generates a new LeaderStats with the given id as leader func NewLeaderStats(id string) *LeaderStats { return &LeaderStats{ - Leader: id, - Followers: make(map[string]*FollowerStats), + leaderStats: leaderStats{ + Leader: id, + Followers: make(map[string]*FollowerStats), + }, } } func (ls *LeaderStats) JSON() []byte { ls.Lock() - stats := *ls + stats := ls.leaderStats ls.Unlock() b, err := json.Marshal(stats) // TODO(jonboulle): appropriate error handling? diff --git a/github.com/coreos/etcd/etcdserver/stats/server.go b/github.com/coreos/etcd/etcdserver/stats/server.go index cd450e2d19..0278e885cf 100644 --- a/github.com/coreos/etcd/etcdserver/stats/server.go +++ b/github.com/coreos/etcd/etcdserver/stats/server.go @@ -26,6 +26,26 @@ import ( // ServerStats encapsulates various statistics about an EtcdServer and its // communication with other members of the cluster type ServerStats struct { + serverStats + sync.Mutex +} + +func NewServerStats(name, id string) *ServerStats { + ss := &ServerStats{ + serverStats: serverStats{ + Name: name, + ID: id, + }, + } + now := time.Now() + ss.StartTime = now + ss.LeaderInfo.StartTime = now + ss.sendRateQueue = &statsQueue{back: -1} + ss.recvRateQueue = &statsQueue{back: -1} + return ss +} + +type serverStats struct { Name string `json:"name"` // ID is the raft ID of the node. // TODO(jonboulle): use ID instead of name? @@ -49,17 +69,15 @@ type ServerStats struct { sendRateQueue *statsQueue recvRateQueue *statsQueue - - sync.Mutex } func (ss *ServerStats) JSON() []byte { ss.Lock() - stats := *ss + stats := ss.serverStats ss.Unlock() stats.LeaderInfo.Uptime = time.Since(stats.LeaderInfo.StartTime).String() - stats.SendingPkgRate, stats.SendingBandwidthRate = stats.SendRates() - stats.RecvingPkgRate, stats.RecvingBandwidthRate = stats.RecvRates() + stats.SendingPkgRate, stats.SendingBandwidthRate = stats.sendRateQueue.Rate() + stats.RecvingPkgRate, stats.RecvingBandwidthRate = stats.recvRateQueue.Rate() b, err := json.Marshal(stats) // TODO(jonboulle): appropriate error handling? 
if err != nil { @@ -68,32 +86,6 @@ func (ss *ServerStats) JSON() []byte { return b } -// Initialize clears the statistics of ServerStats and resets its start time -func (ss *ServerStats) Initialize() { - if ss == nil { - return - } - now := time.Now() - ss.StartTime = now - ss.LeaderInfo.StartTime = now - ss.sendRateQueue = &statsQueue{ - back: -1, - } - ss.recvRateQueue = &statsQueue{ - back: -1, - } -} - -// RecvRates calculates and returns the rate of received append requests -func (ss *ServerStats) RecvRates() (float64, float64) { - return ss.recvRateQueue.Rate() -} - -// SendRates calculates and returns the rate of sent append requests -func (ss *ServerStats) SendRates() (float64, float64) { - return ss.sendRateQueue.Rate() -} - // RecvAppendReq updates the ServerStats in response to an AppendRequest // from the given leader being received func (ss *ServerStats) RecvAppendReq(leader string, reqSize int) { diff --git a/github.com/coreos/etcd/etcdserver/storage.go b/github.com/coreos/etcd/etcdserver/storage.go index 693618fbd5..aa8f87569d 100644 --- a/github.com/coreos/etcd/etcdserver/storage.go +++ b/github.com/coreos/etcd/etcdserver/storage.go @@ -32,9 +32,6 @@ type Storage interface { Save(st raftpb.HardState, ents []raftpb.Entry) error // SaveSnap function saves snapshot to the underlying stable storage. SaveSnap(snap raftpb.Snapshot) error - // DBFilePath returns the file path of database snapshot saved with given - // id. - DBFilePath(id uint64) (string, error) // Close closes the Storage and performs finalization. Close() error } diff --git a/github.com/coreos/etcd/etcdserver/v3_server.go b/github.com/coreos/etcd/etcdserver/v3_server.go index 09cc3fccd3..4b9409a4a4 100644 --- a/github.com/coreos/etcd/etcdserver/v3_server.go +++ b/github.com/coreos/etcd/etcdserver/v3_server.go @@ -31,12 +31,6 @@ import ( ) const ( - // the max request size that raft accepts. - // TODO: make this a flag? But we probably do not want to - // accept large request which might block raft stream. User - // specify a large value might end up with shooting in the foot. - maxRequestBytes = 1.5 * 1024 * 1024 - // In the health case, there might be a small gap (10s of entries) between // the applied index and committed index. // However, if the committed entries are very heavy to apply, the gap might grow. 
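
The hard-coded maxRequestBytes constant removed above gives way to the per-server Cfg.MaxRequestBytes checked later in processInternalRaftRequestOnce, with NewServer warning when the configured value exceeds recommendedMaxRequestBytes (10 MiB). A rough sketch of that guard follows; the 10 MiB ceiling and the len(data) comparison come from the diff, while serverConfig, server, and propose are illustrative stand-ins rather than the real etcdserver types.

package main

import (
	"errors"
	"fmt"
	"log"
)

// recommendedMaxRequestBytes mirrors the 10 MiB ceiling added in server.go.
const recommendedMaxRequestBytes = 10 * 1024 * 1024

var errRequestTooLarge = errors.New("etcdserver: request is too large")

type serverConfig struct {
	MaxRequestBytes uint
}

type server struct{ cfg serverConfig }

func newServer(cfg serverConfig) *server {
	if cfg.MaxRequestBytes > recommendedMaxRequestBytes {
		log.Printf("MaxRequestBytes %v exceeds maximum recommended size %v",
			cfg.MaxRequestBytes, recommendedMaxRequestBytes)
	}
	return &server{cfg: cfg}
}

// propose mirrors the len(data) check in processInternalRaftRequestOnce:
// oversized marshaled requests are rejected before they ever reach raft.
func (s *server) propose(data []byte) error {
	if len(data) > int(s.cfg.MaxRequestBytes) {
		return errRequestTooLarge
	}
	return nil // the real server would hand data to raft here
}

func main() {
	srv := newServer(serverConfig{MaxRequestBytes: 1572864}) // the old 1.5 MiB default
	fmt.Println(srv.propose(make([]byte, 2*1024*1024)))      // request is too large
	fmt.Println(srv.propose([]byte("small request")))        // <nil>
}
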
@@ -374,7 +368,9 @@ func (s *EtcdServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest for { checkedRevision, err := s.AuthStore().CheckPassword(r.Name, r.Password) if err != nil { - plog.Errorf("invalid authentication request to user %s was issued", r.Name) + if err != auth.ErrAuthNotEnabled { + plog.Errorf("invalid authentication request to user %s was issued", r.Name) + } return nil, err } @@ -603,7 +599,7 @@ func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.In return nil, err } - if len(data) > maxRequestBytes { + if len(data) > int(s.Cfg.MaxRequestBytes) { return nil, ErrRequestTooLarge } diff --git a/github.com/coreos/etcd/glide.lock b/github.com/coreos/etcd/glide.lock index 6269cce8e4..b27b26876d 100644 --- a/github.com/coreos/etcd/glide.lock +++ b/github.com/coreos/etcd/glide.lock @@ -1,18 +1,18 @@ -hash: 731f35aa7008dd526f126f5bd8f74bcd7d8639aeafccc4db8f472cb67029f88d -updated: 2017-04-11T08:41:43.277154365-07:00 +hash: 4151c7de891aaf3c611392ecd302c88fd7b6ea01f494bdb0900eb2b2009c2072 +updated: 2017-07-05T14:33:35.042371004-07:00 imports: - name: github.com/beorn7/perks version: 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9 subpackages: - quantile - name: github.com/bgentry/speakeasy - version: 36e9cfdd690967f4f690c6edcc9ffacd006014a0 -- name: github.com/boltdb/bolt - version: 583e8937c61f1af6513608ccc75c97b6abdf4ff9 + version: 4aabc24848ce5fd31929f7d1e4ea74d3709c14cd - name: github.com/cockroachdb/cmux version: 112f0506e7743d64a6eb8fedbcff13d9979bbf92 +- name: github.com/coreos/bbolt + version: ad39960eb40bb33c9bda31bed2eaf4fdda15efe6 - name: github.com/coreos/go-semver - version: 568e959cd89871e61434c1143528d9162da89ef2 + version: 8ab6407b697782a06568d4b7f1db25550ec2e4c6 subpackages: - semver - name: github.com/coreos/go-systemd @@ -27,7 +27,7 @@ imports: - capnslog - dlopen - name: github.com/cpuguy83/go-md2man - version: a65d4d2de4d5f7c74868dfa9b202a3c8be315aaa + version: 23709d0847197db6021a51fdb193e66e9222d4e7 subpackages: - md2man - name: github.com/dgrijalva/jwt-go @@ -35,9 +35,9 @@ imports: - name: github.com/dustin/go-humanize version: 8929fe90cee4b2cb9deb468b51fb34eba64d1bf0 - name: github.com/ghodss/yaml - version: 73d445a93680fa1a78ae23a5839bad48f32ba1ee + version: 0ca9ea5df5451ffdf184b4428c902747c2c11cd7 - name: github.com/gogo/protobuf - version: 909568be09de550ed094403c2bf8a261b5bb730a + version: 100ba4e885062801d56799d78530b73b178a78f3 subpackages: - proto - name: github.com/golang/groupcache @@ -45,16 +45,18 @@ imports: subpackages: - lru - name: github.com/golang/protobuf - version: 4bd1920723d7b7c925de087aa32e2187708897f7 + version: 5a0f697c9ed9d68fef0116532c6e05cfeae00e55 subpackages: - jsonpb - proto + - ptypes/any + - ptypes/struct - name: github.com/google/btree version: 925471ac9e2131377a91e1595defec898166fe49 - name: github.com/grpc-ecosystem/go-grpc-prometheus version: 6b7015e65d366bf3f19b2b2a000a831940f0f7e0 - name: github.com/grpc-ecosystem/grpc-gateway - version: 18d159699f2e83fc5bb9ef2f79465ca3f3122676 + version: 589b126116b5fc961939b3e156c29e4d9d58222f subpackages: - runtime - runtime/internal @@ -64,7 +66,7 @@ imports: - name: github.com/jonboulle/clockwork version: 2eee05ed794112d45db504eb05aa693efd2b8b09 - name: github.com/kr/pty - version: f7ee69f31298ecbe5d2b349c711e2547a617d398 + version: 2c10821df3c3cf905230d078702dfbe9404c9b23 - name: github.com/mattn/go-runewidth version: 9e777a8366cce605130a531d2cd6363d07ad7317 subpackages: @@ -94,17 +96,11 @@ imports: subpackages: - xfs - name: 
github.com/russross/blackfriday - version: 5f33e7b7878355cd2b7e6b8eefc48a5472c69f70 -- name: github.com/shurcooL/sanitized_anchor_name - version: 1dba4b3954bc059efc3991ec364f9f9a35f597d2 + version: 067529f716f4c3f5e37c8c95ddd59df1007290ae - name: github.com/spf13/cobra version: 1c44ec8d3f1552cac48999f9306da23c4d8a288b - name: github.com/spf13/pflag version: 08b1a584251b5b62f458943640fc8ebd4d50aaa5 -- name: github.com/stretchr/testify - version: 976c720a22c8eb4eb6a0b4348ad85ad12491a506 - subpackages: - - assert - name: github.com/ugorji/go version: ded73eae5db7e7a0ef6f55aace87a2873c5d2b74 subpackages: @@ -119,7 +115,7 @@ imports: - bcrypt - blowfish - name: golang.org/x/net - version: d1e1b351919c6738fdeb9893d5c998b161464f0c + version: c8c74377599bd978aee1cf3b9b63a8634051cec2 subpackages: - context - http2 @@ -133,40 +129,40 @@ imports: subpackages: - unix - name: golang.org/x/text - version: f4b4367115ec2de254587813edaa901bc1c723a8 + version: 9e2f80a6ba7ed4ba13e0cd4b1f094bf916875735 subpackages: - secure/bidirule - transform - unicode/bidi - unicode/norm - name: golang.org/x/time - version: a4bde12657593d5e90d0533a3e4fd95e635124cb + version: c06e80d9300e4443158a03817b8a8cb37d230320 subpackages: - rate +- name: google.golang.org/genproto + version: aa2eb687b4d3e17154372564ad8d6bf11c3cf21f + subpackages: + - googleapis/rpc/status - name: google.golang.org/grpc - version: 8050b9cbc271307e5a716a9d782803d09b0d6f2d + version: b15215fb911b24a5d61d57feec4233d610530464 subpackages: - codes - credentials + - grpclb/grpc_lb_v1 - grpclog + - health + - health/grpc_health_v1 - internal - keepalive - metadata - naming - peer - stats + - status - tap - transport - name: gopkg.in/cheggaaa/pb.v1 version: 226d21d43a305fac52b3a104ef83e721b15275e0 - name: gopkg.in/yaml.v2 - version: 53feefa2559fb8dfa8d81baad31be332c97d6c77 -testImports: -- name: github.com/davecgh/go-spew - version: 6d212800a42e8ab5c146b8ace3490ee17e5225f9 - subpackages: - - spew -- name: github.com/pmezard/go-difflib - version: d8ed2627bdf02c080bf22230dbb337003b7aba2d - subpackages: - - difflib + version: cd8b52f8269e0feb286dfeef29f8fe4d5b397e0b +testImports: [] diff --git a/github.com/coreos/etcd/glide.yaml b/github.com/coreos/etcd/glide.yaml index f669c2deac..19d62cfa17 100644 --- a/github.com/coreos/etcd/glide.yaml +++ b/github.com/coreos/etcd/glide.yaml @@ -1,13 +1,13 @@ package: github.com/coreos/etcd import: - package: github.com/bgentry/speakeasy - version: 36e9cfdd690967f4f690c6edcc9ffacd006014a0 -- package: github.com/boltdb/bolt - version: v1.3.0 + version: v0.1.0 +- package: github.com/coreos/bbolt + version: ad39960eb40bb33c9bda31bed2eaf4fdda15efe6 - package: github.com/cockroachdb/cmux version: 112f0506e7743d64a6eb8fedbcff13d9979bbf92 - package: github.com/coreos/go-semver - version: 568e959cd89871e61434c1143528d9162da89ef2 + version: v0.2.0 subpackages: - semver - package: github.com/coreos/go-systemd @@ -23,9 +23,9 @@ import: - package: github.com/dustin/go-humanize version: 8929fe90cee4b2cb9deb468b51fb34eba64d1bf0 - package: github.com/ghodss/yaml - version: 73d445a93680fa1a78ae23a5839bad48f32ba1ee + version: v1.0.0 - package: github.com/gogo/protobuf - version: v0.3 + version: v0.4 subpackages: - proto - package: github.com/golang/groupcache @@ -33,14 +33,14 @@ import: subpackages: - lru - package: github.com/golang/protobuf - version: 4bd1920723d7b7c925de087aa32e2187708897f7 + version: 5a0f697c9ed9d68fef0116532c6e05cfeae00e55 subpackages: - jsonpb - proto - package: github.com/google/btree version: 
925471ac9e2131377a91e1595defec898166fe49 - package: github.com/grpc-ecosystem/grpc-gateway - version: v1.2.0 + version: v1.2.2 subpackages: - runtime - runtime/internal @@ -48,7 +48,7 @@ import: - package: github.com/jonboulle/clockwork version: v0.1.0 - package: github.com/kr/pty - version: f7ee69f31298ecbe5d2b349c711e2547a617d398 + version: v1.0.0 - package: github.com/olekukonko/tablewriter version: a0225b3f23b5ce0cbec6d7a66a968f8a59eca9c4 - package: github.com/mattn/go-runewidth @@ -74,7 +74,7 @@ import: - package: github.com/urfave/cli version: v1.18.0 - package: github.com/xiang90/probing - version: 07dd2e8dfe18522e9c447ba95f2fe95262f63bb2 + version: 0.0.1 - package: github.com/grpc-ecosystem/go-grpc-prometheus version: v1.1 - package: golang.org/x/crypto @@ -83,7 +83,7 @@ import: - bcrypt - blowfish - package: golang.org/x/net - version: d1e1b351919c6738fdeb9893d5c998b161464f0c + version: c8c74377599bd978aee1cf3b9b63a8634051cec2 subpackages: - context - http2 @@ -93,11 +93,11 @@ import: - package: golang.org/x/sys version: e48874b42435b4347fc52bdee0424a52abc974d7 - package: golang.org/x/time - version: a4bde12657593d5e90d0533a3e4fd95e635124cb + version: c06e80d9300e4443158a03817b8a8cb37d230320 subpackages: - rate - package: google.golang.org/grpc - version: v1.2.1 + version: v1.4.2 subpackages: - codes - credentials @@ -107,13 +107,13 @@ import: - naming - peer - transport + - health + - health/grpc_health_v1 - package: gopkg.in/cheggaaa/pb.v1 version: v1.0.2 - package: gopkg.in/yaml.v2 - version: 53feefa2559fb8dfa8d81baad31be332c97d6c77 -- package: github.com/stretchr/testify - version: 976c720a22c8eb4eb6a0b4348ad85ad12491a506 - subpackages: - - assert + version: cd8b52f8269e0feb286dfeef29f8fe4d5b397e0b - package: github.com/dgrijalva/jwt-go version: v3.0.0 +ignore: + - google.golang.org/appengine diff --git a/github.com/coreos/etcd/integration/bridge.go b/github.com/coreos/etcd/integration/bridge.go index 6c1ec54499..b9e67318e5 100644 --- a/github.com/coreos/etcd/integration/bridge.go +++ b/github.com/coreos/etcd/integration/bridge.go @@ -31,8 +31,9 @@ type bridge struct { l net.Listener conns map[*bridgeConn]struct{} - stopc chan struct{} - wg sync.WaitGroup + stopc chan struct{} + pausec chan struct{} + wg sync.WaitGroup mu sync.Mutex } @@ -43,8 +44,11 @@ func newBridge(addr string) (*bridge, error) { inaddr: addr + "0", outaddr: addr, conns: make(map[*bridgeConn]struct{}), - stopc: make(chan struct{}, 1), + stopc: make(chan struct{}), + pausec: make(chan struct{}), } + close(b.pausec) + l, err := transport.NewUnixListener(b.inaddr) if err != nil { return nil, fmt.Errorf("listen failed on socket %s (%v)", addr, err) @@ -59,10 +63,13 @@ func (b *bridge) URL() string { return "unix://" + b.inaddr } func (b *bridge) Close() { b.l.Close() + b.mu.Lock() select { - case b.stopc <- struct{}{}: + case <-b.stopc: default: + close(b.stopc) } + b.mu.Unlock() b.wg.Wait() } @@ -75,6 +82,22 @@ func (b *bridge) Reset() { b.conns = make(map[*bridgeConn]struct{}) } +func (b *bridge) Pause() { + b.mu.Lock() + b.pausec = make(chan struct{}) + b.mu.Unlock() +} + +func (b *bridge) Unpause() { + b.mu.Lock() + select { + case <-b.pausec: + default: + close(b.pausec) + } + b.mu.Unlock() +} + func (b *bridge) serveListen() { defer func() { b.l.Close() @@ -91,13 +114,23 @@ func (b *bridge) serveListen() { if ierr != nil { return } + b.mu.Lock() + pausec := b.pausec + b.mu.Unlock() + select { + case <-b.stopc: + inc.Close() + return + case <-pausec: + } + outc, oerr := net.Dial("unix", b.outaddr) if oerr 
!= nil { inc.Close() return } - bc := &bridgeConn{inc, outc} + bc := &bridgeConn{inc, outc, make(chan struct{})} b.wg.Add(1) b.mu.Lock() b.conns[bc] = struct{}{} @@ -108,6 +141,7 @@ func (b *bridge) serveListen() { func (b *bridge) serveConn(bc *bridgeConn) { defer func() { + close(bc.donec) bc.Close() b.mu.Lock() delete(b.conns, bc) @@ -119,21 +153,29 @@ func (b *bridge) serveConn(bc *bridgeConn) { wg.Add(2) go func() { io.Copy(bc.out, bc.in) + bc.close() wg.Done() }() go func() { io.Copy(bc.in, bc.out) + bc.close() wg.Done() }() wg.Wait() } type bridgeConn struct { - in net.Conn - out net.Conn + in net.Conn + out net.Conn + donec chan struct{} } func (bc *bridgeConn) Close() { + bc.close() + <-bc.donec +} + +func (bc *bridgeConn) close() { bc.in.Close() bc.out.Close() } diff --git a/github.com/coreos/etcd/integration/cluster.go b/github.com/coreos/etcd/integration/cluster.go index b2e0566acf..ed245eca2f 100644 --- a/github.com/coreos/etcd/integration/cluster.go +++ b/github.com/coreos/etcd/integration/cluster.go @@ -36,7 +36,9 @@ import ( "github.com/coreos/etcd/client" "github.com/coreos/etcd/clientv3" + "github.com/coreos/etcd/embed" "github.com/coreos/etcd/etcdserver" + "github.com/coreos/etcd/etcdserver/api/etcdhttp" "github.com/coreos/etcd/etcdserver/api/v2http" "github.com/coreos/etcd/etcdserver/api/v3client" "github.com/coreos/etcd/etcdserver/api/v3election" @@ -76,6 +78,13 @@ var ( ClientCertAuth: true, } + testTLSInfoExpired = transport.TLSInfo{ + KeyFile: "./fixtures-expired/server-key.pem", + CertFile: "./fixtures-expired/server.pem", + TrustedCAFile: "./fixtures-expired/etcd-root-ca.pem", + ClientCertAuth: true, + } + plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "integration") ) @@ -86,6 +95,8 @@ type ClusterConfig struct { DiscoveryURL string UseGRPC bool QuotaBackendBytes int64 + MaxTxnOps uint + MaxRequestBytes uint } type cluster struct { @@ -217,6 +228,8 @@ func (c *cluster) mustNewMember(t *testing.T) *member { peerTLS: c.cfg.PeerTLS, clientTLS: c.cfg.ClientTLS, quotaBackendBytes: c.cfg.QuotaBackendBytes, + maxTxnOps: c.cfg.MaxTxnOps, + maxRequestBytes: c.cfg.MaxRequestBytes, }) m.DiscoveryURL = c.cfg.DiscoveryURL if c.cfg.UseGRPC { @@ -340,7 +353,6 @@ func (c *cluster) waitMembersMatch(t *testing.T, membs []client.Member) { time.Sleep(tickDuration) } } - return } func (c *cluster) WaitLeader(t *testing.T) int { return c.waitLeader(t, c.Members) } @@ -484,6 +496,8 @@ type memberConfig struct { peerTLS *transport.TLSInfo clientTLS *transport.TLSInfo quotaBackendBytes int64 + maxTxnOps uint + maxRequestBytes uint } // mustNewMember return an inited member with the given name. 
If peerTLS is @@ -531,6 +545,14 @@ func mustNewMember(t *testing.T, mcfg memberConfig) *member { m.ElectionTicks = electionTicks m.TickMs = uint(tickDuration / time.Millisecond) m.QuotaBackendBytes = mcfg.quotaBackendBytes + m.MaxTxnOps = mcfg.maxTxnOps + if m.MaxTxnOps == 0 { + m.MaxTxnOps = embed.DefaultMaxTxnOps + } + m.MaxRequestBytes = mcfg.maxRequestBytes + if m.MaxRequestBytes == 0 { + m.MaxRequestBytes = embed.DefaultMaxRequestBytes + } m.AuthToken = "simple" // for the purpose of integration testing, simple token is enough return m } @@ -548,7 +570,7 @@ func (m *member) listenGRPC() error { l.Close() return err } - m.grpcAddr = m.grpcBridge.URL() + m.grpcAddr = schemeFromTLSInfo(m.ClientTLSInfo) + "://" + m.grpcBridge.inaddr m.grpcListener = l return nil } @@ -557,7 +579,11 @@ func (m *member) electionTimeout() time.Duration { return time.Duration(m.s.Cfg.ElectionTicks) * time.Millisecond } -func (m *member) DropConnections() { m.grpcBridge.Reset() } +func (m *member) ID() types.ID { return m.s.ID() } + +func (m *member) DropConnections() { m.grpcBridge.Reset() } +func (m *member) PauseConnections() { m.grpcBridge.Pause() } +func (m *member) UnpauseConnections() { m.grpcBridge.Unpause() } // NewClientV3 creates a new grpc client connection to the member func NewClientV3(m *member) (*clientv3.Client, error) { @@ -617,13 +643,13 @@ func (m *member) Clone(t *testing.T) *member { func (m *member) Launch() error { plog.Printf("launching %s (%s)", m.Name, m.grpcAddr) var err error - if m.s, err = etcdserver.NewServer(&m.ServerConfig); err != nil { + if m.s, err = etcdserver.NewServer(m.ServerConfig); err != nil { return fmt.Errorf("failed to initialize the etcd server: %v", err) } m.s.SyncTicker = time.NewTicker(500 * time.Millisecond) m.s.Start() - m.raftHandler = &testutil.PauseableHandler{Next: v2http.NewPeerHandler(m.s)} + m.raftHandler = &testutil.PauseableHandler{Next: etcdhttp.NewPeerHandler(m.s)} for _, ln := range m.PeerListeners { hs := &httptest.Server{ @@ -927,4 +953,8 @@ type grpcAPI struct { Maintenance pb.MaintenanceClient // Auth is the authentication API for the client's connection. Auth pb.AuthClient + // Lock is the lock API for the client's connection. + Lock lockpb.LockClient + // Election is the election API for the client's connection. 
+ Election epb.ElectionClient } diff --git a/github.com/coreos/etcd/integration/cluster_direct.go b/github.com/coreos/etcd/integration/cluster_direct.go index 84b2a796cc..ff97e6146e 100644 --- a/github.com/coreos/etcd/integration/cluster_direct.go +++ b/github.com/coreos/etcd/integration/cluster_direct.go @@ -18,6 +18,8 @@ package integration import ( "github.com/coreos/etcd/clientv3" + "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb" + "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" ) @@ -29,6 +31,8 @@ func toGRPC(c *clientv3.Client) grpcAPI { pb.NewWatchClient(c.ActiveConnection()), pb.NewMaintenanceClient(c.ActiveConnection()), pb.NewAuthClient(c.ActiveConnection()), + v3lockpb.NewLockClient(c.ActiveConnection()), + v3electionpb.NewElectionClient(c.ActiveConnection()), } } diff --git a/github.com/coreos/etcd/integration/cluster_proxy.go b/github.com/coreos/etcd/integration/cluster_proxy.go index 0152a16c67..613b61b9a4 100644 --- a/github.com/coreos/etcd/integration/cluster_proxy.go +++ b/github.com/coreos/etcd/integration/cluster_proxy.go @@ -21,7 +21,6 @@ import ( "github.com/coreos/etcd/clientv3" "github.com/coreos/etcd/clientv3/namespace" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" "github.com/coreos/etcd/proxy/grpcproxy" "github.com/coreos/etcd/proxy/grpcproxy/adapter" ) @@ -58,6 +57,9 @@ func toGRPC(c *clientv3.Client) grpcAPI { lp, lpch := grpcproxy.NewLeaseProxy(c) mp := grpcproxy.NewMaintenanceProxy(c) clp, _ := grpcproxy.NewClusterProxy(c, "", "") // without registering proxy URLs + authp := grpcproxy.NewAuthProxy(c) + lockp := grpcproxy.NewLockProxy(c) + electp := grpcproxy.NewElectionProxy(c) grpc := grpcAPI{ adapter.ClusterServerToClusterClient(clp), @@ -65,7 +67,9 @@ func toGRPC(c *clientv3.Client) grpcAPI { adapter.LeaseServerToLeaseClient(lp), adapter.WatchServerToWatchClient(wp), adapter.MaintenanceServerToMaintenanceClient(mp), - pb.NewAuthClient(c.ActiveConnection()), + adapter.AuthServerToAuthClient(authp), + adapter.LockServerToLockClient(lockp), + adapter.ElectionServerToElectionClient(electp), } proxies[c] = grpcClientProxy{grpc: grpc, wdonec: wpch, kvdonec: kvpch, lpdonec: lpch} return grpc @@ -75,6 +79,7 @@ type proxyCloser struct { clientv3.Watcher wdonec <-chan struct{} kvdonec <-chan struct{} + lclose func() lpdonec <-chan struct{} } @@ -83,6 +88,7 @@ func (pc *proxyCloser) Close() error { <-pc.kvdonec err := pc.Watcher.Close() <-pc.wdonec + pc.lclose() <-pc.lpdonec return err } @@ -95,11 +101,13 @@ func newClientV3(cfg clientv3.Config) (*clientv3.Client, error) { rpc := toGRPC(c) c.KV = clientv3.NewKVFromKVClient(rpc.KV) pmu.Lock() + lc := c.Lease c.Lease = clientv3.NewLeaseFromLeaseClient(rpc.Lease, cfg.DialTimeout) c.Watcher = &proxyCloser{ Watcher: clientv3.NewWatchFromWatchClient(rpc.Watch), wdonec: proxies[c].wdonec, kvdonec: proxies[c].kvdonec, + lclose: func() { lc.Close() }, lpdonec: proxies[c].lpdonec, } pmu.Unlock() diff --git a/github.com/coreos/etcd/integration/cluster_test.go b/github.com/coreos/etcd/integration/cluster_test.go index e356564c7f..5907d6841f 100644 --- a/github.com/coreos/etcd/integration/cluster_test.go +++ b/github.com/coreos/etcd/integration/cluster_test.go @@ -447,7 +447,9 @@ func TestRejectUnhealthyRemove(t *testing.T) { // (see https://github.com/coreos/etcd/issues/7512 for more). 
func TestRestartRemoved(t *testing.T) { defer testutil.AfterTest(t) + capnslog.SetGlobalLogLevel(capnslog.INFO) + defer capnslog.SetGlobalLogLevel(defaultLogLevel) // 1. start single-member cluster c := NewCluster(t, 1) @@ -524,51 +526,6 @@ func clusterMustProgress(t *testing.T, membs []*member) { } } -func TestTransferLeader(t *testing.T) { - defer testutil.AfterTest(t) - - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) - defer clus.Terminate(t) - - oldLeadIdx := clus.WaitLeader(t) - oldLeadID := uint64(clus.Members[oldLeadIdx].s.ID()) - - // ensure followers go through leader transition while learship transfer - idc := make(chan uint64) - for i := range clus.Members { - if oldLeadIdx != i { - go func(m *member) { - idc <- checkLeaderTransition(t, m, oldLeadID) - }(clus.Members[i]) - } - } - - err := clus.Members[oldLeadIdx].s.TransferLeadership() - if err != nil { - t.Fatal(err) - } - - // wait until leader transitions have happened - var newLeadIDs [2]uint64 - for i := range newLeadIDs { - select { - case newLeadIDs[i] = <-idc: - case <-time.After(time.Second): - t.Fatal("timed out waiting for leader transition") - } - } - - // remaining members must agree on the same leader - if newLeadIDs[0] != newLeadIDs[1] { - t.Fatalf("expected same new leader %d == %d", newLeadIDs[0], newLeadIDs[1]) - } - - // new leader must be different than the old leader - if oldLeadID == newLeadIDs[0] { - t.Fatalf("expected old leader %d != new leader %d", oldLeadID, newLeadIDs[0]) - } -} - func TestSpeedyTerminate(t *testing.T) { defer testutil.AfterTest(t) clus := NewClusterV3(t, &ClusterConfig{Size: 3}) diff --git a/github.com/coreos/etcd/integration/embed_test.go b/github.com/coreos/etcd/integration/embed_test.go index 8cba0b3960..751494eaea 100644 --- a/github.com/coreos/etcd/integration/embed_test.go +++ b/github.com/coreos/etcd/integration/embed_test.go @@ -15,13 +15,16 @@ package integration import ( + "context" "fmt" "net/url" "os" "path/filepath" "strings" "testing" + "time" + "github.com/coreos/etcd/clientv3" "github.com/coreos/etcd/embed" ) @@ -102,6 +105,47 @@ func TestEmbedEtcd(t *testing.T) { } } +// TestEmbedEtcdGracefulStop ensures embedded server stops +// cutting existing transports. 
+func TestEmbedEtcdGracefulStop(t *testing.T) { + cfg := embed.NewConfig() + + urls := newEmbedURLs(2) + setupEmbedCfg(cfg, []url.URL{urls[0]}, []url.URL{urls[1]}) + + cfg.Dir = filepath.Join(os.TempDir(), fmt.Sprintf("embed-etcd")) + os.RemoveAll(cfg.Dir) + defer os.RemoveAll(cfg.Dir) + + e, err := embed.StartEtcd(cfg) + if err != nil { + t.Fatal(err) + } + <-e.Server.ReadyNotify() // wait for e.Server to join the cluster + + cli, err := clientv3.New(clientv3.Config{Endpoints: []string{urls[0].String()}}) + if err != nil { + t.Fatal(err) + } + defer cli.Close() + + // open watch connection + cli.Watch(context.Background(), "foo") + + donec := make(chan struct{}) + go func() { + e.Close() + close(donec) + }() + select { + case err := <-e.Err(): + t.Fatal(err) + case <-donec: + case <-time.After(2*time.Second + e.Server.Cfg.ReqTimeout()): + t.Fatalf("took too long to close server") + } +} + func newEmbedURLs(n int) (urls []url.URL) { for i := 0; i < n; i++ { u, _ := url.Parse(fmt.Sprintf("unix://localhost:%d%06d", os.Getpid(), i)) diff --git a/github.com/coreos/etcd/integration/fixtures-expired/README b/github.com/coreos/etcd/integration/fixtures-expired/README new file mode 100644 index 0000000000..3651eb557f --- /dev/null +++ b/github.com/coreos/etcd/integration/fixtures-expired/README @@ -0,0 +1,5 @@ +To generate bad certs + +1. Manually set system time back to past +2. Run ./gencerts.sh + diff --git a/github.com/coreos/etcd/integration/fixtures-expired/etcd-root-ca-key.pem b/github.com/coreos/etcd/integration/fixtures-expired/etcd-root-ca-key.pem new file mode 100644 index 0000000000..449cab35d6 --- /dev/null +++ b/github.com/coreos/etcd/integration/fixtures-expired/etcd-root-ca-key.pem @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKQIBAAKCAgEAn/3pG4N9sLWucz5yPVmAuPCuh5tvHs2wRWsBnrTM9qqIpCjR +7rNJzZSy3bAMxX+u1JXUK/Nt3lT87zrIkkC4En74avJSxt+cQlSs54sHFsRo/Idl +b/6b/dEeS4bko7xlymzX5WDSZJ9Aj69wNZx73TGsHiZDBnQziyE1lPPs38qYcJtc +kZKGgsTwJ0e1gvBE+k8KdhTSBX1jYPiycOpibajEERa6dMNZHIJRElJAQejgFDzE +VRLCZBddT0kwVx1ttqYCtYDGlqg2Th2J5n1GAddQLffz8/8ZOuJsCYYgA+8LY55f +x7H392msdTWnaLVW3VYE9j5lf69/pJlVThP46kjuwtX9hfEkoLRjXBF98TibnQXQ +E+LVUNv3ezR/W+lntJOxg7Pka+5OjG/S9Kgj/QOA4nAkoPeQ/NXosBX8d/Z8qHi3 +f5YRtmT5NLwAgaSBg2lty8B20o9a63prwhEOmk++ENh8UmexUJ+Amy8lGyB/0fRC +2YFnC5sJJETjDjyrPrWJA8760Eq0TffRYYgcKyJJtioduyPncLxGY7CkT/tsh6oy +IY+RndKOfHdD67yqLZyuwdz/LsaxABELEbbFekE6mlQ/OclZzce5m8+bDZ4W3nRt +S/GygXhWNj6XxKyk8RQNB6p2a5gRIxEAadHuUJd0fFZht+xNlOEuB6n7CPkCAwEA +AQKCAgBILiZ/2jfXhG/64D5r/Tg8t6EV3wMn84ZGGzu03T7nPhK9dQkZVtvCGwcD +SwzIAY3frOT3GzEDMHaYe33HtdkVxyDOJxs/S9zUdB05rRh6pgvzeiZCe6zmuvSf +AHGgiTunMqnIe4EQEmTvLihCl6GuLl3HkF2GyOAEMexZkh7Y7C8QBpehuWhkEPOD +1S9HrpyADS7cDRKflW1Db5AZrzTO4mfqicV/Li7C1Ow8hs0kryqBFtVAyGDZBU18 +mrlrZAR+dbEdL8boa2Vsopj3Wqc952TuCEKQXxOD5Gj3dwJ0o+EQhYASuPD1N0Ct +9JHdhIp2+vrsGURzcbr1iJPa0NnoKk1HHee5LI8PnjOIsy/KaNBM4PWvmP+sWbUC +Ej6JTiyZklHztRCq6EkXhUU2D1PplkqBtAM9DnubkuHvqrPa+BDEI1OZABxJHblA +FvSB5D8bLx7rFZD7H2UvDG+e/Y7STNSo178qY2X6e5GRxoaB+/m9XU/P0+nSA+U1 +QtR00b95WSw6rn3hdgLXf5pxpmCoQqndkQzT8Xx/iY53s8Lr020c84tp6eMp4rsJ +t145eLi+RnJLGDnXeb0I5/sEJE9SUyR7L/AARB0ewgrTsr2Cy2zpDCDh2s6oWTcS +46XqU/yPcEf5NnPC7YLVjF8zWa6qO6VsBadntW6PmUbxQqehXQKCAQEAwdJGyX6w +F8WrUv3nxP1GOAB1z86/6HS/+2znmDAqlIFqMikIcUHsMfRMeBga/M+pvSRajmG3 +MUWIoRZhgyDMMtdMGOqv8bAvaHqR7UlFymeU4m/kIRmJaU81167KKF0eFc4akf6G +bzIbkGkZVAlnLWGBaloYj6vz8NWw6gJkTT+vH+Cz9g0hJ+bnNHuttxLKkDW2Fc87 +Mt8KFI14xK/tJiPktSCfVxjyVj16tn63lLQFKo5bOqsTUSxkFTPtIhGm2YGTk0Dv +/hM9x7GhMPnQ2o0lK6FhCHzAnQkoD5ld8KL5hV3iP7Jg0+H9c9c8e+gHum0n+vxo 
+WolOFsrd26ocEwKCAQEA01FbcKfz85qCP6336oVhr/Kl4TE2V+kWRbOpg48EVkJ+ +uJrqLoA/OSCYjmmh7ly5fjTpE1Juvhbuo54MoGLxQ954H47Hux+0daoX9hAEcOK5 +AiWINC7Gqi1rzQ3b9Vp2PxbiR6JcDqiy6UoK81uP6N6PgpUuu+EV74asP5SWGx/u +BhScd5QLjjtf77n0Zn5aoNSuHt3JOzjToMl4WCtaP+/t0edkBsfcU/grNs85B/wN +6A5uJR8T52wVWw5xQYbblU92JeDSgfQr1LD5VVOr8hQzVxDoOI8SL2dx68OEMw9v +hzVfHL79sKCLUAJHGqnBC+zLcVCbDctm7EVxgAmQQwKCAQEArO1Xit8lbZBHUzyG +VRNEWyLN+iKUxmmkAEciOn5+/xCYFzjU93fBrLAyqdOYAIenAcI1qWM1dxh61n6J +cd0JUzMUCgcaA6EWKzlwiS0ev3+7Lmx2NbH7D6JEf7LLW0f5V6sTub5FY2Bph2a9 +2mSpUav1M1Y/I7BfbTi7J44Kv4FaVi69YYJFWryA/Cp8yyJQ6GmDk+HZB4JIFB5E +6festqK/o3r/r03qqVcg7UIRuPMEyPtKGgYYrgvVH7W8lPD61ITvjioZ9a5lKI4r +Ku84kEXuLAdH87Kah4Fr5L8JOXGu/nbNLdeQ3Hp9D6WxqTtT6dkKGryovl5S9bL6 +TspvUQKCAQEAxAwJmlWnJMymo++BPolqHLMwI+DlOt/bMuVAkfYgHurn59qJAoUm +ophUEGN9wMczrBvoVG24ohBia1dY/X9tt/pwVU7AjCEY6cTZIAayKAyfeZdaapcu +5njnN0DxXQoFA/j2C2FcqJjoCzkPOcErnO7GE27WAaYMFMFLkl0GebnAuNFsbB/k +LJt3IM/TJzd4WxeVRruaUqAg7l2bkaj+vKyaZY+XpBbNmPV3Gg1cKsU0HaMtmrDf +ZWdH1MdsWU+E7lvfD7spcTkXZOafGwNaVWdaTh84YiiRxXriHMmyHzDl1nm0eNXU +RIZdWOgUEW+F0stn3wPaJg0bun2elBvLQwKCAQBcTaEhnVOJvBxMtM6G6N/rzBLb +yQNKPPmMfCK9+TXFMpfsfYqiST/63wRbYIQ0tjiyx+dXb7VawhovCT7AR5Ct+0zW +iCG9yUNhbFEXUWUbthdrt1Xr3IBw9NCfYHosTjyOHi0eAn1ORFlD6GNzv27zeQHR +nBJwR6/SJOLYNztJLIyQGrK8fBuqaVFf2zaxDwCiPtIRUudbLJPobEyGfszjpvAR +nIe1aqh/ONLjBgwkj/6uLI15IDexqoW5j6KyW+MlAqBmqLecOFnfM7ZKW6VHvZpZ +me+2Zgxulhq9iRyPHcYDhUzIktH6IF4hYITdLS4IbCezcp4LmHgbyDpxu3+J +-----END RSA PRIVATE KEY----- diff --git a/github.com/coreos/etcd/integration/fixtures-expired/etcd-root-ca.pem b/github.com/coreos/etcd/integration/fixtures-expired/etcd-root-ca.pem new file mode 100644 index 0000000000..64cc269b76 --- /dev/null +++ b/github.com/coreos/etcd/integration/fixtures-expired/etcd-root-ca.pem @@ -0,0 +1,34 @@ +-----BEGIN CERTIFICATE----- +MIIF5jCCA86gAwIBAgIUIzbfeuRpE4/TdkmJEYNNOA2VoLgwDQYJKoZIhvcNAQEN +BQAweTEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH +Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl +Y3VyaXR5MRUwEwYDVQQDEwxldGNkLXJvb3QtY2EwHhcNMTcwMTI2MTkxNTAwWhcN +MTcwMTI2MjAxNTAwWjB5MQwwCgYDVQQGEwNVU0ExEzARBgNVBAgTCkNhbGlmb3Ju +aWExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xDTALBgNVBAoTBGV0Y2QxFjAUBgNV +BAsTDWV0Y2QgU2VjdXJpdHkxFTATBgNVBAMTDGV0Y2Qtcm9vdC1jYTCCAiIwDQYJ +KoZIhvcNAQEBBQADggIPADCCAgoCggIBAJ/96RuDfbC1rnM+cj1ZgLjwroebbx7N +sEVrAZ60zPaqiKQo0e6zSc2Ust2wDMV/rtSV1Cvzbd5U/O86yJJAuBJ++GryUsbf +nEJUrOeLBxbEaPyHZW/+m/3RHkuG5KO8Zcps1+Vg0mSfQI+vcDWce90xrB4mQwZ0 +M4shNZTz7N/KmHCbXJGShoLE8CdHtYLwRPpPCnYU0gV9Y2D4snDqYm2oxBEWunTD +WRyCURJSQEHo4BQ8xFUSwmQXXU9JMFcdbbamArWAxpaoNk4dieZ9RgHXUC338/P/ +GTribAmGIAPvC2OeX8ex9/dprHU1p2i1Vt1WBPY+ZX+vf6SZVU4T+OpI7sLV/YXx +JKC0Y1wRffE4m50F0BPi1VDb93s0f1vpZ7STsYOz5GvuToxv0vSoI/0DgOJwJKD3 +kPzV6LAV/Hf2fKh4t3+WEbZk+TS8AIGkgYNpbcvAdtKPWut6a8IRDppPvhDYfFJn +sVCfgJsvJRsgf9H0QtmBZwubCSRE4w48qz61iQPO+tBKtE330WGIHCsiSbYqHbsj +53C8RmOwpE/7bIeqMiGPkZ3Sjnx3Q+u8qi2crsHc/y7GsQARCxG2xXpBOppUPznJ +Wc3HuZvPmw2eFt50bUvxsoF4VjY+l8SspPEUDQeqdmuYESMRAGnR7lCXdHxWYbfs +TZThLgep+wj5AgMBAAGjZjBkMA4GA1UdDwEB/wQEAwIBBjASBgNVHRMBAf8ECDAG +AQH/AgECMB0GA1UdDgQWBBRnbPUmgSmUC3API24MQ5x/Xh08xzAfBgNVHSMEGDAW +gBRnbPUmgSmUC3API24MQ5x/Xh08xzANBgkqhkiG9w0BAQ0FAAOCAgEAFPoCwCcw +ecCkvFTxjJnMI9v+i0VlqgKH5Q8ZAxwsPI+bck5KdUbi7aWTwvlZxM/2WT0NsWGO +hKZhsJnOZsRaEmeKV5TD1Ua2urQSXWztjGDn/+6JR47FYIP57d3+w5wYuwwzy2ne +4oY4OIOmot9Wqgc1D5yOo9D81Udq6DOfb9DeXqa+UuQGoYu1hLQrgUQATxiYsu8T +FNoG7EQihNuIMlBhU/H1rCKtX4aeRXRRl7Rr/p/+AYqNUblnjwowvBGyYEfzO9ag +ixO+li3SbpD4SfZwX1T3SQukoOq2iSCnrWDdP9yvx04X8oPxhbAncjxASDfy4l2S 
+vhaks6L10qZkLjWNGA65UVDPgzAWTi/7XCZZ37bP2poLbg+/VbKVvN4PII81NB54 +Ew9mkS9NwcjWQvjkhVPVGtk/fiYtkl5yrrWswJMW/fQJvipveMZbEW0jLVx28f7n +t+hvaKMy1QBr1HG3bVtty/izDVTsHJLbki07NRNkJM8M7zv960/rL8SK4J300Zm1 +DjxeyipcX1IGnIeBzNT2ASu1cD40T+qwG7hYtSCpGAkBVq4ZnFSGb3yICv5TvUE4 +WItEf4eaV/dK0f7yu02u+TS22LiFiWU1d1/wL8HX9n8utS2w3g/YXy8GNWahcjiM +AlehNnzoyVafYDVvMKNHBfJuaxa5qTQrctY= +-----END CERTIFICATE----- diff --git a/github.com/coreos/etcd/integration/fixtures-expired/gencerts.sh b/github.com/coreos/etcd/integration/fixtures-expired/gencerts.sh new file mode 100755 index 0000000000..3722b75cea --- /dev/null +++ b/github.com/coreos/etcd/integration/fixtures-expired/gencerts.sh @@ -0,0 +1,91 @@ +#!/usr/bin/env bash +set -e + +if ! [[ "$0" =~ "./gencerts.sh" ]]; then + echo "must be run from 'fixtures-expired'" + exit 255 +fi + +if which cfssl >/dev/null; then + echo "cfssl is installed; generating certs" +else + echo "cfssl is not installed; exiting" + exit 255 +fi + +cat > ./etcd-root-ca-csr.json < ./etcd-gencert.json < ./server-ca-csr.json </dev/null; then + openssl x509 -in ./etcd-root-ca.pem -text -noout + openssl x509 -in ./server.pem -text -noout +fi diff --git a/github.com/coreos/etcd/integration/fixtures-expired/server-key.pem b/github.com/coreos/etcd/integration/fixtures-expired/server-key.pem new file mode 100644 index 0000000000..a1a9f1eb72 --- /dev/null +++ b/github.com/coreos/etcd/integration/fixtures-expired/server-key.pem @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKAIBAAKCAgEArjJG7BkDXGJ7IJeUbt9ilXZl/SqKVYnQQcbAVqKKsZOUTnWl +jYgslXIVDJAYUCZ2WNzXrHq6XOY4qbfNjDIdq28hlq+EydMOU9T/1WrFASbiJdrC +mFH1XUX9SImDw9qDit2S4bxW5gs+Q6cSRSGyHf+6/DD24NY2VP3qpjwaEByZGZ7K +769zRBhoI2sWslilztQKuiVdeTRNqMtCD/Qa4JIiwyzmYOwKEd+kitrsPwUQg/11 +RNbUHZEy51GCzeZ467DcjnXiGHsAF6ZMznDvPY8GU71JYkBN1Tq8IQVe9yemqk6S +UXYoJJnebX3WMvCZ+XLKYWObANQpXxHsu6t68Hkcg5bDTTbg79JAS8co7wf1S3NA +3QudhrTf3anvdbtaZFukusQbVnH7qSp5LFTle+Vl7Megy0/juCBL9/xMSpWqUwUl +7evfrm/JkVmCoGm2+56uCyrVK9uRRraqp7J19RuNmhunOiURN610DjsisI1eqAHS +naDtMluwBc+HEaJWYzJN1JLvGA3ahOyOcv94FZ1msabq20txvp5oHLd7j9TaVGob +qSbDGLZGHrm5If/x2+1Dc9O55Om/82Q8r1Qxt+GqVX+TFed7AvBHFTYbBiW5gT8M +SqMb91t+MYmC9ChFbWepC35I6zoHX1B1CgV20lr5PH1wYO+k0IdO6ufZYBcCAwEA +AQKCAgAMledX4YrDnv3kYe9Af3VA9TfzLaKnAXkBd5mn6MB6if4aGRfn/OGzvnVU +3ghTqiO80d/nP0q9sYiAgp4gNfK80x+rIm1Go7ASUH5Xbgpjxepq775FgQ5oOclN +91mEygHdA5s8If5pSoCqJKUGR6P11Ocul18O6YstYtcUQZ1kcpyBJF7nKFb8oYLM +pE0Uf6EjK2DHCDITsrq1qlHQk0Np3EUUsubGM+eaWP0rZxvQhc4mqyZQ3fCfXkE+ +Qz5fH/q2lKWqyUuXlzNvgf1koPY4DWBYpoFpztmQwVicTiYJV10MSvb5Wb8WveM7 +J+9U6NtHEYsbtDWbvrhqfQIMoRwVqxryUj1h/GN95oZ80pFkhcLfBeu045Fyc7Aa +gZT/ugC2Jov/+1uxtLe9ZsZeY+MVBuLrUoG5+Q+Tink+uJ3KYn2TltpdiYmSZ7lY +s/SnUBGbmSJjpXsbqbcimnsZLX/T8X53UwHRG5eWmGhJBU60kATsFqZmvkYgI3wc +yenDQaIx93fwsBWEBn/Ms1XHaYvVIpeQ4eRboIzkNq0Aefyat6MIQPj5tf2Hlb93 +bRNxoJaX6oiOtCrqfIdqk688pgjTwV5r4z4R+K4FbVRx0VbU0Dfsa4rVsRFidMdg +9s+xvS2wYePkjP5m5q574oStRKYuJsaPDJeXLI5XfzKnLa/eUQKCAQEA5bjIhflh +P/3yL/EEC4Pr9t655mi0qyxNegllfkuhEEdH6u+ygj4MtJikVUatL5XHylo0KGM1 +asdJLlOwFYGsOKLk0Vj2BU1b9PljRYyS+8sZvOQOhDbxkl7zrvaezYPbsVd1IX0v +Q6fvOh9N5H127LkspHC+G03g6X+nY/+5b5CxaUHke6Cd+fjxiZnAbyzmhgIHohfC +7HCzmKfyJPIJgVpDAHaEWIpAVYU5qRwgrWWjx2WftFXF7NaI5KIoMgbqWCLp1A/O +eaO3CB/uRn3l4yjBy+paxpYzkS0LtfHMDk/0tPn8/AFt9L6Pjv+KXYhvpsJAqknQ +p0RmFEuEh+AQKQKCAQEAwh96H3TdUzRl0TewWL9IbKdeF1IV7PC1dwljevsTYlU5 +kUztebtHA6XWXNwlKr8VQ1PVTsncB4VlX3bIlbg8fVy90Uu7FQ/dhSczyuZANjz/ +ocazZi9wk8OG5k6Sgz/EdevsIPnBfdbPPGgxFkSr1CUEOkRwtI7p/F3gjeJviH7j +7BYKJje1mln3/r8h3esgeILOmK4/oKpjjRCbhAsU4j8kLFKMgUYdPgpqFzdgzOLO 
+EIoDqTl7anQkbJUoSbenoHF0xWmWG5uaOo4/ORuTr+ZoN4nkZ6D8R6ZQlPpIw4oD +WDeoAcSoFgKoczIlBk4lcz5mbwp2UsiLBYXJFH72PwKCAQEAzBd0R9r8dK74KXG2 +h0iILodIoBTsVpuApeiNPDyS9vRtR6P3c1EPVq+6aGznVrx6iSPE7RDfF2PAd6Ew +cpsHWDYYlomz4ZgOF8ItWVAAEiYqUrBG2V47FzC2zP4crjf0ykUKMluWz0P2/Sts +t5BkRQJrUBk+POHe7XRVUjmTFTR2+i3pgZB8aearKPXpipxYnjxVbcQwkIG8febP +8dT7bumzV0j0YflKGPDI/p6XxZXkgTWfQsdllfowGviaP3/3WaCDH71/UoCKD3TW +69fUkxHVw4YNahtt6xAbNGWDRj/xB4yGH5phhyx6PLB5zIl3sK8qZmA4OTNCgctq +DpGZqQKCAQBgc+lnBdcOh4Nrj+MERY7Dxek/Zx7Tysovai/OpD/+ZOAkrPd1u7LO +QjEflJa3BZiYCmh7LFsyNXqoE0oY8iDEHTeHbbx3+5kSlubqErum92oAxMzQohOq +p8U4W6P6qM2B1gZOYCpez0PK/O4e5WIHF5lhJi5l2Hi0VyTC+tZ2GK5A2LaURKvs +FHXfUrKOJEzO9BeYz0N4HhE2vyC2XBc1TzA3AZEkjmTrNZt/C5oCU1MV7q1hANms +jCao+Pe6oREd7CGcERlvgEIChDkvs98O0EnKBq7BOsD/DMkPLMjIt6Nvyr+kmUT3 +Irz1991jo6KB/2hAFg+ylEhXJyFBGNBbAoIBAFQhMh25emwXX/L0lEqoo1miDl2U +IYUFLl8sasRyZp7PmGuUSyKLMZwJesPvcXb4OL4h4Q+2Esx4nFhTkHjoo22AJWRK +ivLiDZHEVN5DKFCfaNoNCMeLi07syLRWl28K5O924lVfsEwISOd5VjuFynNHn5Tu +pE/VkfwUtY1owak3k737Yum1bBmUHyP6kJyUGQW0E9yhTcau1OnhU8XSvO+6lClK +wOg3RsP3LF3gslrRVgc+R95KOva7Oc2EuJDqoHJ8877+r68cHdJYe3mmb1pPNqC1 +It+c6mphFAT6frmzkew72FEFzaiSx/Iqiwz4LqoMEnVYN8eVp7hehyGbb8o= +-----END RSA PRIVATE KEY----- diff --git a/github.com/coreos/etcd/integration/fixtures-expired/server.pem b/github.com/coreos/etcd/integration/fixtures-expired/server.pem new file mode 100644 index 0000000000..0e4a0ab8bb --- /dev/null +++ b/github.com/coreos/etcd/integration/fixtures-expired/server.pem @@ -0,0 +1,35 @@ +-----BEGIN CERTIFICATE----- +MIIGHDCCBASgAwIBAgIUcGlr9BUSOAwUt3SDhav9yWokZDowDQYJKoZIhvcNAQEN +BQAweTEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH +Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl +Y3VyaXR5MRUwEwYDVQQDEwxldGNkLXJvb3QtY2EwHhcNMTcwMTI2MTkxNTAwWhcN +MTcwMTI2MjAxNTAwWjB4MQwwCgYDVQQGEwNVU0ExEzARBgNVBAgTCkNhbGlmb3Ju +aWExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xDTALBgNVBAoTBGV0Y2QxFjAUBgNV +BAsTDWV0Y2QgU2VjdXJpdHkxFDASBgNVBAMTC2V4YW1wbGUuY29tMIICIjANBgkq +hkiG9w0BAQEFAAOCAg8AMIICCgKCAgEArjJG7BkDXGJ7IJeUbt9ilXZl/SqKVYnQ +QcbAVqKKsZOUTnWljYgslXIVDJAYUCZ2WNzXrHq6XOY4qbfNjDIdq28hlq+EydMO +U9T/1WrFASbiJdrCmFH1XUX9SImDw9qDit2S4bxW5gs+Q6cSRSGyHf+6/DD24NY2 +VP3qpjwaEByZGZ7K769zRBhoI2sWslilztQKuiVdeTRNqMtCD/Qa4JIiwyzmYOwK +Ed+kitrsPwUQg/11RNbUHZEy51GCzeZ467DcjnXiGHsAF6ZMznDvPY8GU71JYkBN +1Tq8IQVe9yemqk6SUXYoJJnebX3WMvCZ+XLKYWObANQpXxHsu6t68Hkcg5bDTTbg +79JAS8co7wf1S3NA3QudhrTf3anvdbtaZFukusQbVnH7qSp5LFTle+Vl7Megy0/j +uCBL9/xMSpWqUwUl7evfrm/JkVmCoGm2+56uCyrVK9uRRraqp7J19RuNmhunOiUR +N610DjsisI1eqAHSnaDtMluwBc+HEaJWYzJN1JLvGA3ahOyOcv94FZ1msabq20tx +vp5oHLd7j9TaVGobqSbDGLZGHrm5If/x2+1Dc9O55Om/82Q8r1Qxt+GqVX+TFed7 +AvBHFTYbBiW5gT8MSqMb91t+MYmC9ChFbWepC35I6zoHX1B1CgV20lr5PH1wYO+k +0IdO6ufZYBcCAwEAAaOBnDCBmTAOBgNVHQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYI +KwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwHQYDVR0OBBYEFE7MTPuM +DNH+edtzjnjB+8Tuwx62MB8GA1UdIwQYMBaAFGds9SaBKZQLcA8jbgxDnH9eHTzH +MBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQ0FAAOCAgEA +S1Kx/+L5UNAjvXoDWAvFJMIpQcsFhV6vj/sbwxgwXTKjQHOOehEwaaEW735EDmhC +4CLgyM94i7eFEGwAVwWpnh6XAfqCMGd32T5aRPktiGqnQ+aAVdC/fgmWWKqA7ix5 +Bsjg9WbuBZvI1tAIscq7ajeHFBb/mndgP2kRJf8Rd7NH3VsmLHlK6KKwe/ThKvwZ +IRTfN7ABWzKq/MmGUOWuBiQaLM7DT05m3ISpN3YCHJL4HRjLz6WZ9vP3GLDcrC8H +a7TPizjB3/+y++htnDBhVAAVl4GgolRZzjkzERxDZlvyY7T8sfq9a+9GGHgRXB8v +9wWOYph2r8K1aPaVPw88cri9l993g+vWgKhEse+JoiHgcyCp2VjnM6cpMhCPktBp +YBZ/jBma5EQoLIdBFmDcH/tVs6l6o/9J3q2x+fPZYZkvyuUbxb+TdRZllCqx1myy +YxCGTLdjWEHQbdcVc8totLPgJik2LjFoPAvYgrqO0o3vTz1oagLbwie4D2uK9Ats 
+pu4KxGCsDtzyf/w9sBZti/ovIgttB7IxeFWZYIWVRCkJkre9rm8qmaCmMY2FvBDY +nBSTldaLpHAryjleyu/WYdqW8Qc+EqIPCzCvJkrKfhZEN7AT7vFwmvnOjJetFdEL +UNJ3wyITBZtiMRAInMkRi3zFeHTVqaockL/FoplkY4Q= +-----END CERTIFICATE----- diff --git a/github.com/coreos/etcd/integration/fixtures/ca-csr.json b/github.com/coreos/etcd/integration/fixtures/ca-csr.json new file mode 100644 index 0000000000..a25404ba46 --- /dev/null +++ b/github.com/coreos/etcd/integration/fixtures/ca-csr.json @@ -0,0 +1,19 @@ +{ + "key": { + "algo": "rsa", + "size": 4096 + }, + "names": [ + { + "O": "etcd", + "OU": "etcd Security", + "L": "San Francisco", + "ST": "California", + "C": "USA" + } + ], + "CN": "ca", + "ca": { + "expiry": "87600h" + } +} diff --git a/github.com/coreos/etcd/integration/fixtures/ca.crt b/github.com/coreos/etcd/integration/fixtures/ca.crt index 8470f6bf93..3ccaadbfe6 100644 --- a/github.com/coreos/etcd/integration/fixtures/ca.crt +++ b/github.com/coreos/etcd/integration/fixtures/ca.crt @@ -1,23 +1,33 @@ -----BEGIN CERTIFICATE----- -MIID2zCCAsOgAwIBAgIUZXdXtcOe421Geq9VjM35+SRJUS8wDQYJKoZIhvcNAQEL -BQAwdTEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH -Ew1TYW4gRnJhbmNpc2NvMRAwDgYDVQQKEwdldGNkLWNhMQswCQYDVQQLEwJDQTEZ -MBcGA1UEAxMQQXV0b2dlbmVyYXRlZCBDQTAeFw0xNjA3MDUxOTQ1MDBaFw0yMTA3 -MDQxOTQ1MDBaMHUxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEW -MBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEQMA4GA1UEChMHZXRjZC1jYTELMAkGA1UE -CxMCQ0ExGTAXBgNVBAMTEEF1dG9nZW5lcmF0ZWQgQ0EwggEiMA0GCSqGSIb3DQEB -AQUAA4IBDwAwggEKAoIBAQDBMoRjH0ULs+0cRZWZ8BGJ7Fmf152J9uUE3/NgYV3M -4Ntu6l3IYALXT5QSHQZIz5425HP6827mwAOZ/bk6E3yzq6XR/vHzxPFLzBMzFuq/ -elQA4nb7eYHICriEFUdJo2EUg3lSD3m6Deof/NjPMgUHtuvhn1OJMezaALZiMZ0K -9B9/1ktW4Roi6FMVFfJM5rKr9EIz6P2mFUpVHI7KSGbeuHiTPq0FLVv7wFPxRFX5 -Ygd/nF6bbSsE2LAx/JdY1j0LQi0WUcA/HaWYVOpFSKohO6FmshP5bX0o//wWSkg2 -8CSbtqvSxRF/Ril7raZlX713AAZVn8+B83tpjFqOLH+7AgMBAAGjYzBhMA4GA1Ud -DwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSlyMYprKNDkzyP -gGA5cYnEEe9Y8DAfBgNVHSMEGDAWgBSlyMYprKNDkzyPgGA5cYnEEe9Y8DANBgkq -hkiG9w0BAQsFAAOCAQEAjjZkuoBl6meveg1frQuUhWtgtN/g9JqIjhEQ7tr4H46/ -cHz3ngCuJh/GKSt7MTqafP99kqtm1GBs7BcoFKwsNFxNOo/a2MV2oYe2T5ol5U6/ -RnmPv7yXzV1WlSC2IxFdtKEIfM859TFrWFN+NyH7yyYzjx+CzFdu6SHMwrQkETKr -R/PJrb0pV+gbeFpe/VfVyT7tFSxRTkSqwvMFNjQmbSLSiIFDNdZmPBmnWk418zoP -lkUESi3OQc4Eh/yQuldDXKl7L8+Ar8DddAu4nsni9EAJWi1u5wPPaLd+3s5USr1f -zFC3tb8o+WfNf+VSxWWPWyZXlcnB2glT+TWW40Ng1w== +MIIFrjCCA5agAwIBAgIUXWXsuLEZuHtKgeQSIVthb14+9EQwDQYJKoZIhvcNAQEN +BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH +Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl +Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xNzA3MjAyMjA1MDBaFw0yNzA3MTgyMjA1 +MDBaMG8xDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE +BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT +ZWN1cml0eTELMAkGA1UEAxMCY2EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQCmtwjSg7gQBcVaoMycpePT0qoM0SKJVuvQRXIjL53/Bae5zuWiBdDVTElf +6OOFkjqPAxU7t28jmn/EqNcKkaVuFcFtVbgyD+vXWQITGSGfE1hmqVUcpbSpzLim +UIFNy6slMeUdFGiLG7/4P6mCHePgoW9r1+J2oAHSooCzJDqLNAGkgHhFQPhBC62G +3QrY2gwKlJ6Yl+2Ilb+bdT4PJq8sSlyAynPFTp07hnciEG6Ef6IQxc9pZb+UCa2A +Cyn9RU83AWj/aIcdlB8iNf86np4wFe8VEkgBdih91vfEzvoMhJZYBb0b0CnrRo1e +jVXAJkqTbajQM+yxlvlhB2PNCZusJa69eDCtnnO29MbTjOTqElTxlvU9c3huZycc +VMDgzyzm87F+Me3vh/6l6VC4Pm0zkA3XdwydncxreFoD/G+fQK2m6wXWzIsSGwqG +gzgAq8neJFfkcgzRu6WU1S8S/idqK9AoQAFIEPXYyIk3+K6JzHxhYZIBFE3OrZ58 +oEo2PCP4snzTysZk7eWCe/WTZvReKtytzKAIS/CcjxsmgaviHee5tlV/rIghAxq8 +QFnldJ1J9AtqPriRv0+EDFwOL8eyA+cVbWgX9UR0gWLe5lUqooowpq2ioWHG5F1m 
+cyi0u8cUtf5YZN6SVktQUdddsOCFfxvCU1NigxVxqs1ZWhSSrwIDAQABo0IwQDAO +BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUp//gP4sC +l0dWZOXJWaPOYED2YdQwDQYJKoZIhvcNAQENBQADggIBABXyQox/T4kD+sLuTAp9 +IP6Hr/XaHmKj3Zkrp8DdWt62R13ugCdWA8hu2yYzu92mSHBGbssaSaLzsNeb+LqE +/gSNQBvbfV0btQN2h+B3+BmEUuiv4ZTMPNArGfG7L1p35kH0NL46Bcssu59XSFLe +RIc7M5yT/C5+f/muhIxsAT6AdnwwkcxjQvQj9257S1gonOjLmmsVXW+Z+G9Y3YIf +hp84yvrJh86QVGsDC5Cu5i9kC/0CodCouIlBjWdELZDWV5KvbLAuWoQ5Jp1Y6+Jo +Dhx+2HB9mKmDWJfS8rWd//EiX/JH8iSMSaltmrzk6PYlWFAuM8jycDyyQI4mCe6J +wPMRyism7cowcGqHb+Nn2OiPvJtX6bGcVb8DbaGDmfgPdACqjdguzLHnaFyLmDe/ +la0y1FAfW7jOyQrXEzqB4tJ8ZhI+HxRiXAh8ahBcKnMQFpjsEse03d2t65ZPDgev +NjIcoqhbANpYXdygux4hJNCT8KB194frC+eK0XqyO8BJYvid1Qp7SlnpFdEo1vMK +whLje6QkrgIyqoTP1+SiB3R79rtg+41bTb8paPJs9AqNaxS/l2bSnWnRvdkiJv89 +YWgQGNO21XW+VbNV7Z0tMglmTvJc0ubbV5zZpVsuSOAQjdRXKieAxWAePrzDx5AM +ZiQgL5b9icqHm0aV7bcfp8H+ -----END CERTIFICATE----- diff --git a/github.com/coreos/etcd/integration/fixtures/gencert.json b/github.com/coreos/etcd/integration/fixtures/gencert.json new file mode 100644 index 0000000000..09b67267bb --- /dev/null +++ b/github.com/coreos/etcd/integration/fixtures/gencert.json @@ -0,0 +1,13 @@ +{ + "signing": { + "default": { + "usages": [ + "signing", + "key encipherment", + "server auth", + "client auth" + ], + "expiry": "87600h" + } + } +} diff --git a/github.com/coreos/etcd/integration/fixtures/gencerts.sh b/github.com/coreos/etcd/integration/fixtures/gencerts.sh new file mode 100755 index 0000000000..912e04903e --- /dev/null +++ b/github.com/coreos/etcd/integration/fixtures/gencerts.sh @@ -0,0 +1,46 @@ +#!/bin/bash + +if ! [[ "$0" =~ "./gencerts.sh" ]]; then + echo "must be run from 'fixtures'" + exit 255 +fi + +if ! which cfssl; then + echo "cfssl is not installed" + exit 255 +fi + +cfssl gencert --initca=true ./ca-csr.json | cfssljson --bare ./ca +mv ca.pem ca.crt +openssl x509 -in ca.crt -noout -text + +# generate DNS: localhost, IP: 127.0.0.1, CN: example.com certificates +cfssl gencert \ + --ca ./ca.crt \ + --ca-key ./ca-key.pem \ + --config ./gencert.json \ + ./server-ca-csr.json | cfssljson --bare ./server +mv server.pem server.crt +mv server-key.pem server.key.insecure + +# generate revoked certificates and crl +cfssl gencert --ca ./ca.crt \ + --ca-key ./ca-key.pem \ + --config ./gencert.json \ + ./server-ca-csr.json 2>revoked.stderr | cfssljson --bare ./server-revoked +mv server-revoked.pem server-revoked.crt +mv server-revoked-key.pem server-revoked.key.insecure +grep serial revoked.stderr | awk ' { print $9 } ' >revoke.txt +cfssl gencrl revoke.txt ca.crt ca-key.pem | base64 -d >revoke.crl + +# generate wildcard certificates DNS: *.etcd.local +cfssl gencert \ + --ca ./ca.crt \ + --ca-key ./ca-key.pem \ + --config ./gencert.json \ + ./server-ca-csr-wildcard.json | cfssljson --bare ./server-wildcard +mv server-wildcard.pem server-wildcard.crt +mv server-wildcard-key.pem server-wildcard.key.insecure + + +rm -f *.csr *.pem *.stderr *.txt diff --git a/github.com/coreos/etcd/integration/fixtures/revoke.crl b/github.com/coreos/etcd/integration/fixtures/revoke.crl new file mode 100644 index 0000000000..dd378e0926 Binary files /dev/null and b/github.com/coreos/etcd/integration/fixtures/revoke.crl differ diff --git a/github.com/coreos/etcd/integration/fixtures/server-ca-csr.json b/github.com/coreos/etcd/integration/fixtures/server-ca-csr.json new file mode 100644 index 0000000000..b89066568f --- /dev/null +++ b/github.com/coreos/etcd/integration/fixtures/server-ca-csr.json @@ -0,0 +1,20 @@ +{ + "key": { + "algo": 
"rsa", + "size": 4096 + }, + "names": [ + { + "O": "etcd", + "OU": "etcd Security", + "L": "San Francisco", + "ST": "California", + "C": "USA" + } + ], + "CN": "example.com", + "hosts": [ + "127.0.0.1", + "localhost" + ] +} diff --git a/github.com/coreos/etcd/integration/fixtures/server-revoked.crt b/github.com/coreos/etcd/integration/fixtures/server-revoked.crt new file mode 100644 index 0000000000..92997edf14 --- /dev/null +++ b/github.com/coreos/etcd/integration/fixtures/server-revoked.crt @@ -0,0 +1,35 @@ +-----BEGIN CERTIFICATE----- +MIIGEjCCA/qgAwIBAgIUBmQ4fvS9/9znydzkBFJ6EwYeoC0wDQYJKoZIhvcNAQEN +BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH +Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl +Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xNzA3MjAyMjA1MDBaFw0yNzA3MTgyMjA1 +MDBaMHgxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE +BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT +ZWN1cml0eTEUMBIGA1UEAxMLZXhhbXBsZS5jb20wggIiMA0GCSqGSIb3DQEBAQUA +A4ICDwAwggIKAoICAQDKaPgCc+vw1m8qNuPp4ujMhd0ZomWb4Ev5ik3wqp0M0ebK +fVXFwKhLwsmkA8YRafVsjZsyDeS7rRPm1tyAXonTjIXeQfYNEmS9SlqV3zSlNJk4 +lRGJSAEnAJwqpH4fAAxAGzvyHL8o7Pu+Rg2yMXEaWb0niuafa5s0oRQln25DGMWq +dwdzVeiRb8MkUhlyWrmEKe5gr/lolxAxwXh7pVXqyqqzisnP+UETY/kmwNO5feGS +Ox195tyWsYNFq142gWJlfI+atrslBW1qOD+zam+niVkvSwATLQqrqF6HmfBRLIUO +xzvzWarnXskV5PiF7WVZuYZgw7Ez7JYxqo713xcGzB6IygMjjXpirLuhIon2E8aq +jtsOJHuEiJvfMywYbAd3ryPDU040iKHxHLsiQXMjycv+AjK/Z6JWBRk9OM06v8ua +/XeyBdChiXrRPwwSfPvkggyWo07ZYzuWCYZgxke5Bt7LsJ/uIBmX6gjGzW/mxNuA +X+c9FLKtMsqTRjHdU5Y1GkgGx9ZOWKXnDICPosz5fYqiGKKAy0sS2/4+/TFRssvY +Ef7KWjYQoMHJpWzhpLPmimp69XCTIdVh6JbkNxQlebiMS/NRD4qyCnZRvVvRhetp +9wlzDf+T/aUmBMdd6ins05mLjol/TCDb3IOhoMkzjwPKXzl89FAGhHrEXTFexQID +AQABo4GcMIGZMA4GA1UdDwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYI +KwYBBQUHAwIwDAYDVR0TAQH/BAIwADAdBgNVHQ4EFgQUO3LyYm5pTwmuadST/Y3N +d78VIAswHwYDVR0jBBgwFoAUp//gP4sCl0dWZOXJWaPOYED2YdQwGgYDVR0RBBMw +EYIJbG9jYWxob3N0hwR/AAABMA0GCSqGSIb3DQEBDQUAA4ICAQA37eO0sz0gXZcn +rEp/jLoCpD6PF96tMYtWztqhoCBj3IlzkSHBiZG3o8Jm08ldbgyOe4xCKrLgTPM5 +zDCAXcTN1JpV4NPBzk+Oeyzl7Hayg8WftftoAyxzZqYZA2JAdQrjuW8nC18eKZ5P +05t+lQiXgOI0DaYpfJabJ2AFr8kqsZKW/gw1kvdhhar72Ar2rJwm+h6XWpWghGzQ +CWgr+q+FbWCqCtJ+MvwVe9qxwc8vwG/YxPeumNI2sC4pGIx6AxnNvvTtEVndQdA1 +AG2HCjDm/6hbTre+4ps4orFfgwkavSxT4SPJYsLloD914oJ8ekuPKoMqgqF4jQRp +IiuwXZ5dhDJu5qAL/LcgG99j8beyNhpXbsiO7iWgskUHPOjFzZEfnV1K7g2yu0zS +Ym4zRKKIWWePn2Tvnu14aIC3pVaaYGL13+0UCbI1Dhm5qyJ+I7MNQte/bMVKEdfG +Xr+fL7VQL0MH58cNJoPdUBPmmiDTR8ZH73iuFA+6YpTzOtoDi1mWAu/PHdFDiR3o +hqTzUBEisfWsvj9Dd2las+glsCsmhCon00kuxau1zyvqxZrVXA82rdy981E09NOu +kSagZkOb60q/il1BCKYVXlZ5Mn9IUMQur7y8Tg2NPY66BXs0neTS/RcrMKtOdTdM +hE/fY15ykrUxtAio49yuhfQm5SxSQA== +-----END CERTIFICATE----- diff --git a/github.com/coreos/etcd/integration/fixtures/server-revoked.key.insecure b/github.com/coreos/etcd/integration/fixtures/server-revoked.key.insecure new file mode 100644 index 0000000000..8bf96aae92 --- /dev/null +++ b/github.com/coreos/etcd/integration/fixtures/server-revoked.key.insecure @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKQIBAAKCAgEAymj4AnPr8NZvKjbj6eLozIXdGaJlm+BL+YpN8KqdDNHmyn1V +xcCoS8LJpAPGEWn1bI2bMg3ku60T5tbcgF6J04yF3kH2DRJkvUpald80pTSZOJUR +iUgBJwCcKqR+HwAMQBs78hy/KOz7vkYNsjFxGlm9J4rmn2ubNKEUJZ9uQxjFqncH +c1XokW/DJFIZclq5hCnuYK/5aJcQMcF4e6VV6sqqs4rJz/lBE2P5JsDTuX3hkjsd +febclrGDRateNoFiZXyPmra7JQVtajg/s2pvp4lZL0sAEy0Kq6heh5nwUSyFDsc7 +81mq517JFeT4he1lWbmGYMOxM+yWMaqO9d8XBsweiMoDI416Yqy7oSKJ9hPGqo7b +DiR7hIib3zMsGGwHd68jw1NONIih8Ry7IkFzI8nL/gIyv2eiVgUZPTjNOr/Lmv13 
+sgXQoYl60T8MEnz75IIMlqNO2WM7lgmGYMZHuQbey7Cf7iAZl+oIxs1v5sTbgF/n +PRSyrTLKk0Yx3VOWNRpIBsfWTlil5wyAj6LM+X2KohiigMtLEtv+Pv0xUbLL2BH+ +ylo2EKDByaVs4aSz5opqevVwkyHVYeiW5DcUJXm4jEvzUQ+Ksgp2Ub1b0YXrafcJ +cw3/k/2lJgTHXeop7NOZi46Jf0wg29yDoaDJM48Dyl85fPRQBoR6xF0xXsUCAwEA +AQKCAgA3rEubERtycOi+qb5ilIEH0EISTPK5vyXmiz4I1kTAQ/PA+lxfOjCQNhGU +RV1zaLuSkhh/2gZyAJcaxTp9LIOoZlxj16y/x7Fhx1PHKU+nqSIDyCy8n8uBWM+b +gwcVq8Oy1krUu0dxEE3l9grKWoMLhmdauv/YFZbpEO0jtAh1+BhWXMr11ElVx3Hb +SaGqLH4edhIVEhH9zJ8tsFNdXwqUvA3buG7t/1cA6FydZihWOuOSfyQLfzZpVIYQ +4aPWRhw0YeB145DyC94eez46MSpo4IRhV7W7kIYA1Ry7G4JYMXCfsfkxZBZ38UBJ +/2LEI5ne5gKqFulkqMxe+NS9mBES1c5s2EqGdIsRAyOHlh7XvE+aHoiPSNqXGC4S +h1x5HIakc8AITtCkoJ1WF5Ir01vc2XFQzZW+yJtXoO2GmZjJUO0NowNkpnCJPIaL +ri3q5Yx0O/1ZqctS4J1+/U6FG3ccdZvgui+XfX6x7Nf/l5JSuKIYNiHoPmQXfFcn +yfw0e2xR84IXkXnXy8pOHfFelQkAzNIoH+wcH0hQrECD+R8XDIrv5i7ys2IRsJuG +/PLJ0I2adYjMgn2s4Z8upMJcHDzn2wbIhzxG1LY7yVvyr233ScuaJE8+LpVwQvU9 +fDjfJ+wXmGm2R+xfwXUAOtnvMpKOYOrVBDwT3Cp4V1b8NUaYwQKCAQEAz7hZ9kCk +wSDOE/LEdzf/OSIKgb9UVAl9af3F1n02U06PUHrlhB9qxhOi5z+Hqox1cFkHlVvl +1P6S6VGLAEnyMaD7e2zhVx0ByOSZAjMGaEve3ataocCWJfOM16WrC7r65gptjdZZ +FabzUBAW1YB7kadrdVg767c/xf9fAQ7bEYuDoJB4ER3o3h6aMZ1eeL2tiKrgIn8m +ql9WHX4X0dfF94V2bIaHnqTpTmmDMYE4tFZarfG4+e21YvnREd1PhlGMrsfGhYFS +DLYJw1gtJKM9ht4g1A0dggBt6GW7ziOvA23HGeIAEqLO+zT5e4WvxBV95x/OjWrs +v7YcipW8Kndd3QKCAQEA+XSoz0fsiKvfGJeg6dR64pr9hmGZz9nJRkCOejVd3U5L +uCbkPq5bFRjDGsBgih+9lWDApQq1lKeQmnin+sXDSmQEtVpDqW4gHuOk++xf6CkG +D9X6NR9PMrXDkRCJDONfyk7WppMjLzpFOrUcBW2VI2ZJdZd2oeQGV8QhQIHQEw0W +iYmAEFAjRoU6Y6g/XDngMkwL5PqmOuAXrw+HvwEJ0cyNisl0F9a6C0pkRQeZGWNi +EEpCvsg1NasTzsk/Rd0YjWGY5cNJKIkKVQoIX1Y8O644aIam3klpZLpA3Z41zmdl +/saIMcJHtW4s2s/YZ8wJrjxY/vsvCK9SrIFB2nc6CQKCAQEArCyQVO9MINDCQAKw +GmmN4Zt3vggLLAUZsSsqOxlaWD77x6e2aodoB3rrQmaEWzdFeLQy9vhPTvccasyu +PLUOTVi0Wp/rQDvI6O2ibhJBM14AAxkvbHenfRmdGno1humbYeYu3KxY9vuFHk3X +v2xaAGcXSRKzyDQCZmnAI65eovJTTlmxS+7QwJv5DzrzvXzrRCbu1WkzcXDIfnWv +5L3HV92GRXpVG4hx4g7PMc98Yu5ZB2ke+/quqLWNOBOLATjDNXRd9vc0PVj+Sq7h +7EZqj0m7SEj+tm0IrOL2gm1Nebgamjeb+9Z7XfmQ+XPcNtnhnBvfJ5UDW/zlN3HZ +oBGE+QKCAQBAjjAhdDuCIvhZJOQ/nv0uJ230mM11PKcZxxsYBTeFTf3KakYm8ngf +vYjAI4jYNd6aCa/RBjR3g+WoFBFklEk1tyyAwhtAfX9SfxbzGvi22+b1sipOFQwp +02AI6n6NF0py0HQ8J1ezoSDJUJUv0mwF/TKFe+z3eEsr61Wvm+h9BaYPccXycsqu +NUwm/iNnepLKcWOinjrmgZefdiVrCJnB8W2vvPKOOMNsqJPKSW0VZOK71HvxY15h +xSQbH3mAWvc/n0IyM5d7JfvGhhIkmex8hVmTs0T7wYPEzW5767WA9MEwbbBRMDXe +feSdu6wFMIhQzs05L0e8t6JtggXIw5IBAoIBAQCtyxIDHmoxKQeomXvdsCSL45F2 +caA8b8THXTD0IDLPq1Z8J568lE86meVmyogLktyWRouatE4ZAg/ehOZdJwcTBGki +ZcQ1oycveFzH8cbVrS1x1zMXRcWh4ERkm28M9xTfOFBJRhcCADUOB6RHlz5q7iRj +5wQMd4OEtrXHnUaewelCNmfTUA+Z2JBA+wGUXEPpg9tD5NMzYMtYKYRCljUEyLTQ +oieAAiPzZI9UmolaHufW5+9s0kyGqGU69ORk8i/Fy27g/Ca83A6i9BsNiPaWUk5m +HF9YrkAh5TQDOA3dRoPmjJfPg6Pa92m95QHq9GcfdV3ifrNTJb0X3JCxCa9b +-----END RSA PRIVATE KEY----- diff --git a/github.com/coreos/etcd/integration/fixtures/server-wildcard.crt b/github.com/coreos/etcd/integration/fixtures/server-wildcard.crt new file mode 100644 index 0000000000..283e8ec6fd --- /dev/null +++ b/github.com/coreos/etcd/integration/fixtures/server-wildcard.crt @@ -0,0 +1,30 @@ +-----BEGIN CERTIFICATE----- +MIIFDzCCAvegAwIBAgIUeJ6mpnfPEW9DQsXb3n1/wfj6y04wDQYJKoZIhvcNAQEN +BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH +Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl +Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xNzA3MjAyMjA1MDBaFw0yNzA3MTgyMjA1 +MDBaMHgxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE +BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT 
+ZWN1cml0eTEUMBIGA1UEAxMLZXhhbXBsZS5jb20wggEiMA0GCSqGSIb3DQEBAQUA +A4IBDwAwggEKAoIBAQClaGICudWL2LbCIxBEtsSUC9cFzskwH0+9M17nKN9mA6dx +PWnOK3kLuLcV5kjzc70kechTzHXHfSSCw7DozUfecQQlxxeRZsf/sGJ88aUqnmn9 +oYA0Yf32rZ5M4MkyeZM1uG279N/LLUvBt1wojgmddw9VhgM3FQ2wC8L6TwCPCER4 +0pDjLx2wqjsGd/M2D2ixBSenRKnDMrIe1zY5RQYMfSX0Y1zmkr3ld2SfMpeIh28T +fG8tmOx19y++mB3kz8VJKknC2AbdnX/8i3BV+Y4DWZlxR7CT1flBih358w+6TOIi +BR6yuErkpsiYUP5dt4tyTYxxMIkHl+bIWRxswRQ7AgMBAAGjgZkwgZYwDgYDVR0P +AQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMB +Af8EAjAAMB0GA1UdDgQWBBQPhUhOX+soZsPIPqa0cw9SEqM9lzAfBgNVHSMEGDAW +gBSn/+A/iwKXR1Zk5clZo85gQPZh1DAXBgNVHREEEDAOggwqLmV0Y2QubG9jYWww +DQYJKoZIhvcNAQENBQADggIBAHKU/dvRCfMXW2XurSpuIuGtnMn6ozSapiYETy3C +9UZ1hPldZc4D7QE2MsC2nthEzXkrf+uU/TtYl2OR/DWjaCWX4zP1F3qyLSUiXdZh +e48HQViLtLN8ZG+PRxM7KOW+DYChIRA+OAtYj9Ti6OiBOcnWY5K8MyJfNqq2wxD/ +ja+dGD3u4qeHlnsfloFwr0Nj1+fhj+PYrAKgRJ7VsPKNisDgTWnBGfzO9pFY9e1E +aAoPDQJEfxpnEzupATfrHeEKBJVBj2SW1MX0zUwDv9NS7+AToxBuRbNtdsC7zfrS +657LAGQMHcahxuLvQQjYhP662ke98JsYNld49i/SP4o4ViK16BURwAHlm1ljNw43 +7WQxdCuH7Fk5lXouaX+Btn5pkKGvguQfuW3T6lU+Kv2i3ZRKPdvhyN+aSfK7t8Rp +G2tDodGjZDorV6ZjBbMNUKHK8J/r+MHYiU95JIgjfn3pXztU0Nxhq15eIW+74yug +IZQfZ5TIMUlmhtR5truKSvdrwJqznFzPfr1iupyy30HhIHLVxy3fmyXHjdD9YeHC +CjQu+juiJrVo2JCrmlp/pEIe7sY2UnzkC30tJT1Ys4tRSlbN8YHrzdSgcd/0bkSA +TlkDkee6yb5jKf9/xHtUYFhGu3nLDX9aJwkJcKuZ0pz8yjlBK7sN/HFsQACbAVeW +aGks +-----END CERTIFICATE----- diff --git a/github.com/coreos/etcd/integration/fixtures/server-wildcard.key.insecure b/github.com/coreos/etcd/integration/fixtures/server-wildcard.key.insecure new file mode 100644 index 0000000000..0806784d37 --- /dev/null +++ b/github.com/coreos/etcd/integration/fixtures/server-wildcard.key.insecure @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEApWhiArnVi9i2wiMQRLbElAvXBc7JMB9PvTNe5yjfZgOncT1p +zit5C7i3FeZI83O9JHnIU8x1x30kgsOw6M1H3nEEJccXkWbH/7BifPGlKp5p/aGA +NGH99q2eTODJMnmTNbhtu/Tfyy1LwbdcKI4JnXcPVYYDNxUNsAvC+k8AjwhEeNKQ +4y8dsKo7BnfzNg9osQUnp0SpwzKyHtc2OUUGDH0l9GNc5pK95XdknzKXiIdvE3xv +LZjsdfcvvpgd5M/FSSpJwtgG3Z1//ItwVfmOA1mZcUewk9X5QYod+fMPukziIgUe +srhK5KbImFD+XbeLck2McTCJB5fmyFkcbMEUOwIDAQABAoIBAFFMoP/d/0whVJLP +USu0+aIav7EnFlQEz9ia60aLHGKz4RUTrnGbhH9yZuroqWqftJO9F+24TRukMtHj +BY0neO+odPVsifT5o8vVElN/IkN0YXw8aRtWHtGkPG5k/f9FKkn5QFZl4amntdid +GzsDtU4kOvE6UVI9kuC4pzkIo6mpCDmDFu9SzsSKVAakKPPrvZ7w25jiyBuJUYKF +XOMOlTviZjbrsEW2BsNF/HNZywH3YRcE1jfiRHcQMLyF4Bk1VJSrPh4ahXlvTWmO +/5Pwl7OIaxFUSjrjSioRXP9VJRMgeMH0D2giQCHDXzYrqg6cxGTCwLnvVNagVBLx +/uiVFuECgYEAxyA11z778PyIxpbPxC6ui+vODKRh+sHERFUkhJTXLfYFZ55fIJ24 +XjQJkZp8qMLhiBoqZjYvVCp+HUmyXoS2ts+DwNP+r/dOm/yyc1DsAyhvb39g9tHO +a2IjhkOiR5z6OxgYuv90gefq0glyVvFzmIhnRi398lhXJ2OU4XJWxHUCgYEA1KbB +iKKAT5Tvw3T23Z2T52YEorTS2DfPyp07zmiKxSdJiW2Or0mZAtNwDT72emy+9bSD +THs9PvS/Csoq6pWer1humF84K6qZ/ICPQnzt6jDG44R8vkdPdBSm2K8lWqlXxYFp +ya3Y8Fen5XFxEvrp7eD5NYkqZY3tqRVO8c9vn+8CgYEAxQz0+tqTSzk8yPj5BbUE +eeaR8yTA6PrTFKQFDUaVYiAx3QZ2MLqjdmWcioAMmJyxvpPWHWvFjk62mpkRcEN4 +5JOaWDnxsYTUP7zjgwYzaDSdggLVm6qn0NA/Q2CuuJt5bP09i9+8FcnBMLS0d6Fc +uTdSq7pbsXUGWi5LaIZTovkCgYBacbpqxMLSFkSL21mMFJNtneRm14W91K8aPBnN +xoUPKZCLVP+U6jacDxXfbGIk28+0bVxS0S/RcQM4MZhjQdPGPFR9ljIr0FnCHWPR +IZWHP8u3xQfRXj8a3hXAn23By7i7FjnKP5i/UGjmm4M+UV3hgQg9juNrYhwtCBUV +n+aYHQKBgQCtidqW1EajQmZL26mzCb9ChtAgRT0fR4ui3wA2b4aZGu9CmJdRBS2K +K6XjqlQhFDtARWc8Svwuw5FZ+SJNDBFiHmuxu3uwmzMsPPgMk9YJxh5olu/ZgiKg +GApXR4U+CKlKLzeGk2WgpmfsF3KR12aUuc+BQnaeILRBZqjR0W4kvw== +-----END RSA PRIVATE KEY----- diff --git a/github.com/coreos/etcd/integration/fixtures/server.crt 
b/github.com/coreos/etcd/integration/fixtures/server.crt index 0293bc2871..d54d9c2806 100644 --- a/github.com/coreos/etcd/integration/fixtures/server.crt +++ b/github.com/coreos/etcd/integration/fixtures/server.crt @@ -1,24 +1,35 @@ -----BEGIN CERTIFICATE----- -MIID9TCCAt2gAwIBAgIUXtrXPwZLfKUJiGr6ClP3lqhOuKUwDQYJKoZIhvcNAQEL -BQAwdTEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH -Ew1TYW4gRnJhbmNpc2NvMRAwDgYDVQQKEwdldGNkLWNhMQswCQYDVQQLEwJDQTEZ -MBcGA1UEAxMQQXV0b2dlbmVyYXRlZCBDQTAeFw0xNjA3MDUxOTQ1MDBaFw0xNzA3 -MDUxOTQ1MDBaMFUxFTATBgNVBAcTDHRoZSBpbnRlcm5ldDEWMBQGA1UEChMNYXV0 -b2dlbmVyYXRlZDEVMBMGA1UECxMMZXRjZCBjbHVzdGVyMQ0wCwYDVQQDEwRldGNk -MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArGvOLPmy5i+1j4JitG4V -g99w125ncs2ImhqucmJe6YtSnKruaLdOx93X7lzC3k0umnvLgh643R4eYS5PvrDk -vw1dSYB7BHhveFPmmWd7m7n7bXtgbcdkCmUeTbSeqvptPgyMJOQfXzfOGbEHfu7U -0raulR6KtqAatofKpRZhZgzZQpVkhdd0UTsOwqCWdX3Qe0D1MS922kX99c4UlGyD -OTVL6tulvDBBYgHbGErFmhxdgwm4e6dFfdkPUeHczzUWnKo2sIGBvo4R/NwPIp6G -PnebrO0VWvcQfdSqjYk3BmILl8BVL5W1/EBRLtz9mZuQgc/VC62LvsgXusC9pwXC -3QIDAQABo4GcMIGZMA4GA1UdDwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcD -AQYIKwYBBQUHAwIwDAYDVR0TAQH/BAIwADAdBgNVHQ4EFgQULLrktzdBK6iLINQ7 -hGRjQbMYXKowHwYDVR0jBBgwFoAUpcjGKayjQ5M8j4BgOXGJxBHvWPAwGgYDVR0R -BBMwEYIJbG9jYWxob3N0hwR/AAABMA0GCSqGSIb3DQEBCwUAA4IBAQCI2Tp4pMYk -LFLzGy4e/5pwpA4x/C2zl01Sv/eC79RA5Zz1NtSF/7LCfL+KPNpNkxzPyTxWOaX5 -YMuAbD49ZBQYeEyNUxKcwWEpaVlmlIUj3b21fBXQ7Nw25Uea45bNhdZcdMUOTums -J1/BrA2eoEB0guTlh3E8iadbVmSf6elA9TbYLd7QTTgcb3XclYCwhV3eKdm3IEiX -g4q50iM6/LRz1E5C3LlQ0aNqpGroBv/9ahLVfLr06ziSRcecLJ4485MtJOxP4guA -1tc6qPyw2MLmAlLZfOCHKLbK3KboZI8IANmrpNyL590D9bDl9nLnHmJuitBpIVp1 -Hw0I8e4ZYhab +MIIGEjCCA/qgAwIBAgIUPViBCYkAU+aOqe9Db3rdN4EJxj8wDQYJKoZIhvcNAQEN +BQAwbzEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH +Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRldGNkMRYwFAYDVQQLEw1ldGNkIFNl +Y3VyaXR5MQswCQYDVQQDEwJjYTAeFw0xNzA3MjAyMjA1MDBaFw0yNzA3MTgyMjA1 +MDBaMHgxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UE +BxMNU2FuIEZyYW5jaXNjbzENMAsGA1UEChMEZXRjZDEWMBQGA1UECxMNZXRjZCBT +ZWN1cml0eTEUMBIGA1UEAxMLZXhhbXBsZS5jb20wggIiMA0GCSqGSIb3DQEBAQUA +A4ICDwAwggIKAoICAQDi5InXatzyDc9B+ho+S4+6O1ZmIU44OMOnNfROaYBz4wnH +xGRcwFPBONVT+mAZ7CIdXV02oxHAJ2d5+Asjkt3p93dWJk+Q3DTnBL/3puPDUXt7 +LCuYJ8sQrihqoF7KVHZeDVKUXMDKTwp0CnyDXAEG0wNbI81AqDA/umo1Kh9gacP6 +1z4129EybwRd7K/ZqC97Zo36qsT3iCKrxS2njvQbbBfqr7njpFxF0qTBgDERhFeR +SFcaMPn1v0AFvaFFczTQDCVvSdLD0XswJVhe29DI0TsB/f85OV8R5rHcBXNvHdzB +vYpfH6Iy10/lKCj1CDNsGxaZHK3nhwODeTxa6qQCfFSRBvL8e5ieBNekHK0mM2pb +8P4nu3Xo4C3FMUDmO2qE0xYaJ076nTqkMQRYoLM/hTQzbIaXt2kUw9D6oqbmu3xt +gaZTFZjHveXVRg3hzjUKVLeqHqB2nMozBtx9wSCOvLCZSKj8DHwvh5kJrJpxYgpM +9uNQk32XL+QYF/OK2eIFVU9Mk1NC7PtrsQczdvFyAAkCTBzba0xOd3VzK8SQQmCr +RBFmsIAABPzPUv1WkwsWbYz5LaWOJXKUGMC00dH97eD6apgvA7jK2ILO14ETbNm9 +hxod/DZqr6z8ijlw3WcqHzjYW49oZYg9H6YCRisf3yWuWFTWyL03fM00RgCSYwID +AQABo4GcMIGZMA4GA1UdDwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYI +KwYBBQUHAwIwDAYDVR0TAQH/BAIwADAdBgNVHQ4EFgQURgEb31GFrs1S/4cgV4ZI +fonP7OkwHwYDVR0jBBgwFoAUp//gP4sCl0dWZOXJWaPOYED2YdQwGgYDVR0RBBMw +EYIJbG9jYWxob3N0hwR/AAABMA0GCSqGSIb3DQEBDQUAA4ICAQBS506hAyC9R6qo +KZY8E1tFeIxlkunnWqKrc/ElDxOvJOVJRJmY88KzyGN8v7gUog3RUmen4h5v6+SJ +IY00ljeu8zSqYoBOajlX+ej8LnNcr4viq+m+eMljA3jt7TRyHt9TIZ5MOns+M7Vb +HGTa9juRMfdX7oL8KdCsdzKVI7p8bK9qcE4JEDjDsh1vX5qc61hQfLyG6hkwTgyb +WjFeCjhP41ZrxoTS85qtBsmCj0Rv7uPjvR4GI9MCP/q41sXve7ejcepLFPTl0diH +E/5/9IO8TcrcaEBG8k5QvGoknW2vR8Y1IpcakcxoijjtDewJYcfMnB7uchBriGaR +sHFE8t3kmjOCo7Ve3kdsOGmcQAi3rCQ6PTs8t11L1g435NOYrNP72D22N96YYgYT 
+0VFNsJYUpDlGHGShTVtIAQFWlRmR9GxRb2OoJWN/JsxOuO++jc/SJw/X1NXOdpHH +pp+OhQTZqkU1KRMkdtxqI4LcQUN6TL6BPeXFcCKJ8F7kQ78DntYFOdf/WVsoY/8i +GmTIaCCGsB4yBDM6oe7WroVMgOd2ES+yeyyfyQr591/7peoFrTGUlVpZjjMHaGxl +HORsnQO48dN6h7EIoKxG/cfrx3TwwaNgzWSQJdA5pB+Vsia4we92UK/r+5Lu8Uzv +1DSnPpH/KZ1ASOH2ebWeBJP+PU8+cg== -----END CERTIFICATE----- diff --git a/github.com/coreos/etcd/integration/fixtures/server.key.insecure b/github.com/coreos/etcd/integration/fixtures/server.key.insecure index 68ea872216..563ab208c7 100644 --- a/github.com/coreos/etcd/integration/fixtures/server.key.insecure +++ b/github.com/coreos/etcd/integration/fixtures/server.key.insecure @@ -1,27 +1,51 @@ -----BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEArGvOLPmy5i+1j4JitG4Vg99w125ncs2ImhqucmJe6YtSnKru -aLdOx93X7lzC3k0umnvLgh643R4eYS5PvrDkvw1dSYB7BHhveFPmmWd7m7n7bXtg -bcdkCmUeTbSeqvptPgyMJOQfXzfOGbEHfu7U0raulR6KtqAatofKpRZhZgzZQpVk -hdd0UTsOwqCWdX3Qe0D1MS922kX99c4UlGyDOTVL6tulvDBBYgHbGErFmhxdgwm4 -e6dFfdkPUeHczzUWnKo2sIGBvo4R/NwPIp6GPnebrO0VWvcQfdSqjYk3BmILl8BV -L5W1/EBRLtz9mZuQgc/VC62LvsgXusC9pwXC3QIDAQABAoIBAQCNseKz71hn9tk8 -YKiBIt6nix6OzHpTTDlwe3DVK6ZYQ1jWy1o10D773YIwryatzbv41Ld/7YN6o9/P -eWGrkm/J2k/Jsb5nBBqHRlwBwZtBdOv9IyEx1mSObl8i+MZUOI1CKsmZH6fwdkn3 -rxY76EYaDGsYvQq93oFVc+7DEMtmMtr03xm2bleEvsUH0VVqLhiAof/PCgOzja/L -mPxhK0FqOmhk94JFo2l0XNMn/b2lpUhrx+xny5RD6/W/k2C1DuzBiFiNZkbPW1r1 -n5QccJHpe/S3Y4WZ75yKyQdrcIz6AKSeHNNGw2mYERAOmejpVV+8OIvKY6pzyXi9 -EM/BsLaBAoGBAN+XiqHHGilsrvjLKGak2KIaPRxA7EgFKKWBv8DojpXLqgkoloDL -1wS6uG4XE0FeJCiKZk/DpVgPSiKYkQJEFLgU8N3q8OO2cGYW8kfH/TuejWRebtgJ -GC7o5CqAHjFqRbTPJBLLNlSUZP08HVIRhob3t0zkvVRdDjA1rZIM/FlxAoGBAMVp -jTcimGEOhFbOvfLwFeMCFLglTzbxjSnxCLCKF5TbxcBN7iUE2wYRfexBLoP/3+rk -RheyRnMr4PeZ/JPQLHs80TUm9HGg8Phy+jAsIW/rF8BJ4aAExt2T4uLNsj4TXw1y -ckDMBLmZi0OFy4vDtwg4T2wVo55eN/oQfVNFFaotAoGAGLQ8q/08pcENYA3KS/UA -voBZqip+MMLpJ8g7MIxBXMmg4twqLNbYzfv3bqp8BSfqpNQN09hRB3bBASuMMgzl -oSUnK83OicpZht4YLNgq4ZB2HNXWN2Zh1qUCuLNpIpqUUxLj8HOlcBjpQ5WFw9CN -5ZGvHf7T8GNLswXrRIzMwPECgYAC5Q5WDaLQYYcdQsDUTCL2BjTJknp74sTgJZGs -DQpVe3eF316rmkuf5ifDjB0jgGAHMLu6YznXPIB7AP4MKNROJlEnB2A0PljqO71h -cXQ4EOlzP2IYl5lW7HE6RCvl7yDIsLHuM0+qbQ72uYKHlSIc875uZk7U5qrJdu5v -hybPLQKBgAmswE0nM9Fnj4ue9QaDgOvp1p7peZuzywBI4+TTJ/3++5vtUrgRl9Ak -UVzSVvltxhFpFtNfVrZxckDwb6louumRtBrLVWJDlAakvc5eG5tky+SA2u/bdXSr -8tq8c24K19Pg+OLkdZpiJqmKyyV0dVn6NNmiBmiLe2tClNsUHI47 +MIIJKQIBAAKCAgEA4uSJ12rc8g3PQfoaPkuPujtWZiFOODjDpzX0TmmAc+MJx8Rk +XMBTwTjVU/pgGewiHV1dNqMRwCdnefgLI5Ld6fd3ViZPkNw05wS/96bjw1F7eywr +mCfLEK4oaqBeylR2Xg1SlFzAyk8KdAp8g1wBBtMDWyPNQKgwP7pqNSofYGnD+tc+ +NdvRMm8EXeyv2agve2aN+qrE94giq8Utp470G2wX6q+546RcRdKkwYAxEYRXkUhX +GjD59b9ABb2hRXM00Awlb0nSw9F7MCVYXtvQyNE7Af3/OTlfEeax3AVzbx3cwb2K +Xx+iMtdP5Sgo9QgzbBsWmRyt54cDg3k8WuqkAnxUkQby/HuYngTXpBytJjNqW/D+ +J7t16OAtxTFA5jtqhNMWGidO+p06pDEEWKCzP4U0M2yGl7dpFMPQ+qKm5rt8bYGm +UxWYx73l1UYN4c41ClS3qh6gdpzKMwbcfcEgjrywmUio/Ax8L4eZCayacWIKTPbj +UJN9ly/kGBfzitniBVVPTJNTQuz7a7EHM3bxcgAJAkwc22tMTnd1cyvEkEJgq0QR +ZrCAAAT8z1L9VpMLFm2M+S2ljiVylBjAtNHR/e3g+mqYLwO4ytiCzteBE2zZvYca +Hfw2aq+s/Io5cN1nKh842FuPaGWIPR+mAkYrH98lrlhU1si9N3zNNEYAkmMCAwEA +AQKCAgBU1KuwzfSTz5P5EAB14Bx5vau8/aDYJmkIgIS6OHndWjqS5Ru9De+Co7Qm +9Mqvhnjuz7SFNAzz8gefM50+jK/JxUtp+2LuP1bMNRttBYnMwg9P6yDVf7NNpj/Y +NeOa9F9ZJNQGQnOWcFzxK+aH2oNLwONVVRptnTSE7za8b+ZRTtoGVCmfS3N4zscs +Ms1ArMAr/BkDaovAHLcRz+QU7L7Z7d03UsJGmXIibPJKopo1+WwgFpLyaPNb7UiL +nO3KIJvTWWc5p7lUm2Laimvy826pVokgYo3lIE0qTT2cjLEvD1Q1X593l0U2iwgF +HrHJg2pudeZKEs32baVjp5VoTKhKzWZMugY/ZZG4fuOdmJRZksj4Op9Swk6F5cgT +tZ+EWEK48jIWfxVyAyua41301APTZwohwWp1qc9QOkA/tuqULZLfsVGRqrupF8te 
+UKbMG0MnTBKlL+ojGmFUSnrj2GBd4NSUbGLOve0TxvktTakKATdFMphE2A5JnvxC +lET3J1THj4xsDX3XwJ2KDkNCv86VUK0EptsaP7C/h3DbRhqJU30VTRhGjmtpwXj4 +4+EKDmWKHpIXY9Qu/qMXm1utM1q39wUFYdTpVDY1/VcHVu1VDjqHwxJXwLzv3B5U +u17P2RIqvgD2IT1Xo0phi9WgzcllMRlEREpuLPnAZ8sSRbdNKQKCAQEA70cRtk5E +yPoQ8X18RFxllZle6MfIuYv1v6ojF9Cd/Klf/OKu1VAbXjcUxhH4gpKsdrvvA6TS +6Io8UonR3zqMuBsY+/L817ZTr8N/Q+iLK5hkhj4p2J3uX6ipLmfuQovlL478LpR6 +nD1eB+2tF1yk76A5uyny1m+rUHufpdQ+MlsoV0kLdo18uRF02w5HY1D2S5/lTACz +WAmFCe9mnuQK0YMFyfiM/oVp9GjuOkf15QBnKW5klJByrB7oxw64VPwyxR1kkJhF +UkTb6AIHEGcZkI5pKyuifaGNhDk5b9x8R9nvqoNMRgNV9XR/euJ33W0BzePB5fAQ +NIrw4Y6z+oKJhQKCAQEA8r/j9vtFucZYfxPGXauipqubPER7YyXNbbO6skaQm7T7 +CUzaJdAYwRorHsDZ8Cvii0W+iZ9s0JLekcPtEECoiVm7Xg8pcFfzMu/B7Cb7MkEu +w1kxrhYkb9xLZtBUaPD5vD7irxUxnu13GB9pbMYc/vw78F7SYjh/xyZXUjA7EVza +q2IgCS8oWqlROJBh4lzNPw787WPpBokeUujIY9HOcE7MFE8+p5AUm8Zz62sgY/4f +AMrSWD+cQ71bCMcdu12O4PUFCZtUx+ON8p0Awoy5KZniyUwIbt8UAlaoCT581Xh1 +1BGEVHKkwv1nwuH6hOL80/9zl6yZSKu0mBgq+IK8xwKCAQAuh1tiYAXwLvBshUJM +6Mq4NILIMVFPA3BePO9mCiMupqELw+jLgjBQOdXITmZMvcjbrd/kjYCVx4vDYRl2 +lyQWCO7qz21rZQERBKsSwX2OlKu3jw8EGHHqGBoN9BfYyOtgPCW9yRGuoCBQ2l72 +VAWes0GGq7mVCVH+7Is26/bMQ/2sO4AHJaxDMKnQjw5CudOrEQS9qsU1MWS1ceA6 +tY2FAD138OU5+SeJZ34rxyKBzXpCDD1yxkQGRFxvmOUvYXtd6UFM/M8+GDXK/9nv +zpyiB49b3bhTRb8HHzmUDwP71N1OAwop8ywb9vNzKea1ICVhrBBgbjY4gWwl8GH/ +LLMhAoIBAQDHci4FARKavoJ7dm3nDFwJALn83G9cWPTeC2t7ikrKA/q1+3TI2J9e +GPgQvnbRw9zQfS89t8UZ4XII5adjURyoLRerAl4Ttc9VrHPyaVy+P5wCWMhetkad +uawh/007I7KsniZ1n74zS/wrz7M48dVlEyzUI7RLiwxBPhlEp+gALgBkC60ynpJT +WwYmqUojSAhCpTfee9Y7znEhwazThtBMqhE3Jpzd451rF7SqWkw0m9gxOHN2mlzz +syKWpbKh/Q6leer3p64Sxb4c9i5nqmN/8LXKmjPblGHGQhix76t1YRG+ed313HPO +2ZFlJ3JDuJPuQtZgailO8fThegnkQNaFAoIBAQCCG81C/p7BcUPdKoCT0vKXEEeB +4Kf0ziKy/1asIY365qAWXJ+wet/Y6erl4JddeUEp2IupB67G35uf/5Eit2+/4sOG +utd8BwxPKxkDY/iah00+2jRuNp+CYMr9MnIfTJE5daxG/YPabhMpUxRPsdveBWUL +sogPgEvbxvvlzg6NyQDsmCFRQfmwNZOsfUqOoFwmrE5Woz26y4wvF+ZooxOHnxx8 +RsoJy1DvJgKtisE5eVBAq4ToLiFdQsb4NP39JBPWeaRZTRonS6F2NZw/lTWqKnQk +QZZMSoUwzJWFUqJ22sE4NHJzv17rJ3txnPtMroB0RPXcQUIywNn7mvF3ltZp -----END RSA PRIVATE KEY----- diff --git a/github.com/coreos/etcd/integration/logger_test.go b/github.com/coreos/etcd/integration/logger_test.go index a71330a41a..aecd720004 100644 --- a/github.com/coreos/etcd/integration/logger_test.go +++ b/github.com/coreos/etcd/integration/logger_test.go @@ -16,6 +16,8 @@ package integration import "github.com/coreos/pkg/capnslog" +const defaultLogLevel = capnslog.CRITICAL + func init() { - capnslog.SetGlobalLogLevel(capnslog.CRITICAL) + capnslog.SetGlobalLogLevel(defaultLogLevel) } diff --git a/github.com/coreos/etcd/integration/metrics_test.go b/github.com/coreos/etcd/integration/metrics_test.go new file mode 100644 index 0000000000..8e7d60a537 --- /dev/null +++ b/github.com/coreos/etcd/integration/metrics_test.go @@ -0,0 +1,97 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package integration + +import ( + "context" + "strconv" + "testing" + "time" + + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "github.com/coreos/etcd/pkg/testutil" +) + +// TestMetricDbSizeBoot checks that the db size metric is set on boot. +func TestMetricDbSizeBoot(t *testing.T) { + defer testutil.AfterTest(t) + clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + defer clus.Terminate(t) + + v, err := clus.Members[0].Metric("etcd_debugging_mvcc_db_total_size_in_bytes") + if err != nil { + t.Fatal(err) + } + + if v == "0" { + t.Fatalf("expected non-zero, got %q", v) + } +} + +// TestMetricDbSizeDefrag checks that the db size metric is set after defrag. +func TestMetricDbSizeDefrag(t *testing.T) { + defer testutil.AfterTest(t) + clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + defer clus.Terminate(t) + + kvc := toGRPC(clus.Client(0)).KV + mc := toGRPC(clus.Client(0)).Maintenance + + // expand the db size + numPuts := 10 + putreq := &pb.PutRequest{Key: []byte("k"), Value: make([]byte, 4096)} + for i := 0; i < numPuts; i++ { + if _, err := kvc.Put(context.TODO(), putreq); err != nil { + t.Fatal(err) + } + } + + // wait for backend txn sync + time.Sleep(500 * time.Millisecond) + + beforeDefrag, err := clus.Members[0].Metric("etcd_debugging_mvcc_db_total_size_in_bytes") + if err != nil { + t.Fatal(err) + } + bv, err := strconv.Atoi(beforeDefrag) + if err != nil { + t.Fatal(err) + } + if expected := numPuts * len(putreq.Value); bv < expected { + t.Fatalf("expected db size greater than %d, got %d", expected, bv) + } + + // clear out historical keys + creq := &pb.CompactionRequest{Revision: int64(numPuts), Physical: true} + if _, err := kvc.Compact(context.TODO(), creq); err != nil { + t.Fatal(err) + } + + // defrag should give freed space back to fs + mc.Defragment(context.TODO(), &pb.DefragmentRequest{}) + afterDefrag, err := clus.Members[0].Metric("etcd_debugging_mvcc_db_total_size_in_bytes") + if err != nil { + t.Fatal(err) + } + + av, err := strconv.Atoi(afterDefrag) + if err != nil { + t.Fatal(err) + } + + if bv <= av { + t.Fatalf("expected less than %d, got %d after defrag", bv, av) + } +} diff --git a/github.com/coreos/etcd/integration/util_test.go b/github.com/coreos/etcd/integration/util_test.go new file mode 100644 index 0000000000..1889419801 --- /dev/null +++ b/github.com/coreos/etcd/integration/util_test.go @@ -0,0 +1,62 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package integration + +import ( + "io" + "os" + "path/filepath" + + "github.com/coreos/etcd/pkg/transport" +) + +// copyTLSFiles clones certs files to dst directory. 
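+// The returned TLSInfo points at the copies under dst, so callers can
+// overwrite or expire those copies without touching the source fixtures.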
+func copyTLSFiles(ti transport.TLSInfo, dst string) (transport.TLSInfo, error) { + ci := transport.TLSInfo{ + KeyFile: filepath.Join(dst, "server-key.pem"), + CertFile: filepath.Join(dst, "server.pem"), + TrustedCAFile: filepath.Join(dst, "etcd-root-ca.pem"), + ClientCertAuth: ti.ClientCertAuth, + } + if err := copyFile(ti.KeyFile, ci.KeyFile); err != nil { + return transport.TLSInfo{}, err + } + if err := copyFile(ti.CertFile, ci.CertFile); err != nil { + return transport.TLSInfo{}, err + } + if err := copyFile(ti.TrustedCAFile, ci.TrustedCAFile); err != nil { + return transport.TLSInfo{}, err + } + return ci, nil +} + +func copyFile(src, dst string) error { + f, err := os.Open(src) + if err != nil { + return err + } + defer f.Close() + + w, err := os.Create(dst) + if err != nil { + return err + } + defer w.Close() + + if _, err = io.Copy(w, f); err != nil { + return err + } + return w.Sync() +} diff --git a/github.com/coreos/etcd/integration/v3_auth_test.go b/github.com/coreos/etcd/integration/v3_auth_test.go index 06ee68ec96..d0965a7894 100644 --- a/github.com/coreos/etcd/integration/v3_auth_test.go +++ b/github.com/coreos/etcd/integration/v3_auth_test.go @@ -20,6 +20,7 @@ import ( "golang.org/x/net/context" + "github.com/coreos/etcd/auth/authpb" "github.com/coreos/etcd/clientv3" "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" @@ -104,17 +105,190 @@ func TestV3AuthRevision(t *testing.T) { } } -func authSetupRoot(t *testing.T, auth pb.AuthClient) { - if _, err := auth.UserAdd(context.TODO(), &pb.AuthUserAddRequest{Name: "root", Password: "123"}); err != nil { +type user struct { + name string + password string + role string + key string + end string +} + +func TestV3AuthWithLeaseRevoke(t *testing.T) { + defer testutil.AfterTest(t) + clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + defer clus.Terminate(t) + + users := []user{ + { + name: "user1", + password: "user1-123", + role: "role1", + key: "k1", + end: "k2", + }, + } + authSetupUsers(t, toGRPC(clus.Client(0)).Auth, users) + + authSetupRoot(t, toGRPC(clus.Client(0)).Auth) + + rootc, cerr := clientv3.New(clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "root", Password: "123"}) + if cerr != nil { + t.Fatal(cerr) + } + defer rootc.Close() + + leaseResp, err := rootc.Grant(context.TODO(), 90) + if err != nil { t.Fatal(err) } - if _, err := auth.RoleAdd(context.TODO(), &pb.AuthRoleAddRequest{Name: "root"}); err != nil { + leaseID := leaseResp.ID + // permission of k3 isn't granted to user1 + _, err = rootc.Put(context.TODO(), "k3", "val", clientv3.WithLease(leaseID)) + if err != nil { t.Fatal(err) } - if _, err := auth.UserGrantRole(context.TODO(), &pb.AuthUserGrantRoleRequest{User: "root", Role: "root"}); err != nil { + + userc, cerr := clientv3.New(clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "user1", Password: "user1-123"}) + if cerr != nil { + t.Fatal(cerr) + } + defer userc.Close() + _, err = userc.Revoke(context.TODO(), leaseID) + if err == nil { + t.Fatal("revoking from user1 should be failed with permission denied") + } +} + +func TestV3AuthWithLeaseAttach(t *testing.T) { + defer testutil.AfterTest(t) + clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + defer clus.Terminate(t) + + users := []user{ + { + name: "user1", + password: "user1-123", + role: "role1", + key: "k1", + end: "k3", + }, + { + name: "user2", + password: "user2-123", + role: "role2", + key: "k2", + end: "k4", + }, + } + authSetupUsers(t, toGRPC(clus.Client(0)).Auth, 
users) + + authSetupRoot(t, toGRPC(clus.Client(0)).Auth) + + user1c, cerr := clientv3.New(clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "user1", Password: "user1-123"}) + if cerr != nil { + t.Fatal(cerr) + } + defer user1c.Close() + + user2c, cerr := clientv3.New(clientv3.Config{Endpoints: clus.Client(0).Endpoints(), Username: "user2", Password: "user2-123"}) + if cerr != nil { + t.Fatal(cerr) + } + defer user2c.Close() + + leaseResp, err := user1c.Grant(context.TODO(), 90) + if err != nil { t.Fatal(err) } + leaseID := leaseResp.ID + // permission of k2 is also granted to user2 + _, err = user1c.Put(context.TODO(), "k2", "val", clientv3.WithLease(leaseID)) + if err != nil { + t.Fatal(err) + } + + _, err = user2c.Revoke(context.TODO(), leaseID) + if err != nil { + t.Fatal(err) + } + + leaseResp, err = user1c.Grant(context.TODO(), 90) + if err != nil { + t.Fatal(err) + } + leaseID = leaseResp.ID + // permission of k1 isn't granted to user2 + _, err = user1c.Put(context.TODO(), "k1", "val", clientv3.WithLease(leaseID)) + if err != nil { + t.Fatal(err) + } + + _, err = user2c.Revoke(context.TODO(), leaseID) + if err == nil { + t.Fatal("revoking from user2 should be failed with permission denied") + } +} + +func authSetupUsers(t *testing.T, auth pb.AuthClient, users []user) { + for _, user := range users { + if _, err := auth.UserAdd(context.TODO(), &pb.AuthUserAddRequest{Name: user.name, Password: user.password}); err != nil { + t.Fatal(err) + } + if _, err := auth.RoleAdd(context.TODO(), &pb.AuthRoleAddRequest{Name: user.role}); err != nil { + t.Fatal(err) + } + if _, err := auth.UserGrantRole(context.TODO(), &pb.AuthUserGrantRoleRequest{User: user.name, Role: user.role}); err != nil { + t.Fatal(err) + } + + if len(user.key) == 0 { + continue + } + + perm := &authpb.Permission{ + PermType: authpb.READWRITE, + Key: []byte(user.key), + RangeEnd: []byte(user.end), + } + if _, err := auth.RoleGrantPermission(context.TODO(), &pb.AuthRoleGrantPermissionRequest{Name: user.role, Perm: perm}); err != nil { + t.Fatal(err) + } + } +} + +func authSetupRoot(t *testing.T, auth pb.AuthClient) { + root := []user{ + { + name: "root", + password: "123", + role: "root", + key: "", + }, + } + authSetupUsers(t, auth, root) if _, err := auth.AuthEnable(context.TODO(), &pb.AuthEnableRequest{}); err != nil { t.Fatal(err) } } + +func TestV3AuthNonAuthorizedRPCs(t *testing.T) { + defer testutil.AfterTest(t) + clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + defer clus.Terminate(t) + + nonAuthedKV := clus.Client(0).KV + + key := "foo" + val := "bar" + _, err := nonAuthedKV.Put(context.TODO(), key, val) + if err != nil { + t.Fatalf("couldn't put key (%v)", err) + } + + authSetupRoot(t, toGRPC(clus.Client(0)).Auth) + + respput, err := nonAuthedKV.Put(context.TODO(), key, val) + if !eqErrGRPC(err, rpctypes.ErrGRPCUserEmpty) { + t.Fatalf("could put key (%v), it should cause an error of permission denied", respput) + } +} diff --git a/github.com/coreos/etcd/integration/v3_election_test.go b/github.com/coreos/etcd/integration/v3_election_test.go index d74e2966c8..95f5b4949b 100644 --- a/github.com/coreos/etcd/integration/v3_election_test.go +++ b/github.com/coreos/etcd/integration/v3_election_test.go @@ -272,3 +272,39 @@ func TestElectionOnSessionRestart(t *testing.T) { t.Errorf("expected value=%q, got response %v", "def", resp) } } + +// TestElectionObserveCompacted checks that observe can tolerate +// a leader key with a modrev less than the compaction revision. 
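+// It campaigns with value "abc", compacts past the leader key's mod revision
+// via an unrelated put, then expects Observe to still return "abc".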
+func TestElectionObserveCompacted(t *testing.T) { + clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + defer clus.Terminate(t) + + cli := clus.Client(0) + + session, err := concurrency.NewSession(cli) + if err != nil { + t.Fatal(err) + } + defer session.Orphan() + + e := concurrency.NewElection(session, "test-elect") + if cerr := e.Campaign(context.TODO(), "abc"); cerr != nil { + t.Fatal(cerr) + } + + presp, perr := cli.Put(context.TODO(), "foo", "bar") + if perr != nil { + t.Fatal(perr) + } + if _, cerr := cli.Compact(context.TODO(), presp.Header.Revision); cerr != nil { + t.Fatal(cerr) + } + + v, ok := <-e.Observe(context.TODO()) + if !ok { + t.Fatal("failed to observe on compacted revision") + } + if string(v.Kvs[0].Value) != "abc" { + t.Fatalf(`expected leader value "abc", got %q`, string(v.Kvs[0].Value)) + } +} diff --git a/github.com/coreos/etcd/integration/v3_grpc_test.go b/github.com/coreos/etcd/integration/v3_grpc_test.go index 5113821def..83b211578c 100644 --- a/github.com/coreos/etcd/integration/v3_grpc_test.go +++ b/github.com/coreos/etcd/integration/v3_grpc_test.go @@ -17,16 +17,19 @@ package integration import ( "bytes" "fmt" + "io/ioutil" "math/rand" "os" "reflect" "testing" "time" - "github.com/coreos/etcd/etcdserver/api/v3rpc" + "github.com/coreos/etcd/clientv3" "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" "github.com/coreos/etcd/pkg/testutil" + "github.com/coreos/etcd/pkg/transport" + "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/metadata" @@ -146,7 +149,8 @@ func TestV3CompactCurrentRev(t *testing.T) { func TestV3TxnTooManyOps(t *testing.T) { defer testutil.AfterTest(t) - clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + maxTxnOps := uint(128) + clus := NewClusterV3(t, &ClusterConfig{Size: 3, MaxTxnOps: maxTxnOps}) defer clus.Terminate(t) kvc := toGRPC(clus.RandClient()).KV @@ -188,16 +192,27 @@ func TestV3TxnTooManyOps(t *testing.T) { }, }) } + addTxnOps := func(txn *pb.TxnRequest) { + newTxn := &pb.TxnRequest{} + addSuccessOps(newTxn) + txn.Success = append(txn.Success, + &pb.RequestOp{Request: &pb.RequestOp_RequestTxn{ + RequestTxn: newTxn, + }, + }, + ) + } tests := []func(txn *pb.TxnRequest){ addCompareOps, addSuccessOps, addFailureOps, + addTxnOps, } for i, tt := range tests { txn := &pb.TxnRequest{} - for j := 0; j < v3rpc.MaxOpsPerTxn+1; j++ { + for j := 0; j < int(maxTxnOps+1); j++ { tt(txn) } @@ -232,6 +247,27 @@ func TestV3TxnDuplicateKeys(t *testing.T) { }, }, } + txnDelReq := &pb.RequestOp{Request: &pb.RequestOp_RequestTxn{ + RequestTxn: &pb.TxnRequest{Success: []*pb.RequestOp{delInRangeReq}}, + }, + } + txnDelReqTwoSide := &pb.RequestOp{Request: &pb.RequestOp_RequestTxn{ + RequestTxn: &pb.TxnRequest{ + Success: []*pb.RequestOp{delInRangeReq}, + Failure: []*pb.RequestOp{delInRangeReq}}, + }, + } + + txnPutReq := &pb.RequestOp{Request: &pb.RequestOp_RequestTxn{ + RequestTxn: &pb.TxnRequest{Success: []*pb.RequestOp{putreq}}, + }, + } + txnPutReqTwoSide := &pb.RequestOp{Request: &pb.RequestOp_RequestTxn{ + RequestTxn: &pb.TxnRequest{ + Success: []*pb.RequestOp{putreq}, + Failure: []*pb.RequestOp{putreq}}, + }, + } kvc := toGRPC(clus.RandClient()).KV tests := []struct { @@ -254,6 +290,36 @@ func TestV3TxnDuplicateKeys(t *testing.T) { werr: rpctypes.ErrGRPCDuplicateKey, }, + // Then(Put(a), Then(Del(a))) + { + txnSuccess: []*pb.RequestOp{putreq, txnDelReq}, + + werr: rpctypes.ErrGRPCDuplicateKey, + }, + // Then(Del(a), Then(Put(a))) + { + txnSuccess: 
[]*pb.RequestOp{delInRangeReq, txnPutReq}, + + werr: rpctypes.ErrGRPCDuplicateKey, + }, + // Then((Then(Put(a)), Else(Put(a))), (Then(Put(a)), Else(Put(a))) + { + txnSuccess: []*pb.RequestOp{txnPutReqTwoSide, txnPutReqTwoSide}, + + werr: rpctypes.ErrGRPCDuplicateKey, + }, + // Then(Del(x), (Then(Put(a)), Else(Put(a)))) + { + txnSuccess: []*pb.RequestOp{delOutOfRangeReq, txnPutReqTwoSide}, + + werr: nil, + }, + // Then(Then(Del(a)), (Then(Del(a)), Else(Del(a)))) + { + txnSuccess: []*pb.RequestOp{txnDelReq, txnDelReqTwoSide}, + + werr: nil, + }, { txnSuccess: []*pb.RequestOp{delKeyReq, delInRangeReq, delKeyReq, delInRangeReq}, @@ -324,6 +390,200 @@ func TestV3TxnRevision(t *testing.T) { } } +// Testv3TxnCmpHeaderRev tests that the txn header revision is set as expected +// when compared to the Succeeded field in the txn response. +func TestV3TxnCmpHeaderRev(t *testing.T) { + defer testutil.AfterTest(t) + clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + defer clus.Terminate(t) + + kvc := toGRPC(clus.RandClient()).KV + + for i := 0; i < 10; i++ { + // Concurrently put a key with a txn comparing on it. + revc := make(chan int64, 1) + go func() { + defer close(revc) + pr := &pb.PutRequest{Key: []byte("k"), Value: []byte("v")} + presp, err := kvc.Put(context.TODO(), pr) + if err != nil { + t.Fatal(err) + } + revc <- presp.Header.Revision + }() + + // The read-only txn uses the optimized readindex server path. + txnget := &pb.RequestOp{Request: &pb.RequestOp_RequestRange{ + RequestRange: &pb.RangeRequest{Key: []byte("k")}}} + txn := &pb.TxnRequest{Success: []*pb.RequestOp{txnget}} + // i = 0 /\ Succeeded => put followed txn + cmp := &pb.Compare{ + Result: pb.Compare_EQUAL, + Target: pb.Compare_VERSION, + Key: []byte("k"), + TargetUnion: &pb.Compare_Version{Version: int64(i)}, + } + txn.Compare = append(txn.Compare, cmp) + + tresp, err := kvc.Txn(context.TODO(), txn) + if err != nil { + t.Fatal(err) + } + + prev := <-revc + // put followed txn; should eval to false + if prev > tresp.Header.Revision && !tresp.Succeeded { + t.Errorf("#%d: got else but put rev %d followed txn rev (%+v)", i, prev, tresp) + } + // txn follows put; should eval to true + if tresp.Header.Revision >= prev && tresp.Succeeded { + t.Errorf("#%d: got then but put rev %d preceded txn (%+v)", i, prev, tresp) + } + } +} + +// TestV3TxnRangeCompare tests range comparisons in txns +func TestV3TxnRangeCompare(t *testing.T) { + defer testutil.AfterTest(t) + clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + defer clus.Terminate(t) + + // put keys, named by expected revision + for _, k := range []string{"/a/2", "/a/3", "/a/4", "/f/5"} { + if _, err := clus.Client(0).Put(context.TODO(), k, "x"); err != nil { + t.Fatal(err) + } + } + + tests := []struct { + cmp pb.Compare + + wSuccess bool + }{ + { + // >= /a/; all create revs fit + pb.Compare{ + Key: []byte("/a/"), + RangeEnd: []byte{0}, + Target: pb.Compare_CREATE, + Result: pb.Compare_LESS, + TargetUnion: &pb.Compare_CreateRevision{6}, + }, + true, + }, + { + // >= /a/; one create rev doesn't fit + pb.Compare{ + Key: []byte("/a/"), + RangeEnd: []byte{0}, + Target: pb.Compare_CREATE, + Result: pb.Compare_LESS, + TargetUnion: &pb.Compare_CreateRevision{5}, + }, + false, + }, + { + // prefix /a/*; all create revs fit + pb.Compare{ + Key: []byte("/a/"), + RangeEnd: []byte("/a0"), + Target: pb.Compare_CREATE, + Result: pb.Compare_LESS, + TargetUnion: &pb.Compare_CreateRevision{5}, + }, + true, + }, + { + // prefix /a/*; one create rev doesn't fit + pb.Compare{ + Key: []byte("/a/"), + 
RangeEnd: []byte("/a0"), + Target: pb.Compare_CREATE, + Result: pb.Compare_LESS, + TargetUnion: &pb.Compare_CreateRevision{4}, + }, + false, + }, + { + // does not exist, does not succeed + pb.Compare{ + Key: []byte("/b/"), + RangeEnd: []byte("/b0"), + Target: pb.Compare_VALUE, + Result: pb.Compare_EQUAL, + TargetUnion: &pb.Compare_Value{[]byte("x")}, + }, + false, + }, + } + + kvc := toGRPC(clus.Client(0)).KV + for i, tt := range tests { + txn := &pb.TxnRequest{} + txn.Compare = append(txn.Compare, &tt.cmp) + tresp, err := kvc.Txn(context.TODO(), txn) + if err != nil { + t.Fatal(err) + } + if tt.wSuccess != tresp.Succeeded { + t.Errorf("#%d: expected %v, got %v", i, tt.wSuccess, tresp.Succeeded) + } + } +} + +// TestV3TxnNested tests nested txns follow paths as expected. +func TestV3TxnNestedPath(t *testing.T) { + defer testutil.AfterTest(t) + clus := NewClusterV3(t, &ClusterConfig{Size: 1}) + defer clus.Terminate(t) + + kvc := toGRPC(clus.RandClient()).KV + + cmpTrue := &pb.Compare{ + Result: pb.Compare_EQUAL, + Target: pb.Compare_VERSION, + Key: []byte("k"), + TargetUnion: &pb.Compare_Version{Version: int64(0)}, + } + cmpFalse := &pb.Compare{ + Result: pb.Compare_EQUAL, + Target: pb.Compare_VERSION, + Key: []byte("k"), + TargetUnion: &pb.Compare_Version{Version: int64(1)}, + } + + // generate random path to eval txns + topTxn := &pb.TxnRequest{} + txn := topTxn + txnPath := make([]bool, 10) + for i := range txnPath { + nextTxn := &pb.TxnRequest{} + op := &pb.RequestOp{Request: &pb.RequestOp_RequestTxn{RequestTxn: nextTxn}} + txnPath[i] = rand.Intn(2) == 0 + if txnPath[i] { + txn.Compare = append(txn.Compare, cmpTrue) + txn.Success = append(txn.Success, op) + } else { + txn.Compare = append(txn.Compare, cmpFalse) + txn.Failure = append(txn.Failure, op) + } + txn = nextTxn + } + + tresp, err := kvc.Txn(context.TODO(), topTxn) + if err != nil { + t.Fatal(err) + } + + curTxnResp := tresp + for i := range txnPath { + if curTxnResp.Succeeded != txnPath[i] { + t.Fatalf("expected path %+v, got response %+v", txnPath, *tresp) + } + curTxnResp = curTxnResp.Responses[0].Response.(*pb.ResponseOp_ResponseTxn).ResponseTxn + } +} + // TestV3PutIgnoreValue ensures that writes with ignore_value overwrites with previous key-value pair. func TestV3PutIgnoreValue(t *testing.T) { defer testutil.AfterTest(t) @@ -1374,6 +1634,164 @@ func TestTLSGRPCAcceptSecureAll(t *testing.T) { } } +// TestTLSReloadAtomicReplace ensures server reloads expired/valid certs +// when all certs are atomically replaced by directory renaming. +// And expects server to reject client requests, and vice versa. 
+func TestTLSReloadAtomicReplace(t *testing.T) { + tmpDir, err := ioutil.TempDir(os.TempDir(), "fixtures-tmp") + if err != nil { + t.Fatal(err) + } + os.RemoveAll(tmpDir) + defer os.RemoveAll(tmpDir) + + certsDir, err := ioutil.TempDir(os.TempDir(), "fixtures-to-load") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(certsDir) + + certsDirExp, err := ioutil.TempDir(os.TempDir(), "fixtures-expired") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(certsDirExp) + + cloneFunc := func() transport.TLSInfo { + tlsInfo, terr := copyTLSFiles(testTLSInfo, certsDir) + if terr != nil { + t.Fatal(terr) + } + if _, err = copyTLSFiles(testTLSInfoExpired, certsDirExp); err != nil { + t.Fatal(err) + } + return tlsInfo + } + replaceFunc := func() { + if err = os.Rename(certsDir, tmpDir); err != nil { + t.Fatal(err) + } + if err = os.Rename(certsDirExp, certsDir); err != nil { + t.Fatal(err) + } + // after rename, + // 'certsDir' contains expired certs + // 'tmpDir' contains valid certs + // 'certsDirExp' does not exist + } + revertFunc := func() { + if err = os.Rename(tmpDir, certsDirExp); err != nil { + t.Fatal(err) + } + if err = os.Rename(certsDir, tmpDir); err != nil { + t.Fatal(err) + } + if err = os.Rename(certsDirExp, certsDir); err != nil { + t.Fatal(err) + } + } + testTLSReload(t, cloneFunc, replaceFunc, revertFunc) +} + +// TestTLSReloadCopy ensures server reloads expired/valid certs +// when new certs are copied over, one by one. And expects server +// to reject client requests, and vice versa. +func TestTLSReloadCopy(t *testing.T) { + certsDir, err := ioutil.TempDir(os.TempDir(), "fixtures-to-load") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(certsDir) + + cloneFunc := func() transport.TLSInfo { + tlsInfo, terr := copyTLSFiles(testTLSInfo, certsDir) + if terr != nil { + t.Fatal(terr) + } + return tlsInfo + } + replaceFunc := func() { + if _, err = copyTLSFiles(testTLSInfoExpired, certsDir); err != nil { + t.Fatal(err) + } + } + revertFunc := func() { + if _, err = copyTLSFiles(testTLSInfo, certsDir); err != nil { + t.Fatal(err) + } + } + testTLSReload(t, cloneFunc, replaceFunc, revertFunc) +} + +func testTLSReload(t *testing.T, cloneFunc func() transport.TLSInfo, replaceFunc func(), revertFunc func()) { + defer testutil.AfterTest(t) + + // 1. separate copies for TLS assets modification + tlsInfo := cloneFunc() + + // 2. start cluster with valid certs + clus := NewClusterV3(t, &ClusterConfig{Size: 1, PeerTLS: &tlsInfo, ClientTLS: &tlsInfo}) + defer clus.Terminate(t) + + // 3. concurrent client dialing while certs become expired + errc := make(chan error, 1) + go func() { + for { + cc, err := tlsInfo.ClientConfig() + if err != nil { + // errors in 'go/src/crypto/tls/tls.go' + // tls: private key does not match public key + // tls: failed to find any PEM data in key input + // tls: failed to find any PEM data in certificate input + // Or 'does not exist', 'not found', etc + t.Log(err) + continue + } + cli, cerr := clientv3.New(clientv3.Config{ + Endpoints: []string{clus.Members[0].GRPCAddr()}, + DialTimeout: time.Second, + TLS: cc, + }) + if cerr != nil { + errc <- cerr + return + } + cli.Close() + } + }() + + // 4. replace certs with expired ones + replaceFunc() + + // 5. expect dial time-out when loading expired certs + select { + case gerr := <-errc: + if gerr != grpc.ErrClientConnTimeout { + t.Fatalf("expected %v, got %v", grpc.ErrClientConnTimeout, gerr) + } + case <-time.After(5 * time.Second): + t.Fatal("failed to receive dial timeout error") + } + + // 6. 
replace expired certs back with valid ones + revertFunc() + + // 7. new requests should trigger listener to reload valid certs + tls, terr := tlsInfo.ClientConfig() + if terr != nil { + t.Fatal(terr) + } + cl, cerr := clientv3.New(clientv3.Config{ + Endpoints: []string{clus.Members[0].GRPCAddr()}, + DialTimeout: time.Second, + TLS: tls, + }) + if cerr != nil { + t.Fatalf("expected no error, got %v", cerr) + } + cl.Close() +} + func TestGRPCRequireLeader(t *testing.T) { defer testutil.AfterTest(t) @@ -1394,7 +1812,7 @@ func TestGRPCRequireLeader(t *testing.T) { time.Sleep(time.Duration(3*electionTicks) * tickDuration) md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader) - ctx := metadata.NewContext(context.Background(), md) + ctx := metadata.NewOutgoingContext(context.Background(), md) reqput := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")} if _, err := toGRPC(client).KV.Put(ctx, reqput); grpc.ErrorDesc(err) != rpctypes.ErrNoLeader.Error() { t.Errorf("err = %v, want %v", err, rpctypes.ErrNoLeader) @@ -1416,7 +1834,7 @@ func TestGRPCStreamRequireLeader(t *testing.T) { wAPI := toGRPC(client).Watch md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader) - ctx := metadata.NewContext(context.Background(), md) + ctx := metadata.NewOutgoingContext(context.Background(), md) wStream, err := wAPI.Watch(ctx) if err != nil { t.Fatalf("wAPI.Watch error: %v", err) @@ -1463,6 +1881,35 @@ func TestGRPCStreamRequireLeader(t *testing.T) { } } +// TestV3PutLargeRequests ensures that configurable MaxRequestBytes works as intended. +func TestV3PutLargeRequests(t *testing.T) { + defer testutil.AfterTest(t) + tests := []struct { + key string + maxRequestBytes uint + valueSize int + expectError error + }{ + // don't set to 0. use 0 as the default. + {"foo", 1, 1024, rpctypes.ErrGRPCRequestTooLarge}, + {"foo", 10 * 1024 * 1024, 9 * 1024 * 1024, nil}, + {"foo", 10 * 1024 * 1024, 10 * 1024 * 1024, rpctypes.ErrGRPCRequestTooLarge}, + {"foo", 10 * 1024 * 1024, 10*1024*1024 + 5, rpctypes.ErrGRPCRequestTooLarge}, + } + for i, test := range tests { + clus := NewClusterV3(t, &ClusterConfig{Size: 1, MaxRequestBytes: test.maxRequestBytes}) + kvcli := toGRPC(clus.Client(0)).KV + reqput := &pb.PutRequest{Key: []byte(test.key), Value: make([]byte, test.valueSize)} + _, err := kvcli.Put(context.TODO(), reqput) + + if !eqErrGRPC(err, test.expectError) { + t.Errorf("#%d: expected error %v, got %v", i, test.expectError, err) + } + + clus.Terminate(t) + } +} + func eqErrGRPC(err1 error, err2 error) bool { return !(err1 == nil && err2 != nil) || err1.Error() == err2.Error() } diff --git a/github.com/coreos/etcd/integration/v3_health_test.go b/github.com/coreos/etcd/integration/v3_health_test.go new file mode 100644 index 0000000000..1636983cdd --- /dev/null +++ b/github.com/coreos/etcd/integration/v3_health_test.go @@ -0,0 +1,40 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package integration
+
+import (
+	"context"
+	"testing"
+
+	"github.com/coreos/etcd/pkg/testutil"
+
+	healthpb "google.golang.org/grpc/health/grpc_health_v1"
+)
+
+func TestHealthCheck(t *testing.T) {
+	defer testutil.AfterTest(t)
+
+	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
+	defer clus.Terminate(t)
+
+	cli := healthpb.NewHealthClient(clus.RandClient().ActiveConnection())
+	resp, err := cli.Check(context.TODO(), &healthpb.HealthCheckRequest{})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if resp.Status != healthpb.HealthCheckResponse_SERVING {
+		t.Fatalf("status expected %s, got %s", healthpb.HealthCheckResponse_SERVING, resp.Status)
+	}
+}
diff --git a/github.com/coreos/etcd/integration/v3_leadership_test.go b/github.com/coreos/etcd/integration/v3_leadership_test.go
new file mode 100644
index 0000000000..7f41f3bfe5
--- /dev/null
+++ b/github.com/coreos/etcd/integration/v3_leadership_test.go
@@ -0,0 +1,108 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package integration
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
+	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
+	"github.com/coreos/etcd/pkg/testutil"
+)
+
+func TestMoveLeader(t *testing.T) { testMoveLeader(t, true) }
+func TestMoveLeaderService(t *testing.T) { testMoveLeader(t, false) }
+
+func testMoveLeader(t *testing.T, auto bool) {
+	defer testutil.AfterTest(t)
+
+	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
+	defer clus.Terminate(t)
+
+	oldLeadIdx := clus.WaitLeader(t)
+	oldLeadID := uint64(clus.Members[oldLeadIdx].s.ID())
+
+	// ensure followers go through leader transition during leadership transfer
+	idc := make(chan uint64)
+	for i := range clus.Members {
+		if oldLeadIdx != i {
+			go func(m *member) {
+				idc <- checkLeaderTransition(t, m, oldLeadID)
+			}(clus.Members[i])
+		}
+	}
+
+	target := uint64(clus.Members[(oldLeadIdx+1)%3].s.ID())
+	if auto {
+		err := clus.Members[oldLeadIdx].s.TransferLeadership()
+		if err != nil {
+			t.Fatal(err)
+		}
+	} else {
+		mvc := toGRPC(clus.Client(oldLeadIdx)).Maintenance
+		_, err := mvc.MoveLeader(context.TODO(), &pb.MoveLeaderRequest{TargetID: target})
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	// wait until leader transitions have happened
+	var newLeadIDs [2]uint64
+	for i := range newLeadIDs {
+		select {
+		case newLeadIDs[i] = <-idc:
+		case <-time.After(time.Second):
+			t.Fatal("timed out waiting for leader transition")
+		}
+	}
+
+	// remaining members must agree on the same leader
+	if newLeadIDs[0] != newLeadIDs[1] {
+		t.Fatalf("expected same new leader %d == %d", newLeadIDs[0], newLeadIDs[1])
+	}
+
+	// new leader must be different than the old leader
+	if oldLeadID == newLeadIDs[0] {
+		t.Fatalf("expected old leader %d != new leader %d", oldLeadID, newLeadIDs[0])
+	}
+
+	// if move-leader were used, new leader must match transferee
+	if !auto {
+		if newLeadIDs[0] != target {
+			t.Fatalf("expected new leader %d != target %d", newLeadIDs[0], target)
+		}
+	}
+}
+
+//
TestMoveLeaderError ensures that request to non-leader fail. +func TestMoveLeaderError(t *testing.T) { + defer testutil.AfterTest(t) + + clus := NewClusterV3(t, &ClusterConfig{Size: 3}) + defer clus.Terminate(t) + + oldLeadIdx := clus.WaitLeader(t) + followerIdx := (oldLeadIdx + 1) % 3 + + target := uint64(clus.Members[(oldLeadIdx+2)%3].s.ID()) + + mvc := toGRPC(clus.Client(followerIdx)).Maintenance + _, err := mvc.MoveLeader(context.TODO(), &pb.MoveLeaderRequest{TargetID: target}) + if !eqErrGRPC(err, rpctypes.ErrGRPCNotLeader) { + t.Errorf("err = %v, want %v", err, rpctypes.ErrGRPCNotLeader) + } +} diff --git a/github.com/coreos/etcd/integration/v3_lease_test.go b/github.com/coreos/etcd/integration/v3_lease_test.go index f03ee22017..7bb72ba131 100644 --- a/github.com/coreos/etcd/integration/v3_lease_test.go +++ b/github.com/coreos/etcd/integration/v3_lease_test.go @@ -360,12 +360,16 @@ func TestV3GetNonExistLease(t *testing.T) { } for _, client := range clus.clients { + // quorum-read to ensure revoke completes before TimeToLive + if _, err := toGRPC(client).KV.Range(ctx, &pb.RangeRequest{Key: []byte("_")}); err != nil { + t.Fatal(err) + } resp, err := toGRPC(client).Lease.LeaseTimeToLive(ctx, leaseTTLr) if err != nil { t.Fatalf("expected non nil error, but go %v", err) } if resp.TTL != -1 { - t.Fatalf("expected TTL to be -1, but got %v \n", resp.TTL) + t.Fatalf("expected TTL to be -1, but got %v", resp.TTL) } } } @@ -456,7 +460,7 @@ func TestV3LeaseFailover(t *testing.T) { lreq := &pb.LeaseKeepAliveRequest{ID: lresp.ID} md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader) - mctx := metadata.NewContext(context.Background(), md) + mctx := metadata.NewOutgoingContext(context.Background(), md) ctx, cancel := context.WithCancel(mctx) defer cancel() lac, err := lc.LeaseKeepAlive(ctx) @@ -504,7 +508,7 @@ func TestV3LeaseRequireLeader(t *testing.T) { clus.Members[2].Stop(t) md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader) - mctx := metadata.NewContext(context.Background(), md) + mctx := metadata.NewOutgoingContext(context.Background(), md) ctx, cancel := context.WithCancel(mctx) defer cancel() lac, err := lc.LeaseKeepAlive(ctx) @@ -524,8 +528,8 @@ func TestV3LeaseRequireLeader(t *testing.T) { } }() select { - case <-time.After(time.Duration(5*electionTicks) * tickDuration): - t.Fatalf("did not receive leader loss error") + case <-time.After(5 * time.Second): + t.Fatal("did not receive leader loss error (in 5-sec)") case <-donec: } } diff --git a/github.com/coreos/etcd/integration/v3election_grpc_test.go b/github.com/coreos/etcd/integration/v3election_grpc_test.go index f92c41d308..be320286e8 100644 --- a/github.com/coreos/etcd/integration/v3election_grpc_test.go +++ b/github.com/coreos/etcd/integration/v3election_grpc_test.go @@ -41,7 +41,7 @@ func TestV3ElectionCampaign(t *testing.T) { t.Fatal(err2) } - lc := epb.NewElectionClient(clus.Client(0).ActiveConnection()) + lc := toGRPC(clus.Client(0)).Election req1 := &epb.CampaignRequest{Name: []byte("foo"), Lease: lease1.ID, Value: []byte("abc")} l1, lerr1 := lc.Campaign(context.TODO(), req1) if lerr1 != nil { @@ -94,9 +94,9 @@ func TestV3ElectionObserve(t *testing.T) { clus := NewClusterV3(t, &ClusterConfig{Size: 1}) defer clus.Terminate(t) - lc := epb.NewElectionClient(clus.Client(0).ActiveConnection()) + lc := toGRPC(clus.Client(0)).Election - // observe 10 leadership events + // observe leadership events observec := make(chan struct{}) go func() { defer close(observec) @@ -110,9 
+110,13 @@ func TestV3ElectionObserve(t *testing.T) { if rerr != nil { t.Fatal(rerr) } - if string(resp.Kv.Value) != fmt.Sprintf("%d", i) { - t.Fatalf(`got observe value %q, expected "%d"`, string(resp.Kv.Value), i) + respV := 0 + fmt.Sscanf(string(resp.Kv.Value), "%d", &respV) + // leader transitions should not go backwards + if respV < i { + t.Fatalf(`got observe value %q, expected >= "%d"`, string(resp.Kv.Value), i) } + i = respV } }() diff --git a/github.com/coreos/etcd/integration/v3lock_grpc_test.go b/github.com/coreos/etcd/integration/v3lock_grpc_test.go index 04b7281d66..a66a8cf460 100644 --- a/github.com/coreos/etcd/integration/v3lock_grpc_test.go +++ b/github.com/coreos/etcd/integration/v3lock_grpc_test.go @@ -40,7 +40,7 @@ func TestV3LockLockWaiter(t *testing.T) { t.Fatal(err2) } - lc := lockpb.NewLockClient(clus.Client(0).ActiveConnection()) + lc := toGRPC(clus.Client(0)).Lock l1, lerr1 := lc.Lock(context.TODO(), &lockpb.LockRequest{Name: []byte("foo"), Lease: lease1.ID}) if lerr1 != nil { t.Fatal(lerr1) diff --git a/github.com/coreos/etcd/lease/lessor.go b/github.com/coreos/etcd/lease/lessor.go index 5120d1cfcd..3418cf565e 100644 --- a/github.com/coreos/etcd/lease/lessor.go +++ b/github.com/coreos/etcd/lease/lessor.go @@ -31,12 +31,15 @@ import ( const ( // NoLease is a special LeaseID representing the absence of a lease. NoLease = LeaseID(0) + + forever = monotime.Time(math.MaxInt64) ) var ( leaseBucketName = []byte("lease") - forever = monotime.Time(math.MaxInt64) + // maximum number of leases to revoke per second; configurable for tests + leaseRevokeRate = 1000 ErrNotPrimary = errors.New("not a primary lessor") ErrLeaseNotFound = errors.New("lease not found") @@ -324,8 +327,53 @@ func (le *lessor) Promote(extend time.Duration) { for _, l := range le.leaseMap { l.refresh(extend) } + + if len(le.leaseMap) < leaseRevokeRate { + // no possibility of lease pile-up + return + } + + // adjust expiries in case of overlap + leases := make([]*Lease, 0, len(le.leaseMap)) + for _, l := range le.leaseMap { + leases = append(leases, l) + } + sort.Sort(leasesByExpiry(leases)) + + baseWindow := leases[0].Remaining() + nextWindow := baseWindow + time.Second + expires := 0 + // have fewer expires than the total revoke rate so piled up leases + // don't consume the entire revoke limit + targetExpiresPerSecond := (3 * leaseRevokeRate) / 4 + for _, l := range leases { + remaining := l.Remaining() + if remaining > nextWindow { + baseWindow = remaining + nextWindow = baseWindow + time.Second + expires = 1 + continue + } + expires++ + if expires <= targetExpiresPerSecond { + continue + } + rateDelay := float64(time.Second) * (float64(expires) / float64(targetExpiresPerSecond)) + // If leases are extended by n seconds, leases n seconds ahead of the + // base window should be extended by only one second. 
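+		// e.g. with the default revoke rate of 1000, targetExpiresPerSecond is
+		// 750, so once 1500 leases have piled up in the current window the raw
+		// delay is 2s before subtracting how far the lease already sits past
+		// the base window.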
+ rateDelay -= float64(remaining - baseWindow) + delay := time.Duration(rateDelay) + nextWindow = baseWindow + delay + l.refresh(delay + extend) + } } +type leasesByExpiry []*Lease + +func (le leasesByExpiry) Len() int { return len(le) } +func (le leasesByExpiry) Less(i, j int) bool { return le[i].Remaining() < le[j].Remaining() } +func (le leasesByExpiry) Swap(i, j int) { le[i], le[j] = le[j], le[i] } + func (le *lessor) Demote() { le.mu.Lock() defer le.mu.Unlock() @@ -422,6 +470,10 @@ func (le *lessor) runLoop() { le.mu.Unlock() if len(ls) != 0 { + // rate limit + if len(ls) > leaseRevokeRate/2 { + ls = ls[:leaseRevokeRate/2] + } select { case <-le.stopC: return diff --git a/github.com/coreos/etcd/lease/lessor_test.go b/github.com/coreos/etcd/lease/lessor_test.go index bfada89932..7ea2972a91 100644 --- a/github.com/coreos/etcd/lease/lessor_test.go +++ b/github.com/coreos/etcd/lease/lessor_test.go @@ -42,6 +42,7 @@ func TestLessorGrant(t *testing.T) { defer be.Close() le := newLessor(be, minLeaseTTL) + defer le.Stop() le.Promote(0) l, err := le.Grant(1, 1) @@ -87,6 +88,7 @@ func TestLeaseConcurrentKeys(t *testing.T) { defer be.Close() le := newLessor(be, minLeaseTTL) + defer le.Stop() le.SetRangeDeleter(func() TxnDelete { return newFakeDeleter(be) }) // grant a lease with long term (100 seconds) to @@ -134,6 +136,7 @@ func TestLessorRevoke(t *testing.T) { defer be.Close() le := newLessor(be, minLeaseTTL) + defer le.Stop() var fd *fakeDeleter le.SetRangeDeleter(func() TxnDelete { fd = newFakeDeleter(be) @@ -185,6 +188,7 @@ func TestLessorRenew(t *testing.T) { defer os.RemoveAll(dir) le := newLessor(be, minLeaseTTL) + defer le.Stop() le.Promote(0) l, err := le.Grant(1, minLeaseTTL) @@ -210,12 +214,66 @@ func TestLessorRenew(t *testing.T) { } } +// TestLessorRenewExtendPileup ensures Lessor extends leases on promotion if too many +// expire at the same time. 
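+// It lowers leaseRevokeRate to 10, grants many leases with near-identical
+// TTLs, restarts the lessor, and checks that Promote spreads the expirations
+// so no one-second window holds more than leaseRevokeRate leases.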
+func TestLessorRenewExtendPileup(t *testing.T) { + oldRevokeRate := leaseRevokeRate + defer func() { leaseRevokeRate = oldRevokeRate }() + leaseRevokeRate = 10 + + dir, be := NewTestBackend(t) + defer os.RemoveAll(dir) + + le := newLessor(be, minLeaseTTL) + ttl := int64(10) + for i := 1; i <= leaseRevokeRate*10; i++ { + if _, err := le.Grant(LeaseID(2*i), ttl); err != nil { + t.Fatal(err) + } + // ttls that overlap spillover for ttl=10 + if _, err := le.Grant(LeaseID(2*i+1), ttl+1); err != nil { + t.Fatal(err) + } + } + + // simulate stop and recovery + le.Stop() + be.Close() + bcfg := backend.DefaultBackendConfig() + bcfg.Path = filepath.Join(dir, "be") + be = backend.New(bcfg) + defer be.Close() + le = newLessor(be, minLeaseTTL) + defer le.Stop() + + // extend after recovery should extend expiration on lease pile-up + le.Promote(0) + + windowCounts := make(map[int64]int) + for _, l := range le.leaseMap { + // round up slightly for baseline ttl + s := int64(l.Remaining().Seconds() + 0.1) + windowCounts[s]++ + } + + for i := ttl; i < ttl+20; i++ { + c := windowCounts[i] + if c > leaseRevokeRate { + t.Errorf("expected at most %d expiring at %ds, got %d", leaseRevokeRate, i, c) + } + if c < leaseRevokeRate/2 { + t.Errorf("expected at least %d expiring at %ds, got %d", leaseRevokeRate/2, i, c) + } + } +} + func TestLessorDetach(t *testing.T) { dir, be := NewTestBackend(t) defer os.RemoveAll(dir) defer be.Close() le := newLessor(be, minLeaseTTL) + defer le.Stop() le.SetRangeDeleter(func() TxnDelete { return newFakeDeleter(be) }) // grant a lease with long term (100 seconds) to @@ -255,6 +313,7 @@ func TestLessorRecover(t *testing.T) { defer be.Close() le := newLessor(be, minLeaseTTL) + defer le.Stop() l1, err1 := le.Grant(1, 10) l2, err2 := le.Grant(2, 20) if err1 != nil || err2 != nil { @@ -263,6 +322,7 @@ func TestLessorRecover(t *testing.T) { // Create a new lessor with the same backend nle := newLessor(be, minLeaseTTL) + defer nle.Stop() nl1 := nle.Lookup(l1.ID) if nl1 == nil || nl1.ttl != l1.ttl { t.Errorf("nl1 = %v, want nl1.ttl= %d", nl1.ttl, l1.ttl) diff --git a/github.com/coreos/etcd/mvcc/backend/backend.go b/github.com/coreos/etcd/mvcc/backend/backend.go index 37b8ef016b..87edd25f42 100644 --- a/github.com/coreos/etcd/mvcc/backend/backend.go +++ b/github.com/coreos/etcd/mvcc/backend/backend.go @@ -25,7 +25,7 @@ import ( "sync/atomic" "time" - "github.com/boltdb/bolt" + bolt "github.com/coreos/bbolt" "github.com/coreos/pkg/capnslog" ) @@ -41,6 +41,9 @@ var ( initialMmapSize = uint64(10 * 1024 * 1024 * 1024) plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "mvcc/backend") + + // minSnapshotWarningTimeout is the minimum threshold to trigger a long running snapshot warning. + minSnapshotWarningTimeout = time.Duration(30 * time.Second) ) type Backend interface { @@ -121,7 +124,7 @@ func newBackend(bcfg BackendConfig) *backend { if boltOpenOptions != nil { *bopts = *boltOpenOptions } - bopts.InitialMmapSize = int(bcfg.MmapSize) + bopts.InitialMmapSize = bcfg.mmapSize() db, err := bolt.Open(bcfg.Path, 0600, bopts) if err != nil { @@ -171,7 +174,33 @@ func (b *backend) Snapshot() Snapshot { if err != nil { plog.Fatalf("cannot begin tx (%s)", err) } - return &snapshot{tx} + + stopc, donec := make(chan struct{}), make(chan struct{}) + dbBytes := tx.Size() + go func() { + defer close(donec) + // sendRateBytes is based on transferring snapshot data over a 1 gigabit/s connection + // assuming a min tcp throughput of 100MB/s. 
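+		// e.g. a 6 GB db yields a warning interval of roughly one minute, while
+		// anything under ~3 GB is clamped to minSnapshotWarningTimeout below.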
+		var sendRateBytes int64 = 100 * 1024 * 1024
+		warningTimeout := time.Duration(int64((float64(dbBytes) / float64(sendRateBytes)) * float64(time.Second)))
+		if warningTimeout < minSnapshotWarningTimeout {
+			warningTimeout = minSnapshotWarningTimeout
+		}
+		start := time.Now()
+		ticker := time.NewTicker(warningTimeout)
+		defer ticker.Stop()
+		for {
+			select {
+			case <-ticker.C:
+				plog.Warningf("snapshotting is taking more than %v seconds to finish transferring %v MB [started at %v]", time.Since(start).Seconds(), float64(dbBytes)/float64(1024*1024), start)
+			case <-stopc:
+				snapshotDurations.Observe(time.Since(start).Seconds())
+				return
+			}
+		}
+	}()
+
+	return &snapshot{tx, stopc, donec}
 }
 
 type IgnoreKey struct {
@@ -403,6 +432,12 @@ func NewDefaultTmpBackend() (*backend, string) {
 
 type snapshot struct {
 	*bolt.Tx
+	stopc chan struct{}
+	donec chan struct{}
 }
 
-func (s *snapshot) Close() error { return s.Tx.Rollback() }
+func (s *snapshot) Close() error {
+	close(s.stopc)
+	<-s.donec
+	return s.Tx.Rollback()
+}
diff --git a/github.com/coreos/etcd/mvcc/backend/backend_test.go b/github.com/coreos/etcd/mvcc/backend/backend_test.go
index af898b5ad3..664579bbab 100644
--- a/github.com/coreos/etcd/mvcc/backend/backend_test.go
+++ b/github.com/coreos/etcd/mvcc/backend/backend_test.go
@@ -22,7 +22,7 @@ import (
 	"testing"
 	"time"
 
-	"github.com/boltdb/bolt"
+	bolt "github.com/coreos/bbolt"
 )
 
 func TestBackendClose(t *testing.T) {
diff --git a/github.com/coreos/etcd/mvcc/backend/batch_tx.go b/github.com/coreos/etcd/mvcc/backend/batch_tx.go
index a47f67d49b..fed9d69c94 100644
--- a/github.com/coreos/etcd/mvcc/backend/batch_tx.go
+++ b/github.com/coreos/etcd/mvcc/backend/batch_tx.go
@@ -22,7 +22,7 @@ import (
 	"sync/atomic"
 	"time"
 
-	"github.com/boltdb/bolt"
+	bolt "github.com/coreos/bbolt"
 )
 
 type BatchTx interface {
@@ -45,6 +45,13 @@ type batchTx struct {
 	pending int
 }
 
+var nopLock sync.Locker = &nopLocker{}
+
+type nopLocker struct{}
+
+func (*nopLocker) Lock() {}
+func (*nopLocker) Unlock() {}
+
 func (t *batchTx) UnsafeCreateBucket(name []byte) {
 	_, err := t.tx.CreateBucket(name)
 	if err != nil && err != bolt.ErrBucketExists {
@@ -81,28 +88,34 @@ func (t *batchTx) unsafePut(bucketName []byte, key []byte, value []byte, seq boo
 // UnsafeRange must be called holding the lock on the tx.
func (t *batchTx) UnsafeRange(bucketName, key, endKey []byte, limit int64) ([][]byte, [][]byte) { - k, v, err := unsafeRange(t.tx, bucketName, key, endKey, limit) + // nop lock since a write txn should already hold a lock over t.tx + k, v, err := unsafeRange(t.tx, bucketName, key, endKey, limit, nopLock) if err != nil { plog.Fatal(err) } return k, v } -func unsafeRange(tx *bolt.Tx, bucketName, key, endKey []byte, limit int64) (keys [][]byte, vs [][]byte, err error) { +func unsafeRange(tx *bolt.Tx, bucketName, key, endKey []byte, limit int64, l sync.Locker) (keys [][]byte, vs [][]byte, err error) { + l.Lock() bucket := tx.Bucket(bucketName) if bucket == nil { + l.Unlock() return nil, nil, fmt.Errorf("bucket %s does not exist", bucketName) } if len(endKey) == 0 { - if v := bucket.Get(key); v != nil { + v := bucket.Get(key) + l.Unlock() + if v != nil { return append(keys, key), append(vs, v), nil } return nil, nil, nil } + c := bucket.Cursor() + l.Unlock() if limit <= 0 { limit = math.MaxInt64 } - c := bucket.Cursor() for ck, cv := c.Seek(key); ck != nil && bytes.Compare(ck, endKey) < 0; ck, cv = c.Next() { vs = append(vs, cv) keys = append(keys, ck) diff --git a/github.com/coreos/etcd/mvcc/backend/batch_tx_test.go b/github.com/coreos/etcd/mvcc/backend/batch_tx_test.go index 582cbf84e7..57549b98b9 100644 --- a/github.com/coreos/etcd/mvcc/backend/batch_tx_test.go +++ b/github.com/coreos/etcd/mvcc/backend/batch_tx_test.go @@ -19,7 +19,7 @@ import ( "testing" "time" - "github.com/boltdb/bolt" + bolt "github.com/coreos/bbolt" ) func TestBatchTxPut(t *testing.T) { diff --git a/github.com/coreos/etcd/mvcc/backend/boltoption_default.go b/github.com/coreos/etcd/mvcc/backend/config_default.go similarity index 82% rename from github.com/coreos/etcd/mvcc/backend/boltoption_default.go rename to github.com/coreos/etcd/mvcc/backend/config_default.go index 92019c1841..edfed0025c 100644 --- a/github.com/coreos/etcd/mvcc/backend/boltoption_default.go +++ b/github.com/coreos/etcd/mvcc/backend/config_default.go @@ -12,10 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build !linux +// +build !linux,!windows package backend -import "github.com/boltdb/bolt" +import bolt "github.com/coreos/bbolt" var boltOpenOptions *bolt.Options = nil + +func (bcfg *BackendConfig) mmapSize() int { return int(bcfg.MmapSize) } diff --git a/github.com/coreos/etcd/mvcc/backend/boltoption_linux.go b/github.com/coreos/etcd/mvcc/backend/config_linux.go similarity index 86% rename from github.com/coreos/etcd/mvcc/backend/boltoption_linux.go rename to github.com/coreos/etcd/mvcc/backend/config_linux.go index c65b477a0d..b01785f3b3 100644 --- a/github.com/coreos/etcd/mvcc/backend/boltoption_linux.go +++ b/github.com/coreos/etcd/mvcc/backend/config_linux.go @@ -17,7 +17,7 @@ package backend import ( "syscall" - "github.com/boltdb/bolt" + bolt "github.com/coreos/bbolt" ) // syscall.MAP_POPULATE on linux 2.6.23+ does sequential read-ahead @@ -27,5 +27,8 @@ import ( // (https://github.com/torvalds/linux/releases/tag/v2.6.23), mmap might // silently ignore this flag. Please update your kernel to prevent this. 
var boltOpenOptions = &bolt.Options{ - MmapFlags: syscall.MAP_POPULATE, + MmapFlags: syscall.MAP_POPULATE, + NoFreelistSync: true, } + +func (bcfg *BackendConfig) mmapSize() int { return int(bcfg.MmapSize) } diff --git a/github.com/coreos/etcd/mvcc/backend/config_windows.go b/github.com/coreos/etcd/mvcc/backend/config_windows.go new file mode 100644 index 0000000000..71d02700bc --- /dev/null +++ b/github.com/coreos/etcd/mvcc/backend/config_windows.go @@ -0,0 +1,26 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build windows + +package backend + +import bolt "github.com/coreos/bbolt" + +var boltOpenOptions *bolt.Options = nil + +// setting mmap size != 0 on windows will allocate the entire +// mmap size for the file, instead of growing it. So, force 0. + +func (bcfg *BackendConfig) mmapSize() int { return 0 } diff --git a/github.com/coreos/etcd/mvcc/backend/metrics.go b/github.com/coreos/etcd/mvcc/backend/metrics.go index 34a56a9195..30a3880147 100644 --- a/github.com/coreos/etcd/mvcc/backend/metrics.go +++ b/github.com/coreos/etcd/mvcc/backend/metrics.go @@ -24,8 +24,18 @@ var ( Help: "The latency distributions of commit called by backend.", Buckets: prometheus.ExponentialBuckets(0.001, 2, 14), }) + + snapshotDurations = prometheus.NewHistogram(prometheus.HistogramOpts{ + Namespace: "etcd", + Subsystem: "disk", + Name: "backend_snapshot_duration_seconds", + Help: "The latency distribution of backend snapshots.", + // 10 ms -> 655 seconds + Buckets: prometheus.ExponentialBuckets(.01, 2, 17), + }) ) func init() { prometheus.MustRegister(commitDurations) + prometheus.MustRegister(snapshotDurations) } diff --git a/github.com/coreos/etcd/mvcc/backend/read_tx.go b/github.com/coreos/etcd/mvcc/backend/read_tx.go index 51596ffdf2..a5ceeaeb7d 100644 --- a/github.com/coreos/etcd/mvcc/backend/read_tx.go +++ b/github.com/coreos/etcd/mvcc/backend/read_tx.go @@ -19,7 +19,7 @@ import ( "math" "sync" - "github.com/boltdb/bolt" + bolt "github.com/coreos/bbolt" ) // safeRangeBucket is a hack to avoid inadvertently reading duplicate keys; @@ -63,10 +63,8 @@ func (rt *readTx) UnsafeRange(bucketName, key, endKey []byte, limit int64) ([][] if int64(len(keys)) == limit { return keys, vals } - rt.txmu.Lock() // ignore error since bucket may have been created in this batch - k2, v2, _ := unsafeRange(rt.tx, bucketName, key, endKey, limit-int64(len(keys))) - rt.txmu.Unlock() + k2, v2, _ := unsafeRange(rt.tx, bucketName, key, endKey, limit-int64(len(keys)), &rt.txmu) return append(k2, keys...), append(v2, vals...) 
} diff --git a/github.com/coreos/etcd/mvcc/index.go b/github.com/coreos/etcd/mvcc/index.go index 397098a7ba..991289cdd5 100644 --- a/github.com/coreos/etcd/mvcc/index.go +++ b/github.com/coreos/etcd/mvcc/index.go @@ -29,7 +29,9 @@ type index interface { RangeSince(key, end []byte, rev int64) []revision Compact(rev int64) map[revision]struct{} Equal(b index) bool + Insert(ki *keyIndex) + KeyIndex(ki *keyIndex) *keyIndex } type treeIndex struct { @@ -60,18 +62,27 @@ func (ti *treeIndex) Put(key []byte, rev revision) { func (ti *treeIndex) Get(key []byte, atRev int64) (modified, created revision, ver int64, err error) { keyi := &keyIndex{key: key} - ti.RLock() defer ti.RUnlock() - item := ti.tree.Get(keyi) - if item == nil { + if keyi = ti.keyIndex(keyi); keyi == nil { return revision{}, revision{}, 0, ErrRevisionNotFound } - - keyi = item.(*keyIndex) return keyi.get(atRev) } +func (ti *treeIndex) KeyIndex(keyi *keyIndex) *keyIndex { + ti.RLock() + defer ti.RUnlock() + return ti.keyIndex(keyi) +} + +func (ti *treeIndex) keyIndex(keyi *keyIndex) *keyIndex { + if item := ti.tree.Get(keyi); item != nil { + return item.(*keyIndex) + } + return nil +} + func (ti *treeIndex) Range(key, end []byte, atRev int64) (keys [][]byte, revs []revision) { if end == nil { rev, _, _, err := ti.Get(key, atRev) diff --git a/github.com/coreos/etcd/mvcc/key_index.go b/github.com/coreos/etcd/mvcc/key_index.go index 983c64e2f6..9104f9b2d3 100644 --- a/github.com/coreos/etcd/mvcc/key_index.go +++ b/github.com/coreos/etcd/mvcc/key_index.go @@ -222,7 +222,6 @@ func (ki *keyIndex) compact(atRev int64, available map[revision]struct{}) { } // remove the previous generations. ki.generations = ki.generations[i:] - return } func (ki *keyIndex) isEmpty() bool { diff --git a/github.com/coreos/etcd/mvcc/kv.go b/github.com/coreos/etcd/mvcc/kv.go index e13cd64794..6636347aa4 100644 --- a/github.com/coreos/etcd/mvcc/kv.go +++ b/github.com/coreos/etcd/mvcc/kv.go @@ -93,7 +93,9 @@ func (trw *txnReadWrite) DeleteRange(key, end []byte) (n, rev int64) { panic("un func (trw *txnReadWrite) Put(key, value []byte, lease lease.LeaseID) (rev int64) { panic("unexpected Put") } -func (trw *txnReadWrite) Changes() []mvccpb.KeyValue { panic("unexpected Changes") } +func (trw *txnReadWrite) Changes() []mvccpb.KeyValue { return nil } + +func NewReadOnlyTxnWrite(txn TxnRead) TxnWrite { return &txnReadWrite{txn} } type KV interface { ReadView diff --git a/github.com/coreos/etcd/mvcc/kvstore.go b/github.com/coreos/etcd/mvcc/kvstore.go index 36b3d9a261..34fc761723 100644 --- a/github.com/coreos/etcd/mvcc/kvstore.go +++ b/github.com/coreos/etcd/mvcc/kvstore.go @@ -19,6 +19,7 @@ import ( "errors" "math" "sync" + "sync/atomic" "time" "github.com/coreos/etcd/lease" @@ -33,13 +34,6 @@ var ( keyBucketName = []byte("key") metaBucketName = []byte("meta") - // markedRevBytesLen is the byte length of marked revision. - // The first `revBytesLen` bytes represents a normal revision. The last - // one byte is the mark. - markedRevBytesLen = revBytesLen + 1 - markBytePosition = markedRevBytesLen - 1 - markTombstone byte = 't' - consistentIndexKeyName = []byte("consistent_index") scheduledCompactKeyName = []byte("scheduledCompactRev") finishedCompactKeyName = []byte("finishedCompactRev") @@ -52,6 +46,17 @@ var ( plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "mvcc") ) +const ( + // markedRevBytesLen is the byte length of marked revision. + // The first `revBytesLen` bytes represents a normal revision. The last + // one byte is the mark. 
+ markedRevBytesLen = revBytesLen + 1 + markBytePosition = markedRevBytesLen - 1 + markTombstone byte = 't' +) + +var restoreChunkKeys = 10000 // non-const for testing + // ConsistentIndexGetter is an interface that wraps the Get method. // Consistent index is the offset of an entry in a consistent replicated log. type ConsistentIndexGetter interface { @@ -63,6 +68,10 @@ type store struct { ReadView WriteView + // consistentIndex caches the "consistent_index" key's value. Accessed + // through atomics so must be 64-bit aligned. + consistentIndex uint64 + // mu read locks for txns and write locks for non-txn store changes. mu sync.RWMutex @@ -230,6 +239,7 @@ func (s *store) Restore(b backend.Backend) error { close(s.stopc) s.fifoSched.Stop() + atomic.StoreUint64(&s.consistentIndex, 0) s.b = b s.kvindex = newTreeIndex() s.currentRev = 1 @@ -241,67 +251,53 @@ func (s *store) Restore(b backend.Backend) error { } func (s *store) restore() error { + reportDbTotalSizeInBytesMu.Lock() + b := s.b + reportDbTotalSizeInBytes = func() float64 { return float64(b.Size()) } + reportDbTotalSizeInBytesMu.Unlock() + min, max := newRevBytes(), newRevBytes() revToBytes(revision{main: 1}, min) revToBytes(revision{main: math.MaxInt64, sub: math.MaxInt64}, max) keyToLease := make(map[string]lease.LeaseID) - // use an unordered map to hold the temp index data to speed up - // the initial key index recovery. - // we will convert this unordered map into the tree index later. - unordered := make(map[string]*keyIndex, 100000) - // restore index tx := s.b.BatchTx() tx.Lock() + _, finishedCompactBytes := tx.UnsafeRange(metaBucketName, finishedCompactKeyName, nil, 0) if len(finishedCompactBytes) != 0 { s.compactMainRev = bytesToRev(finishedCompactBytes[0]).main plog.Printf("restore compact to %d", s.compactMainRev) } + _, scheduledCompactBytes := tx.UnsafeRange(metaBucketName, scheduledCompactKeyName, nil, 0) + scheduledCompact := int64(0) + if len(scheduledCompactBytes) != 0 { + scheduledCompact = bytesToRev(scheduledCompactBytes[0]).main + } - // TODO: limit N to reduce max memory usage - keys, vals := tx.UnsafeRange(keyBucketName, min, max, 0) - for i, key := range keys { - var kv mvccpb.KeyValue - if err := kv.Unmarshal(vals[i]); err != nil { - plog.Fatalf("cannot unmarshal event: %v", err) + // index keys concurrently as they're loaded in from tx + rkvc, revc := restoreIntoIndex(s.kvindex) + for { + keys, vals := tx.UnsafeRange(keyBucketName, min, max, int64(restoreChunkKeys)) + if len(keys) == 0 { + break } - - rev := bytesToRev(key[:revBytesLen]) - s.currentRev = rev.main - - // restore index - switch { - case isTombstone(key): - if ki, ok := unordered[string(kv.Key)]; ok { - ki.tombstone(rev.main, rev.sub) - } - delete(keyToLease, string(kv.Key)) - - default: - ki, ok := unordered[string(kv.Key)] - if ok { - ki.put(rev.main, rev.sub) - } else { - ki = &keyIndex{key: kv.Key} - ki.restore(revision{kv.CreateRevision, 0}, rev, kv.Version) - unordered[string(kv.Key)] = ki - } - - if lid := lease.LeaseID(kv.Lease); lid != lease.NoLease { - keyToLease[string(kv.Key)] = lid - } else { - delete(keyToLease, string(kv.Key)) - } + // rkvc blocks if the total pending keys exceeds the restore + // chunk size to keep keys from consuming too much memory. 
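+		// (restoreIntoIndex sizes the channel buffer at restoreChunkKeys, so the
+		// producer stalls here until the index goroutine catches up)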
+ restoreChunk(rkvc, keys, vals, keyToLease) + if len(keys) < restoreChunkKeys { + // partial set implies final set + break } + // next set begins after where this one ended + newMin := bytesToRev(keys[len(keys)-1][:revBytesLen]) + newMin.sub++ + revToBytes(newMin, min) } - - // restore the tree index from the unordered index. - for _, v := range unordered { - s.kvindex.Insert(v) - } + close(rkvc) + s.currentRev = <-revc // keys in the range [compacted revision -N, compaction] might all be deleted due to compaction. // the correct revision should be set to compaction revision in the case, not the largest revision @@ -309,6 +305,9 @@ func (s *store) restore() error { if s.currentRev < s.compactMainRev { s.currentRev = s.compactMainRev } + if scheduledCompact <= s.compactMainRev { + scheduledCompact = 0 + } for key, lid := range keyToLease { if s.le == nil { @@ -320,15 +319,6 @@ func (s *store) restore() error { } } - _, scheduledCompactBytes := tx.UnsafeRange(metaBucketName, scheduledCompactKeyName, nil, 0) - scheduledCompact := int64(0) - if len(scheduledCompactBytes) != 0 { - scheduledCompact = bytesToRev(scheduledCompactBytes[0]).main - if scheduledCompact <= s.compactMainRev { - scheduledCompact = 0 - } - } - tx.Unlock() if scheduledCompact != 0 { @@ -339,35 +329,98 @@ func (s *store) restore() error { return nil } +type revKeyValue struct { + key []byte + kv mvccpb.KeyValue + kstr string +} + +func restoreIntoIndex(idx index) (chan<- revKeyValue, <-chan int64) { + rkvc, revc := make(chan revKeyValue, restoreChunkKeys), make(chan int64, 1) + go func() { + currentRev := int64(1) + defer func() { revc <- currentRev }() + // restore the tree index from streaming the unordered index. + kiCache := make(map[string]*keyIndex, restoreChunkKeys) + for rkv := range rkvc { + ki, ok := kiCache[rkv.kstr] + // purge kiCache if many keys but still missing in the cache + if !ok && len(kiCache) >= restoreChunkKeys { + i := 10 + for k := range kiCache { + delete(kiCache, k) + if i--; i == 0 { + break + } + } + } + // cache miss, fetch from tree index if there + if !ok { + ki = &keyIndex{key: rkv.kv.Key} + if idxKey := idx.KeyIndex(ki); idxKey != nil { + kiCache[rkv.kstr], ki = idxKey, idxKey + ok = true + } + } + rev := bytesToRev(rkv.key) + currentRev = rev.main + if ok { + if isTombstone(rkv.key) { + ki.tombstone(rev.main, rev.sub) + continue + } + ki.put(rev.main, rev.sub) + } else if !isTombstone(rkv.key) { + ki.restore(revision{rkv.kv.CreateRevision, 0}, rev, rkv.kv.Version) + idx.Insert(ki) + kiCache[rkv.kstr] = ki + } + } + }() + return rkvc, revc +} + +func restoreChunk(kvc chan<- revKeyValue, keys, vals [][]byte, keyToLease map[string]lease.LeaseID) { + for i, key := range keys { + rkv := revKeyValue{key: key} + if err := rkv.kv.Unmarshal(vals[i]); err != nil { + plog.Fatalf("cannot unmarshal event: %v", err) + } + rkv.kstr = string(rkv.kv.Key) + if isTombstone(key) { + delete(keyToLease, rkv.kstr) + } else if lid := lease.LeaseID(rkv.kv.Lease); lid != lease.NoLease { + keyToLease[rkv.kstr] = lid + } else { + delete(keyToLease, rkv.kstr) + } + kvc <- rkv + } +} + func (s *store) Close() error { close(s.stopc) s.fifoSched.Stop() return nil } -func (a *store) Equal(b *store) bool { - if a.currentRev != b.currentRev { - return false - } - if a.compactMainRev != b.compactMainRev { - return false - } - return a.kvindex.Equal(b.kvindex) -} - func (s *store) saveIndex(tx backend.BatchTx) { if s.ig == nil { return } bs := s.bytesBuf8 - binary.BigEndian.PutUint64(bs, s.ig.ConsistentIndex()) + ci := 
s.ig.ConsistentIndex() + binary.BigEndian.PutUint64(bs, ci) // put the index into the underlying backend // tx has been locked in TxnBegin, so there is no need to lock it again tx.UnsafePut(metaBucketName, consistentIndexKeyName, bs) + atomic.StoreUint64(&s.consistentIndex, ci) } func (s *store) ConsistentIndex() uint64 { - // TODO: cache index in a uint64 field? + if ci := atomic.LoadUint64(&s.consistentIndex); ci > 0 { + return ci + } tx := s.b.BatchTx() tx.Lock() defer tx.Unlock() @@ -375,7 +428,9 @@ func (s *store) ConsistentIndex() uint64 { if len(vs) == 0 { return 0 } - return binary.BigEndian.Uint64(vs[0]) + v := binary.BigEndian.Uint64(vs[0]) + atomic.StoreUint64(&s.consistentIndex, v) + return v } // appendMarkTombstone appends tombstone mark to normal revision bytes. @@ -390,16 +445,3 @@ func appendMarkTombstone(b []byte) []byte { func isTombstone(b []byte) bool { return len(b) == markedRevBytesLen && b[markBytePosition] == markTombstone } - -// revBytesRange returns the range of revision bytes at -// the given revision. -func revBytesRange(rev revision) (start, end []byte) { - start = newRevBytes() - revToBytes(rev, start) - - end = newRevBytes() - endRev := revision{main: rev.main, sub: rev.sub + 1} - revToBytes(endRev, end) - - return start, end -} diff --git a/github.com/coreos/etcd/mvcc/kvstore_bench_test.go b/github.com/coreos/etcd/mvcc/kvstore_bench_test.go index 821fb67553..b0db47f111 100644 --- a/github.com/coreos/etcd/mvcc/kvstore_bench_test.go +++ b/github.com/coreos/etcd/mvcc/kvstore_bench_test.go @@ -45,6 +45,24 @@ func BenchmarkStorePut(b *testing.B) { } } +func BenchmarkConsistentIndex(b *testing.B) { + fci := fakeConsistentIndex(10) + be, tmpPath := backend.NewDefaultTmpBackend() + s := NewStore(be, &lease.FakeLessor{}, &fci) + defer cleanup(s, be, tmpPath) + + tx := s.b.BatchTx() + tx.Lock() + s.saveIndex(tx) + tx.Unlock() + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + s.ConsistentIndex() + } +} + // BenchmarkStoreTxnPutUpdate is same as above, but instead updates single key func BenchmarkStorePutUpdate(b *testing.B) { var i fakeConsistentIndex @@ -77,6 +95,7 @@ func BenchmarkStoreTxnPut(b *testing.B) { vals := createBytesSlice(bytesN, b.N) b.ResetTimer() + b.ReportAllocs() for i := 0; i < b.N; i++ { txn := s.Write() txn.Put(keys[i], vals[i], lease.NoLease) @@ -89,7 +108,8 @@ func benchmarkStoreRestore(revsPerKey int, b *testing.B) { var i fakeConsistentIndex be, tmpPath := backend.NewDefaultTmpBackend() s := NewStore(be, &lease.FakeLessor{}, &i) - defer cleanup(s, be, tmpPath) + // use closure to capture 's' to pick up the reassignment + defer func() { cleanup(s, be, tmpPath) }() // arbitrary number of bytes bytesN := 64 @@ -103,7 +123,11 @@ func benchmarkStoreRestore(revsPerKey int, b *testing.B) { txn.End() } } + s.Close() + + b.ReportAllocs() b.ResetTimer() + s = NewStore(be, &lease.FakeLessor{}, &i) } func BenchmarkStoreRestoreRevs1(b *testing.B) { diff --git a/github.com/coreos/etcd/mvcc/kvstore_compaction.go b/github.com/coreos/etcd/mvcc/kvstore_compaction.go index bbd38f547f..1726490c11 100644 --- a/github.com/coreos/etcd/mvcc/kvstore_compaction.go +++ b/github.com/coreos/etcd/mvcc/kvstore_compaction.go @@ -22,6 +22,8 @@ import ( func (s *store) scheduleCompaction(compactMainRev int64, keep map[revision]struct{}) bool { totalStart := time.Now() defer dbCompactionTotalDurations.Observe(float64(time.Since(totalStart) / time.Millisecond)) + keyCompactions := 0 + defer func() { dbCompactionKeysCounter.Add(float64(keyCompactions)) }() end 
:= make([]byte, 8) binary.BigEndian.PutUint64(end, uint64(compactMainRev+1)) @@ -40,6 +42,7 @@ func (s *store) scheduleCompaction(compactMainRev int64, keep map[revision]struc rev = bytesToRev(key) if _, ok := keep[rev]; !ok { tx.UnsafeDelete(keyBucketName, key) + keyCompactions++ } } diff --git a/github.com/coreos/etcd/mvcc/kvstore_test.go b/github.com/coreos/etcd/mvcc/kvstore_test.go index f1e8167c3b..e3970af7c7 100644 --- a/github.com/coreos/etcd/mvcc/kvstore_test.go +++ b/github.com/coreos/etcd/mvcc/kvstore_test.go @@ -17,7 +17,9 @@ package mvcc import ( "crypto/rand" "encoding/binary" + "fmt" "math" + mrand "math/rand" "os" "reflect" "testing" @@ -215,9 +217,10 @@ func TestStoreRange(t *testing.T) { t.Errorf("#%d: rev = %d, want %d", i, ret.Rev, wrev) } - wstart, wend := revBytesRange(tt.idxr.revs[0]) + wstart := newRevBytes() + revToBytes(tt.idxr.revs[0], wstart) wact := []testutil.Action{ - {"range", []interface{}{keyBucketName, wstart, wend, int64(0)}}, + {"range", []interface{}{keyBucketName, wstart, []byte(nil), int64(0)}}, } if g := b.tx.Action(); !reflect.DeepEqual(g, wact) { t.Errorf("#%d: tx action = %+v, want %+v", i, g, wact) @@ -373,9 +376,11 @@ func TestStoreRestore(t *testing.T) { t.Fatal(err) } b.tx.rangeRespc <- rangeResp{[][]byte{finishedCompactKeyName}, [][]byte{newTestRevBytes(revision{3, 0})}} - b.tx.rangeRespc <- rangeResp{[][]byte{putkey, delkey}, [][]byte{putkvb, delkvb}} b.tx.rangeRespc <- rangeResp{[][]byte{scheduledCompactKeyName}, [][]byte{newTestRevBytes(revision{3, 0})}} + b.tx.rangeRespc <- rangeResp{[][]byte{putkey, delkey}, [][]byte{putkvb, delkvb}} + b.tx.rangeRespc <- rangeResp{nil, nil} + s.restore() if s.compactMainRev != 3 { @@ -386,8 +391,8 @@ func TestStoreRestore(t *testing.T) { } wact := []testutil.Action{ {"range", []interface{}{metaBucketName, finishedCompactKeyName, []byte(nil), int64(0)}}, - {"range", []interface{}{keyBucketName, newTestRevBytes(revision{1, 0}), newTestRevBytes(revision{math.MaxInt64, math.MaxInt64}), int64(0)}}, {"range", []interface{}{metaBucketName, scheduledCompactKeyName, []byte(nil), int64(0)}}, + {"range", []interface{}{keyBucketName, newTestRevBytes(revision{1, 0}), newTestRevBytes(revision{math.MaxInt64, math.MaxInt64}), int64(restoreChunkKeys)}}, } if g := b.tx.Action(); !reflect.DeepEqual(g, wact) { t.Errorf("tx actions = %+v, want %+v", g, wact) @@ -399,6 +404,7 @@ func TestStoreRestore(t *testing.T) { } ki := &keyIndex{key: []byte("foo"), modified: revision{5, 0}, generations: gens} wact = []testutil.Action{ + {"keyIndex", []interface{}{ki}}, {"insert", []interface{}{ki}}, } if g := fi.Action(); !reflect.DeepEqual(g, wact) { @@ -406,6 +412,56 @@ func TestStoreRestore(t *testing.T) { } } +func TestRestoreDelete(t *testing.T) { + oldChunk := restoreChunkKeys + restoreChunkKeys = mrand.Intn(3) + 2 + defer func() { restoreChunkKeys = oldChunk }() + + b, tmpPath := backend.NewDefaultTmpBackend() + s := NewStore(b, &lease.FakeLessor{}, nil) + defer os.Remove(tmpPath) + + keys := make(map[string]struct{}) + for i := 0; i < 20; i++ { + ks := fmt.Sprintf("foo-%d", i) + k := []byte(ks) + s.Put(k, []byte("bar"), lease.NoLease) + keys[ks] = struct{}{} + switch mrand.Intn(3) { + case 0: + // put random key from past via random range on map + ks = fmt.Sprintf("foo-%d", mrand.Intn(i+1)) + s.Put([]byte(ks), []byte("baz"), lease.NoLease) + keys[ks] = struct{}{} + case 1: + // delete random key via random range on map + for k := range keys { + s.DeleteRange([]byte(k), nil) + delete(keys, k) + break + } + } + } + s.Close() + + s 
= NewStore(b, &lease.FakeLessor{}, nil) + defer s.Close() + for i := 0; i < 20; i++ { + ks := fmt.Sprintf("foo-%d", i) + r, err := s.Range([]byte(ks), nil, RangeOptions{}) + if err != nil { + t.Fatal(err) + } + if _, ok := keys[ks]; ok { + if len(r.KVs) == 0 { + t.Errorf("#%d: expected %q, got deleted", i, ks) + } + } else if len(r.KVs) != 0 { + t.Errorf("#%d: expected deleted, got %q", i, ks) + } + } +} + func TestRestoreContinueUnfinishedCompaction(t *testing.T) { b, tmpPath := backend.NewDefaultTmpBackend() s0 := NewStore(b, &lease.FakeLessor{}, nil) @@ -644,6 +700,11 @@ func (i *fakeIndex) Insert(ki *keyIndex) { i.Recorder.Record(testutil.Action{Name: "insert", Params: []interface{}{ki}}) } +func (i *fakeIndex) KeyIndex(ki *keyIndex) *keyIndex { + i.Recorder.Record(testutil.Action{Name: "keyIndex", Params: []interface{}{ki}}) + return nil +} + func createBytesSlice(bytesN, sliceN int) [][]byte { rs := [][]byte{} for len(rs) != sliceN { diff --git a/github.com/coreos/etcd/mvcc/kvstore_txn.go b/github.com/coreos/etcd/mvcc/kvstore_txn.go index 1e61475519..b0832fa9e3 100644 --- a/github.com/coreos/etcd/mvcc/kvstore_txn.go +++ b/github.com/coreos/etcd/mvcc/kvstore_txn.go @@ -51,7 +51,7 @@ func (tr *storeTxnRead) End() { } type storeTxnWrite struct { - *storeTxnRead + storeTxnRead tx backend.BatchTx // beginRev is the revision where the txn begins; it will write to the next revision. beginRev int64 @@ -63,7 +63,7 @@ func (s *store) Write() TxnWrite { tx := s.b.BatchTx() tx.Lock() tw := &storeTxnWrite{ - storeTxnRead: &storeTxnRead{s, tx, 0, 0}, + storeTxnRead: storeTxnRead{s, tx, 0, 0}, tx: tx, beginRev: s.currentRev, changes: make([]mvccpb.KeyValue, 0, 4), @@ -105,7 +105,6 @@ func (tw *storeTxnWrite) End() { if len(tw.changes) != 0 { tw.s.revMu.Unlock() } - dbTotalSize.Set(float64(tw.s.b.Size())) tw.s.mu.RUnlock() } @@ -129,22 +128,22 @@ func (tr *storeTxnRead) rangeKeys(key, end []byte, curRev int64, ro RangeOptions return &RangeResult{KVs: nil, Count: len(revpairs), Rev: curRev}, nil } - var kvs []mvccpb.KeyValue - for _, revpair := range revpairs { - start, end := revBytesRange(revpair) - _, vs := tr.tx.UnsafeRange(keyBucketName, start, end, 0) + limit := int(ro.Limit) + if limit <= 0 || limit > len(revpairs) { + limit = len(revpairs) + } + + kvs := make([]mvccpb.KeyValue, limit) + revBytes := newRevBytes() + for i, revpair := range revpairs[:len(kvs)] { + revToBytes(revpair, revBytes) + _, vs := tr.tx.UnsafeRange(keyBucketName, revBytes, nil, 0) if len(vs) != 1 { plog.Fatalf("range cannot find rev (%d,%d)", revpair.main, revpair.sub) } - - var kv mvccpb.KeyValue - if err := kv.Unmarshal(vs[0]); err != nil { + if err := kvs[i].Unmarshal(vs[0]); err != nil { plog.Fatalf("cannot unmarshal event: %v", err) } - kvs = append(kvs, kv) - if ro.Limit > 0 && len(kvs) >= int(ro.Limit) { - break - } } return &RangeResult{KVs: kvs, Count: len(revpairs), Rev: curRev}, nil } diff --git a/github.com/coreos/etcd/mvcc/metrics.go b/github.com/coreos/etcd/mvcc/metrics.go index aa8af6aa55..e44eb12d23 100644 --- a/github.com/coreos/etcd/mvcc/metrics.go +++ b/github.com/coreos/etcd/mvcc/metrics.go @@ -15,6 +15,8 @@ package mvcc import ( + "sync" + "github.com/prometheus/client_golang/prometheus" ) @@ -129,12 +131,29 @@ var ( Buckets: prometheus.ExponentialBuckets(100, 2, 14), }) - dbTotalSize = prometheus.NewGauge(prometheus.GaugeOpts{ + dbCompactionKeysCounter = prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "etcd_debugging", + Subsystem: "mvcc", + Name: "db_compaction_keys_total", + Help: 
"Total number of db keys compacted.", + }) + + dbTotalSize = prometheus.NewGaugeFunc(prometheus.GaugeOpts{ Namespace: "etcd_debugging", Subsystem: "mvcc", Name: "db_total_size_in_bytes", Help: "Total size of the underlying database in bytes.", - }) + }, + func() float64 { + reportDbTotalSizeInBytesMu.RLock() + defer reportDbTotalSizeInBytesMu.RUnlock() + return reportDbTotalSizeInBytes() + }, + ) + // overridden by mvcc initialization + reportDbTotalSizeInBytesMu sync.RWMutex + reportDbTotalSizeInBytes func() float64 = func() float64 { return 0 } ) func init() { @@ -151,6 +170,7 @@ func init() { prometheus.MustRegister(indexCompactionPauseDurations) prometheus.MustRegister(dbCompactionPauseDurations) prometheus.MustRegister(dbCompactionTotalDurations) + prometheus.MustRegister(dbCompactionKeysCounter) prometheus.MustRegister(dbTotalSize) } diff --git a/github.com/coreos/etcd/mvcc/metrics_txn.go b/github.com/coreos/etcd/mvcc/metrics_txn.go index fd2144279a..911d64875d 100644 --- a/github.com/coreos/etcd/mvcc/metrics_txn.go +++ b/github.com/coreos/etcd/mvcc/metrics_txn.go @@ -50,18 +50,10 @@ func (tw *metricsTxnWrite) Put(key, value []byte, lease lease.LeaseID) (rev int6 func (tw *metricsTxnWrite) End() { defer tw.TxnWrite.End() - if sum := tw.ranges + tw.puts + tw.deletes; sum != 1 { - if sum > 1 { - txnCounter.Inc() - } - return - } - switch { - case tw.ranges == 1: - rangeCounter.Inc() - case tw.puts == 1: - putCounter.Inc() - case tw.deletes == 1: - deleteCounter.Inc() + if sum := tw.ranges + tw.puts + tw.deletes; sum > 1 { + txnCounter.Inc() } + rangeCounter.Add(float64(tw.ranges)) + putCounter.Add(float64(tw.puts)) + deleteCounter.Add(float64(tw.deletes)) } diff --git a/github.com/coreos/etcd/mvcc/watchable_store.go b/github.com/coreos/etcd/mvcc/watchable_store.go index ce852fddea..3205cf8952 100644 --- a/github.com/coreos/etcd/mvcc/watchable_store.go +++ b/github.com/coreos/etcd/mvcc/watchable_store.go @@ -23,7 +23,8 @@ import ( "github.com/coreos/etcd/mvcc/mvccpb" ) -const ( +// non-const so modifiable by tests +var ( // chanBufLen is the length of the buffered chan // for sending out watched events. // TODO: find a good buf value. 1024 is just a random one that diff --git a/github.com/coreos/etcd/mvcc/watchable_store_test.go b/github.com/coreos/etcd/mvcc/watchable_store_test.go index 37bd01d887..a72be9cd90 100644 --- a/github.com/coreos/etcd/mvcc/watchable_store_test.go +++ b/github.com/coreos/etcd/mvcc/watchable_store_test.go @@ -16,8 +16,10 @@ package mvcc import ( "bytes" + "fmt" "os" "reflect" + "sync" "testing" "time" @@ -424,3 +426,83 @@ func TestNewMapwatcherToEventMap(t *testing.T) { } } } + +// TestWatchVictims tests that watchable store delivers watch events +// when the watch channel is temporarily clogged with too many events. 
+func TestWatchVictims(t *testing.T) { + oldChanBufLen, oldMaxWatchersPerSync := chanBufLen, maxWatchersPerSync + + b, tmpPath := backend.NewDefaultTmpBackend() + s := newWatchableStore(b, &lease.FakeLessor{}, nil) + + defer func() { + s.store.Close() + os.Remove(tmpPath) + chanBufLen, maxWatchersPerSync = oldChanBufLen, oldMaxWatchersPerSync + }() + + chanBufLen, maxWatchersPerSync = 1, 2 + numPuts := chanBufLen * 64 + testKey, testValue := []byte("foo"), []byte("bar") + + var wg sync.WaitGroup + numWatches := maxWatchersPerSync * 128 + errc := make(chan error, numWatches) + wg.Add(numWatches) + for i := 0; i < numWatches; i++ { + go func() { + w := s.NewWatchStream() + w.Watch(testKey, nil, 1) + defer func() { + w.Close() + wg.Done() + }() + tc := time.After(10 * time.Second) + evs, nextRev := 0, int64(2) + for evs < numPuts { + select { + case <-tc: + errc <- fmt.Errorf("time out") + return + case wr := <-w.Chan(): + evs += len(wr.Events) + for _, ev := range wr.Events { + if ev.Kv.ModRevision != nextRev { + errc <- fmt.Errorf("expected rev=%d, got %d", nextRev, ev.Kv.ModRevision) + return + } + nextRev++ + } + time.Sleep(time.Millisecond) + } + } + if evs != numPuts { + errc <- fmt.Errorf("expected %d events, got %d", numPuts, evs) + return + } + select { + case <-w.Chan(): + errc <- fmt.Errorf("unexpected response") + default: + } + }() + time.Sleep(time.Millisecond) + } + + var wgPut sync.WaitGroup + wgPut.Add(numPuts) + for i := 0; i < numPuts; i++ { + go func() { + defer wgPut.Done() + s.Put(testKey, testValue, lease.NoLease) + }() + } + wgPut.Wait() + + wg.Wait() + select { + case err := <-errc: + t.Fatal(err) + default: + } +} diff --git a/github.com/coreos/etcd/pkg/adt/interval_tree.go b/github.com/coreos/etcd/pkg/adt/interval_tree.go index 9769771ea4..ec302e4a7a 100644 --- a/github.com/coreos/etcd/pkg/adt/interval_tree.go +++ b/github.com/coreos/etcd/pkg/adt/interval_tree.go @@ -485,6 +485,15 @@ func (ivt *IntervalTree) Stab(iv Interval) (ivs []*IntervalValue) { return ivs } +// Union merges a given interval tree into the receiver. +func (ivt *IntervalTree) Union(inIvt IntervalTree, ivl Interval) { + f := func(n *IntervalValue) bool { + ivt.Insert(n.Ivl, n.Val) + return true + } + inIvt.Visit(ivl, f) +} + type StringComparable string func (s StringComparable) Compare(c Comparable) int { diff --git a/github.com/coreos/etcd/pkg/debugutil/pprof.go b/github.com/coreos/etcd/pkg/debugutil/pprof.go index 2e87124b82..8d5544a3dc 100644 --- a/github.com/coreos/etcd/pkg/debugutil/pprof.go +++ b/github.com/coreos/etcd/pkg/debugutil/pprof.go @@ -17,12 +17,19 @@ package debugutil import ( "net/http" "net/http/pprof" + "runtime" ) const HTTPPrefixPProf = "/debug/pprof" // PProfHandlers returns a map of pprof handlers keyed by the HTTP path. 
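Aside: the debugutil hunk here turns on the mutex profile (roughly 1 out of 5 mutex contention events sampled, per the comment) before building the handler map that PProfHandlers, shown below, returns. As a minimal sketch of how a caller might mount that map on a debug HTTP server — the main-package wiring and listen address are illustrative assumptions, not part of this change:

    package main

    import (
        "log"
        "net/http"

        "github.com/coreos/etcd/pkg/debugutil"
    )

    func main() {
        mux := http.NewServeMux()
        // Each handler is keyed by its HTTP path, e.g. /debug/pprof/mutex
        // once the mutex profile has been enabled by PProfHandlers.
        for path, h := range debugutil.PProfHandlers() {
            mux.Handle(path, h)
        }
        // Debug address is an assumption for this sketch.
        log.Fatal(http.ListenAndServe("127.0.0.1:6060", mux))
    }

With a server wired this way, the newly registered /debug/pprof/mutex path serves the mutex profile alongside the existing pprof endpoints.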
func PProfHandlers() map[string]http.Handler { + // set only when there's no existing setting + if runtime.SetMutexProfileFraction(-1) == 0 { + // 1 out of 5 mutex events are reported, on average + runtime.SetMutexProfileFraction(5) + } + m := make(map[string]http.Handler) m[HTTPPrefixPProf+"/"] = http.HandlerFunc(pprof.Index) @@ -34,6 +41,7 @@ func PProfHandlers() map[string]http.Handler { m[HTTPPrefixPProf+"/goroutine"] = pprof.Handler("goroutine") m[HTTPPrefixPProf+"/threadcreate"] = pprof.Handler("threadcreate") m[HTTPPrefixPProf+"/block"] = pprof.Handler("block") + m[HTTPPrefixPProf+"/mutex"] = pprof.Handler("mutex") return m } diff --git a/github.com/coreos/etcd/pkg/expect/expect.go b/github.com/coreos/etcd/pkg/expect/expect.go index a58121ccf9..fe27ef7fe9 100644 --- a/github.com/coreos/etcd/pkg/expect/expect.go +++ b/github.com/coreos/etcd/pkg/expect/expect.go @@ -44,8 +44,6 @@ type ExpectProcess struct { StopSignal os.Signal } -var printDebugLines = os.Getenv("EXPECT_DEBUG") != "" - // NewExpect creates a new process for expect testing. func NewExpect(name string, arg ...string) (ep *ExpectProcess, err error) { // if env[] is nil, use current system env @@ -75,6 +73,7 @@ func NewExpectWithEnv(name string, args []string, env []string) (ep *ExpectProce func (ep *ExpectProcess) read() { defer ep.wg.Done() + printDebugLines := os.Getenv("EXPECT_DEBUG") != "" r := bufio.NewReader(ep.fpty) for ep.err == nil { ep.ptyMu.Lock() diff --git a/github.com/coreos/etcd/pkg/fileutil/fileutil_test.go b/github.com/coreos/etcd/pkg/fileutil/fileutil_test.go index 35f9ac5ace..cde2f516ad 100644 --- a/github.com/coreos/etcd/pkg/fileutil/fileutil_test.go +++ b/github.com/coreos/etcd/pkg/fileutil/fileutil_test.go @@ -127,6 +127,11 @@ func TestZeroToEnd(t *testing.T) { } defer f.Close() + // Ensure 0 size is a nop so zero-to-end on an empty file won't give EINVAL. + if err = ZeroToEnd(f); err != nil { + t.Fatal(err) + } + b := make([]byte, 1024) for i := range b { b[i] = 12 diff --git a/github.com/coreos/etcd/pkg/fileutil/lock_linux_test.go b/github.com/coreos/etcd/pkg/fileutil/lock_linux_test.go new file mode 100644 index 0000000000..0f57d57a6c --- /dev/null +++ b/github.com/coreos/etcd/pkg/fileutil/lock_linux_test.go @@ -0,0 +1,29 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux + +package fileutil + +import "testing" + +// TestLockAndUnlockSyscallFlock tests the fallback flock using the flock syscall. 
+func TestLockAndUnlockSyscallFlock(t *testing.T) { + oldTryLock, oldLock := linuxTryLockFile, linuxLockFile + defer func() { + linuxTryLockFile, linuxLockFile = oldTryLock, oldLock + }() + linuxTryLockFile, linuxLockFile = flockTryLockFile, flockLockFile + TestLockAndUnlock(t) +} diff --git a/github.com/coreos/etcd/pkg/fileutil/preallocate.go b/github.com/coreos/etcd/pkg/fileutil/preallocate.go index 3270a32986..c747b7cf81 100644 --- a/github.com/coreos/etcd/pkg/fileutil/preallocate.go +++ b/github.com/coreos/etcd/pkg/fileutil/preallocate.go @@ -25,6 +25,10 @@ import ( // If the operation is unsupported, no error will be returned. // Otherwise, the error encountered will be returned. func Preallocate(f *os.File, sizeInBytes int64, extendFile bool) error { + if sizeInBytes == 0 { + // fallocate will return EINVAL if length is 0; skip + return nil + } if extendFile { return preallocExtend(f, sizeInBytes) } diff --git a/github.com/coreos/etcd/pkg/fileutil/preallocate_test.go b/github.com/coreos/etcd/pkg/fileutil/preallocate_test.go index c6a357c59b..c132b8510d 100644 --- a/github.com/coreos/etcd/pkg/fileutil/preallocate_test.go +++ b/github.com/coreos/etcd/pkg/fileutil/preallocate_test.go @@ -20,10 +20,20 @@ import ( "testing" ) -func TestPreallocateExtend(t *testing.T) { runPreallocTest(t, testPreallocateExtend) } -func testPreallocateExtend(t *testing.T, f *os.File) { +func TestPreallocateExtend(t *testing.T) { + pf := func(f *os.File, sz int64) error { return Preallocate(f, sz, true) } + tf := func(t *testing.T, f *os.File) { testPreallocateExtend(t, f, pf) } + runPreallocTest(t, tf) +} + +func TestPreallocateExtendTrunc(t *testing.T) { + tf := func(t *testing.T, f *os.File) { testPreallocateExtend(t, f, preallocExtendTrunc) } + runPreallocTest(t, tf) +} + +func testPreallocateExtend(t *testing.T, f *os.File, pf func(*os.File, int64) error) { size := int64(64 * 1000) - if err := Preallocate(f, size, true); err != nil { + if err := pf(f, size); err != nil { t.Fatal(err) } diff --git a/github.com/coreos/etcd/pkg/mock/mockstorage/storage_recorder.go b/github.com/coreos/etcd/pkg/mock/mockstorage/storage_recorder.go index 4a592545e3..4ecab9831b 100644 --- a/github.com/coreos/etcd/pkg/mock/mockstorage/storage_recorder.go +++ b/github.com/coreos/etcd/pkg/mock/mockstorage/storage_recorder.go @@ -15,8 +15,6 @@ package mockstorage import ( - "fmt" - "github.com/coreos/etcd/pkg/testutil" "github.com/coreos/etcd/raft" "github.com/coreos/etcd/raft/raftpb" @@ -47,13 +45,4 @@ func (p *storageRecorder) SaveSnap(st raftpb.Snapshot) error { return nil } -func (p *storageRecorder) DBFilePath(id uint64) (string, error) { - p.Record(testutil.Action{Name: "DBFilePath"}) - path := p.dbPath - if path != "" { - path = path + "/" - } - return fmt.Sprintf("%s%016x.snap.db", path, id), nil -} - func (p *storageRecorder) Close() error { return nil } diff --git a/github.com/coreos/etcd/pkg/netutil/netutil.go b/github.com/coreos/etcd/pkg/netutil/netutil.go index bb5f392b34..5e38dc98db 100644 --- a/github.com/coreos/etcd/pkg/netutil/netutil.go +++ b/github.com/coreos/etcd/pkg/netutil/netutil.go @@ -16,14 +16,13 @@ package netutil import ( + "context" "net" "net/url" "reflect" "sort" "time" - "golang.org/x/net/context" - "github.com/coreos/etcd/pkg/types" "github.com/coreos/pkg/capnslog" ) @@ -32,11 +31,38 @@ var ( plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "pkg/netutil") // indirection for testing - resolveTCPAddr = net.ResolveTCPAddr + resolveTCPAddr = resolveTCPAddrDefault ) const retryInterval = 
time.Second +// taken from go's ResolveTCP code but uses configurable ctx +func resolveTCPAddrDefault(ctx context.Context, addr string) (*net.TCPAddr, error) { + host, port, serr := net.SplitHostPort(addr) + if serr != nil { + return nil, serr + } + portnum, perr := net.DefaultResolver.LookupPort(ctx, "tcp", port) + if perr != nil { + return nil, perr + } + + var ips []net.IPAddr + if ip := net.ParseIP(host); ip != nil { + ips = []net.IPAddr{{IP: ip}} + } else { + // Try as a DNS name. + ipss, err := net.DefaultResolver.LookupIPAddr(ctx, host) + if err != nil { + return nil, err + } + ips = ipss + } + // randomize? + ip := ips[0] + return &net.TCPAddr{IP: ip.IP, Port: portnum, Zone: ip.Zone}, nil +} + // resolveTCPAddrs is a convenience wrapper for net.ResolveTCPAddr. // resolveTCPAddrs return a new set of url.URLs, in which all DNS hostnames // are resolved. @@ -75,7 +101,7 @@ func resolveURL(ctx context.Context, u url.URL) (string, error) { if host == "localhost" || net.ParseIP(host) != nil { return "", nil } - tcpAddr, err := resolveTCPAddr("tcp", u.Host) + tcpAddr, err := resolveTCPAddr(ctx, u.Host) if err == nil { plog.Infof("resolving %s to %s", u.Host, tcpAddr.String()) return tcpAddr.String(), nil diff --git a/github.com/coreos/etcd/pkg/netutil/netutil_test.go b/github.com/coreos/etcd/pkg/netutil/netutil_test.go index c8d9f79942..82abe6d12a 100644 --- a/github.com/coreos/etcd/pkg/netutil/netutil_test.go +++ b/github.com/coreos/etcd/pkg/netutil/netutil_test.go @@ -15,6 +15,7 @@ package netutil import ( + "context" "errors" "net" "net/url" @@ -22,12 +23,10 @@ import ( "strconv" "testing" "time" - - "golang.org/x/net/context" ) func TestResolveTCPAddrs(t *testing.T) { - defer func() { resolveTCPAddr = net.ResolveTCPAddr }() + defer func() { resolveTCPAddr = resolveTCPAddrDefault }() tests := []struct { urls [][]url.URL expected [][]url.URL @@ -113,7 +112,7 @@ func TestResolveTCPAddrs(t *testing.T) { }, } for _, tt := range tests { - resolveTCPAddr = func(network, addr string) (*net.TCPAddr, error) { + resolveTCPAddr = func(ctx context.Context, addr string) (*net.TCPAddr, error) { host, port, err := net.SplitHostPort(addr) if err != nil { return nil, err @@ -143,13 +142,13 @@ func TestResolveTCPAddrs(t *testing.T) { } func TestURLsEqual(t *testing.T) { - defer func() { resolveTCPAddr = net.ResolveTCPAddr }() + defer func() { resolveTCPAddr = resolveTCPAddrDefault }() hostm := map[string]string{ "example.com": "10.0.10.1", "first.com": "10.0.11.1", "second.com": "10.0.11.2", } - resolveTCPAddr = func(network, addr string) (*net.TCPAddr, error) { + resolveTCPAddr = func(ctx context.Context, addr string) (*net.TCPAddr, error) { host, port, herr := net.SplitHostPort(addr) if herr != nil { return nil, herr diff --git a/github.com/coreos/etcd/pkg/osutil/interrupt_unix.go b/github.com/coreos/etcd/pkg/osutil/interrupt_unix.go index 2d1518a3bf..b9feaffc95 100644 --- a/github.com/coreos/etcd/pkg/osutil/interrupt_unix.go +++ b/github.com/coreos/etcd/pkg/osutil/interrupt_unix.go @@ -68,6 +68,7 @@ func HandleInterrupts() { if pid == 1 { os.Exit(0) } + setDflSignal(sig.(syscall.Signal)) syscall.Kill(pid, sig.(syscall.Signal)) }() } diff --git a/github.com/coreos/etcd/pkg/osutil/osutil.go b/github.com/coreos/etcd/pkg/osutil/osutil.go index 616e439eac..ef38280e71 100644 --- a/github.com/coreos/etcd/pkg/osutil/osutil.go +++ b/github.com/coreos/etcd/pkg/osutil/osutil.go @@ -24,6 +24,9 @@ import ( var ( plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "pkg/osutil") + + // support to override 
setting SIG_DFL so tests don't terminate early + setDflSignal = dflSignal ) func Unsetenv(key string) error { diff --git a/github.com/coreos/etcd/pkg/osutil/osutil_test.go b/github.com/coreos/etcd/pkg/osutil/osutil_test.go index 8bc5ea8df1..9fbc7a44cc 100644 --- a/github.com/coreos/etcd/pkg/osutil/osutil_test.go +++ b/github.com/coreos/etcd/pkg/osutil/osutil_test.go @@ -23,6 +23,8 @@ import ( "time" ) +func init() { setDflSignal = func(syscall.Signal) {} } + func TestUnsetenv(t *testing.T) { tests := []string{ "data", diff --git a/github.com/coreos/etcd/pkg/osutil/signal.go b/github.com/coreos/etcd/pkg/osutil/signal.go new file mode 100644 index 0000000000..687397fdd5 --- /dev/null +++ b/github.com/coreos/etcd/pkg/osutil/signal.go @@ -0,0 +1,21 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !linux cov + +package osutil + +import "syscall" + +func dflSignal(sig syscall.Signal) { /* nop */ } diff --git a/github.com/coreos/etcd/pkg/osutil/signal_linux.go b/github.com/coreos/etcd/pkg/osutil/signal_linux.go new file mode 100644 index 0000000000..b94d80c58b --- /dev/null +++ b/github.com/coreos/etcd/pkg/osutil/signal_linux.go @@ -0,0 +1,30 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux,!cov + +package osutil + +import ( + "syscall" + "unsafe" +) + +// dflSignal sets the given signal to SIG_DFL +func dflSignal(sig syscall.Signal) { + // clearing out the sigact sets the signal to SIG_DFL + var sigactBuf [32]uint64 + ptr := unsafe.Pointer(&sigactBuf) + syscall.Syscall6(uintptr(syscall.SYS_RT_SIGACTION), uintptr(sig), uintptr(ptr), 0, 8, 0, 0) +} diff --git a/github.com/coreos/etcd/pkg/report/report.go b/github.com/coreos/etcd/pkg/report/report.go index 10adcd52db..77e29838a2 100644 --- a/github.com/coreos/etcd/pkg/report/report.go +++ b/github.com/coreos/etcd/pkg/report/report.go @@ -30,9 +30,10 @@ const ( // Result describes the timings for an operation. 
type Result struct { - Start time.Time - End time.Time - Err error + Start time.Time + End time.Time + Err error + Weight float64 } func (res *Result) Duration() time.Duration { return res.End.Sub(res.Start) } @@ -41,18 +42,8 @@ type report struct { results chan Result precision string - avgTotal float64 - fastest float64 - slowest float64 - average float64 - stddev float64 - rps float64 - total time.Duration - - errorDist map[string]int - lats []float64 - - sps *secondPoints + stats Stats + sps *secondPoints } // Stats exposes results raw data. @@ -69,6 +60,13 @@ type Stats struct { TimeSeries TimeSeries } +func (s *Stats) copy() Stats { + ss := *s + ss.ErrorDist = copyMap(ss.ErrorDist) + ss.Lats = copyFloats(ss.Lats) + return ss +} + // Report processes a result stream until it is closed, then produces a // string with information about the consumed result data. type Report interface { @@ -81,12 +79,15 @@ type Report interface { Stats() <-chan Stats } -func NewReport(precision string) Report { - return &report{ +func NewReport(precision string) Report { return newReport(precision) } + +func newReport(precision string) *report { + r := &report{ results: make(chan Result, 16), precision: precision, - errorDist: make(map[string]int), } + r.stats.ErrorDist = make(map[string]int) + return r } func NewReportSample(precision string) Report { @@ -112,22 +113,11 @@ func (r *report) Stats() <-chan Stats { go func() { defer close(donec) r.processResults() - var ts TimeSeries + s := r.stats.copy() if r.sps != nil { - ts = r.sps.getTimeSeries() - } - donec <- Stats{ - AvgTotal: r.avgTotal, - Fastest: r.fastest, - Slowest: r.slowest, - Average: r.average, - Stddev: r.stddev, - RPS: r.rps, - Total: r.total, - ErrorDist: copyMap(r.errorDist), - Lats: copyFloats(r.lats), - TimeSeries: ts, + s.TimeSeries = r.sps.getTimeSeries() } + donec <- s }() return donec } @@ -147,21 +137,21 @@ func copyFloats(s []float64) (c []float64) { } func (r *report) String() (s string) { - if len(r.lats) > 0 { + if len(r.stats.Lats) > 0 { s += fmt.Sprintf("\nSummary:\n") - s += fmt.Sprintf(" Total:\t%s.\n", r.sec2str(r.total.Seconds())) - s += fmt.Sprintf(" Slowest:\t%s.\n", r.sec2str(r.slowest)) - s += fmt.Sprintf(" Fastest:\t%s.\n", r.sec2str(r.fastest)) - s += fmt.Sprintf(" Average:\t%s.\n", r.sec2str(r.average)) - s += fmt.Sprintf(" Stddev:\t%s.\n", r.sec2str(r.stddev)) - s += fmt.Sprintf(" Requests/sec:\t"+r.precision+"\n", r.rps) + s += fmt.Sprintf(" Total:\t%s.\n", r.sec2str(r.stats.Total.Seconds())) + s += fmt.Sprintf(" Slowest:\t%s.\n", r.sec2str(r.stats.Slowest)) + s += fmt.Sprintf(" Fastest:\t%s.\n", r.sec2str(r.stats.Fastest)) + s += fmt.Sprintf(" Average:\t%s.\n", r.sec2str(r.stats.Average)) + s += fmt.Sprintf(" Stddev:\t%s.\n", r.sec2str(r.stats.Stddev)) + s += fmt.Sprintf(" Requests/sec:\t"+r.precision+"\n", r.stats.RPS) s += r.histogram() s += r.sprintLatencies() if r.sps != nil { s += fmt.Sprintf("%v\n", r.sps.getTimeSeries()) } } - if len(r.errorDist) > 0 { + if len(r.stats.ErrorDist) > 0 { s += r.errors() } return s @@ -176,17 +166,17 @@ func NewReportRate(precision string) Report { } func (r *reportRate) String() string { - return fmt.Sprintf(" Requests/sec:\t"+r.precision+"\n", r.rps) + return fmt.Sprintf(" Requests/sec:\t"+r.precision+"\n", r.stats.RPS) } func (r *report) processResult(res *Result) { if res.Err != nil { - r.errorDist[res.Err.Error()]++ + r.stats.ErrorDist[res.Err.Error()]++ return } dur := res.Duration() - r.lats = append(r.lats, dur.Seconds()) - r.avgTotal += dur.Seconds() + 
r.stats.Lats = append(r.stats.Lats, dur.Seconds()) + r.stats.AvgTotal += dur.Seconds() if r.sps != nil { r.sps.Add(res.Start, dur) } @@ -197,19 +187,19 @@ func (r *report) processResults() { for res := range r.results { r.processResult(&res) } - r.total = time.Since(st) + r.stats.Total = time.Since(st) - r.rps = float64(len(r.lats)) / r.total.Seconds() - r.average = r.avgTotal / float64(len(r.lats)) - for i := range r.lats { - dev := r.lats[i] - r.average - r.stddev += dev * dev + r.stats.RPS = float64(len(r.stats.Lats)) / r.stats.Total.Seconds() + r.stats.Average = r.stats.AvgTotal / float64(len(r.stats.Lats)) + for i := range r.stats.Lats { + dev := r.stats.Lats[i] - r.stats.Average + r.stats.Stddev += dev * dev } - r.stddev = math.Sqrt(r.stddev / float64(len(r.lats))) - sort.Float64s(r.lats) - if len(r.lats) > 0 { - r.fastest = r.lats[0] - r.slowest = r.lats[len(r.lats)-1] + r.stats.Stddev = math.Sqrt(r.stats.Stddev / float64(len(r.stats.Lats))) + sort.Float64s(r.stats.Lats) + if len(r.stats.Lats) > 0 { + r.stats.Fastest = r.stats.Lats[0] + r.stats.Slowest = r.stats.Lats[len(r.stats.Lats)-1] } } @@ -235,7 +225,7 @@ func percentiles(nums []float64) (data []float64) { } func (r *report) sprintLatencies() string { - data := percentiles(r.lats) + data := percentiles(r.stats.Lats) s := fmt.Sprintf("\nLatency distribution:\n") for i := 0; i < len(pctls); i++ { if data[i] > 0 { @@ -249,15 +239,15 @@ func (r *report) histogram() string { bc := 10 buckets := make([]float64, bc+1) counts := make([]int, bc+1) - bs := (r.slowest - r.fastest) / float64(bc) + bs := (r.stats.Slowest - r.stats.Fastest) / float64(bc) for i := 0; i < bc; i++ { - buckets[i] = r.fastest + bs*float64(i) + buckets[i] = r.stats.Fastest + bs*float64(i) } - buckets[bc] = r.slowest + buckets[bc] = r.stats.Slowest var bi int var max int - for i := 0; i < len(r.lats); { - if r.lats[i] <= buckets[bi] { + for i := 0; i < len(r.stats.Lats); { + if r.stats.Lats[i] <= buckets[bi] { i++ counts[bi]++ if max < counts[bi] { @@ -281,7 +271,7 @@ func (r *report) histogram() string { func (r *report) errors() string { s := fmt.Sprintf("\nError distribution:\n") - for err, num := range r.errorDist { + for err, num := range r.stats.ErrorDist { s += fmt.Sprintf(" [%d]\t%s\n", num, err) } return s diff --git a/github.com/coreos/etcd/pkg/report/report_test.go b/github.com/coreos/etcd/pkg/report/report_test.go index b56183e4a7..6f073f3e8d 100644 --- a/github.com/coreos/etcd/pkg/report/report_test.go +++ b/github.com/coreos/etcd/pkg/report/report_test.go @@ -81,3 +81,34 @@ func TestReport(t *testing.T) { } } } + +func TestWeightedReport(t *testing.T) { + r := NewWeightedReport(NewReport("%f"), "%f") + go func() { + start := time.Now() + for i := 0; i < 5; i++ { + end := start.Add(time.Second) + r.Results() <- Result{Start: start, End: end, Weight: 2.0} + start = end + } + r.Results() <- Result{Start: start, End: start.Add(time.Second), Err: fmt.Errorf("oops")} + close(r.Results()) + }() + + stats := <-r.Stats() + stats.TimeSeries = nil // ignore timeseries since it uses wall clock + wStats := Stats{ + AvgTotal: 10.0, + Fastest: 0.5, + Slowest: 0.5, + Average: 0.5, + Stddev: 0.0, + Total: stats.Total, + RPS: 10.0 / stats.Total.Seconds(), + ErrorDist: map[string]int{"oops": 1}, + Lats: []float64{0.5, 0.5, 0.5, 0.5, 0.5}, + } + if !reflect.DeepEqual(stats, wStats) { + t.Fatalf("got %+v, want %+v", stats, wStats) + } +} diff --git a/github.com/coreos/etcd/pkg/report/weighted.go b/github.com/coreos/etcd/pkg/report/weighted.go new file mode 100644 index 
0000000000..411214f6d1 --- /dev/null +++ b/github.com/coreos/etcd/pkg/report/weighted.go @@ -0,0 +1,101 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// the file is borrowed from github.com/rakyll/boom/boomer/print.go + +package report + +import ( + "time" +) + +type weightedReport struct { + baseReport Report + + report *report + results chan Result + weightTotal float64 +} + +// NewWeightedReport returns a report that includes +// both weighted and unweighted statistics. +func NewWeightedReport(r Report, precision string) Report { + return &weightedReport{ + baseReport: r, + report: newReport(precision), + results: make(chan Result, 16), + } +} + +func (wr *weightedReport) Results() chan<- Result { return wr.results } + +func (wr *weightedReport) Run() <-chan string { + donec := make(chan string, 2) + go func() { + defer close(donec) + basec, rc := make(chan string, 1), make(chan Stats, 1) + go func() { basec <- (<-wr.baseReport.Run()) }() + go func() { rc <- (<-wr.report.Stats()) }() + go wr.processResults() + wr.report.stats = wr.reweighStat(<-rc) + donec <- wr.report.String() + donec <- (<-basec) + }() + return donec +} + +func (wr *weightedReport) Stats() <-chan Stats { + donec := make(chan Stats, 2) + go func() { + defer close(donec) + basec, rc := make(chan Stats, 1), make(chan Stats, 1) + go func() { basec <- (<-wr.baseReport.Stats()) }() + go func() { rc <- (<-wr.report.Stats()) }() + go wr.processResults() + donec <- wr.reweighStat(<-rc) + donec <- (<-basec) + }() + return donec +} + +func (wr *weightedReport) processResults() { + defer close(wr.report.results) + defer close(wr.baseReport.Results()) + for res := range wr.results { + wr.processResult(res) + wr.baseReport.Results() <- res + } +} + +func (wr *weightedReport) processResult(res Result) { + if res.Err != nil { + wr.report.results <- res + return + } + if res.Weight == 0 { + res.Weight = 1.0 + } + wr.weightTotal += res.Weight + res.End = res.Start.Add(time.Duration(float64(res.End.Sub(res.Start)) / res.Weight)) + res.Weight = 1.0 + wr.report.results <- res +} + +func (wr *weightedReport) reweighStat(s Stats) Stats { + weightCoef := wr.weightTotal / float64(len(s.Lats)) + // weight > 1 => processing more than one request + s.RPS *= weightCoef + s.AvgTotal *= weightCoef * weightCoef + return s +} diff --git a/github.com/coreos/etcd/pkg/schedule/schedule.go b/github.com/coreos/etcd/pkg/schedule/schedule.go index 79c59b0128..bf8528b753 100644 --- a/github.com/coreos/etcd/pkg/schedule/schedule.go +++ b/github.com/coreos/etcd/pkg/schedule/schedule.go @@ -88,8 +88,6 @@ func (f *fifo) Schedule(j Job) { } } f.pendings = append(f.pendings, j) - - return } func (f *fifo) Pending() int { diff --git a/github.com/coreos/etcd/pkg/srv/srv.go b/github.com/coreos/etcd/pkg/srv/srv.go new file mode 100644 index 0000000000..fefcbcb4b8 --- /dev/null +++ b/github.com/coreos/etcd/pkg/srv/srv.go @@ -0,0 +1,140 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, 
Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package srv looks up DNS SRV records. +package srv + +import ( + "fmt" + "net" + "net/url" + "strings" + + "github.com/coreos/etcd/pkg/types" +) + +var ( + // indirection for testing + lookupSRV = net.LookupSRV // net.DefaultResolver.LookupSRV when ctxs don't conflict + resolveTCPAddr = net.ResolveTCPAddr +) + +// GetCluster gets the cluster information via DNS discovery. +// Also sees each entry as a separate instance. +func GetCluster(service, name, dns string, apurls types.URLs) ([]string, error) { + tempName := int(0) + tcp2ap := make(map[string]url.URL) + + // First, resolve the apurls + for _, url := range apurls { + tcpAddr, err := resolveTCPAddr("tcp", url.Host) + if err != nil { + return nil, err + } + tcp2ap[tcpAddr.String()] = url + } + + stringParts := []string{} + updateNodeMap := func(service, scheme string) error { + _, addrs, err := lookupSRV(service, "tcp", dns) + if err != nil { + return err + } + for _, srv := range addrs { + port := fmt.Sprintf("%d", srv.Port) + host := net.JoinHostPort(srv.Target, port) + tcpAddr, terr := resolveTCPAddr("tcp", host) + if terr != nil { + err = terr + continue + } + n := "" + url, ok := tcp2ap[tcpAddr.String()] + if ok { + n = name + } + if n == "" { + n = fmt.Sprintf("%d", tempName) + tempName++ + } + // SRV records have a trailing dot but URL shouldn't. + shortHost := strings.TrimSuffix(srv.Target, ".") + urlHost := net.JoinHostPort(shortHost, port) + stringParts = append(stringParts, fmt.Sprintf("%s=%s://%s", n, scheme, urlHost)) + if ok && url.Scheme != scheme { + err = fmt.Errorf("bootstrap at %s from DNS for %s has scheme mismatch with expected peer %s", scheme+"://"+urlHost, service, url.String()) + } + } + if len(stringParts) == 0 { + return err + } + return nil + } + + failCount := 0 + err := updateNodeMap(service+"-ssl", "https") + srvErr := make([]string, 2) + if err != nil { + srvErr[0] = fmt.Sprintf("error querying DNS SRV records for _%s-ssl %s", service, err) + failCount++ + } + err = updateNodeMap(service, "http") + if err != nil { + srvErr[1] = fmt.Sprintf("error querying DNS SRV records for _%s %s", service, err) + failCount++ + } + if failCount == 2 { + return nil, fmt.Errorf("srv: too many errors querying DNS SRV records (%q, %q)", srvErr[0], srvErr[1]) + } + return stringParts, nil +} + +type SRVClients struct { + Endpoints []string + SRVs []*net.SRV +} + +// GetClient looks up the client endpoints for a service and domain. +func GetClient(service, domain string) (*SRVClients, error) { + var urls []*url.URL + var srvs []*net.SRV + + updateURLs := func(service, scheme string) error { + _, addrs, err := lookupSRV(service, "tcp", domain) + if err != nil { + return err + } + for _, srv := range addrs { + urls = append(urls, &url.URL{ + Scheme: scheme, + Host: net.JoinHostPort(srv.Target, fmt.Sprintf("%d", srv.Port)), + }) + } + srvs = append(srvs, addrs...) 
+ return nil + } + + errHTTPS := updateURLs(service+"-ssl", "https") + errHTTP := updateURLs(service, "http") + + if errHTTPS != nil && errHTTP != nil { + return nil, fmt.Errorf("dns lookup errors: %s and %s", errHTTPS, errHTTP) + } + + endpoints := make([]string, len(urls)) + for i := range urls { + endpoints[i] = urls[i].String() + } + return &SRVClients{Endpoints: endpoints, SRVs: srvs}, nil +} diff --git a/github.com/coreos/etcd/discovery/srv_test.go b/github.com/coreos/etcd/pkg/srv/srv_test.go similarity index 59% rename from github.com/coreos/etcd/discovery/srv_test.go rename to github.com/coreos/etcd/pkg/srv/srv_test.go index b9914a5544..0386c9d2a0 100644 --- a/github.com/coreos/etcd/discovery/srv_test.go +++ b/github.com/coreos/etcd/pkg/srv/srv_test.go @@ -12,11 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -package discovery +package srv import ( "errors" "net" + "reflect" "strings" "testing" @@ -110,12 +111,90 @@ func TestSRVGetCluster(t *testing.T) { return "", nil, errors.New("Unknown service in mock") } urls := testutil.MustNewURLs(t, tt.urls) - str, err := SRVGetCluster(name, "example.com", urls) + str, err := GetCluster("etcd-server", name, "example.com", urls) if err != nil { t.Fatalf("%d: err: %#v", i, err) } - if str != tt.expected { + if strings.Join(str, ",") != tt.expected { t.Errorf("#%d: cluster = %s, want %s", i, str, tt.expected) } } } + +func TestSRVDiscover(t *testing.T) { + defer func() { lookupSRV = net.LookupSRV }() + + tests := []struct { + withSSL []*net.SRV + withoutSSL []*net.SRV + expected []string + }{ + { + []*net.SRV{}, + []*net.SRV{}, + []string{}, + }, + { + []*net.SRV{ + {Target: "10.0.0.1", Port: 2480}, + {Target: "10.0.0.2", Port: 2480}, + {Target: "10.0.0.3", Port: 2480}, + }, + []*net.SRV{}, + []string{"https://10.0.0.1:2480", "https://10.0.0.2:2480", "https://10.0.0.3:2480"}, + }, + { + []*net.SRV{ + {Target: "10.0.0.1", Port: 2480}, + {Target: "10.0.0.2", Port: 2480}, + {Target: "10.0.0.3", Port: 2480}, + }, + []*net.SRV{ + {Target: "10.0.0.1", Port: 7001}, + }, + []string{"https://10.0.0.1:2480", "https://10.0.0.2:2480", "https://10.0.0.3:2480", "http://10.0.0.1:7001"}, + }, + { + []*net.SRV{ + {Target: "10.0.0.1", Port: 2480}, + {Target: "10.0.0.2", Port: 2480}, + {Target: "10.0.0.3", Port: 2480}, + }, + []*net.SRV{ + {Target: "10.0.0.1", Port: 7001}, + }, + []string{"https://10.0.0.1:2480", "https://10.0.0.2:2480", "https://10.0.0.3:2480", "http://10.0.0.1:7001"}, + }, + { + []*net.SRV{ + {Target: "a.example.com", Port: 2480}, + {Target: "b.example.com", Port: 2480}, + {Target: "c.example.com", Port: 2480}, + }, + []*net.SRV{}, + []string{"https://a.example.com:2480", "https://b.example.com:2480", "https://c.example.com:2480"}, + }, + } + + for i, tt := range tests { + lookupSRV = func(service string, proto string, domain string) (string, []*net.SRV, error) { + if service == "etcd-client-ssl" { + return "", tt.withSSL, nil + } + if service == "etcd-client" { + return "", tt.withoutSSL, nil + } + return "", nil, errors.New("Unknown service in mock") + } + + srvs, err := GetClient("etcd-client", "example.com") + if err != nil { + t.Fatalf("%d: err: %#v", i, err) + } + + if !reflect.DeepEqual(srvs.Endpoints, tt.expected) { + t.Errorf("#%d: endpoints = %v, want %v", i, srvs.Endpoints, tt.expected) + } + + } +} diff --git a/github.com/coreos/etcd/pkg/stringutil/stringutil.go b/github.com/coreos/etcd/pkg/stringutil/stringutil.go index 7f6aeb8a6a..44d637c20a 100644 --- 
a/github.com/coreos/etcd/pkg/stringutil/stringutil.go +++ b/github.com/coreos/etcd/pkg/stringutil/stringutil.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +// Package stringutil exports string utility functions. package stringutil import "math/rand" @@ -20,7 +21,7 @@ const ( chars = "abcdefghijklmnopqrstuvwxyz0123456789" ) -// UniqueStrings retruns a slice of randomly generated unique strings. +// UniqueStrings returns a slice of randomly generated unique strings. func UniqueStrings(maxlen uint, n int) []string { exist := make(map[string]bool) ss := make([]string, 0) @@ -36,7 +37,7 @@ func UniqueStrings(maxlen uint, n int) []string { return ss } -// RandomStrings retruns a slice of randomly generated strings. +// RandomStrings returns a slice of randomly generated strings. func RandomStrings(maxlen uint, n int) []string { ss := make([]string, 0) for i := 0; i < n; i++ { diff --git a/github.com/coreos/etcd/pkg/testutil/assert.go b/github.com/coreos/etcd/pkg/testutil/assert.go new file mode 100644 index 0000000000..8bd3922ee0 --- /dev/null +++ b/github.com/coreos/etcd/pkg/testutil/assert.go @@ -0,0 +1,58 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package testutil + +import ( + "fmt" + "reflect" + "testing" +) + +func AssertEqual(t *testing.T, e, a interface{}, msg ...string) { + if (e == nil || a == nil) && (isNil(e) && isNil(a)) { + return + } + if reflect.DeepEqual(e, a) { + return + } + s := "" + if len(msg) > 1 { + s = msg[0] + ": " + } + s = fmt.Sprintf("%sexpected %+v, got %+v", s, e, a) + FatalStack(t, s) +} + +func AssertNil(t *testing.T, v interface{}) { + AssertEqual(t, nil, v) +} + +func AssertNotNil(t *testing.T, v interface{}) { + if v == nil { + t.Fatalf("expected non-nil, got %+v", v) + } +} + +func AssertTrue(t *testing.T, v bool, msg ...string) { + AssertEqual(t, true, v, msg...) +} + +func AssertFalse(t *testing.T, v bool, msg ...string) { + AssertEqual(t, false, v, msg...) 
+} + +func isNil(v interface{}) bool { + return v == nil || reflect.ValueOf(v).IsNil() +} diff --git a/github.com/coreos/etcd/pkg/testutil/leak.go b/github.com/coreos/etcd/pkg/testutil/leak.go index 12d4408e32..3bf7e6b671 100644 --- a/github.com/coreos/etcd/pkg/testutil/leak.go +++ b/github.com/coreos/etcd/pkg/testutil/leak.go @@ -118,6 +118,8 @@ func interestingGoroutines() (gs []string) { } stack := strings.TrimSpace(sl[1]) if stack == "" || + strings.Contains(stack, "sync.(*WaitGroup).Done") || + strings.Contains(stack, "os.(*file).close") || strings.Contains(stack, "created by os/signal.init") || strings.Contains(stack, "runtime/panic.go") || strings.Contains(stack, "created by testing.RunTests") || diff --git a/github.com/coreos/etcd/pkg/transport/listener.go b/github.com/coreos/etcd/pkg/transport/listener.go index e024f3c6bf..33ba17fe12 100644 --- a/github.com/coreos/etcd/pkg/transport/listener.go +++ b/github.com/coreos/etcd/pkg/transport/listener.go @@ -52,20 +52,22 @@ func wrapTLS(addr, scheme string, tlsinfo *TLSInfo, l net.Listener) (net.Listene if scheme != "https" && scheme != "unixs" { return l, nil } - return newTLSListener(l, tlsinfo) + return newTLSListener(l, tlsinfo, checkSAN) } type TLSInfo struct { - CertFile string - KeyFile string - CAFile string - TrustedCAFile string - ClientCertAuth bool + CertFile string + KeyFile string + CAFile string + TrustedCAFile string + ClientCertAuth bool + CRLFile string + InsecureSkipVerify bool // ServerName ensures the cert matches the given host in case of discovery / virtual hosting ServerName string - // HandshakeFailure is optinally called when a connection fails to handshake. The + // HandshakeFailure is optionally called when a connection fails to handshake. The // connection will be closed immediately afterwards. HandshakeFailure func(*tls.Conn, error) @@ -77,7 +79,7 @@ type TLSInfo struct { } func (info TLSInfo) String() string { - return fmt.Sprintf("cert = %s, key = %s, ca = %s, trusted-ca = %s, client-cert-auth = %v", info.CertFile, info.KeyFile, info.CAFile, info.TrustedCAFile, info.ClientCertAuth) + return fmt.Sprintf("cert = %s, key = %s, ca = %s, trusted-ca = %s, client-cert-auth = %v, crl-file = %s", info.CertFile, info.KeyFile, info.CAFile, info.TrustedCAFile, info.ClientCertAuth, info.CRLFile) } func (info TLSInfo) Empty() bool { @@ -172,6 +174,14 @@ func (info TLSInfo) baseConfig() (*tls.Config, error) { MinVersion: tls.VersionTLS12, ServerName: info.ServerName, } + // this only reloads certs when there's a client request + // TODO: support server-side refresh (e.g. 
inotify, SIGHUP), caching + cfg.GetCertificate = func(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) { + return tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc) + } + cfg.GetClientCertificate = func(unused *tls.CertificateRequestInfo) (*tls.Certificate, error) { + return tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc) + } return cfg, nil } @@ -227,6 +237,7 @@ func (info TLSInfo) ClientConfig() (*tls.Config, error) { } else { cfg = &tls.Config{ServerName: info.ServerName} } + cfg.InsecureSkipVerify = info.InsecureSkipVerify CAFiles := info.cafiles() if len(CAFiles) > 0 { diff --git a/github.com/coreos/etcd/pkg/transport/listener_tls.go b/github.com/coreos/etcd/pkg/transport/listener_tls.go index 53e6a10489..6f1600945c 100644 --- a/github.com/coreos/etcd/pkg/transport/listener_tls.go +++ b/github.com/coreos/etcd/pkg/transport/listener_tls.go @@ -15,23 +15,37 @@ package transport import ( + "context" "crypto/tls" + "crypto/x509" "fmt" + "io/ioutil" "net" + "strings" "sync" ) // tlsListener overrides a TLS listener so it will reject client -// certificates with insufficient SAN credentials. +// certificates with insufficient SAN credentials or CRL revoked +// certificates. type tlsListener struct { net.Listener connc chan net.Conn donec chan struct{} err error handshakeFailure func(*tls.Conn, error) + check tlsCheckFunc } -func newTLSListener(l net.Listener, tlsinfo *TLSInfo) (net.Listener, error) { +type tlsCheckFunc func(context.Context, *tls.Conn) error + +// NewTLSListener handshakes TLS connections and performs optional CRL checking. +func NewTLSListener(l net.Listener, tlsinfo *TLSInfo) (net.Listener, error) { + check := func(context.Context, *tls.Conn) error { return nil } + return newTLSListener(l, tlsinfo, check) +} + +func newTLSListener(l net.Listener, tlsinfo *TLSInfo, check tlsCheckFunc) (net.Listener, error) { if tlsinfo == nil || tlsinfo.Empty() { l.Close() return nil, fmt.Errorf("cannot listen on TLS for %s: KeyFile and CertFile are not presented", l.Addr().String()) @@ -40,11 +54,32 @@ func newTLSListener(l net.Listener, tlsinfo *TLSInfo) (net.Listener, error) { if err != nil { return nil, err } + + hf := tlsinfo.HandshakeFailure + if hf == nil { + hf = func(*tls.Conn, error) {} + } + + if len(tlsinfo.CRLFile) > 0 { + prevCheck := check + check = func(ctx context.Context, tlsConn *tls.Conn) error { + if err := prevCheck(ctx, tlsConn); err != nil { + return err + } + st := tlsConn.ConnectionState() + if certs := st.PeerCertificates; len(certs) > 0 { + return checkCRL(tlsinfo.CRLFile, certs) + } + return nil + } + } + tlsl := &tlsListener{ Listener: tls.NewListener(l, tlscfg), connc: make(chan net.Conn), donec: make(chan struct{}), - handshakeFailure: tlsinfo.HandshakeFailure, + handshakeFailure: hf, + check: check, } go tlsl.acceptLoop() return tlsl, nil @@ -59,6 +94,15 @@ func (l *tlsListener) Accept() (net.Conn, error) { } } +func checkSAN(ctx context.Context, tlsConn *tls.Conn) error { + st := tlsConn.ConnectionState() + if certs := st.PeerCertificates; len(certs) > 0 { + addr := tlsConn.RemoteAddr().String() + return checkCertSAN(ctx, certs[0], addr) + } + return nil +} + // acceptLoop launches each TLS handshake in a separate goroutine // to prevent a hanging TLS connection from blocking other connections. 
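Aside: before the acceptLoop implementation below, a minimal sketch of how the newly exported NewTLSListener and the CRLFile field might be wired together. The certificate, key, CA, and CRL file names are placeholders, and the single-connection handling is a stub for illustration only:

    package main

    import (
        "log"
        "net"

        "github.com/coreos/etcd/pkg/transport"
    )

    func main() {
        l, err := net.Listen("tcp", "127.0.0.1:2379")
        if err != nil {
            log.Fatal(err)
        }
        tlsinfo := &transport.TLSInfo{
            CertFile:      "server.crt",  // placeholder path
            KeyFile:       "server.key",  // placeholder path
            TrustedCAFile: "ca.crt",      // placeholder path
            // Peer certificates whose serial numbers appear in this CRL
            // are rejected by the post-handshake check.
            CRLFile: "revoked.crl",
        }
        tl, err := transport.NewTLSListener(l, tlsinfo)
        if err != nil {
            log.Fatal(err)
        }
        // The handshake and CRL check have already run in acceptLoop
        // by the time Accept hands back a connection.
        conn, err := tl.Accept()
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("accepted %s", conn.RemoteAddr())
        conn.Close()
        tl.Close()
    }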
func (l *tlsListener) acceptLoop() { @@ -66,9 +110,9 @@ func (l *tlsListener) acceptLoop() { var pendingMu sync.Mutex pending := make(map[net.Conn]struct{}) - stopc := make(chan struct{}) + ctx, cancel := context.WithCancel(context.Background()) defer func() { - close(stopc) + cancel() pendingMu.Lock() for c := range pending { c.Close() @@ -103,33 +147,124 @@ func (l *tlsListener) acceptLoop() { pendingMu.Lock() delete(pending, conn) pendingMu.Unlock() + if herr != nil { - if l.handshakeFailure != nil { - l.handshakeFailure(tlsConn, herr) - } + l.handshakeFailure(tlsConn, herr) return } - - st := tlsConn.ConnectionState() - if len(st.PeerCertificates) > 0 { - cert := st.PeerCertificates[0] - if len(cert.IPAddresses) > 0 || len(cert.DNSNames) > 0 { - addr := tlsConn.RemoteAddr().String() - h, _, herr := net.SplitHostPort(addr) - if herr != nil || cert.VerifyHostname(h) != nil { - return - } - } + if err := l.check(ctx, tlsConn); err != nil { + l.handshakeFailure(tlsConn, err) + return } + select { case l.connc <- tlsConn: conn = nil - case <-stopc: + case <-ctx.Done(): } }() } } +func checkCRL(crlPath string, cert []*x509.Certificate) error { + // TODO: cache + crlBytes, err := ioutil.ReadFile(crlPath) + if err != nil { + return err + } + certList, err := x509.ParseCRL(crlBytes) + if err != nil { + return err + } + revokedSerials := make(map[string]struct{}) + for _, rc := range certList.TBSCertList.RevokedCertificates { + revokedSerials[string(rc.SerialNumber.Bytes())] = struct{}{} + } + for _, c := range cert { + serial := string(c.SerialNumber.Bytes()) + if _, ok := revokedSerials[serial]; ok { + return fmt.Errorf("transport: certificate serial %x revoked", serial) + } + } + return nil +} + +func checkCertSAN(ctx context.Context, cert *x509.Certificate, remoteAddr string) error { + if len(cert.IPAddresses) == 0 && len(cert.DNSNames) == 0 { + return nil + } + h, _, herr := net.SplitHostPort(remoteAddr) + if herr != nil { + return herr + } + if len(cert.IPAddresses) > 0 { + cerr := cert.VerifyHostname(h) + if cerr == nil { + return nil + } + if len(cert.DNSNames) == 0 { + return cerr + } + } + if len(cert.DNSNames) > 0 { + ok, err := isHostInDNS(ctx, h, cert.DNSNames) + if ok { + return nil + } + errStr := "" + if err != nil { + errStr = " (" + err.Error() + ")" + } + return fmt.Errorf("tls: %q does not match any of DNSNames %q"+errStr, h, cert.DNSNames) + } + return nil +} + +func isHostInDNS(ctx context.Context, host string, dnsNames []string) (ok bool, err error) { + // reverse lookup + wildcards, names := []string{}, []string{} + for _, dns := range dnsNames { + if strings.HasPrefix(dns, "*.") { + wildcards = append(wildcards, dns[1:]) + } else { + names = append(names, dns) + } + } + lnames, lerr := net.DefaultResolver.LookupAddr(ctx, host) + for _, name := range lnames { + // strip trailing '.' from PTR record + if name[len(name)-1] == '.' 
{ + name = name[:len(name)-1] + } + for _, wc := range wildcards { + if strings.HasSuffix(name, wc) { + return true, nil + } + } + for _, n := range names { + if n == name { + return true, nil + } + } + } + err = lerr + + // forward lookup + for _, dns := range names { + addrs, lerr := net.DefaultResolver.LookupHost(ctx, dns) + if lerr != nil { + err = lerr + continue + } + for _, addr := range addrs { + if addr == host { + return true, nil + } + } + } + return false, err +} + func (l *tlsListener) Close() error { err := l.Listener.Close() <-l.donec diff --git a/github.com/coreos/etcd/pkg/wait/wait.go b/github.com/coreos/etcd/pkg/wait/wait.go index 0f31eeb979..34fa237e82 100644 --- a/github.com/coreos/etcd/pkg/wait/wait.go +++ b/github.com/coreos/etcd/pkg/wait/wait.go @@ -21,22 +21,29 @@ import ( "sync" ) +// Wait is an interface that provides the ability to wait and trigger events that +// are associated with IDs. type Wait interface { + // Register waits returns a chan that waits on the given ID. + // The chan will be triggered when Trigger is called with + // the same ID. Register(id uint64) <-chan interface{} + // Trigger triggers the waiting chans with the given ID. Trigger(id uint64, x interface{}) IsRegistered(id uint64) bool } -type List struct { +type list struct { l sync.Mutex m map[uint64]chan interface{} } -func New() *List { - return &List{m: make(map[uint64]chan interface{})} +// New creates a Wait. +func New() Wait { + return &list{m: make(map[uint64]chan interface{})} } -func (w *List) Register(id uint64) <-chan interface{} { +func (w *list) Register(id uint64) <-chan interface{} { w.l.Lock() defer w.l.Unlock() ch := w.m[id] @@ -49,7 +56,7 @@ func (w *List) Register(id uint64) <-chan interface{} { return ch } -func (w *List) Trigger(id uint64, x interface{}) { +func (w *list) Trigger(id uint64, x interface{}) { w.l.Lock() ch := w.m[id] delete(w.m, id) @@ -60,7 +67,7 @@ func (w *List) Trigger(id uint64, x interface{}) { } } -func (w *List) IsRegistered(id uint64) bool { +func (w *list) IsRegistered(id uint64) bool { w.l.Lock() defer w.l.Unlock() _, ok := w.m[id] diff --git a/github.com/coreos/etcd/proxy/grpcproxy/adapter/auth_client_adapter.go b/github.com/coreos/etcd/proxy/grpcproxy/adapter/auth_client_adapter.go new file mode 100644 index 0000000000..7f38a9ac76 --- /dev/null +++ b/github.com/coreos/etcd/proxy/grpcproxy/adapter/auth_client_adapter.go @@ -0,0 +1,91 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
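The Wait interface documented in pkg/wait above pairs Register and Trigger through an ID: Register hands back a channel, and a later Trigger with the same ID delivers a value on it and drops the registration. A small usage sketch, illustrative only and not code from this diff:

package main

import (
	"fmt"

	"github.com/coreos/etcd/pkg/wait"
)

func main() {
	w := wait.New()          // after this change, New returns the Wait interface
	ch := w.Register(42)     // channel tied to ID 42
	w.Trigger(42, "applied") // delivers the value and unregisters the ID
	fmt.Println(<-ch)        // prints "applied"
}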
+ +package adapter + +import ( + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +type as2ac struct{ as pb.AuthServer } + +func AuthServerToAuthClient(as pb.AuthServer) pb.AuthClient { + return &as2ac{as} +} + +func (s *as2ac) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (*pb.AuthEnableResponse, error) { + return s.as.AuthEnable(ctx, in) +} + +func (s *as2ac) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (*pb.AuthDisableResponse, error) { + return s.as.AuthDisable(ctx, in) +} + +func (s *as2ac) Authenticate(ctx context.Context, in *pb.AuthenticateRequest, opts ...grpc.CallOption) (*pb.AuthenticateResponse, error) { + return s.as.Authenticate(ctx, in) +} + +func (s *as2ac) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (*pb.AuthRoleAddResponse, error) { + return s.as.RoleAdd(ctx, in) +} + +func (s *as2ac) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (*pb.AuthRoleDeleteResponse, error) { + return s.as.RoleDelete(ctx, in) +} + +func (s *as2ac) RoleGet(ctx context.Context, in *pb.AuthRoleGetRequest, opts ...grpc.CallOption) (*pb.AuthRoleGetResponse, error) { + return s.as.RoleGet(ctx, in) +} + +func (s *as2ac) RoleList(ctx context.Context, in *pb.AuthRoleListRequest, opts ...grpc.CallOption) (*pb.AuthRoleListResponse, error) { + return s.as.RoleList(ctx, in) +} + +func (s *as2ac) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (*pb.AuthRoleRevokePermissionResponse, error) { + return s.as.RoleRevokePermission(ctx, in) +} + +func (s *as2ac) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (*pb.AuthRoleGrantPermissionResponse, error) { + return s.as.RoleGrantPermission(ctx, in) +} + +func (s *as2ac) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (*pb.AuthUserDeleteResponse, error) { + return s.as.UserDelete(ctx, in) +} + +func (s *as2ac) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (*pb.AuthUserAddResponse, error) { + return s.as.UserAdd(ctx, in) +} + +func (s *as2ac) UserGet(ctx context.Context, in *pb.AuthUserGetRequest, opts ...grpc.CallOption) (*pb.AuthUserGetResponse, error) { + return s.as.UserGet(ctx, in) +} + +func (s *as2ac) UserList(ctx context.Context, in *pb.AuthUserListRequest, opts ...grpc.CallOption) (*pb.AuthUserListResponse, error) { + return s.as.UserList(ctx, in) +} + +func (s *as2ac) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (*pb.AuthUserGrantRoleResponse, error) { + return s.as.UserGrantRole(ctx, in) +} + +func (s *as2ac) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (*pb.AuthUserRevokeRoleResponse, error) { + return s.as.UserRevokeRole(ctx, in) +} + +func (s *as2ac) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (*pb.AuthUserChangePasswordResponse, error) { + return s.as.UserChangePassword(ctx, in) +} diff --git a/github.com/coreos/etcd/proxy/grpcproxy/adapter/election_client_adapter.go b/github.com/coreos/etcd/proxy/grpcproxy/adapter/election_client_adapter.go new file mode 100644 index 0000000000..383c1b9d8f --- /dev/null +++ b/github.com/coreos/etcd/proxy/grpcproxy/adapter/election_client_adapter.go @@ -0,0 +1,79 
@@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package adapter + +import ( + "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb" + + "golang.org/x/net/context" + "google.golang.org/grpc" +) + +type es2ec struct{ es v3electionpb.ElectionServer } + +func ElectionServerToElectionClient(es v3electionpb.ElectionServer) v3electionpb.ElectionClient { + return &es2ec{es} +} + +func (s *es2ec) Campaign(ctx context.Context, r *v3electionpb.CampaignRequest, opts ...grpc.CallOption) (*v3electionpb.CampaignResponse, error) { + return s.es.Campaign(ctx, r) +} + +func (s *es2ec) Proclaim(ctx context.Context, r *v3electionpb.ProclaimRequest, opts ...grpc.CallOption) (*v3electionpb.ProclaimResponse, error) { + return s.es.Proclaim(ctx, r) +} + +func (s *es2ec) Leader(ctx context.Context, r *v3electionpb.LeaderRequest, opts ...grpc.CallOption) (*v3electionpb.LeaderResponse, error) { + return s.es.Leader(ctx, r) +} + +func (s *es2ec) Resign(ctx context.Context, r *v3electionpb.ResignRequest, opts ...grpc.CallOption) (*v3electionpb.ResignResponse, error) { + return s.es.Resign(ctx, r) +} + +func (s *es2ec) Observe(ctx context.Context, in *v3electionpb.LeaderRequest, opts ...grpc.CallOption) (v3electionpb.Election_ObserveClient, error) { + cs := newPipeStream(ctx, func(ss chanServerStream) error { + return s.es.Observe(in, &es2ecServerStream{ss}) + }) + return &es2ecClientStream{cs}, nil +} + +// es2ecClientStream implements Election_ObserveClient +type es2ecClientStream struct{ chanClientStream } + +// es2ecServerStream implements Election_ObserveServer +type es2ecServerStream struct{ chanServerStream } + +func (s *es2ecClientStream) Send(rr *v3electionpb.LeaderRequest) error { + return s.SendMsg(rr) +} +func (s *es2ecClientStream) Recv() (*v3electionpb.LeaderResponse, error) { + var v interface{} + if err := s.RecvMsg(&v); err != nil { + return nil, err + } + return v.(*v3electionpb.LeaderResponse), nil +} + +func (s *es2ecServerStream) Send(rr *v3electionpb.LeaderResponse) error { + return s.SendMsg(rr) +} +func (s *es2ecServerStream) Recv() (*v3electionpb.LeaderRequest, error) { + var v interface{} + if err := s.RecvMsg(&v); err != nil { + return nil, err + } + return v.(*v3electionpb.LeaderRequest), nil +} diff --git a/github.com/coreos/etcd/proxy/grpcproxy/adapter/lock_client_adapter.go b/github.com/coreos/etcd/proxy/grpcproxy/adapter/lock_client_adapter.go new file mode 100644 index 0000000000..05e5cb020a --- /dev/null +++ b/github.com/coreos/etcd/proxy/grpcproxy/adapter/lock_client_adapter.go @@ -0,0 +1,36 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package adapter + +import ( + "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb" + + "golang.org/x/net/context" + "google.golang.org/grpc" +) + +type ls2lsc struct{ ls v3lockpb.LockServer } + +func LockServerToLockClient(ls v3lockpb.LockServer) v3lockpb.LockClient { + return &ls2lsc{ls} +} + +func (s *ls2lsc) Lock(ctx context.Context, r *v3lockpb.LockRequest, opts ...grpc.CallOption) (*v3lockpb.LockResponse, error) { + return s.ls.Lock(ctx, r) +} + +func (s *ls2lsc) Unlock(ctx context.Context, r *v3lockpb.UnlockRequest, opts ...grpc.CallOption) (*v3lockpb.UnlockResponse, error) { + return s.ls.Unlock(ctx, r) +} diff --git a/github.com/coreos/etcd/proxy/grpcproxy/adapter/maintenance_client_adapter.go b/github.com/coreos/etcd/proxy/grpcproxy/adapter/maintenance_client_adapter.go index 9b21bf2576..84c56f4eac 100644 --- a/github.com/coreos/etcd/proxy/grpcproxy/adapter/maintenance_client_adapter.go +++ b/github.com/coreos/etcd/proxy/grpcproxy/adapter/maintenance_client_adapter.go @@ -43,6 +43,10 @@ func (s *mts2mtc) Hash(ctx context.Context, r *pb.HashRequest, opts ...grpc.Call return s.mts.Hash(ctx, r) } +func (s *mts2mtc) MoveLeader(ctx context.Context, r *pb.MoveLeaderRequest, opts ...grpc.CallOption) (*pb.MoveLeaderResponse, error) { + return s.mts.MoveLeader(ctx, r) +} + func (s *mts2mtc) Snapshot(ctx context.Context, in *pb.SnapshotRequest, opts ...grpc.CallOption) (pb.Maintenance_SnapshotClient, error) { cs := newPipeStream(ctx, func(ss chanServerStream) error { return s.mts.Snapshot(in, &ss2scServerStream{ss}) diff --git a/github.com/coreos/etcd/proxy/grpcproxy/cache/store.go b/github.com/coreos/etcd/proxy/grpcproxy/cache/store.go index 9936ab3d21..70715e499d 100644 --- a/github.com/coreos/etcd/proxy/grpcproxy/cache/store.go +++ b/github.com/coreos/etcd/proxy/grpcproxy/cache/store.go @@ -12,6 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +// Package cache exports functionality for efficiently caching and mapping +// `RangeRequest`s to corresponding `RangeResponse`s. package cache import ( @@ -38,7 +40,7 @@ type Cache interface { Close() } -// keyFunc returns the key of an request, which is used to look up in the cache for it's caching response. +// keyFunc returns the key of a request, which is used to look up its caching response in the cache. func keyFunc(req *pb.RangeRequest) string { // TODO: use marshalTo to reduce allocation b, err := req.Marshal() diff --git a/github.com/coreos/etcd/proxy/grpcproxy/election.go b/github.com/coreos/etcd/proxy/grpcproxy/election.go new file mode 100644 index 0000000000..27115a81d7 --- /dev/null +++ b/github.com/coreos/etcd/proxy/grpcproxy/election.go @@ -0,0 +1,65 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package grpcproxy + +import ( + "golang.org/x/net/context" + + "github.com/coreos/etcd/clientv3" + "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb" +) + +type electionProxy struct { + client *clientv3.Client +} + +func NewElectionProxy(client *clientv3.Client) v3electionpb.ElectionServer { + return &electionProxy{client: client} +} + +func (ep *electionProxy) Campaign(ctx context.Context, req *v3electionpb.CampaignRequest) (*v3electionpb.CampaignResponse, error) { + return v3electionpb.NewElectionClient(ep.client.ActiveConnection()).Campaign(ctx, req) +} + +func (ep *electionProxy) Proclaim(ctx context.Context, req *v3electionpb.ProclaimRequest) (*v3electionpb.ProclaimResponse, error) { + return v3electionpb.NewElectionClient(ep.client.ActiveConnection()).Proclaim(ctx, req) +} + +func (ep *electionProxy) Leader(ctx context.Context, req *v3electionpb.LeaderRequest) (*v3electionpb.LeaderResponse, error) { + return v3electionpb.NewElectionClient(ep.client.ActiveConnection()).Leader(ctx, req) +} + +func (ep *electionProxy) Observe(req *v3electionpb.LeaderRequest, s v3electionpb.Election_ObserveServer) error { + conn := ep.client.ActiveConnection() + ctx, cancel := context.WithCancel(s.Context()) + defer cancel() + sc, err := v3electionpb.NewElectionClient(conn).Observe(ctx, req) + if err != nil { + return err + } + for { + rr, err := sc.Recv() + if err != nil { + return err + } + if err = s.Send(rr); err != nil { + return err + } + } +} + +func (ep *electionProxy) Resign(ctx context.Context, req *v3electionpb.ResignRequest) (*v3electionpb.ResignResponse, error) { + return v3electionpb.NewElectionClient(ep.client.ActiveConnection()).Resign(ctx, req) +} diff --git a/github.com/coreos/etcd/proxy/grpcproxy/kv.go b/github.com/coreos/etcd/proxy/grpcproxy/kv.go index 65f850ed42..7799d817e3 100644 --- a/github.com/coreos/etcd/proxy/grpcproxy/kv.go +++ b/github.com/coreos/etcd/proxy/grpcproxy/kv.go @@ -48,8 +48,9 @@ func (p *kvProxy) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeRespo cacheHits.Inc() return nil, err } + + cachedMisses.Inc() } - cachedMisses.Inc() resp, err := p.kv.Do(ctx, RangeRequestToOp(r)) if err != nil { @@ -99,31 +100,16 @@ func (p *kvProxy) txnToCache(reqs []*pb.RequestOp, resps []*pb.ResponseOp) { } func (p *kvProxy) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) { - txn := p.kv.Txn(ctx) - cmps := make([]clientv3.Cmp, len(r.Compare)) - thenops := make([]clientv3.Op, len(r.Success)) - elseops := make([]clientv3.Op, len(r.Failure)) - - for i := range r.Compare { - cmps[i] = (clientv3.Cmp)(*r.Compare[i]) - } - - for i := range r.Success { - thenops[i] = requestOpToOp(r.Success[i]) - } - - for i := range r.Failure { - elseops[i] = requestOpToOp(r.Failure[i]) - } - - resp, err := txn.If(cmps...).Then(thenops...).Else(elseops...).Commit() - + op := TxnRequestToOp(r) + opResp, err := p.kv.Do(ctx, op) if err != nil { return nil, err } + resp := opResp.Txn() + // txn may claim an outdated key is updated; be safe and invalidate for _, cmp := range r.Compare { - p.cache.Invalidate(cmp.Key, nil) + p.cache.Invalidate(cmp.Key, 
cmp.RangeEnd) } // update any fetched keys if resp.Succeeded { @@ -167,6 +153,10 @@ func requestOpToOp(union *pb.RequestOp) clientv3.Op { if tv.RequestDeleteRange != nil { return DelRequestToOp(tv.RequestDeleteRange) } + case *pb.RequestOp_RequestTxn: + if tv.RequestTxn != nil { + return TxnRequestToOp(tv.RequestTxn) + } } panic("unknown request") } @@ -219,3 +209,19 @@ func DelRequestToOp(r *pb.DeleteRangeRequest) clientv3.Op { } return clientv3.OpDelete(string(r.Key), opts...) } + +func TxnRequestToOp(r *pb.TxnRequest) clientv3.Op { + cmps := make([]clientv3.Cmp, len(r.Compare)) + thenops := make([]clientv3.Op, len(r.Success)) + elseops := make([]clientv3.Op, len(r.Failure)) + for i := range r.Compare { + cmps[i] = (clientv3.Cmp)(*r.Compare[i]) + } + for i := range r.Success { + thenops[i] = requestOpToOp(r.Success[i]) + } + for i := range r.Failure { + elseops[i] = requestOpToOp(r.Failure[i]) + } + return clientv3.OpTxn(cmps, thenops, elseops) +} diff --git a/github.com/coreos/etcd/proxy/grpcproxy/lease.go b/github.com/coreos/etcd/proxy/grpcproxy/lease.go index dd23425a28..cd7481da8a 100644 --- a/github.com/coreos/etcd/proxy/grpcproxy/lease.go +++ b/github.com/coreos/etcd/proxy/grpcproxy/lease.go @@ -73,7 +73,7 @@ func NewLeaseProxy(c *clientv3.Client) (pb.LeaseServer, <-chan struct{}) { } func (lp *leaseProxy) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) { - rp, err := lp.leaseClient.LeaseGrant(ctx, cr) + rp, err := lp.leaseClient.LeaseGrant(ctx, cr, grpc.FailFast(false)) if err != nil { return nil, err } @@ -137,7 +137,7 @@ func (lp *leaseProxy) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) error errc := make(chan error, 2) var lostLeaderC <-chan struct{} - if md, ok := metadata.FromContext(stream.Context()); ok { + if md, ok := metadata.FromOutgoingContext(stream.Context()); ok { v := md[rpctypes.MetadataRequireLeaderKey] if len(v) > 0 && v[0] == rpctypes.MetadataHasLeader { lostLeaderC = lp.leader.lostNotify() diff --git a/github.com/coreos/etcd/proxy/grpcproxy/lock.go b/github.com/coreos/etcd/proxy/grpcproxy/lock.go new file mode 100644 index 0000000000..804aff64a9 --- /dev/null +++ b/github.com/coreos/etcd/proxy/grpcproxy/lock.go @@ -0,0 +1,38 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
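The election proxy above and the lock proxy that follows are thin pass-throughs: each RPC is re-issued on the clientv3 client's active connection. Exposing them is then just gRPC registration. A hedged sketch, assuming a connected *clientv3.Client and a net.Listener are already available; the package and function names are illustrative, not part of this diff:

package proxyexample

import (
	"net"

	"google.golang.org/grpc"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb"
	"github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb"
	"github.com/coreos/etcd/proxy/grpcproxy"
)

// serveConcurrencyProxies registers the election and lock proxies on a plain
// gRPC server; TLS and graceful shutdown are omitted for brevity.
func serveConcurrencyProxies(client *clientv3.Client, l net.Listener) error {
	srv := grpc.NewServer()
	v3electionpb.RegisterElectionServer(srv, grpcproxy.NewElectionProxy(client))
	v3lockpb.RegisterLockServer(srv, grpcproxy.NewLockProxy(client))
	return srv.Serve(l)
}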
+ +package grpcproxy + +import ( + "golang.org/x/net/context" + + "github.com/coreos/etcd/clientv3" + "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb" +) + +type lockProxy struct { + client *clientv3.Client +} + +func NewLockProxy(client *clientv3.Client) v3lockpb.LockServer { + return &lockProxy{client: client} +} + +func (lp *lockProxy) Lock(ctx context.Context, req *v3lockpb.LockRequest) (*v3lockpb.LockResponse, error) { + return v3lockpb.NewLockClient(lp.client.ActiveConnection()).Lock(ctx, req) +} + +func (lp *lockProxy) Unlock(ctx context.Context, req *v3lockpb.UnlockRequest) (*v3lockpb.UnlockResponse, error) { + return v3lockpb.NewLockClient(lp.client.ActiveConnection()).Unlock(ctx, req) +} diff --git a/github.com/coreos/etcd/proxy/grpcproxy/maintenance.go b/github.com/coreos/etcd/proxy/grpcproxy/maintenance.go index 209dc94a71..2f57cbb307 100644 --- a/github.com/coreos/etcd/proxy/grpcproxy/maintenance.go +++ b/github.com/coreos/etcd/proxy/grpcproxy/maintenance.go @@ -15,6 +15,8 @@ package grpcproxy import ( + "io" + "golang.org/x/net/context" "github.com/coreos/etcd/clientv3" @@ -49,6 +51,9 @@ func (mp *maintenanceProxy) Snapshot(sr *pb.SnapshotRequest, stream pb.Maintenan for { rr, err := sc.Recv() if err != nil { + if err == io.EOF { + return nil + } return err } err = stream.Send(rr) @@ -72,3 +77,8 @@ func (mp *maintenanceProxy) Status(ctx context.Context, r *pb.StatusRequest) (*p conn := mp.client.ActiveConnection() return pb.NewMaintenanceClient(conn).Status(ctx, r) } + +func (mp *maintenanceProxy) MoveLeader(ctx context.Context, r *pb.MoveLeaderRequest) (*pb.MoveLeaderResponse, error) { + conn := mp.client.ActiveConnection() + return pb.NewMaintenanceClient(conn).MoveLeader(ctx, r) +} diff --git a/github.com/coreos/etcd/proxy/grpcproxy/watch.go b/github.com/coreos/etcd/proxy/grpcproxy/watch.go index 42748fd4a4..b960c94769 100644 --- a/github.com/coreos/etcd/proxy/grpcproxy/watch.go +++ b/github.com/coreos/etcd/proxy/grpcproxy/watch.go @@ -95,7 +95,7 @@ func (wp *watchProxy) Watch(stream pb.Watch_WatchServer) (err error) { } var lostLeaderC <-chan struct{} - if md, ok := metadata.FromContext(stream.Context()); ok { + if md, ok := metadata.FromOutgoingContext(stream.Context()); ok { v := md[rpctypes.MetadataRequireLeaderKey] if len(v) > 0 && v[0] == rpctypes.MetadataHasLeader { lostLeaderC = wp.leader.lostNotify() diff --git a/github.com/coreos/etcd/proxy/tcpproxy/userspace.go b/github.com/coreos/etcd/proxy/tcpproxy/userspace.go index 5de017a70d..807e76a3c9 100644 --- a/github.com/coreos/etcd/proxy/tcpproxy/userspace.go +++ b/github.com/coreos/etcd/proxy/tcpproxy/userspace.go @@ -15,7 +15,9 @@ package tcpproxy import ( + "fmt" "io" + "math/rand" "net" "sync" "time" @@ -29,6 +31,7 @@ var ( type remote struct { mu sync.Mutex + srv *net.SRV addr string inactive bool } @@ -59,14 +62,14 @@ func (r *remote) isActive() bool { type TCPProxy struct { Listener net.Listener - Endpoints []string + Endpoints []*net.SRV MonitorInterval time.Duration donec chan struct{} - mu sync.Mutex // guards the following fields - remotes []*remote - nextRemote int + mu sync.Mutex // guards the following fields + remotes []*remote + pickCount int // for round robin } func (tp *TCPProxy) Run() error { @@ -74,11 +77,17 @@ func (tp *TCPProxy) Run() error { if tp.MonitorInterval == 0 { tp.MonitorInterval = 5 * time.Minute } + for _, srv := range tp.Endpoints { + addr := fmt.Sprintf("%s:%d", srv.Target, srv.Port) + tp.remotes = append(tp.remotes, &remote{srv: srv, addr: addr}) + } + + eps := []string{} 
for _, ep := range tp.Endpoints { - tp.remotes = append(tp.remotes, &remote{addr: ep}) + eps = append(eps, fmt.Sprintf("%s:%d", ep.Target, ep.Port)) } + plog.Printf("ready to proxy client requests to %+v", eps) - plog.Printf("ready to proxy client requests to %v", tp.Endpoints) go tp.runMonitor() for { in, err := tp.Listener.Accept() @@ -90,10 +99,61 @@ func (tp *TCPProxy) Run() error { } } -func (tp *TCPProxy) numRemotes() int { - tp.mu.Lock() - defer tp.mu.Unlock() - return len(tp.remotes) +func (tp *TCPProxy) pick() *remote { + var weighted []*remote + var unweighted []*remote + + bestPr := uint16(65535) + w := 0 + // find best priority class + for _, r := range tp.remotes { + switch { + case !r.isActive(): + case r.srv.Priority < bestPr: + bestPr = r.srv.Priority + w = 0 + weighted, unweighted = nil, nil + unweighted = []*remote{r} + fallthrough + case r.srv.Priority == bestPr: + if r.srv.Weight > 0 { + weighted = append(weighted, r) + w += int(r.srv.Weight) + } else { + unweighted = append(unweighted, r) + } + } + } + if weighted != nil { + if len(unweighted) > 0 && rand.Intn(100) == 1 { + // In the presence of records containing weights greater + // than 0, records with weight 0 should have a very small + // chance of being selected. + r := unweighted[tp.pickCount%len(unweighted)] + tp.pickCount++ + return r + } + // choose a uniform random number between 0 and the sum computed + // (inclusive), and select the RR whose running sum value is the + // first in the selected order + choose := rand.Intn(w) + for i := 0; i < len(weighted); i++ { + choose -= int(weighted[i].srv.Weight) + if choose <= 0 { + return weighted[i] + } + } + } + if unweighted != nil { + for i := 0; i < len(tp.remotes); i++ { + picked := tp.remotes[tp.pickCount%len(tp.remotes)] + tp.pickCount++ + if picked.isActive() { + return picked + } + } + } + return nil } func (tp *TCPProxy) serve(in net.Conn) { @@ -102,10 +162,12 @@ func (tp *TCPProxy) serve(in net.Conn) { out net.Conn ) - for i := 0; i < tp.numRemotes(); i++ { + for { + tp.mu.Lock() remote := tp.pick() - if !remote.isActive() { - continue + tp.mu.Unlock() + if remote == nil { + break } // TODO: add timeout out, err = net.Dial("tcp", remote.addr) @@ -132,16 +194,6 @@ func (tp *TCPProxy) serve(in net.Conn) { in.Close() } -// pick picks a remote in round-robin fashion -func (tp *TCPProxy) pick() *remote { - tp.mu.Lock() - defer tp.mu.Unlock() - - picked := tp.remotes[tp.nextRemote] - tp.nextRemote = (tp.nextRemote + 1) % len(tp.remotes) - return picked -} - func (tp *TCPProxy) runMonitor() { for { select { diff --git a/github.com/coreos/etcd/proxy/tcpproxy/userspace_test.go b/github.com/coreos/etcd/proxy/tcpproxy/userspace_test.go index e239c19c66..bf65f570c2 100644 --- a/github.com/coreos/etcd/proxy/tcpproxy/userspace_test.go +++ b/github.com/coreos/etcd/proxy/tcpproxy/userspace_test.go @@ -42,9 +42,11 @@ func TestUserspaceProxy(t *testing.T) { t.Fatal(err) } + var port uint16 + fmt.Sscanf(u.Port(), "%d", &port) p := TCPProxy{ Listener: l, - Endpoints: []string{u.Host}, + Endpoints: []*net.SRV{{Target: u.Hostname(), Port: port}}, } go p.Run() defer p.Stop() diff --git a/github.com/coreos/etcd/raft/README.md b/github.com/coreos/etcd/raft/README.md index 45c48ae66f..f485b83977 100644 --- a/github.com/coreos/etcd/raft/README.md +++ b/github.com/coreos/etcd/raft/README.md @@ -49,6 +49,7 @@ This raft implementation also includes a few optional enhancements: - [etcd](https://github.com/coreos/etcd) A distributed reliable key-value store - 
[tikv](https://github.com/pingcap/tikv) A Distributed transactional key value database powered by Rust and Raft - [swarmkit](https://github.com/docker/swarmkit) A toolkit for orchestrating distributed systems at any scale. +- [chain core](https://github.com/chain/chain) Software for operating permissioned, multi-asset blockchain networks ## Usage diff --git a/github.com/coreos/etcd/raft/node_test.go b/github.com/coreos/etcd/raft/node_test.go index c57fd05fab..f4c726ea86 100644 --- a/github.com/coreos/etcd/raft/node_test.go +++ b/github.com/coreos/etcd/raft/node_test.go @@ -190,6 +190,38 @@ func TestNodeReadIndex(t *testing.T) { } } +// TestDisableProposalForwarding ensures that proposals are not forwarded to +// the leader when DisableProposalForwarding is true. +func TestDisableProposalForwarding(t *testing.T) { + r1 := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage()) + r2 := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage()) + cfg3 := newTestConfig(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage()) + cfg3.DisableProposalForwarding = true + r3 := newRaft(cfg3) + nt := newNetwork(r1, r2, r3) + + // elect r1 as leader + nt.send(raftpb.Message{From: 1, To: 1, Type: raftpb.MsgHup}) + + var testEntries = []raftpb.Entry{{Data: []byte("testdata")}} + + // send proposal to r2(follower) where DisableProposalForwarding is false + r2.Step(raftpb.Message{From: 2, To: 2, Type: raftpb.MsgProp, Entries: testEntries}) + + // verify r2(follower) does forward the proposal when DisableProposalForwarding is false + if len(r2.msgs) != 1 { + t.Fatalf("len(r2.msgs) expected 1, got %d", len(r2.msgs)) + } + + // send proposal to r3(follower) where DisableProposalForwarding is true + r3.Step(raftpb.Message{From: 3, To: 3, Type: raftpb.MsgProp, Entries: testEntries}) + + // verify r3(follower) does not forward the proposal when DisableProposalForwarding is true + if len(r3.msgs) != 0 { + t.Fatalf("len(r3.msgs) expected 0, got %d", len(r3.msgs)) + } +} + // TestNodeReadIndexToOldLeader ensures that raftpb.MsgReadIndex to old leader // gets forwarded to the new leader and 'send' method does not attach its term. func TestNodeReadIndexToOldLeader(t *testing.T) { diff --git a/github.com/coreos/etcd/raft/raft.go b/github.com/coreos/etcd/raft/raft.go index 7be4407ee2..d289de6a1b 100644 --- a/github.com/coreos/etcd/raft/raft.go +++ b/github.com/coreos/etcd/raft/raft.go @@ -176,6 +176,16 @@ type Config struct { // Logger is the logger used for raft log. For multinode which can host // multiple raft group, each raft group can have its own logger Logger Logger + + // DisableProposalForwarding set to true means that followers will drop + // proposals, rather than forwarding them to the leader. One use case for + // this feature would be in a situation where the Raft leader is used to + // compute the data of a proposal, for example, adding a timestamp from a + // hybrid logical clock to data in a monotonically increasing way. Forwarding + // should be disabled to prevent a follower with an inaccurate hybrid + // logical clock from assigning the timestamp and then forwarding the data + // to the leader. + DisableProposalForwarding bool } func (c *Config) validate() error { @@ -256,6 +266,7 @@ type raft struct { // [electiontimeout, 2 * electiontimeout - 1]. It gets reset // when raft changes its state to follower or candidate. 
randomizedElectionTimeout int + disableProposalForwarding bool tick func() step stepFunc @@ -283,18 +294,19 @@ func newRaft(c *Config) *raft { peers = cs.Nodes } r := &raft{ - id: c.ID, - lead: None, - raftLog: raftlog, - maxMsgSize: c.MaxSizePerMsg, - maxInflight: c.MaxInflightMsgs, - prs: make(map[uint64]*Progress), - electionTimeout: c.ElectionTick, - heartbeatTimeout: c.HeartbeatTick, - logger: c.Logger, - checkQuorum: c.CheckQuorum, - preVote: c.PreVote, - readOnly: newReadOnly(c.ReadOnlyOption), + id: c.ID, + lead: None, + raftLog: raftlog, + maxMsgSize: c.MaxSizePerMsg, + maxInflight: c.MaxInflightMsgs, + prs: make(map[uint64]*Progress), + electionTimeout: c.ElectionTick, + heartbeatTimeout: c.HeartbeatTick, + logger: c.Logger, + checkQuorum: c.CheckQuorum, + preVote: c.PreVote, + readOnly: newReadOnly(c.ReadOnlyOption), + disableProposalForwarding: c.DisableProposalForwarding, } for _, p := range peers { r.prs[p] = &Progress{Next: 1, ins: newInflights(r.maxInflight)} @@ -343,10 +355,20 @@ func (r *raft) nodes() []uint64 { // send persists state to stable storage and then sends to its mailbox. func (r *raft) send(m pb.Message) { m.From = r.id - if m.Type == pb.MsgVote || m.Type == pb.MsgPreVote { + if m.Type == pb.MsgVote || m.Type == pb.MsgVoteResp || m.Type == pb.MsgPreVote || m.Type == pb.MsgPreVoteResp { if m.Term == 0 { - // PreVote RPCs are sent at a term other than our actual term, so the code - // that sends these messages is responsible for setting the term. + // All {pre-,}campaign messages need to have the term set when + // sending. + // - MsgVote: m.Term is the term the node is campaigning for, + // non-zero as we increment the term when campaigning. + // - MsgVoteResp: m.Term is the new r.Term if the MsgVote was + // granted, non-zero for the same reason MsgVote is + // - MsgPreVote: m.Term is the term the node will campaign, + // non-zero as we use m.Term to indicate the next term we'll be + // campaigning for + // - MsgPreVoteResp: m.Term is the term received in the original + // MsgPreVote if the pre-vote was granted, non-zero for the + // same reasons MsgPreVote is panic(fmt.Sprintf("term should be set when sending %s", m.Type)) } } else { @@ -762,7 +784,16 @@ func (r *raft) Step(m pb.Message) error { if (r.Vote == None || m.Term > r.Term || r.Vote == m.From) && r.raftLog.isUpToDate(m.Index, m.LogTerm) { r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] cast %s for %x [logterm: %d, index: %d] at term %d", r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term) - r.send(pb.Message{To: m.From, Type: voteRespMsgType(m.Type)}) + // When responding to Msg{Pre,}Vote messages we include the term + // from the message, not the local term. To see why consider the + // case where a single node was previously partitioned away and + // its local term is now out of date. If we include the local term + // (recall that for pre-votes we don't update the local term), the + // (pre-)campaigning node on the other end will proceed to ignore + // the message (it ignores all out of date messages). + // The term in the original message and current local term are the + // same in the case of regular votes, but different for pre-votes. + r.send(pb.Message{To: m.From, Term: m.Term, Type: voteRespMsgType(m.Type)}) if m.Type == pb.MsgVote { // Only record real votes. 
r.electionElapsed = 0 @@ -771,7 +802,7 @@ func (r *raft) Step(m pb.Message) error { } else { r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] rejected %s from %x [logterm: %d, index: %d] at term %d", r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term) - r.send(pb.Message{To: m.From, Type: voteRespMsgType(m.Type), Reject: true}) + r.send(pb.Message{To: m.From, Term: r.Term, Type: voteRespMsgType(m.Type), Reject: true}) } default: @@ -1033,6 +1064,9 @@ func stepFollower(r *raft, m pb.Message) { if r.lead == None { r.logger.Infof("%x no leader at term %d; dropping proposal", r.id, r.Term) return + } else if r.disableProposalForwarding { + r.logger.Infof("%x not forwarding to leader %x at term %d; dropping proposal", r.id, r.lead, r.Term) + return } m.To = r.lead r.send(m) @@ -1159,6 +1193,10 @@ func (r *raft) addNode(id uint64) { } r.setProgress(id, 0, r.raftLog.lastIndex()+1) + // When a node is first added, we should mark it as recently active. + // Otherwise, CheckQuorum may cause us to step down if it is invoked + // before the added node has a chance to communicate with us. + r.prs[id].RecentActive = true } func (r *raft) removeNode(id uint64) { diff --git a/github.com/coreos/etcd/raft/raft_test.go b/github.com/coreos/etcd/raft/raft_test.go index 2dffe7acc6..1fa2854447 100644 --- a/github.com/coreos/etcd/raft/raft_test.go +++ b/github.com/coreos/etcd/raft/raft_test.go @@ -1363,10 +1363,10 @@ func TestRecvMsgVote(t *testing.T) { func testRecvMsgVote(t *testing.T, msgType pb.MessageType) { tests := []struct { - state StateType - i, term uint64 - voteFor uint64 - wreject bool + state StateType + index, logTerm uint64 + voteFor uint64 + wreject bool }{ {StateFollower, 0, 0, None, true}, {StateFollower, 0, 1, None, true}, @@ -1396,6 +1396,13 @@ func testRecvMsgVote(t *testing.T, msgType pb.MessageType) { {StateCandidate, 3, 3, 1, true}, } + max := func(a, b uint64) uint64 { + if a > b { + return a + } + return b + } + for i, tt := range tests { sm := newTestRaft(1, []uint64{1}, 10, 1, NewMemoryStorage()) sm.state = tt.state @@ -1413,7 +1420,17 @@ func testRecvMsgVote(t *testing.T, msgType pb.MessageType) { unstable: unstable{offset: 3}, } - sm.Step(pb.Message{Type: msgType, From: 2, Index: tt.i, LogTerm: tt.term}) + // raft.Term is greater than or equal to raft.raftLog.lastTerm. In this + // test we're only testing MsgVote responses when the campaigning node + // has a different raft log compared to the recipient node. + // Additionally we're verifying behaviour when the recipient node has + // already given out its vote for its current term. We're not testing + // what the recipient node does when receiving a message with a + // different term number, so we simply initialize both term numbers to + // be the same. + term := max(sm.raftLog.lastTerm(), tt.logTerm) + sm.Term = term + sm.Step(pb.Message{Type: msgType, Term: term, From: 2, Index: tt.index, LogTerm: tt.logTerm}) msgs := sm.readMessages() if g := len(msgs); g != 1 { @@ -2565,6 +2582,41 @@ func TestAddNode(t *testing.T) { } } +// TestAddNodeCheckQuorum tests that addNode does not trigger a leader election +// immediately when checkQuorum is set. 
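DisableProposalForwarding, added to raft.Config above, makes a follower drop MsgProp locally instead of relaying it to the leader (see the stepFollower change and TestDisableProposalForwarding). A minimal sketch of starting a node with the option enabled; apart from that field, the values and names below are ordinary example settings, not taken from this diff:

package raftexample

import "github.com/coreos/etcd/raft"

// startNonForwardingNode starts a raft node whose follower state drops
// client proposals rather than forwarding them to the leader.
func startNonForwardingNode(id uint64, peers []raft.Peer) raft.Node {
	cfg := &raft.Config{
		ID:                        id,
		ElectionTick:              10,
		HeartbeatTick:             1,
		Storage:                   raft.NewMemoryStorage(),
		MaxSizePerMsg:             1024 * 1024,
		MaxInflightMsgs:           256,
		DisableProposalForwarding: true,
	}
	return raft.StartNode(cfg, peers)
}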
+func TestAddNodeCheckQuorum(t *testing.T) { + r := newTestRaft(1, []uint64{1}, 10, 1, NewMemoryStorage()) + r.pendingConf = true + r.checkQuorum = true + + r.becomeCandidate() + r.becomeLeader() + + for i := 0; i < r.electionTimeout-1; i++ { + r.tick() + } + + r.addNode(2) + + // This tick will reach electionTimeout, which triggers a quorum check. + r.tick() + + // Node 1 should still be the leader after a single tick. + if r.state != StateLeader { + t.Errorf("state = %v, want %v", r.state, StateLeader) + } + + // After another electionTimeout ticks without hearing from node 2, + // node 1 should step down. + for i := 0; i < r.electionTimeout; i++ { + r.tick() + } + + if r.state != StateFollower { + t.Errorf("state = %v, want %v", r.state, StateFollower) + } +} + // TestRemoveNode tests that removeNode could update pendingConf, nodes and // and removed list correctly. func TestRemoveNode(t *testing.T) { @@ -3059,6 +3111,104 @@ func TestTransferNonMember(t *testing.T) { } } +// TestNodeWithSmallerTermCanCompleteElection tests the scenario where a node +// that has been partitioned away (and fallen behind) rejoins the cluster at +// about the same time the leader node gets partitioned away. +// Previously the cluster would come to a standstill when run with PreVote +// enabled. +func TestNodeWithSmallerTermCanCompleteElection(t *testing.T) { + n1 := newTestRaft(1, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage()) + n2 := newTestRaft(2, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage()) + n3 := newTestRaft(3, []uint64{1, 2, 3}, 10, 1, NewMemoryStorage()) + + n1.becomeFollower(1, None) + n2.becomeFollower(1, None) + n3.becomeFollower(1, None) + + n1.preVote = true + n2.preVote = true + n3.preVote = true + + // cause a network partition to isolate node 3 + nt := newNetwork(n1, n2, n3) + nt.cut(1, 3) + nt.cut(2, 3) + + nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup}) + + sm := nt.peers[1].(*raft) + if sm.state != StateLeader { + t.Errorf("peer 1 state: %s, want %s", sm.state, StateLeader) + } + + sm = nt.peers[2].(*raft) + if sm.state != StateFollower { + t.Errorf("peer 2 state: %s, want %s", sm.state, StateFollower) + } + + nt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup}) + sm = nt.peers[3].(*raft) + if sm.state != StatePreCandidate { + t.Errorf("peer 3 state: %s, want %s", sm.state, StatePreCandidate) + } + + nt.send(pb.Message{From: 2, To: 2, Type: pb.MsgHup}) + + // check whether the term values are expected + // a.Term == 3 + // b.Term == 3 + // c.Term == 1 + sm = nt.peers[1].(*raft) + if sm.Term != 3 { + t.Errorf("peer 1 term: %d, want %d", sm.Term, 3) + } + + sm = nt.peers[2].(*raft) + if sm.Term != 3 { + t.Errorf("peer 2 term: %d, want %d", sm.Term, 3) + } + + sm = nt.peers[3].(*raft) + if sm.Term != 1 { + t.Errorf("peer 3 term: %d, want %d", sm.Term, 1) + } + + // check state + // a == follower + // b == leader + // c == pre-candidate + sm = nt.peers[1].(*raft) + if sm.state != StateFollower { + t.Errorf("peer 1 state: %s, want %s", sm.state, StateFollower) + } + sm = nt.peers[2].(*raft) + if sm.state != StateLeader { + t.Errorf("peer 2 state: %s, want %s", sm.state, StateLeader) + } + sm = nt.peers[3].(*raft) + if sm.state != StatePreCandidate { + t.Errorf("peer 3 state: %s, want %s", sm.state, StatePreCandidate) + } + + sm.logger.Infof("going to bring back peer 3 and kill peer 2") + // recover the network then immediately isolate b which is currently + // the leader, this is to emulate the crash of b. 
+ nt.recover() + nt.cut(2, 1) + nt.cut(2, 3) + + // call for election + nt.send(pb.Message{From: 3, To: 3, Type: pb.MsgHup}) + nt.send(pb.Message{From: 1, To: 1, Type: pb.MsgHup}) + + // do we have a leader? + sma := nt.peers[1].(*raft) + smb := nt.peers[3].(*raft) + if sma.state != StateLeader && smb.state != StateLeader { + t.Errorf("no leader") + } +} + func entsWithConfig(configFunc func(*Config), terms ...uint64) *raft { storage := NewMemoryStorage() for i, term := range terms { diff --git a/github.com/coreos/etcd/raft/raftpb/raft.pb.go b/github.com/coreos/etcd/raft/raftpb/raft.pb.go index 3c45eef003..4c6e79d58a 100644 --- a/github.com/coreos/etcd/raft/raftpb/raft.pb.go +++ b/github.com/coreos/etcd/raft/raftpb/raft.pb.go @@ -1558,25 +1558,67 @@ func (m *ConfState) Unmarshal(dAtA []byte) error { } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType) - } - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + m.Nodes = append(m.Nodes, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + packedLen + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - v |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Nodes = append(m.Nodes, v) } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType) } - m.Nodes = append(m.Nodes, v) default: iNdEx = preIndex skippy, err := skipRaft(dAtA[iNdEx:]) diff --git a/github.com/coreos/etcd/raft/read_only.go b/github.com/coreos/etcd/raft/read_only.go index d0085237e3..ae746fa73e 100644 --- a/github.com/coreos/etcd/raft/read_only.go +++ b/github.com/coreos/etcd/raft/read_only.go @@ -18,7 +18,7 @@ import pb "github.com/coreos/etcd/raft/raftpb" // ReadState provides state for read only query. // It's caller's responsibility to call ReadIndex first before getting -// this state from ready, It's also caller's duty to differentiate if this +// this state from ready, it's also caller's duty to differentiate if this // state is what it requests through RequestCtx, eg. given a unique id as // RequestCtx type ReadState struct { diff --git a/github.com/coreos/etcd/raft/status.go b/github.com/coreos/etcd/raft/status.go index b690fa56b9..468f13f3bd 100644 --- a/github.com/coreos/etcd/raft/status.go +++ b/github.com/coreos/etcd/raft/status.go @@ -28,11 +28,17 @@ type Status struct { Applied uint64 Progress map[uint64]Progress + + LeadTransferee uint64 } // getStatus gets a copy of the current raft status. 
func getStatus(r *raft) Status { - s := Status{ID: r.id} + s := Status{ + ID: r.id, + LeadTransferee: r.leadTransferee, + } + s.HardState = r.hardState() s.SoftState = *r.softState() @@ -51,19 +57,21 @@ func getStatus(r *raft) Status { // MarshalJSON translates the raft status into JSON. // TODO: try to simplify this by introducing ID type into raft func (s Status) MarshalJSON() ([]byte, error) { - j := fmt.Sprintf(`{"id":"%x","term":%d,"vote":"%x","commit":%d,"lead":"%x","raftState":%q,"progress":{`, - s.ID, s.Term, s.Vote, s.Commit, s.Lead, s.RaftState) + j := fmt.Sprintf(`{"id":"%x","term":%d,"vote":"%x","commit":%d,"lead":"%x","raftState":%q,"applied":%d,"progress":{`, + s.ID, s.Term, s.Vote, s.Commit, s.Lead, s.RaftState, s.Applied) if len(s.Progress) == 0 { - j += "}}" + j += "}," } else { for k, v := range s.Progress { subj := fmt.Sprintf(`"%x":{"match":%d,"next":%d,"state":%q},`, k, v.Match, v.Next, v.State) j += subj } // remove the trailing "," - j = j[:len(j)-1] + "}}" + j = j[:len(j)-1] + "}," } + + j += fmt.Sprintf(`"leadtransferee":"%x"}`, s.LeadTransferee) return []byte(j), nil } diff --git a/github.com/coreos/etcd/rafthttp/functional_test.go b/github.com/coreos/etcd/rafthttp/functional_test.go index f2c80ffaf3..b4f04d3ebb 100644 --- a/github.com/coreos/etcd/rafthttp/functional_test.go +++ b/github.com/coreos/etcd/rafthttp/functional_test.go @@ -140,9 +140,7 @@ func TestSendMessageWhenStreamIsBroken(t *testing.T) { } func newServerStats() *stats.ServerStats { - ss := &stats.ServerStats{} - ss.Initialize() - return ss + return stats.NewServerStats("", "") } func waitStreamWorking(p *peer) bool { diff --git a/github.com/coreos/etcd/rafthttp/http.go b/github.com/coreos/etcd/rafthttp/http.go index 471028a615..55df26e9b7 100644 --- a/github.com/coreos/etcd/rafthttp/http.go +++ b/github.com/coreos/etcd/rafthttp/http.go @@ -183,7 +183,8 @@ func (h *snapshotHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } dec := &messageDecoder{r: r.Body} - m, err := dec.decode() + // let snapshots be very large since they can exceed 512MB for large installations + m, err := dec.decodeLimit(uint64(1 << 63)) if err != nil { msg := fmt.Sprintf("failed to decode raft message (%v)", err) plog.Errorf(msg) diff --git a/github.com/coreos/etcd/rafthttp/msg_codec.go b/github.com/coreos/etcd/rafthttp/msg_codec.go index bf1f6bc003..ef59bc8883 100644 --- a/github.com/coreos/etcd/rafthttp/msg_codec.go +++ b/github.com/coreos/etcd/rafthttp/msg_codec.go @@ -48,12 +48,16 @@ var ( ) func (dec *messageDecoder) decode() (raftpb.Message, error) { + return dec.decodeLimit(readBytesLimit) +} + +func (dec *messageDecoder) decodeLimit(numBytes uint64) (raftpb.Message, error) { var m raftpb.Message var l uint64 if err := binary.Read(dec.r, binary.BigEndian, &l); err != nil { return m, err } - if l > readBytesLimit { + if l > numBytes { return m, ErrExceedSizeLimit } buf := make([]byte, int(l)) diff --git a/github.com/coreos/etcd/rafthttp/peer.go b/github.com/coreos/etcd/rafthttp/peer.go index a82d7beed7..b8de635aa8 100644 --- a/github.com/coreos/etcd/rafthttp/peer.go +++ b/github.com/coreos/etcd/rafthttp/peer.go @@ -24,6 +24,7 @@ import ( "github.com/coreos/etcd/raft/raftpb" "github.com/coreos/etcd/snap" "golang.org/x/net/context" + "golang.org/x/time/rate" ) const ( @@ -188,6 +189,7 @@ func startPeer(transport *Transport, urls types.URLs, peerID types.ID, fs *stats status: status, recvc: p.recvc, propc: p.propc, + rl: rate.NewLimiter(transport.DialRetryFrequency, 1), } p.msgAppReader = &streamReader{ peerID: 
peerID, @@ -197,7 +199,9 @@ func startPeer(transport *Transport, urls types.URLs, peerID types.ID, fs *stats status: status, recvc: p.recvc, propc: p.propc, + rl: rate.NewLimiter(transport.DialRetryFrequency, 1), } + p.msgAppV2Reader.start() p.msgAppReader.start() diff --git a/github.com/coreos/etcd/rafthttp/stream.go b/github.com/coreos/etcd/rafthttp/stream.go index 2a6c620f56..9dfe22148a 100644 --- a/github.com/coreos/etcd/rafthttp/stream.go +++ b/github.com/coreos/etcd/rafthttp/stream.go @@ -25,6 +25,8 @@ import ( "sync" "time" + "golang.org/x/time/rate" + "github.com/coreos/etcd/etcdserver/stats" "github.com/coreos/etcd/pkg/httputil" "github.com/coreos/etcd/pkg/transport" @@ -243,7 +245,9 @@ func (cw *streamWriter) closeUnlocked() bool { if !cw.working { return false } - cw.closer.Close() + if err := cw.closer.Close(); err != nil { + plog.Errorf("peer %s (writer) connection close error: %v", cw.peerID, err) + } if len(cw.msgc) > 0 { cw.r.ReportUnreachable(uint64(cw.peerID)) } @@ -278,25 +282,28 @@ type streamReader struct { recvc chan<- raftpb.Message propc chan<- raftpb.Message + rl *rate.Limiter // alters the frequency of dial retrial attempts + errorc chan<- error mu sync.Mutex paused bool - cancel func() closer io.Closer - stopc chan struct{} - done chan struct{} + ctx context.Context + cancel context.CancelFunc + done chan struct{} } -func (r *streamReader) start() { - r.stopc = make(chan struct{}) - r.done = make(chan struct{}) - if r.errorc == nil { - r.errorc = r.tr.ErrorC +func (cr *streamReader) start() { + cr.done = make(chan struct{}) + if cr.errorc == nil { + cr.errorc = cr.tr.ErrorC } - - go r.run() + if cr.ctx == nil { + cr.ctx, cr.cancel = context.WithCancel(context.Background()) + } + go cr.run() } func (cr *streamReader) run() { @@ -311,7 +318,7 @@ func (cr *streamReader) run() { } else { cr.status.activate() plog.Infof("established a TCP streaming connection with peer %s (%s reader)", cr.peerID, cr.typ) - err := cr.decodeLoop(rc, t) + err = cr.decodeLoop(rc, t) plog.Warningf("lost the TCP streaming connection with peer %s (%s reader)", cr.peerID, cr.typ) switch { // all data is read out @@ -322,15 +329,16 @@ func (cr *streamReader) run() { cr.status.deactivate(failureType{source: t.String(), action: "read"}, err.Error()) } } - select { - // Wait 100ms to create a new stream, so it doesn't bring too much - // overhead when retry. 
- case <-time.After(100 * time.Millisecond): - case <-cr.stopc: + // Wait for a while before new dial attempt + err = cr.rl.Wait(cr.ctx) + if cr.ctx.Err() != nil { plog.Infof("stopped streaming with peer %s (%s reader)", cr.peerID, t) close(cr.done) return } + if err != nil { + plog.Errorf("streaming with peer %s (%s reader) rate limiter error: %v", cr.peerID, t, err) + } } } @@ -346,7 +354,7 @@ func (cr *streamReader) decodeLoop(rc io.ReadCloser, t streamType) error { plog.Panicf("unhandled stream type %s", t) } select { - case <-cr.stopc: + case <-cr.ctx.Done(): cr.mu.Unlock() if err := rc.Close(); err != nil { return err @@ -401,11 +409,8 @@ func (cr *streamReader) decodeLoop(rc io.ReadCloser, t streamType) error { } func (cr *streamReader) stop() { - close(cr.stopc) cr.mu.Lock() - if cr.cancel != nil { - cr.cancel() - } + cr.cancel() cr.close() cr.mu.Unlock() <-cr.done @@ -429,13 +434,11 @@ func (cr *streamReader) dial(t streamType) (io.ReadCloser, error) { setPeerURLsHeader(req, cr.tr.URLs) - ctx, cancel := context.WithCancel(context.Background()) - req = req.WithContext(ctx) + req = req.WithContext(cr.ctx) cr.mu.Lock() - cr.cancel = cancel select { - case <-cr.stopc: + case <-cr.ctx.Done(): cr.mu.Unlock() return nil, fmt.Errorf("stream reader is stopped") default: @@ -497,7 +500,9 @@ func (cr *streamReader) dial(t streamType) (io.ReadCloser, error) { func (cr *streamReader) close() { if cr.closer != nil { - cr.closer.Close() + if err := cr.closer.Close(); err != nil { + plog.Errorf("peer %s (reader) connection close error: %v", cr.peerID, err) + } } cr.closer = nil } diff --git a/github.com/coreos/etcd/rafthttp/stream_test.go b/github.com/coreos/etcd/rafthttp/stream_test.go index f48714e7c5..c9cd2b3d69 100644 --- a/github.com/coreos/etcd/rafthttp/stream_test.go +++ b/github.com/coreos/etcd/rafthttp/stream_test.go @@ -15,6 +15,7 @@ package rafthttp import ( + "context" "errors" "fmt" "io" @@ -25,6 +26,8 @@ import ( "testing" "time" + "golang.org/x/time/rate" + "github.com/coreos/etcd/etcdserver/stats" "github.com/coreos/etcd/pkg/testutil" "github.com/coreos/etcd/pkg/types" @@ -113,6 +116,7 @@ func TestStreamReaderDialRequest(t *testing.T) { peerID: types.ID(2), tr: &Transport{streamRt: tr, ClusterID: types.ID(1), ID: types.ID(1)}, picker: mustNewURLPicker(t, []string{"http://localhost:2380"}), + ctx: context.Background(), } sr.dial(tt) @@ -167,6 +171,7 @@ func TestStreamReaderDialResult(t *testing.T) { tr: &Transport{streamRt: tr, ClusterID: types.ID(1)}, picker: mustNewURLPicker(t, []string{"http://localhost:2380"}), errorc: make(chan error, 1), + ctx: context.Background(), } _, err := sr.dial(streamTypeMessage) @@ -192,6 +197,7 @@ func TestStreamReaderStopOnDial(t *testing.T) { errorc: make(chan error, 1), typ: streamTypeMessage, status: newPeerStatus(types.ID(2)), + rl: rate.NewLimiter(rate.Every(100*time.Millisecond), 1), } tr.onResp = func() { // stop() waits for the run() goroutine to exit, but that exit @@ -246,6 +252,7 @@ func TestStreamReaderDialDetectUnsupport(t *testing.T) { peerID: types.ID(2), tr: &Transport{streamRt: tr, ClusterID: types.ID(1)}, picker: mustNewURLPicker(t, []string{"http://localhost:2380"}), + ctx: context.Background(), } _, err := sr.dial(typ) @@ -311,6 +318,7 @@ func TestStream(t *testing.T) { status: newPeerStatus(types.ID(2)), recvc: recvc, propc: propc, + rl: rate.NewLimiter(rate.Every(100*time.Millisecond), 1), } sr.start() diff --git a/github.com/coreos/etcd/rafthttp/transport.go b/github.com/coreos/etcd/rafthttp/transport.go index 
1f0b46836e..50219db71b 100644 --- a/github.com/coreos/etcd/rafthttp/transport.go +++ b/github.com/coreos/etcd/rafthttp/transport.go @@ -29,6 +29,7 @@ import ( "github.com/coreos/pkg/capnslog" "github.com/xiang90/probing" "golang.org/x/net/context" + "golang.org/x/time/rate" ) var plog = logutil.NewMergeLogger(capnslog.NewPackageLogger("github.com/coreos/etcd", "rafthttp")) @@ -94,8 +95,12 @@ type Transporter interface { // User needs to call Start before calling other functions, and call // Stop when the Transport is no longer used. type Transport struct { - DialTimeout time.Duration // maximum duration before timing out dial of the request - TLSInfo transport.TLSInfo // TLS information used when creating connection + DialTimeout time.Duration // maximum duration before timing out dial of the request + // DialRetryFrequency defines the frequency of streamReader dial retry attempts; + // a distinct rate limiter is created for every peer (default value: 10 events/sec) + DialRetryFrequency rate.Limit + + TLSInfo transport.TLSInfo // TLS information used when creating connection ID types.ID // local member ID URLs types.URLs // local peer URLs @@ -135,6 +140,13 @@ func (t *Transport) Start() error { t.remotes = make(map[types.ID]*remote) t.peers = make(map[types.ID]Peer) t.prober = probing.NewProber(t.pipelineRt) + + // If the client didn't provide a dial retry frequency, use the default + // (100ms backoff between attempts to create a new stream), + // so retries don't bring too much overhead. + if t.DialRetryFrequency == 0 { + t.DialRetryFrequency = rate.Every(100 * time.Millisecond) + } return nil } diff --git a/github.com/coreos/etcd/rafthttp/transport_test.go b/github.com/coreos/etcd/rafthttp/transport_test.go index c998a44b21..e4cf37154d 100644 --- a/github.com/coreos/etcd/rafthttp/transport_test.go +++ b/github.com/coreos/etcd/rafthttp/transport_test.go @@ -30,12 +30,10 @@ import ( // TestTransportSend tests that transport can send messages using correct // underlying peer, and drop local or unknown-target messages. func TestTransportSend(t *testing.T) { - ss := &stats.ServerStats{} - ss.Initialize() peer1 := newFakePeer() peer2 := newFakePeer() tr := &Transport{ - ServerStats: ss, + ServerStats: stats.NewServerStats("", ""), peers: map[types.ID]Peer{types.ID(1): peer1, types.ID(2): peer2}, } wmsgsIgnored := []raftpb.Message{ @@ -67,12 +65,10 @@ func TestTransportSend(t *testing.T) { } func TestTransportCutMend(t *testing.T) { - ss := &stats.ServerStats{} - ss.Initialize() peer1 := newFakePeer() peer2 := newFakePeer() tr := &Transport{ - ServerStats: ss, + ServerStats: stats.NewServerStats("", ""), peers: map[types.ID]Peer{types.ID(1): peer1, types.ID(2): peer2}, } diff --git a/github.com/coreos/etcd/scripts/build-aci b/github.com/coreos/etcd/scripts/build-aci index f13e9a7a56..7a4cff93b0 100755 --- a/github.com/coreos/etcd/scripts/build-aci +++ b/github.com/coreos/etcd/scripts/build-aci @@ -11,6 +11,13 @@ ACBUILD=${ACBUILD:-acbuild} VERSION=$1 +go2aci() { + case "${1}" in + "arm64") echo "aarch64";; + *) echo "${1}";; + esac +} + if ! 
command -v $ACBUILD >/dev/null; then echo "acbuild ($ACBUILD) is not executable" exit 1 @@ -29,6 +36,7 @@ fi acbuild --debug begin TMPHOSTS="$(mktemp)" +ACI_ARCH="$(go2aci ${GOARCH})" acbuildEnd() { rm "$TMPHOSTS" @@ -42,6 +50,7 @@ cat < $TMPHOSTS DF acbuild --debug set-name coreos.com/etcd +acbuild --debug annotation add appc.io/executor/supports-systemd-notify true acbuild --debug copy $BINARYDIR/etcd /usr/local/bin/etcd acbuild --debug copy $BINARYDIR/etcdctl /usr/local/bin/etcdctl @@ -63,6 +72,8 @@ acbuild --debug port add peer tcp 2380 acbuild --debug copy "$TMPHOSTS" /etc/hosts +acbuild --debug label add arch "${ACI_ARCH}" + # mkdir default data-dir mkdir -p .acbuild/currentaci/rootfs/var/lib/etcd @@ -70,4 +81,4 @@ mkdir -p .acbuild/currentaci/rootfs/var/lib/etcd ln -s ./usr/local/bin/etcd .acbuild/currentaci/rootfs/etcd ln -s ./usr/local/bin/etcdctl .acbuild/currentaci/rootfs/etcdctl -acbuild --debug write --overwrite $BUILDDIR/etcd-${1}-linux-amd64.aci +acbuild --debug write --overwrite $BUILDDIR/etcd-${1}-linux-${ACI_ARCH}.aci diff --git a/github.com/coreos/etcd/scripts/build-binary b/github.com/coreos/etcd/scripts/build-binary index 18646c75f8..3b4b763dee 100755 --- a/github.com/coreos/etcd/scripts/build-binary +++ b/github.com/coreos/etcd/scripts/build-binary @@ -62,6 +62,7 @@ function main { TARGET_ARCHS=("amd64") if [ ${GOOS} == "linux" ]; then + TARGET_ARCHS+=("arm64") TARGET_ARCHS+=("ppc64le") fi diff --git a/github.com/coreos/etcd/scripts/build-docker b/github.com/coreos/etcd/scripts/build-docker index 44641322f7..b7aea2bc24 100755 --- a/github.com/coreos/etcd/scripts/build-docker +++ b/github.com/coreos/etcd/scripts/build-docker @@ -1,21 +1,23 @@ #!/usr/bin/env bash +set -e + if [ "$#" -ne 1 ]; then echo "Usage: $0 VERSION" >&2 exit 1 fi +VERSION=${1} ARCH=$(go env GOARCH) DOCKERFILE="Dockerfile-release" -TAG="quay.io/coreos/etcd" +: ${TAG:="quay.io/coreos/etcd"} if [ -z ${BINARYDIR} ]; then RELEASE="etcd-${1}"-`go env GOOS`-`go env GOARCH` BINARYDIR="${RELEASE}" TARFILE="${RELEASE}.tar.gz" TARURL="https://github.com/coreos/etcd/releases/download/${1}/${TARFILE}" - curl -f -L -o ${TARFILE} ${TARURL} - if [ $? != 0 ]; then + if ! curl -f -L -o ${TARFILE} ${TARURL} ; then echo "Failed to download ${TARURL}." exit 1 fi @@ -24,7 +26,7 @@ fi if [ ${ARCH} != "amd64" ]; then DOCKERFILE+=".${ARCH}" - TAG+="-${ARCH}" + VERSION+="-${ARCH}" fi BINARYDIR=${BINARYDIR:-.} @@ -32,9 +34,10 @@ BUILDDIR=${BUILDDIR:-.} IMAGEDIR=${BUILDDIR}/image-docker -mkdir -p ${IMAGEDIR} +mkdir -p ${IMAGEDIR}/var/etcd +mkdir -p ${IMAGEDIR}/var/lib/etcd cp ${BINARYDIR}/etcd ${BINARYDIR}/etcdctl ${IMAGEDIR} cat ./${DOCKERFILE} > ${IMAGEDIR}/Dockerfile -docker build -t ${TAG}:${1} ${IMAGEDIR} +docker build -t ${TAG}:${VERSION} ${IMAGEDIR} diff --git a/github.com/coreos/etcd/scripts/genproto.sh b/github.com/coreos/etcd/scripts/genproto.sh index e42784745f..15aebd4176 100755 --- a/github.com/coreos/etcd/scripts/genproto.sh +++ b/github.com/coreos/etcd/scripts/genproto.sh @@ -10,18 +10,18 @@ if ! [[ "$0" =~ "scripts/genproto.sh" ]]; then exit 255 fi -# for now, be conservative about what version of protoc we expect -if ! [[ $(protoc --version) =~ "3.2.0" ]]; then - echo "could not find protoc 3.2.0, is it installed + in PATH?" +if ! [[ $(protoc --version) =~ "3.3.0" ]]; then + echo "could not find protoc 3.3.0, is it installed + in PATH?" 
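A note on the rafthttp changes above (stream.go and transport.go): the streamReader retry loop is now paced by a golang.org/x/time/rate limiter driven from Transport.DialRetryFrequency (default rate.Every(100 * time.Millisecond), i.e. 10 dial attempts per second per peer) and is stopped through context cancellation instead of the old stopc channel plus fixed time.After sleep. The sketch below shows that pacing pattern in isolation; retryDial, dial and the standalone main are illustrative names, not the etcd code.

package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

// retryDial keeps calling dial until it succeeds or ctx is cancelled,
// allowing at most one attempt per limiter interval.
func retryDial(ctx context.Context, rl *rate.Limiter, dial func() error) error {
	for {
		if err := dial(); err == nil {
			return nil
		}
		// Wait blocks until the limiter grants a token or ctx is done;
		// on cancellation it returns an error, which ends the loop.
		if err := rl.Wait(ctx); err != nil {
			return err
		}
	}
}

func main() {
	// Same default as Transport.DialRetryFrequency above: one attempt per 100ms.
	rl := rate.NewLimiter(rate.Every(100*time.Millisecond), 1)
	ctx, cancel := context.WithTimeout(context.Background(), 450*time.Millisecond)
	defer cancel()

	attempts := 0
	err := retryDial(ctx, rl, func() error {
		attempts++
		return errors.New("peer unreachable") // always fail, to show the pacing
	})
	fmt.Printf("gave up after %d attempts: %v\n", attempts, err)
}

Compared with the old fixed sleep, the limiter makes the retry frequency configurable per Transport, and a cancelled context stops the loop immediately instead of waiting out the backoff.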
exit 255 fi # directories containing protos to be built DIRS="./wal/walpb ./etcdserver/etcdserverpb ./snap/snappb ./raft/raftpb ./mvcc/mvccpb ./lease/leasepb ./auth/authpb ./etcdserver/api/v3lock/v3lockpb ./etcdserver/api/v3election/v3electionpb" -# exact version of protoc-gen-gogo to build -GOGO_PROTO_SHA="8d70fb3182befc465c4a1eac8ad4d38ff49778e2" -GRPC_GATEWAY_SHA="84398b94e188ee336f307779b57b3aa91af7063c" +# exact version of packages to build +GOGO_PROTO_SHA="100ba4e885062801d56799d78530b73b178a78f3" +GRPC_GATEWAY_SHA="18d159699f2e83fc5bb9ef2f79465ca3f3122676" +SCHWAG_SHA="b7d0fc9aadaaae3d61aaadfc12e4a2f945514912" # set up self-contained GOPATH for building export GOPATH=${PWD}/gopath.proto @@ -31,6 +31,7 @@ export PATH="${GOBIN}:${PATH}" COREOS_ROOT="${GOPATH}/src/github.com/coreos" ETCD_ROOT="${COREOS_ROOT}/etcd" GOGOPROTO_ROOT="${GOPATH}/src/github.com/gogo/protobuf" +SCHWAG_ROOT="${GOPATH}/src/github.com/hexfusion/schwag" GOGOPROTO_PATH="${GOGOPROTO_ROOT}:${GOGOPROTO_ROOT}/protobuf" GRPC_GATEWAY_ROOT="${GOPATH}/src/github.com/grpc-ecosystem/grpc-gateway" @@ -57,31 +58,59 @@ popd for dir in ${DIRS}; do pushd ${dir} - protoc --gofast_out=plugins=grpc,import_prefix=github.com/coreos/:. -I=.:"${GOGOPROTO_PATH}":"${COREOS_ROOT}":"${GRPC_GATEWAY_ROOT}/third_party/googleapis" *.proto + protoc --gofast_out=plugins=grpc,import_prefix=github.com/coreos/:. -I=".:${GOGOPROTO_PATH}:${COREOS_ROOT}:${GRPC_GATEWAY_ROOT}/third_party/googleapis" *.proto sed -i.bak -E "s/github\.com\/coreos\/(gogoproto|github\.com|golang\.org|google\.golang\.org)/\1/g" *.pb.go sed -i.bak -E 's/github\.com\/coreos\/(errors|fmt|io)/\1/g' *.pb.go sed -i.bak -E 's/import _ \"gogoproto\"//g' *.pb.go sed -i.bak -E 's/import fmt \"fmt\"//g' *.pb.go sed -i.bak -E 's/import _ \"github\.com\/coreos\/google\/api\"//g' *.pb.go + sed -i.bak -E 's/import _ \"google\.golang\.org\/genproto\/googleapis\/api\/annotations\"//g' *.pb.go rm -f *.bak goimports -w *.pb.go popd done +# remove old swagger files so it's obvious whether the files fail to generate +rm -rf Documentation/dev-guide/apispec/swagger/*json for pb in etcdserverpb/rpc api/v3lock/v3lockpb/v3lock api/v3election/v3electionpb/v3election; do + protobase="etcdserver/${pb}" protoc -I. \ -I${GRPC_GATEWAY_ROOT}/third_party/googleapis \ -I${GOGOPROTO_PATH} \ -I${COREOS_ROOT} \ --grpc-gateway_out=logtostderr=true:. \ --swagger_out=logtostderr=true:./Documentation/dev-guide/apispec/swagger/. \ - ./etcdserver/${pb}.proto - name=`basename ${pb}` + ${protobase}.proto + # hack to move gw files around so client won't include them + pkgpath=`dirname ${protobase}` + pkg=`basename ${pkgpath}` + gwfile="${protobase}.pb.gw.go" + sed -i.bak -E "s/package $pkg/package gw/g" ${gwfile} + sed -i.bak -E "s/protoReq /&$pkg\./g" ${gwfile} + sed -i.bak -E "s/, client /, client $pkg./g" ${gwfile} + sed -i.bak -E "s/Client /, client $pkg./g" ${gwfile} + sed -i.bak -E "s/[^(]*Client, runtime/${pkg}.&/" ${gwfile} + sed -i.bak -E "s/New[A-Za-z]*Client/${pkg}.&/" ${gwfile} + # darwin doesn't like newlines in sed... 
+ sed -i.bak -E "s|import \(|& \"github.com/coreos/etcd/${pkgpath}\"|" ${gwfile} + mkdir -p ${pkgpath}/gw/ + go fmt ${gwfile} + mv ${gwfile} ${pkgpath}/gw/ + rm -f ./etcdserver/${pb}*.bak + swaggerName=`basename ${pb}` mv Documentation/dev-guide/apispec/swagger/etcdserver/${pb}.swagger.json \ - Documentation/dev-guide/apispec/swagger/${name}.swagger.json + Documentation/dev-guide/apispec/swagger/${swaggerName}.swagger.json done rm -rf Documentation/dev-guide/apispec/swagger/etcdserver/ +# append security to swagger spec +go get -u "github.com/hexfusion/schwag" +pushd "${SCHWAG_ROOT}" + git reset --hard "${SCHWAG_SHA}" + go install . +popd +schwag -input=Documentation/dev-guide/apispec/swagger/rpc.swagger.json + # install protodoc # go get -v -u github.com/coreos/protodoc # @@ -91,7 +120,7 @@ rm -rf Documentation/dev-guide/apispec/swagger/etcdserver/ if [ "$1" = "-g" ]; then echo "protodoc is auto-generating grpc API reference documentation..." go get -v -u github.com/coreos/protodoc - SHA_PROTODOC="f4164b1cce80b5eba4c835d08483f552dc568b7c" + SHA_PROTODOC="4372ee725035a208404e2d5465ba921469decc32" PROTODOC_PATH="${GOPATH}/src/github.com/coreos/protodoc" pushd "${PROTODOC_PATH}" git reset --hard "${SHA_PROTODOC}" diff --git a/github.com/coreos/etcd/scripts/install-marker.sh b/github.com/coreos/etcd/scripts/install-marker.sh new file mode 100755 index 0000000000..0cca4017df --- /dev/null +++ b/github.com/coreos/etcd/scripts/install-marker.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash + +set -e + +ARCH=$1 + +if [ -z "$1" ]; then + echo "Usage: ${0} [amd64 or darwin], defaulting to 'amd64'" >> /dev/stderr + ARCH=amd64 +fi + +MARKER_URL=https://storage.googleapis.com/etcd/test-binaries/marker-v0.4.0-x86_64-unknown-linux-gnu +if [ ${ARCH} == "darwin" ]; then + MARKER_URL=https://storage.googleapis.com/etcd/test-binaries/marker-v0.4.0-x86_64-apple-darwin +fi + +echo "Installing marker" +curl -L ${MARKER_URL} -o ${GOPATH}/bin/marker +chmod 755 ${GOPATH}/bin/marker + +${GOPATH}/bin/marker --version diff --git a/github.com/coreos/etcd/scripts/release.sh b/github.com/coreos/etcd/scripts/release.sh index 67b6661ea2..1c846694cd 100755 --- a/github.com/coreos/etcd/scripts/release.sh +++ b/github.com/coreos/etcd/scripts/release.sh @@ -5,7 +5,6 @@ # set -e -ARCH=$(go env GOARCH) VERSION=$1 if [ -z "${VERSION}" ]; then echo "Usage: ${0} VERSION" >> /dev/stderr @@ -22,13 +21,20 @@ if ! command -v docker >/dev/null; then exit 1 fi -ETCD_ROOT=$(dirname "${BASH_SOURCE}")/.. +ETCD_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. pushd ${ETCD_ROOT} >/dev/null echo Building etcd binary... ./scripts/build-binary ${VERSION} - echo Building aci image... - BINARYDIR=release/etcd-${VERSION}-linux-amd64 BUILDDIR=release ./scripts/build-aci ${VERSION} - echo Building docker image... - BINARYDIR=release/etcd-${VERSION}-linux-${ARCH} BUILDDIR=release ./scripts/build-docker ${VERSION} + + # ppc64le not yet supported by acbuild. + for TARGET_ARCH in "amd64" "arm64"; do + echo Building ${TARGET_ARCH} aci image... + GOARCH=${TARGET_ARCH} BINARYDIR=release/etcd-${VERSION}-linux-${TARGET_ARCH} BUILDDIR=release ./scripts/build-aci ${VERSION} + done + + for TARGET_ARCH in "amd64" "arm64" "ppc64le"; do + echo Building ${TARGET_ARCH} docker image... 
+ GOARCH=${TARGET_ARCH} BINARYDIR=release/etcd-${VERSION}-linux-${TARGET_ARCH} BUILDDIR=release ./scripts/build-docker ${VERSION} + done popd >/dev/null diff --git a/github.com/coreos/etcd/scripts/updatebom.sh b/github.com/coreos/etcd/scripts/updatebom.sh new file mode 100755 index 0000000000..08f59f2807 --- /dev/null +++ b/github.com/coreos/etcd/scripts/updatebom.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash + +set -e + +if ! [[ "$0" =~ "scripts/updatebom.sh" ]]; then + echo "must be run from repository root" + exit 255 +fi + +echo "installing 'bill-of-materials.json'" +go get -v -u github.com/coreos/license-bill-of-materials + +echo "setting up GOPATH" +rm -rf ./gopath +mkdir ./gopath +mv ./cmd/vendor ./gopath/src + +echo "generating bill-of-materials.json" +GOPATH=`pwd`/gopath license-bill-of-materials \ + --override-file ./bill-of-materials.override.json \ + github.com/coreos/etcd github.com/coreos/etcd/etcdctl > bill-of-materials.json + +echo "reverting GOPATH,vendor" +mv ./gopath/src ./cmd/vendor +rm -rf ./gopath + +echo "generated bill-of-materials.json" + diff --git a/github.com/coreos/etcd/scripts/updatedep.sh b/github.com/coreos/etcd/scripts/updatedep.sh index 08bbef6837..651aa3cd49 100755 --- a/github.com/coreos/etcd/scripts/updatedep.sh +++ b/github.com/coreos/etcd/scripts/updatedep.sh @@ -43,7 +43,7 @@ pushd "${GLIDE_VC_ROOT}" popd if [ -n "$1" ]; then - echo "glide get on $(echo $1)" + echo "glide get on $1" matches=`grep "name: $1" glide.lock` if [ ! -z "$matches" ]; then echo "glide update on $1" @@ -57,18 +57,9 @@ else glide update --strip-vendor fi; -# TODO: workaround to keep 'github.com/stretchr/testify/assert' in v2 tests -# TODO: remove this after dropping v2 -echo "copying github.com/stretchr/testify/assert" -cp -rf vendor/github.com/stretchr/testify/assert ./temp-assert - echo "removing test files" glide vc --only-code --no-tests -# TODO: remove this after dropping v2 -mkdir -p vendor/github.com/stretchr/testify -mv ./temp-assert vendor/github.com/stretchr/testify/assert - mv vendor cmd/ echo "recreating symlink to etcd" diff --git a/github.com/coreos/etcd/snap/db.go b/github.com/coreos/etcd/snap/db.go index ae3c743f80..01d897ae86 100644 --- a/github.com/coreos/etcd/snap/db.go +++ b/github.com/coreos/etcd/snap/db.go @@ -15,6 +15,7 @@ package snap import ( + "errors" "fmt" "io" "io/ioutil" @@ -24,6 +25,8 @@ import ( "github.com/coreos/etcd/pkg/fileutil" ) +var ErrNoDBSnapshot = errors.New("snap: snapshot file doesn't exist") + // SaveDBFrom saves snapshot of the database from the given reader. It // guarantees the save operation is atomic. func (s *Snapshotter) SaveDBFrom(r io.Reader, id uint64) (int64, error) { @@ -41,7 +44,7 @@ func (s *Snapshotter) SaveDBFrom(r io.Reader, id uint64) (int64, error) { os.Remove(f.Name()) return n, err } - fn := filepath.Join(s.dir, fmt.Sprintf("%016x.snap.db", id)) + fn := s.dbFilePath(id) if fileutil.Exist(fn) { os.Remove(f.Name()) return n, nil @@ -60,15 +63,15 @@ func (s *Snapshotter) SaveDBFrom(r io.Reader, id uint64) (int64, error) { // DBFilePath returns the file path for the snapshot of the database with // given id. If the snapshot does not exist, it returns error. 
func (s *Snapshotter) DBFilePath(id uint64) (string, error) { - fns, err := fileutil.ReadDir(s.dir) - if err != nil { + if _, err := fileutil.ReadDir(s.dir); err != nil { return "", err } - wfn := fmt.Sprintf("%016x.snap.db", id) - for _, fn := range fns { - if fn == wfn { - return filepath.Join(s.dir, fn), nil - } + if fn := s.dbFilePath(id); fileutil.Exist(fn) { + return fn, nil } - return "", fmt.Errorf("snap: snapshot file doesn't exist") + return "", ErrNoDBSnapshot +} + +func (s *Snapshotter) dbFilePath(id uint64) string { + return filepath.Join(s.dir, fmt.Sprintf("%016x.snap.db", id)) } diff --git a/github.com/coreos/etcd/store/node.go b/github.com/coreos/etcd/store/node.go index 731327b08b..5415955350 100644 --- a/github.com/coreos/etcd/store/node.go +++ b/github.com/coreos/etcd/store/node.go @@ -332,7 +332,6 @@ func (n *node) UpdateTTL(expireTime time.Time) { n.ExpireTime = expireTime // push into ttl heap n.store.ttlKeyHeap.push(n) - return } // Compare function compares node index and value with provided ones. diff --git a/github.com/coreos/etcd/store/node_extern_test.go b/github.com/coreos/etcd/store/node_extern_test.go index 8caf16173a..7875e47f9d 100644 --- a/github.com/coreos/etcd/store/node_extern_test.go +++ b/github.com/coreos/etcd/store/node_extern_test.go @@ -19,8 +19,9 @@ import ( "testing" "time" "unsafe" + + "github.com/coreos/etcd/pkg/testutil" ) -import "github.com/stretchr/testify/assert" func TestNodeExternClone(t *testing.T) { var eNode *NodeExtern @@ -56,15 +57,15 @@ func TestNodeExternClone(t *testing.T) { gNode := eNode.Clone() // Check the clone is as expected - assert.Equal(t, gNode.Key, key) - assert.Equal(t, gNode.TTL, ttl) - assert.Equal(t, gNode.CreatedIndex, ci) - assert.Equal(t, gNode.ModifiedIndex, mi) + testutil.AssertEqual(t, gNode.Key, key) + testutil.AssertEqual(t, gNode.TTL, ttl) + testutil.AssertEqual(t, gNode.CreatedIndex, ci) + testutil.AssertEqual(t, gNode.ModifiedIndex, mi) // values should be the same - assert.Equal(t, *gNode.Value, val) - assert.Equal(t, *gNode.Expiration, exp) - assert.Equal(t, len(gNode.Nodes), len(childs)) - assert.Equal(t, *gNode.Nodes[0], child) + testutil.AssertEqual(t, *gNode.Value, val) + testutil.AssertEqual(t, *gNode.Expiration, exp) + testutil.AssertEqual(t, len(gNode.Nodes), len(childs)) + testutil.AssertEqual(t, *gNode.Nodes[0], child) // but pointers should differ if gNode.Value == eNode.Value { t.Fatalf("expected value pointers to differ, but got same!") @@ -76,12 +77,12 @@ func TestNodeExternClone(t *testing.T) { t.Fatalf("expected nodes pointers to differ, but got same!") } // Original should be the same - assert.Equal(t, eNode.Key, key) - assert.Equal(t, eNode.TTL, ttl) - assert.Equal(t, eNode.CreatedIndex, ci) - assert.Equal(t, eNode.ModifiedIndex, mi) - assert.Equal(t, eNode.Value, valp) - assert.Equal(t, eNode.Expiration, expp) + testutil.AssertEqual(t, eNode.Key, key) + testutil.AssertEqual(t, eNode.TTL, ttl) + testutil.AssertEqual(t, eNode.CreatedIndex, ci) + testutil.AssertEqual(t, eNode.ModifiedIndex, mi) + testutil.AssertEqual(t, eNode.Value, valp) + testutil.AssertEqual(t, eNode.Expiration, expp) if !sameSlice(eNode.Nodes, childs) { t.Fatalf("expected nodes pointer to same, but got different!") } @@ -89,15 +90,15 @@ func TestNodeExternClone(t *testing.T) { gNode.Key = "/baz" gNode.TTL = 0 gNode.Nodes[0].Key = "uno" - assert.Equal(t, eNode.Key, key) - assert.Equal(t, eNode.TTL, ttl) - assert.Equal(t, eNode.CreatedIndex, ci) - assert.Equal(t, eNode.ModifiedIndex, mi) - assert.Equal(t, 
*eNode.Nodes[0], child) + testutil.AssertEqual(t, eNode.Key, key) + testutil.AssertEqual(t, eNode.TTL, ttl) + testutil.AssertEqual(t, eNode.CreatedIndex, ci) + testutil.AssertEqual(t, eNode.ModifiedIndex, mi) + testutil.AssertEqual(t, *eNode.Nodes[0], child) // Change the original and ensure the clone is not affected eNode.Key = "/wuf" - assert.Equal(t, eNode.Key, "/wuf") - assert.Equal(t, gNode.Key, "/baz") + testutil.AssertEqual(t, eNode.Key, "/wuf") + testutil.AssertEqual(t, gNode.Key, "/baz") } func sameSlice(a, b []*NodeExtern) bool { diff --git a/github.com/coreos/etcd/store/stats_test.go b/github.com/coreos/etcd/store/stats_test.go index 44efbf9f59..ec105a586f 100644 --- a/github.com/coreos/etcd/store/stats_test.go +++ b/github.com/coreos/etcd/store/stats_test.go @@ -18,7 +18,7 @@ import ( "testing" "time" - "github.com/stretchr/testify/assert" + "github.com/coreos/etcd/pkg/testutil" ) // Ensure that a successful Get is recorded in the stats. @@ -26,7 +26,7 @@ func TestStoreStatsGetSuccess(t *testing.T) { s := newStore() s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: Permanent}) s.Get("/foo", false, false) - assert.Equal(t, uint64(1), s.Stats.GetSuccess, "") + testutil.AssertEqual(t, uint64(1), s.Stats.GetSuccess, "") } // Ensure that a failed Get is recorded in the stats. @@ -34,14 +34,14 @@ func TestStoreStatsGetFail(t *testing.T) { s := newStore() s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: Permanent}) s.Get("/no_such_key", false, false) - assert.Equal(t, uint64(1), s.Stats.GetFail, "") + testutil.AssertEqual(t, uint64(1), s.Stats.GetFail, "") } // Ensure that a successful Create is recorded in the stats. func TestStoreStatsCreateSuccess(t *testing.T) { s := newStore() s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: Permanent}) - assert.Equal(t, uint64(1), s.Stats.CreateSuccess, "") + testutil.AssertEqual(t, uint64(1), s.Stats.CreateSuccess, "") } // Ensure that a failed Create is recorded in the stats. @@ -49,7 +49,7 @@ func TestStoreStatsCreateFail(t *testing.T) { s := newStore() s.Create("/foo", true, "", false, TTLOptionSet{ExpireTime: Permanent}) s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: Permanent}) - assert.Equal(t, uint64(1), s.Stats.CreateFail, "") + testutil.AssertEqual(t, uint64(1), s.Stats.CreateFail, "") } // Ensure that a successful Update is recorded in the stats. @@ -57,14 +57,14 @@ func TestStoreStatsUpdateSuccess(t *testing.T) { s := newStore() s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: Permanent}) s.Update("/foo", "baz", TTLOptionSet{ExpireTime: Permanent}) - assert.Equal(t, uint64(1), s.Stats.UpdateSuccess, "") + testutil.AssertEqual(t, uint64(1), s.Stats.UpdateSuccess, "") } // Ensure that a failed Update is recorded in the stats. func TestStoreStatsUpdateFail(t *testing.T) { s := newStore() s.Update("/foo", "bar", TTLOptionSet{ExpireTime: Permanent}) - assert.Equal(t, uint64(1), s.Stats.UpdateFail, "") + testutil.AssertEqual(t, uint64(1), s.Stats.UpdateFail, "") } // Ensure that a successful CAS is recorded in the stats. @@ -72,7 +72,7 @@ func TestStoreStatsCompareAndSwapSuccess(t *testing.T) { s := newStore() s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: Permanent}) s.CompareAndSwap("/foo", "bar", 0, "baz", TTLOptionSet{ExpireTime: Permanent}) - assert.Equal(t, uint64(1), s.Stats.CompareAndSwapSuccess, "") + testutil.AssertEqual(t, uint64(1), s.Stats.CompareAndSwapSuccess, "") } // Ensure that a failed CAS is recorded in the stats. 
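Returning briefly to the snap/db.go hunk above: DBFilePath now reports a missing snapshot with the exported sentinel ErrNoDBSnapshot rather than an ad-hoc fmt.Errorf string, so callers can tell "no snapshot received yet" apart from real I/O failures with a simple equality check. A hedged sketch of the caller-side pattern; the directory name and snapshot id are illustrative:

package main

import (
	"log"

	"github.com/coreos/etcd/snap"
)

func main() {
	// snap.New only records the directory; DBFilePath inspects it on demand.
	s := snap.New("./snapdir")

	path, err := s.DBFilePath(1)
	switch {
	case err == nil:
		log.Printf("snapshot db at %s", path)
	case err == snap.ErrNoDBSnapshot:
		// The sentinel makes this case checkable without string matching.
		log.Printf("no database snapshot received yet")
	default:
		// e.g. the snapshot directory is missing or unreadable
		log.Printf("cannot read snapshot dir: %v", err)
	}
}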
@@ -80,7 +80,7 @@ func TestStoreStatsCompareAndSwapFail(t *testing.T) { s := newStore() s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: Permanent}) s.CompareAndSwap("/foo", "wrong_value", 0, "baz", TTLOptionSet{ExpireTime: Permanent}) - assert.Equal(t, uint64(1), s.Stats.CompareAndSwapFail, "") + testutil.AssertEqual(t, uint64(1), s.Stats.CompareAndSwapFail, "") } // Ensure that a successful Delete is recorded in the stats. @@ -88,14 +88,14 @@ func TestStoreStatsDeleteSuccess(t *testing.T) { s := newStore() s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: Permanent}) s.Delete("/foo", false, false) - assert.Equal(t, uint64(1), s.Stats.DeleteSuccess, "") + testutil.AssertEqual(t, uint64(1), s.Stats.DeleteSuccess, "") } // Ensure that a failed Delete is recorded in the stats. func TestStoreStatsDeleteFail(t *testing.T) { s := newStore() s.Delete("/foo", false, false) - assert.Equal(t, uint64(1), s.Stats.DeleteFail, "") + testutil.AssertEqual(t, uint64(1), s.Stats.DeleteFail, "") } //Ensure that the number of expirations is recorded in the stats. @@ -105,8 +105,8 @@ func TestStoreStatsExpireCount(t *testing.T) { s.clock = fc s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: fc.Now().Add(500 * time.Millisecond)}) - assert.Equal(t, uint64(0), s.Stats.ExpireCount, "") + testutil.AssertEqual(t, uint64(0), s.Stats.ExpireCount, "") fc.Advance(600 * time.Millisecond) s.DeleteExpiredKeys(fc.Now()) - assert.Equal(t, uint64(1), s.Stats.ExpireCount, "") + testutil.AssertEqual(t, uint64(1), s.Stats.ExpireCount, "") } diff --git a/github.com/coreos/etcd/store/store_test.go b/github.com/coreos/etcd/store/store_test.go index 1a4527352d..49f6fc620e 100644 --- a/github.com/coreos/etcd/store/store_test.go +++ b/github.com/coreos/etcd/store/store_test.go @@ -19,17 +19,17 @@ import ( "time" etcdErr "github.com/coreos/etcd/error" + "github.com/coreos/etcd/pkg/testutil" "github.com/jonboulle/clockwork" - "github.com/stretchr/testify/assert" ) func TestNewStoreWithNamespaces(t *testing.T) { s := newStore("/0", "/1") _, err := s.Get("/0", false, false) - assert.Nil(t, err, "") + testutil.AssertNil(t, err) _, err = s.Get("/1", false, false) - assert.Nil(t, err, "") + testutil.AssertNil(t, err) } // Ensure that the store can retrieve an existing value. @@ -38,11 +38,11 @@ func TestStoreGetValue(t *testing.T) { s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: Permanent}) var eidx uint64 = 1 e, err := s.Get("/foo", false, false) - assert.Nil(t, err, "") - assert.Equal(t, e.EtcdIndex, eidx, "") - assert.Equal(t, e.Action, "get", "") - assert.Equal(t, e.Node.Key, "/foo", "") - assert.Equal(t, *e.Node.Value, "bar", "") + testutil.AssertNil(t, err) + testutil.AssertEqual(t, e.EtcdIndex, eidx) + testutil.AssertEqual(t, e.Action, "get") + testutil.AssertEqual(t, e.Node.Key, "/foo") + testutil.AssertEqual(t, *e.Node.Value, "bar") } // Ensure that any TTL <= minExpireTime becomes Permanent @@ -51,18 +51,18 @@ func TestMinExpireTime(t *testing.T) { fc := clockwork.NewFakeClock() s.clock = fc // FakeClock starts at 0, so minExpireTime should be far in the future.. 
but just in case - assert.True(t, minExpireTime.After(fc.Now()), "minExpireTime should be ahead of FakeClock!") + testutil.AssertTrue(t, minExpireTime.After(fc.Now()), "minExpireTime should be ahead of FakeClock!") s.Create("/foo", false, "Y", false, TTLOptionSet{ExpireTime: fc.Now().Add(3 * time.Second)}) fc.Advance(5 * time.Second) // Ensure it hasn't expired s.DeleteExpiredKeys(fc.Now()) var eidx uint64 = 1 e, err := s.Get("/foo", true, false) - assert.Nil(t, err, "") - assert.Equal(t, e.EtcdIndex, eidx, "") - assert.Equal(t, e.Action, "get", "") - assert.Equal(t, e.Node.Key, "/foo", "") - assert.Equal(t, e.Node.TTL, int64(0)) + testutil.AssertNil(t, err) + testutil.AssertEqual(t, e.EtcdIndex, eidx) + testutil.AssertEqual(t, e.Action, "get") + testutil.AssertEqual(t, e.Node.Key, "/foo") + testutil.AssertEqual(t, e.Node.TTL, int64(0)) } // Ensure that the store can recursively retrieve a directory listing. @@ -80,20 +80,20 @@ func TestStoreGetDirectory(t *testing.T) { s.Create("/foo/baz/ttl", false, "Y", false, TTLOptionSet{ExpireTime: fc.Now().Add(time.Second * 3)}) var eidx uint64 = 7 e, err := s.Get("/foo", true, false) - assert.Nil(t, err, "") - assert.Equal(t, e.EtcdIndex, eidx, "") - assert.Equal(t, e.Action, "get", "") - assert.Equal(t, e.Node.Key, "/foo", "") - assert.Equal(t, len(e.Node.Nodes), 2, "") + testutil.AssertNil(t, err) + testutil.AssertEqual(t, e.EtcdIndex, eidx) + testutil.AssertEqual(t, e.Action, "get") + testutil.AssertEqual(t, e.Node.Key, "/foo") + testutil.AssertEqual(t, len(e.Node.Nodes), 2) var bazNodes NodeExterns for _, node := range e.Node.Nodes { switch node.Key { case "/foo/bar": - assert.Equal(t, *node.Value, "X", "") - assert.Equal(t, node.Dir, false, "") + testutil.AssertEqual(t, *node.Value, "X") + testutil.AssertEqual(t, node.Dir, false) case "/foo/baz": - assert.Equal(t, node.Dir, true, "") - assert.Equal(t, len(node.Nodes), 2, "") + testutil.AssertEqual(t, node.Dir, true) + testutil.AssertEqual(t, len(node.Nodes), 2) bazNodes = node.Nodes default: t.Errorf("key = %s, not matched", node.Key) @@ -102,12 +102,12 @@ func TestStoreGetDirectory(t *testing.T) { for _, node := range bazNodes { switch node.Key { case "/foo/baz/bat": - assert.Equal(t, *node.Value, "Y", "") - assert.Equal(t, node.Dir, false, "") + testutil.AssertEqual(t, *node.Value, "Y") + testutil.AssertEqual(t, node.Dir, false) case "/foo/baz/ttl": - assert.Equal(t, *node.Value, "Y", "") - assert.Equal(t, node.Dir, false, "") - assert.Equal(t, node.TTL, int64(3), "") + testutil.AssertEqual(t, *node.Value, "Y") + testutil.AssertEqual(t, node.Dir, false) + testutil.AssertEqual(t, node.TTL, int64(3)) default: t.Errorf("key = %s, not matched", node.Key) } @@ -125,8 +125,8 @@ func TestStoreGetSorted(t *testing.T) { s.Create("/foo/y/b", false, "0", false, TTLOptionSet{ExpireTime: Permanent}) var eidx uint64 = 6 e, err := s.Get("/foo", true, true) - assert.Nil(t, err, "") - assert.Equal(t, e.EtcdIndex, eidx, "") + testutil.AssertNil(t, err) + testutil.AssertEqual(t, e.EtcdIndex, eidx) var yNodes NodeExterns sortedStrings := []string{"/foo/x", "/foo/y", "/foo/z"} @@ -155,67 +155,67 @@ func TestSet(t *testing.T) { // Set /foo="" var eidx uint64 = 1 e, err := s.Set("/foo", false, "", TTLOptionSet{ExpireTime: Permanent}) - assert.Nil(t, err, "") - assert.Equal(t, e.EtcdIndex, eidx, "") - assert.Equal(t, e.Action, "set", "") - assert.Equal(t, e.Node.Key, "/foo", "") - assert.False(t, e.Node.Dir, "") - assert.Equal(t, *e.Node.Value, "", "") - assert.Nil(t, e.Node.Nodes, "") - assert.Nil(t, 
e.Node.Expiration, "") - assert.Equal(t, e.Node.TTL, int64(0), "") - assert.Equal(t, e.Node.ModifiedIndex, uint64(1), "") + testutil.AssertNil(t, err) + testutil.AssertEqual(t, e.EtcdIndex, eidx) + testutil.AssertEqual(t, e.Action, "set") + testutil.AssertEqual(t, e.Node.Key, "/foo") + testutil.AssertFalse(t, e.Node.Dir) + testutil.AssertEqual(t, *e.Node.Value, "") + testutil.AssertNil(t, e.Node.Nodes) + testutil.AssertNil(t, e.Node.Expiration) + testutil.AssertEqual(t, e.Node.TTL, int64(0)) + testutil.AssertEqual(t, e.Node.ModifiedIndex, uint64(1)) // Set /foo="bar" eidx = 2 e, err = s.Set("/foo", false, "bar", TTLOptionSet{ExpireTime: Permanent}) - assert.Nil(t, err, "") - assert.Equal(t, e.EtcdIndex, eidx, "") - assert.Equal(t, e.Action, "set", "") - assert.Equal(t, e.Node.Key, "/foo", "") - assert.False(t, e.Node.Dir, "") - assert.Equal(t, *e.Node.Value, "bar", "") - assert.Nil(t, e.Node.Nodes, "") - assert.Nil(t, e.Node.Expiration, "") - assert.Equal(t, e.Node.TTL, int64(0), "") - assert.Equal(t, e.Node.ModifiedIndex, uint64(2), "") + testutil.AssertNil(t, err) + testutil.AssertEqual(t, e.EtcdIndex, eidx) + testutil.AssertEqual(t, e.Action, "set") + testutil.AssertEqual(t, e.Node.Key, "/foo") + testutil.AssertFalse(t, e.Node.Dir) + testutil.AssertEqual(t, *e.Node.Value, "bar") + testutil.AssertNil(t, e.Node.Nodes) + testutil.AssertNil(t, e.Node.Expiration) + testutil.AssertEqual(t, e.Node.TTL, int64(0)) + testutil.AssertEqual(t, e.Node.ModifiedIndex, uint64(2)) // check prevNode - assert.NotNil(t, e.PrevNode, "") - assert.Equal(t, e.PrevNode.Key, "/foo", "") - assert.Equal(t, *e.PrevNode.Value, "", "") - assert.Equal(t, e.PrevNode.ModifiedIndex, uint64(1), "") + testutil.AssertNotNil(t, e.PrevNode) + testutil.AssertEqual(t, e.PrevNode.Key, "/foo") + testutil.AssertEqual(t, *e.PrevNode.Value, "") + testutil.AssertEqual(t, e.PrevNode.ModifiedIndex, uint64(1)) // Set /foo="baz" (for testing prevNode) eidx = 3 e, err = s.Set("/foo", false, "baz", TTLOptionSet{ExpireTime: Permanent}) - assert.Nil(t, err, "") - assert.Equal(t, e.EtcdIndex, eidx, "") - assert.Equal(t, e.Action, "set", "") - assert.Equal(t, e.Node.Key, "/foo", "") - assert.False(t, e.Node.Dir, "") - assert.Equal(t, *e.Node.Value, "baz", "") - assert.Nil(t, e.Node.Nodes, "") - assert.Nil(t, e.Node.Expiration, "") - assert.Equal(t, e.Node.TTL, int64(0), "") - assert.Equal(t, e.Node.ModifiedIndex, uint64(3), "") + testutil.AssertNil(t, err) + testutil.AssertEqual(t, e.EtcdIndex, eidx) + testutil.AssertEqual(t, e.Action, "set") + testutil.AssertEqual(t, e.Node.Key, "/foo") + testutil.AssertFalse(t, e.Node.Dir) + testutil.AssertEqual(t, *e.Node.Value, "baz") + testutil.AssertNil(t, e.Node.Nodes) + testutil.AssertNil(t, e.Node.Expiration) + testutil.AssertEqual(t, e.Node.TTL, int64(0)) + testutil.AssertEqual(t, e.Node.ModifiedIndex, uint64(3)) // check prevNode - assert.NotNil(t, e.PrevNode, "") - assert.Equal(t, e.PrevNode.Key, "/foo", "") - assert.Equal(t, *e.PrevNode.Value, "bar", "") - assert.Equal(t, e.PrevNode.ModifiedIndex, uint64(2), "") + testutil.AssertNotNil(t, e.PrevNode) + testutil.AssertEqual(t, e.PrevNode.Key, "/foo") + testutil.AssertEqual(t, *e.PrevNode.Value, "bar") + testutil.AssertEqual(t, e.PrevNode.ModifiedIndex, uint64(2)) // Set /dir as a directory eidx = 4 e, err = s.Set("/dir", true, "", TTLOptionSet{ExpireTime: Permanent}) - assert.Nil(t, err, "") - assert.Equal(t, e.EtcdIndex, eidx, "") - assert.Equal(t, e.Action, "set", "") - assert.Equal(t, e.Node.Key, "/dir", "") - assert.True(t, e.Node.Dir, "") - 
assert.Nil(t, e.Node.Value) - assert.Nil(t, e.Node.Nodes, "") - assert.Nil(t, e.Node.Expiration, "") - assert.Equal(t, e.Node.TTL, int64(0), "") - assert.Equal(t, e.Node.ModifiedIndex, uint64(4), "") + testutil.AssertNil(t, err) + testutil.AssertEqual(t, e.EtcdIndex, eidx) + testutil.AssertEqual(t, e.Action, "set") + testutil.AssertEqual(t, e.Node.Key, "/dir") + testutil.AssertTrue(t, e.Node.Dir) + testutil.AssertNil(t, e.Node.Value) + testutil.AssertNil(t, e.Node.Nodes) + testutil.AssertNil(t, e.Node.Expiration) + testutil.AssertEqual(t, e.Node.TTL, int64(0)) + testutil.AssertEqual(t, e.Node.ModifiedIndex, uint64(4)) } // Ensure that the store can create a new key if it doesn't already exist. @@ -224,30 +224,30 @@ func TestStoreCreateValue(t *testing.T) { // Create /foo=bar var eidx uint64 = 1 e, err := s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: Permanent}) - assert.Nil(t, err, "") - assert.Equal(t, e.EtcdIndex, eidx, "") - assert.Equal(t, e.Action, "create", "") - assert.Equal(t, e.Node.Key, "/foo", "") - assert.False(t, e.Node.Dir, "") - assert.Equal(t, *e.Node.Value, "bar", "") - assert.Nil(t, e.Node.Nodes, "") - assert.Nil(t, e.Node.Expiration, "") - assert.Equal(t, e.Node.TTL, int64(0), "") - assert.Equal(t, e.Node.ModifiedIndex, uint64(1), "") + testutil.AssertNil(t, err) + testutil.AssertEqual(t, e.EtcdIndex, eidx) + testutil.AssertEqual(t, e.Action, "create") + testutil.AssertEqual(t, e.Node.Key, "/foo") + testutil.AssertFalse(t, e.Node.Dir) + testutil.AssertEqual(t, *e.Node.Value, "bar") + testutil.AssertNil(t, e.Node.Nodes) + testutil.AssertNil(t, e.Node.Expiration) + testutil.AssertEqual(t, e.Node.TTL, int64(0)) + testutil.AssertEqual(t, e.Node.ModifiedIndex, uint64(1)) // Create /empty="" eidx = 2 e, err = s.Create("/empty", false, "", false, TTLOptionSet{ExpireTime: Permanent}) - assert.Nil(t, err, "") - assert.Equal(t, e.EtcdIndex, eidx, "") - assert.Equal(t, e.Action, "create", "") - assert.Equal(t, e.Node.Key, "/empty", "") - assert.False(t, e.Node.Dir, "") - assert.Equal(t, *e.Node.Value, "", "") - assert.Nil(t, e.Node.Nodes, "") - assert.Nil(t, e.Node.Expiration, "") - assert.Equal(t, e.Node.TTL, int64(0), "") - assert.Equal(t, e.Node.ModifiedIndex, uint64(2), "") + testutil.AssertNil(t, err) + testutil.AssertEqual(t, e.EtcdIndex, eidx) + testutil.AssertEqual(t, e.Action, "create") + testutil.AssertEqual(t, e.Node.Key, "/empty") + testutil.AssertFalse(t, e.Node.Dir) + testutil.AssertEqual(t, *e.Node.Value, "") + testutil.AssertNil(t, e.Node.Nodes) + testutil.AssertNil(t, e.Node.Expiration) + testutil.AssertEqual(t, e.Node.TTL, int64(0)) + testutil.AssertEqual(t, e.Node.ModifiedIndex, uint64(2)) } @@ -256,11 +256,11 @@ func TestStoreCreateDirectory(t *testing.T) { s := newStore() var eidx uint64 = 1 e, err := s.Create("/foo", true, "", false, TTLOptionSet{ExpireTime: Permanent}) - assert.Nil(t, err, "") - assert.Equal(t, e.EtcdIndex, eidx, "") - assert.Equal(t, e.Action, "create", "") - assert.Equal(t, e.Node.Key, "/foo", "") - assert.True(t, e.Node.Dir, "") + testutil.AssertNil(t, err) + testutil.AssertEqual(t, e.EtcdIndex, eidx) + testutil.AssertEqual(t, e.Action, "create") + testutil.AssertEqual(t, e.Node.Key, "/foo") + testutil.AssertTrue(t, e.Node.Dir) } // Ensure that the store fails to create a key if it already exists. 
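The store tests in this file (and in node_extern_test.go and stats_test.go above) replace github.com/stretchr/testify/assert with the in-repo github.com/coreos/etcd/pkg/testutil helpers, which lets the vendored testify dependency be dropped (see the updatedep.sh change earlier in this patch). As an illustration of how small such helpers can be, here is a hedged sketch built on reflect.DeepEqual; the real testutil signatures may differ, for example in optional message arguments.

package testhelpers

import (
	"reflect"
	"testing"
)

// AssertEqual fails the test when expected and actual are not deeply equal.
func AssertEqual(t *testing.T, expected, actual interface{}) {
	if !reflect.DeepEqual(expected, actual) {
		t.Fatalf("expected %+v, got %+v", expected, actual)
	}
}

// AssertNil fails the test when v is neither a nil interface nor a nil
// pointer, slice, map, chan or func value.
func AssertNil(t *testing.T, v interface{}) {
	if v == nil {
		return
	}
	rv := reflect.ValueOf(v)
	switch rv.Kind() {
	case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
		if rv.IsNil() {
			return
		}
	}
	t.Fatalf("expected nil, got %+v", v)
}

A test then calls AssertEqual(t, e.EtcdIndex, eidx) and AssertNil(t, err), mirroring the converted assertions above.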
@@ -272,11 +272,11 @@ func TestStoreCreateFailsIfExists(t *testing.T) { // create /foo as dir again e, _err := s.Create("/foo", true, "", false, TTLOptionSet{ExpireTime: Permanent}) err := _err.(*etcdErr.Error) - assert.Equal(t, err.ErrorCode, etcdErr.EcodeNodeExist, "") - assert.Equal(t, err.Message, "Key already exists", "") - assert.Equal(t, err.Cause, "/foo", "") - assert.Equal(t, err.Index, uint64(1), "") - assert.Nil(t, e, 0, "") + testutil.AssertEqual(t, err.ErrorCode, etcdErr.EcodeNodeExist) + testutil.AssertEqual(t, err.Message, "Key already exists") + testutil.AssertEqual(t, err.Cause, "/foo") + testutil.AssertEqual(t, err.Index, uint64(1)) + testutil.AssertNil(t, e) } // Ensure that the store can update a key if it already exists. @@ -287,44 +287,44 @@ func TestStoreUpdateValue(t *testing.T) { // update /foo="bzr" var eidx uint64 = 2 e, err := s.Update("/foo", "baz", TTLOptionSet{ExpireTime: Permanent}) - assert.Nil(t, err, "") - assert.Equal(t, e.EtcdIndex, eidx, "") - assert.Equal(t, e.Action, "update", "") - assert.Equal(t, e.Node.Key, "/foo", "") - assert.False(t, e.Node.Dir, "") - assert.Equal(t, *e.Node.Value, "baz", "") - assert.Equal(t, e.Node.TTL, int64(0), "") - assert.Equal(t, e.Node.ModifiedIndex, uint64(2), "") + testutil.AssertNil(t, err) + testutil.AssertEqual(t, e.EtcdIndex, eidx) + testutil.AssertEqual(t, e.Action, "update") + testutil.AssertEqual(t, e.Node.Key, "/foo") + testutil.AssertFalse(t, e.Node.Dir) + testutil.AssertEqual(t, *e.Node.Value, "baz") + testutil.AssertEqual(t, e.Node.TTL, int64(0)) + testutil.AssertEqual(t, e.Node.ModifiedIndex, uint64(2)) // check prevNode - assert.Equal(t, e.PrevNode.Key, "/foo", "") - assert.Equal(t, *e.PrevNode.Value, "bar", "") - assert.Equal(t, e.PrevNode.TTL, int64(0), "") - assert.Equal(t, e.PrevNode.ModifiedIndex, uint64(1), "") + testutil.AssertEqual(t, e.PrevNode.Key, "/foo") + testutil.AssertEqual(t, *e.PrevNode.Value, "bar") + testutil.AssertEqual(t, e.PrevNode.TTL, int64(0)) + testutil.AssertEqual(t, e.PrevNode.ModifiedIndex, uint64(1)) e, _ = s.Get("/foo", false, false) - assert.Equal(t, *e.Node.Value, "baz", "") - assert.Equal(t, e.EtcdIndex, eidx, "") + testutil.AssertEqual(t, *e.Node.Value, "baz") + testutil.AssertEqual(t, e.EtcdIndex, eidx) // update /foo="" eidx = 3 e, err = s.Update("/foo", "", TTLOptionSet{ExpireTime: Permanent}) - assert.Nil(t, err, "") - assert.Equal(t, e.EtcdIndex, eidx, "") - assert.Equal(t, e.Action, "update", "") - assert.Equal(t, e.Node.Key, "/foo", "") - assert.False(t, e.Node.Dir, "") - assert.Equal(t, *e.Node.Value, "", "") - assert.Equal(t, e.Node.TTL, int64(0), "") - assert.Equal(t, e.Node.ModifiedIndex, uint64(3), "") + testutil.AssertNil(t, err) + testutil.AssertEqual(t, e.EtcdIndex, eidx) + testutil.AssertEqual(t, e.Action, "update") + testutil.AssertEqual(t, e.Node.Key, "/foo") + testutil.AssertFalse(t, e.Node.Dir) + testutil.AssertEqual(t, *e.Node.Value, "") + testutil.AssertEqual(t, e.Node.TTL, int64(0)) + testutil.AssertEqual(t, e.Node.ModifiedIndex, uint64(3)) // check prevNode - assert.Equal(t, e.PrevNode.Key, "/foo", "") - assert.Equal(t, *e.PrevNode.Value, "baz", "") - assert.Equal(t, e.PrevNode.TTL, int64(0), "") - assert.Equal(t, e.PrevNode.ModifiedIndex, uint64(2), "") + testutil.AssertEqual(t, e.PrevNode.Key, "/foo") + testutil.AssertEqual(t, *e.PrevNode.Value, "baz") + testutil.AssertEqual(t, e.PrevNode.TTL, int64(0)) + testutil.AssertEqual(t, e.PrevNode.ModifiedIndex, uint64(2)) e, _ = s.Get("/foo", false, false) - assert.Equal(t, e.EtcdIndex, eidx, "") - 
assert.Equal(t, *e.Node.Value, "", "") + testutil.AssertEqual(t, e.EtcdIndex, eidx) + testutil.AssertEqual(t, *e.Node.Value, "") } // Ensure that the store cannot update a directory. @@ -333,10 +333,10 @@ func TestStoreUpdateFailsIfDirectory(t *testing.T) { s.Create("/foo", true, "", false, TTLOptionSet{ExpireTime: Permanent}) e, _err := s.Update("/foo", "baz", TTLOptionSet{ExpireTime: Permanent}) err := _err.(*etcdErr.Error) - assert.Equal(t, err.ErrorCode, etcdErr.EcodeNotFile, "") - assert.Equal(t, err.Message, "Not a file", "") - assert.Equal(t, err.Cause, "/foo", "") - assert.Nil(t, e, "") + testutil.AssertEqual(t, err.ErrorCode, etcdErr.EcodeNotFile) + testutil.AssertEqual(t, err.Message, "Not a file") + testutil.AssertEqual(t, err.Cause, "/foo") + testutil.AssertNil(t, e) } // Ensure that the store can update the TTL on a value. @@ -348,15 +348,15 @@ func TestStoreUpdateValueTTL(t *testing.T) { var eidx uint64 = 2 s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: Permanent}) _, err := s.Update("/foo", "baz", TTLOptionSet{ExpireTime: fc.Now().Add(500 * time.Millisecond)}) - assert.Nil(t, err, "") + testutil.AssertNil(t, err) e, _ := s.Get("/foo", false, false) - assert.Equal(t, *e.Node.Value, "baz", "") - assert.Equal(t, e.EtcdIndex, eidx, "") + testutil.AssertEqual(t, *e.Node.Value, "baz") + testutil.AssertEqual(t, e.EtcdIndex, eidx) fc.Advance(600 * time.Millisecond) s.DeleteExpiredKeys(fc.Now()) e, err = s.Get("/foo", false, false) - assert.Nil(t, e, "") - assert.Equal(t, err.(*etcdErr.Error).ErrorCode, etcdErr.EcodeKeyNotFound, "") + testutil.AssertNil(t, e) + testutil.AssertEqual(t, err.(*etcdErr.Error).ErrorCode, etcdErr.EcodeKeyNotFound) } // Ensure that the store can update the TTL on a directory. @@ -369,18 +369,18 @@ func TestStoreUpdateDirTTL(t *testing.T) { s.Create("/foo", true, "", false, TTLOptionSet{ExpireTime: Permanent}) s.Create("/foo/bar", false, "baz", false, TTLOptionSet{ExpireTime: Permanent}) e, err := s.Update("/foo", "", TTLOptionSet{ExpireTime: fc.Now().Add(500 * time.Millisecond)}) - assert.Nil(t, err, "") - assert.Equal(t, e.Node.Dir, true, "") - assert.Equal(t, e.EtcdIndex, eidx, "") + testutil.AssertNil(t, err) + testutil.AssertEqual(t, e.Node.Dir, true) + testutil.AssertEqual(t, e.EtcdIndex, eidx) e, _ = s.Get("/foo/bar", false, false) - assert.Equal(t, *e.Node.Value, "baz", "") - assert.Equal(t, e.EtcdIndex, eidx, "") + testutil.AssertEqual(t, *e.Node.Value, "baz") + testutil.AssertEqual(t, e.EtcdIndex, eidx) fc.Advance(600 * time.Millisecond) s.DeleteExpiredKeys(fc.Now()) e, err = s.Get("/foo/bar", false, false) - assert.Nil(t, e, "") - assert.Equal(t, err.(*etcdErr.Error).ErrorCode, etcdErr.EcodeKeyNotFound, "") + testutil.AssertNil(t, e) + testutil.AssertEqual(t, err.(*etcdErr.Error).ErrorCode, etcdErr.EcodeKeyNotFound) } // Ensure that the store can delete a value. 
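Several of the failure-path tests above assert on the concrete *etcdErr.Error returned by the v2 store, comparing ErrorCode, Message and Cause after a type assertion. Outside a controlled test, the comma-ok form of that assertion is the safer pattern, since an unexpected error type then falls through instead of panicking. A small hedged sketch; the store setup and key are illustrative:

package main

import (
	"fmt"

	etcdErr "github.com/coreos/etcd/error"
	"github.com/coreos/etcd/store"
)

func main() {
	s := store.New()

	// "/foo" is a directory, so a plain Update is expected to fail with EcodeNotFile.
	s.Create("/foo", true, "", false, store.TTLOptionSet{})
	_, err := s.Update("/foo", "baz", store.TTLOptionSet{})

	// Comma-ok type assertion: no panic if some other error type ever shows up.
	if e, ok := err.(*etcdErr.Error); ok {
		fmt.Printf("code=%d message=%q cause=%q\n", e.ErrorCode, e.Message, e.Cause)
	} else {
		fmt.Println("unexpected error:", err)
	}
}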
@@ -389,13 +389,13 @@ func TestStoreDeleteValue(t *testing.T) { var eidx uint64 = 2 s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: Permanent}) e, err := s.Delete("/foo", false, false) - assert.Nil(t, err, "") - assert.Equal(t, e.EtcdIndex, eidx, "") - assert.Equal(t, e.Action, "delete", "") + testutil.AssertNil(t, err) + testutil.AssertEqual(t, e.EtcdIndex, eidx) + testutil.AssertEqual(t, e.Action, "delete") // check prevNode - assert.NotNil(t, e.PrevNode, "") - assert.Equal(t, e.PrevNode.Key, "/foo", "") - assert.Equal(t, *e.PrevNode.Value, "bar", "") + testutil.AssertNotNil(t, e.PrevNode) + testutil.AssertEqual(t, e.PrevNode.Key, "/foo") + testutil.AssertEqual(t, *e.PrevNode.Value, "bar") } // Ensure that the store can delete a directory if recursive is specified. @@ -407,28 +407,28 @@ func TestStoreDeleteDiretory(t *testing.T) { // delete /foo with dir = true and recursive = false // this should succeed, since the directory is empty e, err := s.Delete("/foo", true, false) - assert.Nil(t, err, "") - assert.Equal(t, e.EtcdIndex, eidx, "") - assert.Equal(t, e.Action, "delete", "") + testutil.AssertNil(t, err) + testutil.AssertEqual(t, e.EtcdIndex, eidx) + testutil.AssertEqual(t, e.Action, "delete") // check prevNode - assert.NotNil(t, e.PrevNode, "") - assert.Equal(t, e.PrevNode.Key, "/foo", "") - assert.Equal(t, e.PrevNode.Dir, true, "") + testutil.AssertNotNil(t, e.PrevNode) + testutil.AssertEqual(t, e.PrevNode.Key, "/foo") + testutil.AssertEqual(t, e.PrevNode.Dir, true) // create directory /foo and directory /foo/bar s.Create("/foo/bar", true, "", false, TTLOptionSet{ExpireTime: Permanent}) // delete /foo with dir = true and recursive = false // this should fail, since the directory is not empty _, err = s.Delete("/foo", true, false) - assert.NotNil(t, err, "") + testutil.AssertNotNil(t, err) // delete /foo with dir=false and recursive = true // this should succeed, since recursive implies dir=true // and recursively delete should be able to delete all // items under the given directory e, err = s.Delete("/foo", false, true) - assert.Nil(t, err, "") - assert.Equal(t, e.Action, "delete", "") + testutil.AssertNil(t, err) + testutil.AssertEqual(t, e.Action, "delete") } @@ -439,9 +439,9 @@ func TestStoreDeleteDiretoryFailsIfNonRecursiveAndDir(t *testing.T) { s.Create("/foo", true, "", false, TTLOptionSet{ExpireTime: Permanent}) e, _err := s.Delete("/foo", false, false) err := _err.(*etcdErr.Error) - assert.Equal(t, err.ErrorCode, etcdErr.EcodeNotFile, "") - assert.Equal(t, err.Message, "Not a file", "") - assert.Nil(t, e, "") + testutil.AssertEqual(t, err.ErrorCode, etcdErr.EcodeNotFile) + testutil.AssertEqual(t, err.Message, "Not a file") + testutil.AssertNil(t, e) } func TestRootRdOnly(t *testing.T) { @@ -449,19 +449,19 @@ func TestRootRdOnly(t *testing.T) { for _, tt := range []string{"/", "/0"} { _, err := s.Set(tt, true, "", TTLOptionSet{ExpireTime: Permanent}) - assert.NotNil(t, err, "") + testutil.AssertNotNil(t, err) _, err = s.Delete(tt, true, true) - assert.NotNil(t, err, "") + testutil.AssertNotNil(t, err) _, err = s.Create(tt, true, "", false, TTLOptionSet{ExpireTime: Permanent}) - assert.NotNil(t, err, "") + testutil.AssertNotNil(t, err) _, err = s.Update(tt, "", TTLOptionSet{ExpireTime: Permanent}) - assert.NotNil(t, err, "") + testutil.AssertNotNil(t, err) _, err = s.CompareAndSwap(tt, "", 0, "", TTLOptionSet{ExpireTime: Permanent}) - assert.NotNil(t, err, "") + testutil.AssertNotNil(t, err) } } @@ -470,17 +470,17 @@ func TestStoreCompareAndDeletePrevValue(t 
*testing.T) { var eidx uint64 = 2 s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: Permanent}) e, err := s.CompareAndDelete("/foo", "bar", 0) - assert.Nil(t, err, "") - assert.Equal(t, e.EtcdIndex, eidx, "") - assert.Equal(t, e.Action, "compareAndDelete", "") - assert.Equal(t, e.Node.Key, "/foo", "") + testutil.AssertNil(t, err) + testutil.AssertEqual(t, e.EtcdIndex, eidx) + testutil.AssertEqual(t, e.Action, "compareAndDelete") + testutil.AssertEqual(t, e.Node.Key, "/foo") // check prevNode - assert.NotNil(t, e.PrevNode, "") - assert.Equal(t, e.PrevNode.Key, "/foo", "") - assert.Equal(t, *e.PrevNode.Value, "bar", "") - assert.Equal(t, e.PrevNode.ModifiedIndex, uint64(1), "") - assert.Equal(t, e.PrevNode.CreatedIndex, uint64(1), "") + testutil.AssertNotNil(t, e.PrevNode) + testutil.AssertEqual(t, e.PrevNode.Key, "/foo") + testutil.AssertEqual(t, *e.PrevNode.Value, "bar") + testutil.AssertEqual(t, e.PrevNode.ModifiedIndex, uint64(1)) + testutil.AssertEqual(t, e.PrevNode.CreatedIndex, uint64(1)) } func TestStoreCompareAndDeletePrevValueFailsIfNotMatch(t *testing.T) { @@ -489,12 +489,12 @@ func TestStoreCompareAndDeletePrevValueFailsIfNotMatch(t *testing.T) { s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: Permanent}) e, _err := s.CompareAndDelete("/foo", "baz", 0) err := _err.(*etcdErr.Error) - assert.Equal(t, err.ErrorCode, etcdErr.EcodeTestFailed, "") - assert.Equal(t, err.Message, "Compare failed", "") - assert.Nil(t, e, "") + testutil.AssertEqual(t, err.ErrorCode, etcdErr.EcodeTestFailed) + testutil.AssertEqual(t, err.Message, "Compare failed") + testutil.AssertNil(t, e) e, _ = s.Get("/foo", false, false) - assert.Equal(t, e.EtcdIndex, eidx, "") - assert.Equal(t, *e.Node.Value, "bar", "") + testutil.AssertEqual(t, e.EtcdIndex, eidx) + testutil.AssertEqual(t, *e.Node.Value, "bar") } func TestStoreCompareAndDeletePrevIndex(t *testing.T) { @@ -502,15 +502,15 @@ func TestStoreCompareAndDeletePrevIndex(t *testing.T) { var eidx uint64 = 2 s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: Permanent}) e, err := s.CompareAndDelete("/foo", "", 1) - assert.Nil(t, err, "") - assert.Equal(t, e.EtcdIndex, eidx, "") - assert.Equal(t, e.Action, "compareAndDelete", "") + testutil.AssertNil(t, err) + testutil.AssertEqual(t, e.EtcdIndex, eidx) + testutil.AssertEqual(t, e.Action, "compareAndDelete") // check prevNode - assert.NotNil(t, e.PrevNode, "") - assert.Equal(t, e.PrevNode.Key, "/foo", "") - assert.Equal(t, *e.PrevNode.Value, "bar", "") - assert.Equal(t, e.PrevNode.ModifiedIndex, uint64(1), "") - assert.Equal(t, e.PrevNode.CreatedIndex, uint64(1), "") + testutil.AssertNotNil(t, e.PrevNode) + testutil.AssertEqual(t, e.PrevNode.Key, "/foo") + testutil.AssertEqual(t, *e.PrevNode.Value, "bar") + testutil.AssertEqual(t, e.PrevNode.ModifiedIndex, uint64(1)) + testutil.AssertEqual(t, e.PrevNode.CreatedIndex, uint64(1)) } func TestStoreCompareAndDeletePrevIndexFailsIfNotMatch(t *testing.T) { @@ -518,14 +518,14 @@ func TestStoreCompareAndDeletePrevIndexFailsIfNotMatch(t *testing.T) { var eidx uint64 = 1 s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: Permanent}) e, _err := s.CompareAndDelete("/foo", "", 100) - assert.NotNil(t, _err, "") + testutil.AssertNotNil(t, _err) err := _err.(*etcdErr.Error) - assert.Equal(t, err.ErrorCode, etcdErr.EcodeTestFailed, "") - assert.Equal(t, err.Message, "Compare failed", "") - assert.Nil(t, e, "") + testutil.AssertEqual(t, err.ErrorCode, etcdErr.EcodeTestFailed) + testutil.AssertEqual(t, err.Message, "Compare failed") 
+ testutil.AssertNil(t, e) e, _ = s.Get("/foo", false, false) - assert.Equal(t, e.EtcdIndex, eidx, "") - assert.Equal(t, *e.Node.Value, "bar", "") + testutil.AssertEqual(t, e.EtcdIndex, eidx) + testutil.AssertEqual(t, *e.Node.Value, "bar") } // Ensure that the store cannot delete a directory. @@ -533,9 +533,9 @@ func TestStoreCompareAndDeleteDiretoryFail(t *testing.T) { s := newStore() s.Create("/foo", true, "", false, TTLOptionSet{ExpireTime: Permanent}) _, _err := s.CompareAndDelete("/foo", "", 0) - assert.NotNil(t, _err, "") + testutil.AssertNotNil(t, _err) err := _err.(*etcdErr.Error) - assert.Equal(t, err.ErrorCode, etcdErr.EcodeNotFile, "") + testutil.AssertEqual(t, err.ErrorCode, etcdErr.EcodeNotFile) } // Ensure that the store can conditionally update a key if it has a previous value. @@ -544,19 +544,19 @@ func TestStoreCompareAndSwapPrevValue(t *testing.T) { var eidx uint64 = 2 s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: Permanent}) e, err := s.CompareAndSwap("/foo", "bar", 0, "baz", TTLOptionSet{ExpireTime: Permanent}) - assert.Nil(t, err, "") - assert.Equal(t, e.EtcdIndex, eidx, "") - assert.Equal(t, e.Action, "compareAndSwap", "") - assert.Equal(t, *e.Node.Value, "baz", "") + testutil.AssertNil(t, err) + testutil.AssertEqual(t, e.EtcdIndex, eidx) + testutil.AssertEqual(t, e.Action, "compareAndSwap") + testutil.AssertEqual(t, *e.Node.Value, "baz") // check prevNode - assert.NotNil(t, e.PrevNode, "") - assert.Equal(t, e.PrevNode.Key, "/foo", "") - assert.Equal(t, *e.PrevNode.Value, "bar", "") - assert.Equal(t, e.PrevNode.ModifiedIndex, uint64(1), "") - assert.Equal(t, e.PrevNode.CreatedIndex, uint64(1), "") + testutil.AssertNotNil(t, e.PrevNode) + testutil.AssertEqual(t, e.PrevNode.Key, "/foo") + testutil.AssertEqual(t, *e.PrevNode.Value, "bar") + testutil.AssertEqual(t, e.PrevNode.ModifiedIndex, uint64(1)) + testutil.AssertEqual(t, e.PrevNode.CreatedIndex, uint64(1)) e, _ = s.Get("/foo", false, false) - assert.Equal(t, *e.Node.Value, "baz", "") + testutil.AssertEqual(t, *e.Node.Value, "baz") } // Ensure that the store cannot conditionally update a key if it has the wrong previous value. @@ -566,12 +566,12 @@ func TestStoreCompareAndSwapPrevValueFailsIfNotMatch(t *testing.T) { s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: Permanent}) e, _err := s.CompareAndSwap("/foo", "wrong_value", 0, "baz", TTLOptionSet{ExpireTime: Permanent}) err := _err.(*etcdErr.Error) - assert.Equal(t, err.ErrorCode, etcdErr.EcodeTestFailed, "") - assert.Equal(t, err.Message, "Compare failed", "") - assert.Nil(t, e, "") + testutil.AssertEqual(t, err.ErrorCode, etcdErr.EcodeTestFailed) + testutil.AssertEqual(t, err.Message, "Compare failed") + testutil.AssertNil(t, e) e, _ = s.Get("/foo", false, false) - assert.Equal(t, *e.Node.Value, "bar", "") - assert.Equal(t, e.EtcdIndex, eidx, "") + testutil.AssertEqual(t, *e.Node.Value, "bar") + testutil.AssertEqual(t, e.EtcdIndex, eidx) } // Ensure that the store can conditionally update a key if it has a previous index. 
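The CompareAndSwap and CompareAndDelete tests above pin down the v2 store's optimistic-concurrency contract: a write or delete applies only while the caller's expected previous value (or previous ModifiedIndex) still matches, otherwise the store answers EcodeTestFailed ("Compare failed") and leaves the node untouched. A generic sketch of the same compare-and-swap idea over a plain map, just to make the contract concrete; this is not the etcd implementation:

package main

import "fmt"

type entry struct {
	value string
	index uint64 // analogous to a node's ModifiedIndex
}

type kv struct {
	rev  uint64
	data map[string]entry
}

// compareAndSwap applies newValue only if the stored value still equals prevValue.
func (s *kv) compareAndSwap(key, prevValue, newValue string) (uint64, bool) {
	cur, ok := s.data[key]
	if !ok || cur.value != prevValue {
		return 0, false // "Compare failed"
	}
	s.rev++
	s.data[key] = entry{value: newValue, index: s.rev}
	return s.rev, true
}

func main() {
	s := &kv{rev: 1, data: map[string]entry{"/foo": {value: "bar", index: 1}}}

	if idx, ok := s.compareAndSwap("/foo", "bar", "baz"); ok {
		fmt.Println("swapped at index", idx) // swapped at index 2
	}
	if _, ok := s.compareAndSwap("/foo", "wrong_value", "qux"); !ok {
		fmt.Println("compare failed, value stays", s.data["/foo"].value) // baz
	}
}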
@@ -580,20 +580,20 @@ func TestStoreCompareAndSwapPrevIndex(t *testing.T) { var eidx uint64 = 2 s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: Permanent}) e, err := s.CompareAndSwap("/foo", "", 1, "baz", TTLOptionSet{ExpireTime: Permanent}) - assert.Nil(t, err, "") - assert.Equal(t, e.EtcdIndex, eidx, "") - assert.Equal(t, e.Action, "compareAndSwap", "") - assert.Equal(t, *e.Node.Value, "baz", "") + testutil.AssertNil(t, err) + testutil.AssertEqual(t, e.EtcdIndex, eidx) + testutil.AssertEqual(t, e.Action, "compareAndSwap") + testutil.AssertEqual(t, *e.Node.Value, "baz") // check prevNode - assert.NotNil(t, e.PrevNode, "") - assert.Equal(t, e.PrevNode.Key, "/foo", "") - assert.Equal(t, *e.PrevNode.Value, "bar", "") - assert.Equal(t, e.PrevNode.ModifiedIndex, uint64(1), "") - assert.Equal(t, e.PrevNode.CreatedIndex, uint64(1), "") + testutil.AssertNotNil(t, e.PrevNode) + testutil.AssertEqual(t, e.PrevNode.Key, "/foo") + testutil.AssertEqual(t, *e.PrevNode.Value, "bar") + testutil.AssertEqual(t, e.PrevNode.ModifiedIndex, uint64(1)) + testutil.AssertEqual(t, e.PrevNode.CreatedIndex, uint64(1)) e, _ = s.Get("/foo", false, false) - assert.Equal(t, *e.Node.Value, "baz", "") - assert.Equal(t, e.EtcdIndex, eidx, "") + testutil.AssertEqual(t, *e.Node.Value, "baz") + testutil.AssertEqual(t, e.EtcdIndex, eidx) } // Ensure that the store cannot conditionally update a key if it has the wrong previous index. @@ -603,12 +603,12 @@ func TestStoreCompareAndSwapPrevIndexFailsIfNotMatch(t *testing.T) { s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: Permanent}) e, _err := s.CompareAndSwap("/foo", "", 100, "baz", TTLOptionSet{ExpireTime: Permanent}) err := _err.(*etcdErr.Error) - assert.Equal(t, err.ErrorCode, etcdErr.EcodeTestFailed, "") - assert.Equal(t, err.Message, "Compare failed", "") - assert.Nil(t, e, "") + testutil.AssertEqual(t, err.ErrorCode, etcdErr.EcodeTestFailed) + testutil.AssertEqual(t, err.Message, "Compare failed") + testutil.AssertNil(t, e) e, _ = s.Get("/foo", false, false) - assert.Equal(t, e.EtcdIndex, eidx, "") - assert.Equal(t, *e.Node.Value, "bar", "") + testutil.AssertEqual(t, e.EtcdIndex, eidx) + testutil.AssertEqual(t, *e.Node.Value, "bar") } // Ensure that the store can watch for key creation. @@ -617,15 +617,15 @@ func TestStoreWatchCreate(t *testing.T) { var eidx uint64 = 0 w, _ := s.Watch("/foo", false, false, 0) c := w.EventChan() - assert.Equal(t, w.StartIndex(), eidx, "") + testutil.AssertEqual(t, w.StartIndex(), eidx) s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: Permanent}) eidx = 1 e := nbselect(c) - assert.Equal(t, e.EtcdIndex, eidx, "") - assert.Equal(t, e.Action, "create", "") - assert.Equal(t, e.Node.Key, "/foo", "") + testutil.AssertEqual(t, e.EtcdIndex, eidx) + testutil.AssertEqual(t, e.Action, "create") + testutil.AssertEqual(t, e.Node.Key, "/foo") e = nbselect(c) - assert.Nil(t, e, "") + testutil.AssertNil(t, e) } // Ensure that the store can watch for recursive key creation. 
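The watch tests above lean on the package-local nbselect helper: a non-blocking receive that yields the pending event, or nil when nothing has been delivered, which is what makes checks like testutil.AssertNil(t, e) after the last expected event meaningful. A hedged sketch of that pattern with a stand-in Event type (not the store's):

package main

import "fmt"

type Event struct{ Action, Key string }

// nonBlockingRecv returns the next event if one is already buffered, else nil.
func nonBlockingRecv(c <-chan *Event) *Event {
	select {
	case e := <-c:
		return e
	default:
		return nil
	}
}

func main() {
	c := make(chan *Event, 1)
	fmt.Println(nonBlockingRecv(c)) // <nil>: nothing pending yet

	c <- &Event{Action: "create", Key: "/foo"}
	fmt.Println(nonBlockingRecv(c)) // &{create /foo}
	fmt.Println(nonBlockingRecv(c)) // <nil> again: the single event was consumed
}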
@@ -633,13 +633,13 @@ func TestStoreWatchRecursiveCreate(t *testing.T) { s := newStore() var eidx uint64 = 0 w, _ := s.Watch("/foo", true, false, 0) - assert.Equal(t, w.StartIndex(), eidx, "") + testutil.AssertEqual(t, w.StartIndex(), eidx) eidx = 1 s.Create("/foo/bar", false, "baz", false, TTLOptionSet{ExpireTime: Permanent}) e := nbselect(w.EventChan()) - assert.Equal(t, e.EtcdIndex, eidx, "") - assert.Equal(t, e.Action, "create", "") - assert.Equal(t, e.Node.Key, "/foo/bar", "") + testutil.AssertEqual(t, e.EtcdIndex, eidx) + testutil.AssertEqual(t, e.Action, "create") + testutil.AssertEqual(t, e.Node.Key, "/foo/bar") } // Ensure that the store can watch for key updates. @@ -648,13 +648,13 @@ func TestStoreWatchUpdate(t *testing.T) { var eidx uint64 = 1 s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: Permanent}) w, _ := s.Watch("/foo", false, false, 0) - assert.Equal(t, w.StartIndex(), eidx, "") + testutil.AssertEqual(t, w.StartIndex(), eidx) eidx = 2 s.Update("/foo", "baz", TTLOptionSet{ExpireTime: Permanent}) e := nbselect(w.EventChan()) - assert.Equal(t, e.EtcdIndex, eidx, "") - assert.Equal(t, e.Action, "update", "") - assert.Equal(t, e.Node.Key, "/foo", "") + testutil.AssertEqual(t, e.EtcdIndex, eidx) + testutil.AssertEqual(t, e.Action, "update") + testutil.AssertEqual(t, e.Node.Key, "/foo") } // Ensure that the store can watch for recursive key updates. @@ -663,13 +663,13 @@ func TestStoreWatchRecursiveUpdate(t *testing.T) { var eidx uint64 = 1 s.Create("/foo/bar", false, "baz", false, TTLOptionSet{ExpireTime: Permanent}) w, _ := s.Watch("/foo", true, false, 0) - assert.Equal(t, w.StartIndex(), eidx, "") + testutil.AssertEqual(t, w.StartIndex(), eidx) eidx = 2 s.Update("/foo/bar", "baz", TTLOptionSet{ExpireTime: Permanent}) e := nbselect(w.EventChan()) - assert.Equal(t, e.EtcdIndex, eidx, "") - assert.Equal(t, e.Action, "update", "") - assert.Equal(t, e.Node.Key, "/foo/bar", "") + testutil.AssertEqual(t, e.EtcdIndex, eidx) + testutil.AssertEqual(t, e.Action, "update") + testutil.AssertEqual(t, e.Node.Key, "/foo/bar") } // Ensure that the store can watch for key deletions. @@ -678,13 +678,13 @@ func TestStoreWatchDelete(t *testing.T) { var eidx uint64 = 1 s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: Permanent}) w, _ := s.Watch("/foo", false, false, 0) - assert.Equal(t, w.StartIndex(), eidx, "") + testutil.AssertEqual(t, w.StartIndex(), eidx) eidx = 2 s.Delete("/foo", false, false) e := nbselect(w.EventChan()) - assert.Equal(t, e.EtcdIndex, eidx, "") - assert.Equal(t, e.Action, "delete", "") - assert.Equal(t, e.Node.Key, "/foo", "") + testutil.AssertEqual(t, e.EtcdIndex, eidx) + testutil.AssertEqual(t, e.Action, "delete") + testutil.AssertEqual(t, e.Node.Key, "/foo") } // Ensure that the store can watch for recursive key deletions. @@ -693,13 +693,13 @@ func TestStoreWatchRecursiveDelete(t *testing.T) { var eidx uint64 = 1 s.Create("/foo/bar", false, "baz", false, TTLOptionSet{ExpireTime: Permanent}) w, _ := s.Watch("/foo", true, false, 0) - assert.Equal(t, w.StartIndex(), eidx, "") + testutil.AssertEqual(t, w.StartIndex(), eidx) eidx = 2 s.Delete("/foo/bar", false, false) e := nbselect(w.EventChan()) - assert.Equal(t, e.EtcdIndex, eidx, "") - assert.Equal(t, e.Action, "delete", "") - assert.Equal(t, e.Node.Key, "/foo/bar", "") + testutil.AssertEqual(t, e.EtcdIndex, eidx) + testutil.AssertEqual(t, e.Action, "delete") + testutil.AssertEqual(t, e.Node.Key, "/foo/bar") } // Ensure that the store can watch for CAS updates. 
@@ -708,13 +708,13 @@ func TestStoreWatchCompareAndSwap(t *testing.T) { var eidx uint64 = 1 s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: Permanent}) w, _ := s.Watch("/foo", false, false, 0) - assert.Equal(t, w.StartIndex(), eidx, "") + testutil.AssertEqual(t, w.StartIndex(), eidx) eidx = 2 s.CompareAndSwap("/foo", "bar", 0, "baz", TTLOptionSet{ExpireTime: Permanent}) e := nbselect(w.EventChan()) - assert.Equal(t, e.EtcdIndex, eidx, "") - assert.Equal(t, e.Action, "compareAndSwap", "") - assert.Equal(t, e.Node.Key, "/foo", "") + testutil.AssertEqual(t, e.EtcdIndex, eidx) + testutil.AssertEqual(t, e.Action, "compareAndSwap") + testutil.AssertEqual(t, e.Node.Key, "/foo") } // Ensure that the store can watch for recursive CAS updates. @@ -723,13 +723,13 @@ func TestStoreWatchRecursiveCompareAndSwap(t *testing.T) { var eidx uint64 = 1 s.Create("/foo/bar", false, "baz", false, TTLOptionSet{ExpireTime: Permanent}) w, _ := s.Watch("/foo", true, false, 0) - assert.Equal(t, w.StartIndex(), eidx, "") + testutil.AssertEqual(t, w.StartIndex(), eidx) eidx = 2 s.CompareAndSwap("/foo/bar", "baz", 0, "bat", TTLOptionSet{ExpireTime: Permanent}) e := nbselect(w.EventChan()) - assert.Equal(t, e.EtcdIndex, eidx, "") - assert.Equal(t, e.Action, "compareAndSwap", "") - assert.Equal(t, e.Node.Key, "/foo/bar", "") + testutil.AssertEqual(t, e.EtcdIndex, eidx) + testutil.AssertEqual(t, e.Action, "compareAndSwap") + testutil.AssertEqual(t, e.Node.Key, "/foo/bar") } // Ensure that the store can watch for key expiration. @@ -744,30 +744,30 @@ func TestStoreWatchExpire(t *testing.T) { s.Create("/foodir", true, "", false, TTLOptionSet{ExpireTime: fc.Now().Add(500 * time.Millisecond)}) w, _ := s.Watch("/", true, false, 0) - assert.Equal(t, w.StartIndex(), eidx, "") + testutil.AssertEqual(t, w.StartIndex(), eidx) c := w.EventChan() e := nbselect(c) - assert.Nil(t, e, "") + testutil.AssertNil(t, e) fc.Advance(600 * time.Millisecond) s.DeleteExpiredKeys(fc.Now()) eidx = 4 e = nbselect(c) - assert.Equal(t, e.EtcdIndex, eidx, "") - assert.Equal(t, e.Action, "expire", "") - assert.Equal(t, e.Node.Key, "/foo", "") + testutil.AssertEqual(t, e.EtcdIndex, eidx) + testutil.AssertEqual(t, e.Action, "expire") + testutil.AssertEqual(t, e.Node.Key, "/foo") w, _ = s.Watch("/", true, false, 5) eidx = 6 - assert.Equal(t, w.StartIndex(), eidx, "") + testutil.AssertEqual(t, w.StartIndex(), eidx) e = nbselect(w.EventChan()) - assert.Equal(t, e.EtcdIndex, eidx, "") - assert.Equal(t, e.Action, "expire", "") - assert.Equal(t, e.Node.Key, "/foofoo", "") + testutil.AssertEqual(t, e.EtcdIndex, eidx) + testutil.AssertEqual(t, e.Action, "expire") + testutil.AssertEqual(t, e.Node.Key, "/foofoo") w, _ = s.Watch("/", true, false, 6) e = nbselect(w.EventChan()) - assert.Equal(t, e.EtcdIndex, eidx, "") - assert.Equal(t, e.Action, "expire", "") - assert.Equal(t, e.Node.Key, "/foodir", "") - assert.Equal(t, e.Node.Dir, true, "") + testutil.AssertEqual(t, e.EtcdIndex, eidx) + testutil.AssertEqual(t, e.Action, "expire") + testutil.AssertEqual(t, e.Node.Key, "/foodir") + testutil.AssertEqual(t, e.Node.Dir, true) } // Ensure that the store can watch for key expiration when refreshing. 
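TestStoreWatchExpire and the other expiration tests stay deterministic because nothing ever sleeps: a github.com/jonboulle/clockwork fake clock is advanced by hand and DeleteExpiredKeys is driven from fc.Now(). The same technique works for any TTL-style logic; a small hedged sketch, where the deadline map is illustrative and not the store's data structure:

package main

import (
	"fmt"
	"time"

	"github.com/jonboulle/clockwork"
)

// expiredKeys reports which keys have passed their deadline as of now.
func expiredKeys(deadlines map[string]time.Time, now time.Time) []string {
	var out []string
	for k, d := range deadlines {
		if !now.Before(d) {
			out = append(out, k)
		}
	}
	return out
}

func main() {
	fc := clockwork.NewFakeClock() // starts at a fixed fake instant

	deadlines := map[string]time.Time{
		"/foo": fc.Now().Add(500 * time.Millisecond),
	}

	fmt.Println(expiredKeys(deadlines, fc.Now())) // []: nothing expired yet

	fc.Advance(600 * time.Millisecond) // no real waiting involved
	fmt.Println(expiredKeys(deadlines, fc.Now())) // [/foo]
}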
@@ -782,28 +782,28 @@ func TestStoreWatchExpireRefresh(t *testing.T) { // Make sure we set watch updates when Refresh is true for newly created keys w, _ := s.Watch("/", true, false, 0) - assert.Equal(t, w.StartIndex(), eidx, "") + testutil.AssertEqual(t, w.StartIndex(), eidx) c := w.EventChan() e := nbselect(c) - assert.Nil(t, e, "") + testutil.AssertNil(t, e) fc.Advance(600 * time.Millisecond) s.DeleteExpiredKeys(fc.Now()) eidx = 3 e = nbselect(c) - assert.Equal(t, e.EtcdIndex, eidx, "") - assert.Equal(t, e.Action, "expire", "") - assert.Equal(t, e.Node.Key, "/foo", "") + testutil.AssertEqual(t, e.EtcdIndex, eidx) + testutil.AssertEqual(t, e.Action, "expire") + testutil.AssertEqual(t, e.Node.Key, "/foo") s.Update("/foofoo", "", TTLOptionSet{ExpireTime: fc.Now().Add(500 * time.Millisecond), Refresh: true}) w, _ = s.Watch("/", true, false, 4) fc.Advance(700 * time.Millisecond) s.DeleteExpiredKeys(fc.Now()) eidx = 5 // We should skip 4 because a TTL update should occur with no watch notification if set `TTLOptionSet.Refresh` to true - assert.Equal(t, w.StartIndex(), eidx-1, "") + testutil.AssertEqual(t, w.StartIndex(), eidx-1) e = nbselect(w.EventChan()) - assert.Equal(t, e.EtcdIndex, eidx, "") - assert.Equal(t, e.Action, "expire", "") - assert.Equal(t, e.Node.Key, "/foofoo", "") + testutil.AssertEqual(t, e.EtcdIndex, eidx) + testutil.AssertEqual(t, e.Action, "expire") + testutil.AssertEqual(t, e.Node.Key, "/foofoo") } // Ensure that the store can watch for key expiration when refreshing with an empty value. @@ -823,12 +823,12 @@ func TestStoreWatchExpireEmptyRefresh(t *testing.T) { fc.Advance(700 * time.Millisecond) s.DeleteExpiredKeys(fc.Now()) eidx = 3 // We should skip 2 because a TTL update should occur with no watch notification if set `TTLOptionSet.Refresh` to true - assert.Equal(t, w.StartIndex(), eidx-1, "") + testutil.AssertEqual(t, w.StartIndex(), eidx-1) e := nbselect(w.EventChan()) - assert.Equal(t, e.EtcdIndex, eidx, "") - assert.Equal(t, e.Action, "expire", "") - assert.Equal(t, e.Node.Key, "/foo", "") - assert.Equal(t, *e.PrevNode.Value, "bar", "") + testutil.AssertEqual(t, e.EtcdIndex, eidx) + testutil.AssertEqual(t, e.Action, "expire") + testutil.AssertEqual(t, e.Node.Key, "/foo") + testutil.AssertEqual(t, *e.PrevNode.Value, "bar") } // Update TTL of a key (set TTLOptionSet.Refresh to false) and send notification @@ -849,12 +849,12 @@ func TestStoreWatchNoRefresh(t *testing.T) { fc.Advance(700 * time.Millisecond) s.DeleteExpiredKeys(fc.Now()) eidx = 2 - assert.Equal(t, w.StartIndex(), eidx, "") + testutil.AssertEqual(t, w.StartIndex(), eidx) e := nbselect(w.EventChan()) - assert.Equal(t, e.EtcdIndex, eidx, "") - assert.Equal(t, e.Action, "update", "") - assert.Equal(t, e.Node.Key, "/foo", "") - assert.Equal(t, *e.PrevNode.Value, "bar", "") + testutil.AssertEqual(t, e.EtcdIndex, eidx) + testutil.AssertEqual(t, e.Action, "update") + testutil.AssertEqual(t, e.Node.Key, "/foo") + testutil.AssertEqual(t, *e.PrevNode.Value, "bar") } // Ensure that the store can update the TTL on a value with refresh. 
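The refresh tests above pin down the contract that a TTL refresh (TTLOptionSet.Refresh set to true) extends the expiration but deliberately emits no watch notification, while a plain update does notify. A small sketch of that contract against the exported store package, assuming its API matches the in-package calls used by these tests:

package main

import (
	"fmt"
	"time"

	"github.com/coreos/etcd/store"
)

func main() {
	s := store.New()
	s.Create("/foo", false, "bar", false,
		store.TTLOptionSet{ExpireTime: time.Now().Add(500 * time.Millisecond)})

	w, _ := s.Watch("/foo", false, false, 0)

	// Refresh: true only extends the TTL; watchers are intentionally not notified.
	if _, err := s.Update("/foo", "",
		store.TTLOptionSet{ExpireTime: time.Now().Add(time.Second), Refresh: true}); err != nil {
		panic(err)
	}

	select {
	case e := <-w.EventChan():
		fmt.Println("unexpected event:", e.Action)
	default:
		fmt.Println("refresh produced no watch event")
	}

	// A normal update (Refresh unset) does notify the watcher with an "update" event.
	s.Update("/foo", "baz", store.TTLOptionSet{ExpireTime: store.Permanent})
	fmt.Println("after plain update:", (<-w.EventChan()).Action) // "update"
}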
@@ -866,16 +866,16 @@ func TestStoreRefresh(t *testing.T) { s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: fc.Now().Add(500 * time.Millisecond)}) s.Create("/bar", true, "bar", false, TTLOptionSet{ExpireTime: fc.Now().Add(500 * time.Millisecond)}) _, err := s.Update("/foo", "", TTLOptionSet{ExpireTime: fc.Now().Add(500 * time.Millisecond), Refresh: true}) - assert.Nil(t, err, "") + testutil.AssertNil(t, err) _, err = s.Set("/foo", false, "", TTLOptionSet{ExpireTime: fc.Now().Add(500 * time.Millisecond), Refresh: true}) - assert.Nil(t, err, "") + testutil.AssertNil(t, err) _, err = s.Update("/bar", "", TTLOptionSet{ExpireTime: fc.Now().Add(500 * time.Millisecond), Refresh: true}) - assert.Nil(t, err, "") + testutil.AssertNil(t, err) _, err = s.CompareAndSwap("/foo", "bar", 0, "", TTLOptionSet{ExpireTime: fc.Now().Add(500 * time.Millisecond), Refresh: true}) - assert.Nil(t, err, "") + testutil.AssertNil(t, err) } // Ensure that the store can watch in streaming mode. @@ -886,22 +886,22 @@ func TestStoreWatchStream(t *testing.T) { // first modification s.Create("/foo", false, "bar", false, TTLOptionSet{ExpireTime: Permanent}) e := nbselect(w.EventChan()) - assert.Equal(t, e.EtcdIndex, eidx, "") - assert.Equal(t, e.Action, "create", "") - assert.Equal(t, e.Node.Key, "/foo", "") - assert.Equal(t, *e.Node.Value, "bar", "") + testutil.AssertEqual(t, e.EtcdIndex, eidx) + testutil.AssertEqual(t, e.Action, "create") + testutil.AssertEqual(t, e.Node.Key, "/foo") + testutil.AssertEqual(t, *e.Node.Value, "bar") e = nbselect(w.EventChan()) - assert.Nil(t, e, "") + testutil.AssertNil(t, e) // second modification eidx = 2 s.Update("/foo", "baz", TTLOptionSet{ExpireTime: Permanent}) e = nbselect(w.EventChan()) - assert.Equal(t, e.EtcdIndex, eidx, "") - assert.Equal(t, e.Action, "update", "") - assert.Equal(t, e.Node.Key, "/foo", "") - assert.Equal(t, *e.Node.Value, "baz", "") + testutil.AssertEqual(t, e.EtcdIndex, eidx) + testutil.AssertEqual(t, e.Action, "update") + testutil.AssertEqual(t, e.Node.Key, "/foo") + testutil.AssertEqual(t, *e.Node.Value, "baz") e = nbselect(w.EventChan()) - assert.Nil(t, e, "") + testutil.AssertNil(t, e) } // Ensure that the store can recover from a previously saved state. @@ -913,22 +913,22 @@ func TestStoreRecover(t *testing.T) { s.Update("/foo/x", "barbar", TTLOptionSet{ExpireTime: Permanent}) s.Create("/foo/y", false, "baz", false, TTLOptionSet{ExpireTime: Permanent}) b, err := s.Save() - assert.Nil(t, err, "") + testutil.AssertNil(t, err) s2 := newStore() s2.Recovery(b) e, err := s.Get("/foo/x", false, false) - assert.Equal(t, e.Node.CreatedIndex, uint64(2), "") - assert.Equal(t, e.Node.ModifiedIndex, uint64(3), "") - assert.Equal(t, e.EtcdIndex, eidx, "") - assert.Nil(t, err, "") - assert.Equal(t, *e.Node.Value, "barbar", "") + testutil.AssertEqual(t, e.Node.CreatedIndex, uint64(2)) + testutil.AssertEqual(t, e.Node.ModifiedIndex, uint64(3)) + testutil.AssertEqual(t, e.EtcdIndex, eidx) + testutil.AssertNil(t, err) + testutil.AssertEqual(t, *e.Node.Value, "barbar") e, err = s.Get("/foo/y", false, false) - assert.Equal(t, e.EtcdIndex, eidx, "") - assert.Nil(t, err, "") - assert.Equal(t, *e.Node.Value, "baz", "") + testutil.AssertEqual(t, e.EtcdIndex, eidx) + testutil.AssertNil(t, err) + testutil.AssertEqual(t, *e.Node.Value, "baz") } // Ensure that the store can recover from a previously saved state that includes an expiring key. 
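TestStoreRecover exercises the store's snapshot round trip: Save serializes the entire tree, indexes and TTLs included, and Recovery rebuilds a fresh store from those bytes. A minimal round trip against the exported package (same API assumption as above) looks like:

package main

import (
	"fmt"

	"github.com/coreos/etcd/store"
)

func main() {
	s := store.New()
	s.Create("/foo", true, "", false, store.TTLOptionSet{ExpireTime: store.Permanent})
	s.Create("/foo/x", false, "bar", false, store.TTLOptionSet{ExpireTime: store.Permanent})

	// Save captures the whole tree, including created/modified indexes.
	b, err := s.Save()
	if err != nil {
		panic(err)
	}

	// Recovery loads that state into a brand-new store.
	s2 := store.New()
	if err := s2.Recovery(b); err != nil {
		panic(err)
	}

	e, err := s2.Get("/foo/x", false, false)
	if err != nil {
		panic(err)
	}
	fmt.Println("recovered value:", *e.Node.Value) // "bar"
}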
@@ -943,7 +943,7 @@ func TestStoreRecoverWithExpiration(t *testing.T) { s.Create("/foo/x", false, "bar", false, TTLOptionSet{ExpireTime: Permanent}) s.Create("/foo/y", false, "baz", false, TTLOptionSet{ExpireTime: fc.Now().Add(5 * time.Millisecond)}) b, err := s.Save() - assert.Nil(t, err, "") + testutil.AssertNil(t, err) time.Sleep(10 * time.Millisecond) @@ -956,13 +956,13 @@ func TestStoreRecoverWithExpiration(t *testing.T) { s.DeleteExpiredKeys(fc.Now()) e, err := s.Get("/foo/x", false, false) - assert.Nil(t, err, "") - assert.Equal(t, e.EtcdIndex, eidx, "") - assert.Equal(t, *e.Node.Value, "bar", "") + testutil.AssertNil(t, err) + testutil.AssertEqual(t, e.EtcdIndex, eidx) + testutil.AssertEqual(t, *e.Node.Value, "bar") e, err = s.Get("/foo/y", false, false) - assert.NotNil(t, err, "") - assert.Nil(t, e, "") + testutil.AssertNotNil(t, err) + testutil.AssertNil(t, e) } // Ensure that the store can watch for hidden keys as long as it's an exact path match. @@ -972,11 +972,11 @@ func TestStoreWatchCreateWithHiddenKey(t *testing.T) { w, _ := s.Watch("/_foo", false, false, 0) s.Create("/_foo", false, "bar", false, TTLOptionSet{ExpireTime: Permanent}) e := nbselect(w.EventChan()) - assert.Equal(t, e.EtcdIndex, eidx, "") - assert.Equal(t, e.Action, "create", "") - assert.Equal(t, e.Node.Key, "/_foo", "") + testutil.AssertEqual(t, e.EtcdIndex, eidx) + testutil.AssertEqual(t, e.Action, "create") + testutil.AssertEqual(t, e.Node.Key, "/_foo") e = nbselect(w.EventChan()) - assert.Nil(t, e, "") + testutil.AssertNil(t, e) } // Ensure that the store doesn't see hidden key creates without an exact path match in recursive mode. @@ -985,14 +985,14 @@ func TestStoreWatchRecursiveCreateWithHiddenKey(t *testing.T) { w, _ := s.Watch("/foo", true, false, 0) s.Create("/foo/_bar", false, "baz", false, TTLOptionSet{ExpireTime: Permanent}) e := nbselect(w.EventChan()) - assert.Nil(t, e, "") + testutil.AssertNil(t, e) w, _ = s.Watch("/foo", true, false, 0) s.Create("/foo/_baz", true, "", false, TTLOptionSet{ExpireTime: Permanent}) e = nbselect(w.EventChan()) - assert.Nil(t, e, "") + testutil.AssertNil(t, e) s.Create("/foo/_baz/quux", false, "quux", false, TTLOptionSet{ExpireTime: Permanent}) e = nbselect(w.EventChan()) - assert.Nil(t, e, "") + testutil.AssertNil(t, e) } // Ensure that the store doesn't see hidden key updates. @@ -1002,10 +1002,10 @@ func TestStoreWatchUpdateWithHiddenKey(t *testing.T) { w, _ := s.Watch("/_foo", false, false, 0) s.Update("/_foo", "baz", TTLOptionSet{ExpireTime: Permanent}) e := nbselect(w.EventChan()) - assert.Equal(t, e.Action, "update", "") - assert.Equal(t, e.Node.Key, "/_foo", "") + testutil.AssertEqual(t, e.Action, "update") + testutil.AssertEqual(t, e.Node.Key, "/_foo") e = nbselect(w.EventChan()) - assert.Nil(t, e, "") + testutil.AssertNil(t, e) } // Ensure that the store doesn't see hidden key updates without an exact path match in recursive mode. @@ -1015,7 +1015,7 @@ func TestStoreWatchRecursiveUpdateWithHiddenKey(t *testing.T) { w, _ := s.Watch("/foo", true, false, 0) s.Update("/foo/_bar", "baz", TTLOptionSet{ExpireTime: Permanent}) e := nbselect(w.EventChan()) - assert.Nil(t, e, "") + testutil.AssertNil(t, e) } // Ensure that the store can watch for key deletions. 
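The hidden-key tests encode the underscore convention: a key whose name starts with "_" is invisible to recursive watchers on its parent, yet a watcher registered on the exact hidden path still receives events. Roughly, under the same exported-API assumption:

package main

import (
	"fmt"

	"github.com/coreos/etcd/store"
)

func main() {
	s := store.New()

	// An exact-path watch on a hidden key does fire...
	exact, _ := s.Watch("/_foo", false, false, 0)
	s.Create("/_foo", false, "bar", false, store.TTLOptionSet{ExpireTime: store.Permanent})
	fmt.Println("exact watch saw:", (<-exact.EventChan()).Action) // "create"

	// ...while a recursive watch above a hidden child stays silent.
	rec, _ := s.Watch("/foo", true, false, 0)
	s.Create("/foo/_bar", false, "baz", false, store.TTLOptionSet{ExpireTime: store.Permanent})
	select {
	case e := <-rec.EventChan():
		fmt.Println("unexpected event for", e.Node.Key)
	default:
		fmt.Println("recursive watch ignored the hidden key")
	}
}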
@@ -1026,11 +1026,11 @@ func TestStoreWatchDeleteWithHiddenKey(t *testing.T) { w, _ := s.Watch("/_foo", false, false, 0) s.Delete("/_foo", false, false) e := nbselect(w.EventChan()) - assert.Equal(t, e.EtcdIndex, eidx, "") - assert.Equal(t, e.Action, "delete", "") - assert.Equal(t, e.Node.Key, "/_foo", "") + testutil.AssertEqual(t, e.EtcdIndex, eidx) + testutil.AssertEqual(t, e.Action, "delete") + testutil.AssertEqual(t, e.Node.Key, "/_foo") e = nbselect(w.EventChan()) - assert.Nil(t, e, "") + testutil.AssertNil(t, e) } // Ensure that the store doesn't see hidden key deletes without an exact path match in recursive mode. @@ -1040,7 +1040,7 @@ func TestStoreWatchRecursiveDeleteWithHiddenKey(t *testing.T) { w, _ := s.Watch("/foo", true, false, 0) s.Delete("/foo/_bar", false, false) e := nbselect(w.EventChan()) - assert.Nil(t, e, "") + testutil.AssertNil(t, e) } // Ensure that the store doesn't see expirations of hidden keys. @@ -1055,16 +1055,16 @@ func TestStoreWatchExpireWithHiddenKey(t *testing.T) { w, _ := s.Watch("/", true, false, 0) c := w.EventChan() e := nbselect(c) - assert.Nil(t, e, "") + testutil.AssertNil(t, e) fc.Advance(600 * time.Millisecond) s.DeleteExpiredKeys(fc.Now()) e = nbselect(c) - assert.Nil(t, e, "") + testutil.AssertNil(t, e) fc.Advance(600 * time.Millisecond) s.DeleteExpiredKeys(fc.Now()) e = nbselect(c) - assert.Equal(t, e.Action, "expire", "") - assert.Equal(t, e.Node.Key, "/foofoo", "") + testutil.AssertEqual(t, e.Action, "expire") + testutil.AssertEqual(t, e.Node.Key, "/foofoo") } // Ensure that the store does see hidden key creates if watching deeper than a hidden key in recursive mode. @@ -1075,10 +1075,10 @@ func TestStoreWatchRecursiveCreateDeeperThanHiddenKey(t *testing.T) { s.Create("/_foo/bar/baz", false, "baz", false, TTLOptionSet{ExpireTime: Permanent}) e := nbselect(w.EventChan()) - assert.NotNil(t, e, "") - assert.Equal(t, e.EtcdIndex, eidx, "") - assert.Equal(t, e.Action, "create", "") - assert.Equal(t, e.Node.Key, "/_foo/bar/baz", "") + testutil.AssertNotNil(t, e) + testutil.AssertEqual(t, e.EtcdIndex, eidx) + testutil.AssertEqual(t, e.Action, "create") + testutil.AssertEqual(t, e.Node.Key, "/_foo/bar/baz") } // Ensure that slow consumers are handled properly. 
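All of these watch tests read events through nbselect rather than a plain channel receive, so an assertion can also verify that nothing arrived. Presumably nbselect is a non-blocking receive along these lines (inferred from its usage, not copied from the test file):

package store

// nbselect drains at most one event: it returns the next *Event if one is
// already buffered on the watcher's channel, or nil immediately if nothing
// has been delivered, so assertions never block the test.
func nbselect(c <-chan *Event) *Event {
	select {
	case e := <-c:
		return e
	default:
		return nil
	}
}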
@@ -1095,10 +1095,10 @@ func TestStoreWatchSlowConsumer(t *testing.T) { for i := 1; i <= 100; i++ { s.Set("/foo", false, string(i), TTLOptionSet{ExpireTime: Permanent}) // ok } - assert.Equal(t, s.WatcherHub.count, int64(1), "") + testutil.AssertEqual(t, s.WatcherHub.count, int64(1)) s.Set("/foo", false, "101", TTLOptionSet{ExpireTime: Permanent}) // ok // remove watcher - assert.Equal(t, s.WatcherHub.count, int64(0), "") + testutil.AssertEqual(t, s.WatcherHub.count, int64(0)) s.Set("/foo", false, "102", TTLOptionSet{ExpireTime: Permanent}) // must not panic } diff --git a/github.com/coreos/etcd/test b/github.com/coreos/etcd/test index 3258fdd908..d933020a7f 100755 --- a/github.com/coreos/etcd/test +++ b/github.com/coreos/etcd/test @@ -16,13 +16,20 @@ set -e source ./build +# build before setting up test GOPATH +if [[ "${PASSES}" == *"functional"* ]]; then + ./tools/functional-tester/build +fi + # build tests with vendored dependencies etcd_setup_gopath if [ -z "$PASSES" ]; then - PASSES="fmt dep compile build unit" + PASSES="fmt bom dep compile build unit" fi +USERPKG=${PKG:-} + # Invoke ./cover for HTML output COVER=${COVER:-"-cover"} @@ -31,41 +38,40 @@ IGNORE_PKGS="(cmd/|etcdserverpb|rafttest|gopath.proto|v3lockpb|v3electionpb)" INTEGRATION_PKGS="(integration|e2e|contrib|functional-tester)" # all github.com/coreos/etcd/whatever pkgs that are not auto-generated / tools -PKGS=`find . -name \*.go | while read a; do dirname $a; done | sort | uniq | egrep -v "$IGNORE_PKGS" | egrep -v "(tools/|contrib/|e2e|pb)" | sed "s|\.|${REPO_PATH}|g"` +PKGS=`find . -name \*.go | while read a; do dirname $a; done | sort | uniq | egrep -v "$IGNORE_PKGS" | egrep -v "(tools/|contrib/|e2e|pb)" | sed "s|\.|${REPO_PATH}|g" | xargs echo` # pkg1,pkg2,pkg3 -PKGS_COMMA=`echo ${PKGS} | sed 's/ /,/g'` +PKGS_COMMA=${PKGS// /,} TEST_PKGS=`find . -name \*_test.go | while read a; do dirname $a; done | sort | uniq | egrep -v "$IGNORE_PKGS" | sed "s|\./||g"` -FORMATTABLE=`find . -name \*.go | while read a; do echo $(dirname $a)/"*.go"; done | sort | uniq | egrep -v "$IGNORE_PKGS" | sed "s|\./||g"` +FORMATTABLE=`find . -name \*.go | while read a; do echo "$(dirname $a)/*.go"; done | sort | uniq | egrep -v "$IGNORE_PKGS" | sed "s|\./||g"` TESTABLE_AND_FORMATTABLE=`echo "$TEST_PKGS" | egrep -v "$INTEGRATION_PKGS"` -# TODO: 'client' pkg fails with gosimple from generated files -# TODO: 'rafttest' is failing with unused -STATIC_ANALYSIS_PATHS=`find . -name \*.go | while read a; do dirname $a; done | sort | uniq | egrep -v "$IGNORE_PKGS" | grep -v 'client'` - -if [ -z "$GOARCH" ]; then - GOARCH=$(go env GOARCH); -fi - -# user has not provided PKG override -if [ -z "$PKG" ]; then +# check if user provided PKG override +if [ -z "${USERPKG}" ]; then TEST=$TESTABLE_AND_FORMATTABLE FMT=$FORMATTABLE - -# user has provided PKG override else # strip out leading dotslashes and trailing slashes from PKG=./foo/ - TEST=${PKG/#./} + TEST=${USERPKG/#./} TEST=${TEST/#\//} TEST=${TEST/%\//} - # only run gofmt on packages provided by user FMT="$TEST" fi -# split TEST into an array and prepend REPO_PATH to each local package -split=(${TEST// / }) -TEST=${split[@]/#/${REPO_PATH}/} +# prepend REPO_PATH to each local package +split=$TEST +TEST="" +for a in $split; do TEST="$TEST ${REPO_PATH}/${a}"; done + +# TODO: 'client' pkg fails with gosimple from generated files +# TODO: 'rafttest' is failing with unused +STATIC_ANALYSIS_PATHS=`find . 
-name \*.go | while read a; do dirname $a; done | sort | uniq | egrep -v "$IGNORE_PKGS" | grep -v 'client'` + +if [ -z "$GOARCH" ]; then + GOARCH=$(go env GOARCH); +fi + # determine whether target supports race detection if [ "$GOARCH" == "amd64" ]; then @@ -87,6 +93,43 @@ function integration_pass { go test -timeout 1m -v ${RACE} -cpu 1,2,4 -run=Example $@ ${TEST} } +function functional_pass { + for a in 1 2 3; do + mkdir -p ./agent-$a + ./bin/etcd-agent -etcd-path ./bin/etcd -etcd-log-dir "./agent-$a" -port ":${a}9027" -use-root=false & + pid="$!" + agent_pids="${agent_pids} $pid" + done + + for a in 1 2 3; do + echo "Waiting for 'etcd-agent' on ${a}9027..." + while ! nc -z localhost ${a}9027; do + sleep 1 + done + done + + echo "Starting 'etcd-tester'" + ./bin/etcd-tester \ + -agent-endpoints "127.0.0.1:19027,127.0.0.1:29027,127.0.0.1:39027" \ + -client-ports 12379,22379,32379 \ + -peer-ports 12380,22380,32380 \ + -limit 1 \ + -schedule-cases "0 1 2 3 4 5" \ + -exit-on-failure && echo "'etcd-tester' succeeded" + ETCD_TESTER_EXIT_CODE=$? + echo "ETCD_TESTER_EXIT_CODE:" ${ETCD_TESTER_EXIT_CODE} + + echo "Waiting for processes to exit" + kill -s TERM ${agent_pids} + for a in ${agent_pids}; do wait $a || true; done + rm -rf ./agent-* + + if [[ "${ETCD_TESTER_EXIT_CODE}" -ne "0" ]]; then + echo "FAIL with exit code" ${ETCD_TESTER_EXIT_CODE} + exit ${ETCD_TESTER_EXIT_CODE} + fi +} + function cov_pass { echo "Running code coverage..." # install gocovmerge before running code coverage from github.com/wadey/gocovmerge @@ -127,9 +170,21 @@ function cov_pass { # use 30m timeout because e2e coverage takes longer # due to many tests cause etcd process to wait # on leadership transfer timeout during gracefully shutdown + echo Testing e2e without proxy... go test -tags cov -timeout 30m -v ${REPO_PATH}"/e2e" || failed="$failed e2e" - - gocovmerge "$COVERDIR"/*.coverprofile >"$COVERDIR"/cover.out + echo Testing e2e with proxy... 
+ go test -tags "cov cluster_proxy" -timeout 30m -v ${REPO_PATH}"/e2e" || failed="$failed e2e-proxy" + + # incrementally merge to get coverage data even if some coverage files are corrupted + # optimistically assume etcdserver package's coverage file is OK since gocovmerge + # expects to start with a non-empty file + cp "$COVERDIR"/etcdserver.coverprofile "$COVERDIR"/cover.out + for f in "$COVERDIR"/*.coverprofile; do + gocovmerge $f "$COVERDIR"/cover.out >"$COVERDIR"/cover.tmp || failed="$failed $f" + if [ -s "$COVERDIR"/cover.tmp ]; then + mv "$COVERDIR"/cover.tmp "$COVERDIR"/cover.out + fi + done # strip out generated files (using GNU-style sed) sed --in-place '/generated.go/d' "$COVERDIR"/cover.out || true @@ -163,18 +218,23 @@ function integration_e2e_pass { } function grpcproxy_pass { - go test -timeout 15m -v ${RACE} -tags cluster_proxy -cpu 1,2,4 $@ ${REPO_PATH}/integration + go test -timeout 20m -v ${RACE} -tags cluster_proxy -cpu 1,2,4 $@ ${REPO_PATH}/integration go test -timeout 15m -v ${RACE} -tags cluster_proxy -cpu 1,2,4 $@ ${REPO_PATH}/clientv3/integration + go test -timeout 15m -v -tags cluster_proxy $@ ${REPO_PATH}/e2e } function release_pass { rm -f ./bin/etcd-last-release # to grab latest patch release; bump this up for every minor release - UPGRADE_VER=$(git tag -l --sort=-version:refname "v3.1.*" | head -1) + UPGRADE_VER=$(git tag -l --sort=-version:refname "v3.2.*" | head -1) if [ -n "$MANUAL_VER" ]; then # in case, we need to test against different version UPGRADE_VER=$MANUAL_VER fi + if [[ -z ${UPGRADE_VER} ]]; then + UPGRADE_VER="v3.2.0" + echo "fallback to" ${UPGRADE_VER} + fi local file="etcd-$UPGRADE_VER-linux-$GOARCH.tar.gz" echo "Downloading $file" @@ -185,9 +245,8 @@ function release_pass { set -e case $result in 0) ;; - 22) return 0 - ;; - *) exit $result + *) echo "FAIL with" ${result} + exit $result ;; esac @@ -213,26 +272,50 @@ function fmt_pass { exit 255 fi - echo "Checking 'go tool vet -shadow'..." - for path in $FMT; do - if [ "${path##*.}" != "go" ]; then - path="${path}/*.go" - fi - vetRes=$(go tool vet -shadow ${path}) - if [ -n "${vetRes}" ]; then - echo -e "govet -shadow checking ${path} failed:\n${vetRes}" - exit 255 + echo "Checking 'go tool vet -all -shadow'..." + fmtpkgs=$(echo $FMT | xargs dirname | sort | uniq | sed '/\./d') + vetRes=$(go tool vet -all -shadow ${fmtpkgs} 2>&1 | grep -v '/gw/' || true) + if [ -n "${vetRes}" ]; then + echo -e "govet -all -shadow checking failed:\n${vetRes}" + exit 255 + fi + + if which shellcheck >/dev/null; then + echo "Checking shellcheck..." + shellcheckResult=$(shellcheck -fgcc build test scripts/* 2>&1 || true) + if [ -n "${shellcheckResult}" ]; then + # mask the most common ones; fix later + SHELLCHECK_MASK="SC(2086|2006|2068|2196|2035|2162|2076)" + errs=$(echo "${shellcheckResult}" | egrep -v "${SHELLCHECK_MASK}" || true) + if [ -n "${errs}" ]; then + echo -e "shellcheck checking failed:\n${shellcheckResult}\n===\nFailed:\n${errs}" + exit 255 + fi + suppressed=$(echo "${shellcheckResult}" | cut -f4- -d':' | sort | uniq -c | sort -n) + echo -e "shellcheck suppressed warnings:\n${suppressed}" fi - done + fi echo "Checking documentation style..." # eschew you - yous=`find . -name \*.md | xargs egrep --color "[Yy]ou[r]?[ '.,;]" | grep -v /v2/ || true` + yous=`find . -name \*.md -exec egrep --color "[Yy]ou[r]?[ '.,;]" {} + | grep -v /v2/ || true` if [ ! 
-z "$yous" ]; then echo -e "found 'you' in documentation:\n${yous}" exit 255 fi + # TODO: check other markdown files when marker handles headers with '[]' + if which marker >/dev/null; then + echo "Checking marker to find broken links..." + markerResult=`marker --skip-http --root ./Documentation 2>&1 || true` + if [ -n "${markerResult}" ]; then + echo -e "marker checking failed:\n${markerResult}" + exit 255 + fi + else + echo "Skipping marker..." + fi + if which goword >/dev/null; then echo "Checking goword..." # get all go files to process @@ -256,7 +339,7 @@ function fmt_pass { # TODO: resolve these after go1.8 migration SIMPLE_CHECK_MASK="S(1024)" if echo "${gosimpleResult}" | egrep -v "$SIMPLE_CHECK_MASK"; then - echo -e "gosimple checking ${path} failed:\n${gosimpleResult}" + echo -e "gosimple checking failed:\n${gosimpleResult}" exit 255 else echo -e "gosimple warning:\n${gosimpleResult}" @@ -285,7 +368,7 @@ function fmt_pass { # See https://github.com/dominikh/go-tools/tree/master/cmd/staticcheck STATIC_CHECK_MASK="SA(1019|2002)" if echo "${staticcheckResult}" | egrep -v "$STATIC_CHECK_MASK"; then - echo -e "staticcheck checking ${path} failed:\n${staticcheckResult}" + echo -e "staticcheck checking failed:\n${staticcheckResult}" exit 255 else suppressed=`echo "${staticcheckResult}" | sed 's/ /\n/g' | grep "(SA" | sort | uniq -c` @@ -297,16 +380,20 @@ function fmt_pass { fi echo "Checking for license header..." - licRes=$(for file in $(find . -type f -iname '*.go' ! -path './cmd/*' ! -path './gopath.proto/*'); do - head -n3 "${file}" | grep -Eq "(Copyright|generated|GENERATED)" || echo -e " ${file}" - done;) + licRes="" + files=$(find . -type f -iname '*.go' ! -path './cmd/*' ! -path './gopath.proto/*') + for file in $files; do + if ! head -n3 "${file}" | grep -Eq "(Copyright|generated|GENERATED)" ; then + licRes="${licRes}"$(echo -e " ${file}") + fi + done if [ -n "${licRes}" ]; then echo -e "license header checking failed:\n${licRes}" exit 255 fi echo "Checking commit titles..." - git log --oneline `git merge-base HEAD master`...HEAD | while read l; do + git log --oneline "$(git merge-base HEAD master)"...HEAD | while read l; do commitMsg=`echo "$l" | cut -f2- -d' '` if [[ "$commitMsg" == Merge* ]]; then # ignore "Merge pull" commits @@ -329,12 +416,27 @@ function fmt_pass { done } +function bom_pass { + if ! which license-bill-of-materials >/dev/null; then + return + fi + echo "Checking bill of materials..." + license-bill-of-materials \ + --override-file bill-of-materials.override.json \ + github.com/coreos/etcd github.com/coreos/etcd/etcdctl >bom-now.json || true + if ! diff bill-of-materials.json bom-now.json; then + echo "vendored licenses do not match given bill of materials" + exit 255 + fi + rm bom-now.json +} + function dep_pass { echo "Checking package dependencies..." # don't pull in etcdserver package pushd clientv3 >/dev/null - badpkg="(etcdserver|mvcc)" - deps=`go list -f '{{ .Deps }}' | sed 's/ /\n/g' | egrep "${badpkg}" | egrep -v "${badpkg}/" || echo ""` + badpkg="(etcdserver$|mvcc$|backend$|grpc-gateway)" + deps=`go list -f '{{ .Deps }}' | sed 's/ /\n/g' | egrep "${badpkg}" || echo ""` popd >/dev/null if [ ! 
-z "$deps" ]; then echo -e "clientv3 has masked dependencies:\n${deps}" @@ -345,7 +447,7 @@ function dep_pass { function build_cov_pass { out="bin" if [ -n "${BINDIR}" ]; then out="${BINDIR}"; fi - go test -c -covermode=set -coverpkg=$PKGS_COMMA -o ${out}/etcd_test + go test -tags cov -c -covermode=set -coverpkg=$PKGS_COMMA -o ${out}/etcd_test go test -tags cov -c -covermode=set -coverpkg=$PKGS_COMMA -o ${out}/etcdctl_test ${REPO_PATH}/etcdctl } @@ -360,7 +462,9 @@ function build_pass { } for pass in $PASSES; do + echo "Starting '$pass' pass at $(date)" ${pass}_pass $@ + echo "Finished '$pass' pass at $(date)" done echo "Success" diff --git a/github.com/coreos/etcd/tools/benchmark/cmd/mvcc-put.go b/github.com/coreos/etcd/tools/benchmark/cmd/mvcc-put.go index bebb13d8c2..2bf482d1b5 100644 --- a/github.com/coreos/etcd/tools/benchmark/cmd/mvcc-put.go +++ b/github.com/coreos/etcd/tools/benchmark/cmd/mvcc-put.go @@ -36,19 +36,21 @@ var mvccPutCmd = &cobra.Command{ } var ( - totalNrKeys int - storageKeySize int - valueSize int - txn bool + mvccTotalRequests int + storageKeySize int + valueSize int + txn bool + nrTxnOps int ) func init() { mvccCmd.AddCommand(mvccPutCmd) - mvccPutCmd.Flags().IntVar(&totalNrKeys, "total", 100, "a total number of keys to put") + mvccPutCmd.Flags().IntVar(&mvccTotalRequests, "total", 100, "a total number of keys to put") mvccPutCmd.Flags().IntVar(&storageKeySize, "key-size", 64, "a size of key (Byte)") mvccPutCmd.Flags().IntVar(&valueSize, "value-size", 64, "a size of value (Byte)") mvccPutCmd.Flags().BoolVar(&txn, "txn", false, "put a key in transaction or not") + mvccPutCmd.Flags().IntVar(&nrTxnOps, "txn-ops", 1, "a number of keys to put per transaction") // TODO: after the PR https://github.com/spf13/cobra/pull/220 is merged, the below pprof related flags should be moved to RootCmd mvccPutCmd.Flags().StringVar(&cpuProfPath, "cpuprofile", "", "the path of file for storing cpu profile result") @@ -99,23 +101,33 @@ func mvccPutFunc(cmd *cobra.Command, args []string) { }() } - keys := createBytesSlice(storageKeySize, totalNrKeys) - vals := createBytesSlice(valueSize, totalNrKeys) + keys := createBytesSlice(storageKeySize, mvccTotalRequests*nrTxnOps) + vals := createBytesSlice(valueSize, mvccTotalRequests*nrTxnOps) - r := newReport() + weight := float64(nrTxnOps) + r := newWeightedReport() rrc := r.Results() rc := r.Run() - for i := 0; i < totalNrKeys; i++ { - st := time.Now() - if txn { + + if txn { + for i := 0; i < mvccTotalRequests; i++ { + st := time.Now() + tw := s.Write() - tw.Put(keys[i], vals[i], lease.NoLease) + for j := i; j < i+nrTxnOps; j++ { + tw.Put(keys[j], vals[j], lease.NoLease) + } tw.End() - } else { + + rrc <- report.Result{Start: st, End: time.Now(), Weight: weight} + } + } else { + for i := 0; i < mvccTotalRequests; i++ { + st := time.Now() s.Put(keys[i], vals[i], lease.NoLease) + rrc <- report.Result{Start: st, End: time.Now()} } - rrc <- report.Result{Start: st, End: time.Now()} } close(r.Results()) diff --git a/github.com/coreos/etcd/tools/benchmark/cmd/stm.go b/github.com/coreos/etcd/tools/benchmark/cmd/stm.go index 6cad118f02..5d0f6cc69e 100644 --- a/github.com/coreos/etcd/tools/benchmark/cmd/stm.go +++ b/github.com/coreos/etcd/tools/benchmark/cmd/stm.go @@ -17,16 +17,19 @@ package cmd import ( "encoding/binary" "fmt" + "math" "math/rand" "os" "time" v3 "github.com/coreos/etcd/clientv3" v3sync "github.com/coreos/etcd/clientv3/concurrency" + "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb" "github.com/coreos/etcd/pkg/report" 
"github.com/spf13/cobra" "golang.org/x/net/context" + "golang.org/x/time/rate" "gopkg.in/cheggaaa/pb.v1" ) @@ -49,7 +52,8 @@ var ( stmKeyCount int stmValSize int stmWritePercent int - stmMutex bool + stmLocker string + stmRate int ) func init() { @@ -60,8 +64,9 @@ func init() { stmCmd.Flags().IntVar(&stmTotal, "total", 10000, "Total number of completed STM transactions") stmCmd.Flags().IntVar(&stmKeysPerTxn, "keys-per-txn", 1, "Number of keys to access per transaction") stmCmd.Flags().IntVar(&stmWritePercent, "txn-wr-percent", 50, "Percentage of keys to overwrite per transaction") - stmCmd.Flags().BoolVar(&stmMutex, "use-mutex", false, "Wrap STM transaction in a distributed mutex") + stmCmd.Flags().StringVar(&stmLocker, "stm-locker", "stm", "Wrap STM transaction with a custom locking mechanism (stm, lock-client, lock-rpc)") stmCmd.Flags().IntVar(&stmValSize, "val-size", 8, "Value size of each STM put request") + stmCmd.Flags().IntVar(&stmRate, "rate", 0, "Maximum STM transactions per second (0 is no limit)") } func stmFunc(cmd *cobra.Command, args []string) { @@ -94,6 +99,11 @@ func stmFunc(cmd *cobra.Command, args []string) { os.Exit(1) } + if stmRate == 0 { + stmRate = math.MaxInt32 + } + limit := rate.NewLimiter(rate.Limit(stmRate), 1) + requests := make(chan stmApply, totalClients) clients := mustCreateClients(totalClients, totalConns) @@ -118,6 +128,7 @@ func stmFunc(cmd *cobra.Command, args []string) { } applyf := func(s v3sync.STM) error { + limit.Wait(context.Background()) wrs := int(float32(len(kset)*stmWritePercent) / 100.0) for k := range kset { s.Get(k) @@ -144,23 +155,52 @@ func stmFunc(cmd *cobra.Command, args []string) { func doSTM(client *v3.Client, requests <-chan stmApply, results chan<- report.Result) { defer wg.Done() - var m *v3sync.Mutex - if stmMutex { + lock, unlock := func() error { return nil }, func() error { return nil } + switch stmLocker { + case "lock-client": + s, err := v3sync.NewSession(client) + if err != nil { + panic(err) + } + defer s.Close() + m := v3sync.NewMutex(s, "stmlock") + lock = func() error { return m.Lock(context.TODO()) } + unlock = func() error { return m.Unlock(context.TODO()) } + case "lock-rpc": + var lockKey []byte s, err := v3sync.NewSession(client) if err != nil { panic(err) } - m = v3sync.NewMutex(s, "stmlock") + defer s.Close() + lc := v3lockpb.NewLockClient(client.ActiveConnection()) + lock = func() error { + req := &v3lockpb.LockRequest{Name: []byte("stmlock"), Lease: int64(s.Lease())} + resp, err := lc.Lock(context.TODO(), req) + if resp != nil { + lockKey = resp.Key + } + return err + } + unlock = func() error { + req := &v3lockpb.UnlockRequest{Key: lockKey} + _, err := lc.Unlock(context.TODO(), req) + return err + } + case "stm": + default: + fmt.Fprintf(os.Stderr, "unexpected stm locker %q\n", stmLocker) + os.Exit(1) } for applyf := range requests { st := time.Now() - if m != nil { - m.Lock(context.TODO()) + if lerr := lock(); lerr != nil { + panic(lerr) } _, err := v3sync.NewSTM(client, applyf, v3sync.WithIsolation(stmIso)) - if m != nil { - m.Unlock(context.TODO()) + if lerr := unlock(); lerr != nil { + panic(lerr) } results <- report.Result{Err: err, Start: st, End: time.Now()} bar.Increment() diff --git a/github.com/coreos/etcd/tools/benchmark/cmd/util.go b/github.com/coreos/etcd/tools/benchmark/cmd/util.go index 7775acce42..a8a8b4708f 100644 --- a/github.com/coreos/etcd/tools/benchmark/cmd/util.go +++ b/github.com/coreos/etcd/tools/benchmark/cmd/util.go @@ -44,9 +44,8 @@ func mustFindLeaderEndpoints(c *clientv3.Client) { 
leaderId := uint64(0) for _, ep := range c.Endpoints() { - resp, serr := c.Status(context.TODO(), ep) - if serr == nil { - leaderId = resp.Leader + if sresp, serr := c.Status(context.TODO(), ep); serr == nil { + leaderId = sresp.Leader break } } @@ -142,3 +141,14 @@ func newReport() report.Report { } return report.NewReport(p) } + +func newWeightedReport() report.Report { + p := "%4.4f" + if precise { + p = "%g" + } + if sample { + return report.NewReportSample(p) + } + return report.NewWeightedReport(report.NewReport(p), p) +} diff --git a/github.com/coreos/etcd/tools/benchmark/cmd/watch.go b/github.com/coreos/etcd/tools/benchmark/cmd/watch.go index b73e4f20ee..5b2f57fc97 100644 --- a/github.com/coreos/etcd/tools/benchmark/cmd/watch.go +++ b/github.com/coreos/etcd/tools/benchmark/cmd/watch.go @@ -15,6 +15,7 @@ package cmd import ( + "context" "encoding/binary" "fmt" "math/rand" @@ -22,11 +23,11 @@ import ( "sync/atomic" "time" - v3 "github.com/coreos/etcd/clientv3" + "github.com/coreos/etcd/clientv3" "github.com/coreos/etcd/pkg/report" "github.com/spf13/cobra" - "golang.org/x/net/context" + "golang.org/x/time/rate" "gopkg.in/cheggaaa/pb.v1" ) @@ -50,9 +51,9 @@ Each key is watched by (--total/--watched-key-total) watchers. } var ( - watchTotalStreams int - watchTotal int - watchedKeyTotal int + watchStreams int + watchWatchesPerStream int + watchedKeyTotal int watchPutRate int watchPutTotal int @@ -60,23 +61,27 @@ var ( watchKeySize int watchKeySpaceSize int watchSeqKeys bool +) - eventsTotal int +type watchedKeys struct { + watched []string + numWatchers map[string]int - nrWatchCompleted int32 - nrRecvCompleted int32 - watchCompletedNotifier chan struct{} - recvCompletedNotifier chan struct{} -) + watches []clientv3.WatchChan + + // ctx to control all watches + ctx context.Context + cancel context.CancelFunc +} func init() { RootCmd.AddCommand(watchCmd) - watchCmd.Flags().IntVar(&watchTotalStreams, "watchers", 10000, "Total number of watchers") - watchCmd.Flags().IntVar(&watchTotal, "total", 100000, "Total number of watch requests") - watchCmd.Flags().IntVar(&watchedKeyTotal, "watched-key-total", 10000, "Total number of keys to be watched") + watchCmd.Flags().IntVar(&watchStreams, "streams", 10, "Total watch streams") + watchCmd.Flags().IntVar(&watchWatchesPerStream, "watch-per-stream", 100, "Total watchers per stream") + watchCmd.Flags().IntVar(&watchedKeyTotal, "watched-key-total", 1, "Total number of keys to be watched") - watchCmd.Flags().IntVar(&watchPutRate, "put-rate", 100, "Number of keys to put per second") - watchCmd.Flags().IntVar(&watchPutTotal, "put-total", 10000, "Number of put requests") + watchCmd.Flags().IntVar(&watchPutRate, "put-rate", 0, "Number of keys to put per second") + watchCmd.Flags().IntVar(&watchPutTotal, "put-total", 1000, "Number of put requests") watchCmd.Flags().IntVar(&watchKeySize, "key-size", 32, "Key size of watch request") watchCmd.Flags().IntVar(&watchKeySpaceSize, "key-space-size", 1, "Maximum possible keys") @@ -88,124 +93,155 @@ func watchFunc(cmd *cobra.Command, args []string) { fmt.Fprintf(os.Stderr, "expected positive --key-space-size, got (%v)", watchKeySpaceSize) os.Exit(1) } - - watched := make([]string, watchedKeyTotal) - numWatchers := make(map[string]int) - for i := range watched { - k := make([]byte, watchKeySize) - if watchSeqKeys { - binary.PutVarint(k, int64(i%watchKeySpaceSize)) - } else { - binary.PutVarint(k, int64(rand.Intn(watchKeySpaceSize))) - } - watched[i] = string(k) + grpcConns := int(totalClients) + if totalClients > 
totalConns { + grpcConns = int(totalConns) + } + wantedConns := 1 + (watchStreams / 100) + if grpcConns < wantedConns { + fmt.Fprintf(os.Stderr, "warning: grpc limits 100 streams per client connection, have %d but need %d\n", grpcConns, wantedConns) } - - requests := make(chan string, totalClients) - clients := mustCreateClients(totalClients, totalConns) + wk := newWatchedKeys() + benchMakeWatches(clients, wk) + benchPutWatches(clients, wk) +} - streams := make([]v3.Watcher, watchTotalStreams) +func benchMakeWatches(clients []*clientv3.Client, wk *watchedKeys) { + streams := make([]clientv3.Watcher, watchStreams) for i := range streams { - streams[i] = v3.NewWatcher(clients[i%len(clients)]) + streams[i] = clientv3.NewWatcher(clients[i%len(clients)]) } - // watching phase - bar = pb.New(watchTotal) + keyc := make(chan string, watchStreams) + bar = pb.New(watchStreams * watchWatchesPerStream) bar.Format("Bom !") bar.Start() - atomic.StoreInt32(&nrWatchCompleted, int32(0)) - watchCompletedNotifier = make(chan struct{}) - - r := report.NewReportRate("%4.4f") - for i := range streams { - go doWatch(streams[i], requests, r.Results()) + r := newReport() + rch := r.Results() + + wg.Add(len(streams) + 1) + wc := make(chan []clientv3.WatchChan, len(streams)) + for _, s := range streams { + go func(s clientv3.Watcher) { + defer wg.Done() + var ws []clientv3.WatchChan + for i := 0; i < watchWatchesPerStream; i++ { + k := <-keyc + st := time.Now() + wch := s.Watch(wk.ctx, k) + rch <- report.Result{Start: st, End: time.Now()} + ws = append(ws, wch) + bar.Increment() + } + wc <- ws + }(s) } - go func() { - for i := 0; i < watchTotal; i++ { - key := watched[i%len(watched)] - requests <- key - numWatchers[key]++ + defer func() { + close(keyc) + wg.Done() + }() + for i := 0; i < watchStreams*watchWatchesPerStream; i++ { + key := wk.watched[i%len(wk.watched)] + keyc <- key + wk.numWatchers[key]++ } - close(requests) }() rc := r.Run() - <-watchCompletedNotifier + wg.Wait() bar.Finish() close(r.Results()) fmt.Printf("Watch creation summary:\n%s", <-rc) - // put phase - eventsTotal = 0 + for i := 0; i < len(streams); i++ { + wk.watches = append(wk.watches, (<-wc)...) 
+ } +} + +func newWatchedKeys() *watchedKeys { + watched := make([]string, watchedKeyTotal) + for i := range watched { + k := make([]byte, watchKeySize) + if watchSeqKeys { + binary.PutVarint(k, int64(i%watchKeySpaceSize)) + } else { + binary.PutVarint(k, int64(rand.Intn(watchKeySpaceSize))) + } + watched[i] = string(k) + } + ctx, cancel := context.WithCancel(context.TODO()) + return &watchedKeys{ + watched: watched, + numWatchers: make(map[string]int), + ctx: ctx, + cancel: cancel, + } +} + +func benchPutWatches(clients []*clientv3.Client, wk *watchedKeys) { + eventsTotal := 0 for i := 0; i < watchPutTotal; i++ { - eventsTotal += numWatchers[watched[i%len(watched)]] + eventsTotal += wk.numWatchers[wk.watched[i%len(wk.watched)]] } bar = pb.New(eventsTotal) bar.Format("Bom !") bar.Start() - atomic.StoreInt32(&nrRecvCompleted, 0) - recvCompletedNotifier = make(chan struct{}) - putreqc := make(chan v3.Op) + r := newReport() - r = report.NewReportRate("%4.4f") - for i := 0; i < watchPutTotal; i++ { - go func(c *v3.Client) { - for op := range putreqc { - if _, err := c.Do(context.TODO(), op); err != nil { - fmt.Fprintf(os.Stderr, "failed to Put for watch benchmark: %v\n", err) - os.Exit(1) - } - } - }(clients[i%len(clients)]) + wg.Add(len(wk.watches)) + nrRxed := int32(eventsTotal) + for _, w := range wk.watches { + go func(wc clientv3.WatchChan) { + defer wg.Done() + recvWatchChan(wc, r.Results(), &nrRxed) + wk.cancel() + }(w) } + putreqc := make(chan clientv3.Op, len(clients)) go func() { + defer close(putreqc) for i := 0; i < watchPutTotal; i++ { - putreqc <- v3.OpPut(watched[i%(len(watched))], "data") - // TODO: use a real rate-limiter instead of sleep. - time.Sleep(time.Second / time.Duration(watchPutRate)) + putreqc <- clientv3.OpPut(wk.watched[i%(len(wk.watched))], "data") } - close(putreqc) }() - rc = r.Run() - <-recvCompletedNotifier + limit := rate.NewLimiter(rate.Limit(watchPutRate), 1) + for _, cc := range clients { + go func(c *clientv3.Client) { + for op := range putreqc { + if err := limit.Wait(context.TODO()); err != nil { + panic(err) + } + if _, err := c.Do(context.TODO(), op); err != nil { + panic(err) + } + } + }(cc) + } + + rc := r.Run() + wg.Wait() bar.Finish() close(r.Results()) fmt.Printf("Watch events received summary:\n%s", <-rc) -} -func doWatch(stream v3.Watcher, requests <-chan string, results chan<- report.Result) { - for r := range requests { - st := time.Now() - wch := stream.Watch(context.TODO(), r) - results <- report.Result{Start: st, End: time.Now()} - bar.Increment() - go recvWatchChan(wch, results) - } - atomic.AddInt32(&nrWatchCompleted, 1) - if atomic.LoadInt32(&nrWatchCompleted) == int32(watchTotalStreams) { - watchCompletedNotifier <- struct{}{} - } } -func recvWatchChan(wch v3.WatchChan, results chan<- report.Result) { +func recvWatchChan(wch clientv3.WatchChan, results chan<- report.Result, nrRxed *int32) { for r := range wch { st := time.Now() for range r.Events { results <- report.Result{Start: st, End: time.Now()} bar.Increment() - atomic.AddInt32(&nrRecvCompleted, 1) - } - - if atomic.LoadInt32(&nrRecvCompleted) == int32(eventsTotal) { - recvCompletedNotifier <- struct{}{} - break + if atomic.AddInt32(nrRxed, -1) <= 0 { + return + } } } } diff --git a/github.com/coreos/etcd/tools/benchmark/cmd/watch_latency.go b/github.com/coreos/etcd/tools/benchmark/cmd/watch_latency.go index 0f1f5db52a..3a070d2600 100644 --- a/github.com/coreos/etcd/tools/benchmark/cmd/watch_latency.go +++ b/github.com/coreos/etcd/tools/benchmark/cmd/watch_latency.go @@ -17,9 
+17,10 @@ package cmd import ( "fmt" "os" + "sync" "time" - v3 "github.com/coreos/etcd/clientv3" + "github.com/coreos/etcd/clientv3" "github.com/coreos/etcd/pkg/report" "github.com/spf13/cobra" @@ -47,19 +48,23 @@ var ( func init() { RootCmd.AddCommand(watchLatencyCmd) - watchLatencyCmd.Flags().IntVar(&watchLTotal, "total", 10000, "Total number of watch responses.") + watchLatencyCmd.Flags().IntVar(&watchLTotal, "total", 10000, "Total number of put requests") watchLatencyCmd.Flags().IntVar(&watchLPutRate, "put-rate", 100, "Number of keys to put per second") - watchLatencyCmd.Flags().IntVar(&watchLKeySize, "key-size", 32, "Key size of watch request") - watchLatencyCmd.Flags().IntVar(&watchLValueSize, "val-size", 32, "Val size of watch request") + watchLatencyCmd.Flags().IntVar(&watchLKeySize, "key-size", 32, "Key size of watch response") + watchLatencyCmd.Flags().IntVar(&watchLValueSize, "val-size", 32, "Value size of watch response") } func watchLatencyFunc(cmd *cobra.Command, args []string) { key := string(mustRandBytes(watchLKeySize)) value := string(mustRandBytes(watchLValueSize)) - client := mustCreateConn() - stream := v3.NewWatcher(client) - wch := stream.Watch(context.TODO(), key) + clients := mustCreateClients(totalClients, totalConns) + putClient := mustCreateConn() + + wchs := make([]clientv3.WatchChan, len(clients)) + for i := range wchs { + wchs[i] = clients[i].Watch(context.TODO(), key) + } bar = pb.New(watchLTotal) bar.Format("Bom !") @@ -74,15 +79,29 @@ func watchLatencyFunc(cmd *cobra.Command, args []string) { if err := limiter.Wait(context.TODO()); err != nil { break } - _, err := client.Put(context.TODO(), string(key), value) - if err != nil { + var st time.Time + var wg sync.WaitGroup + wg.Add(len(clients)) + barrierc := make(chan struct{}) + for _, wch := range wchs { + ch := wch + go func() { + <-barrierc + <-ch + r.Results() <- report.Result{Start: st, End: time.Now()} + wg.Done() + }() + } + + if _, err := putClient.Put(context.TODO(), key, value); err != nil { fmt.Fprintf(os.Stderr, "Failed to Put for watch latency benchmark: %v\n", err) os.Exit(1) } - st := time.Now() - <-wch - r.Results() <- report.Result{Err: err, Start: st, End: time.Now()} + + st = time.Now() + close(barrierc) + wg.Wait() bar.Increment() } diff --git a/github.com/coreos/etcd/tools/etcd-dump-db/backend.go b/github.com/coreos/etcd/tools/etcd-dump-db/backend.go index 5a509586f3..618d811493 100644 --- a/github.com/coreos/etcd/tools/etcd-dump-db/backend.go +++ b/github.com/coreos/etcd/tools/etcd-dump-db/backend.go @@ -18,7 +18,7 @@ import ( "fmt" "path/filepath" - "github.com/boltdb/bolt" + bolt "github.com/coreos/bbolt" "github.com/coreos/etcd/mvcc" "github.com/coreos/etcd/mvcc/backend" ) diff --git a/github.com/coreos/etcd/tools/functional-tester/build b/github.com/coreos/etcd/tools/functional-tester/build index 0786f73466..ef1168202e 100755 --- a/github.com/coreos/etcd/tools/functional-tester/build +++ b/github.com/coreos/etcd/tools/functional-tester/build @@ -7,4 +7,5 @@ fi CGO_ENABLED=0 go build -a -installsuffix cgo -ldflags "-s" -o bin/etcd-agent ./cmd/tools/functional-tester/etcd-agent CGO_ENABLED=0 go build -a -installsuffix cgo -ldflags "-s" -o bin/etcd-tester ./cmd/tools/functional-tester/etcd-tester +CGO_ENABLED=0 go build -a -installsuffix cgo -ldflags "-s" -o bin/etcd-runner ./cmd/tools/functional-tester/etcd-runner diff --git a/github.com/coreos/etcd/tools/functional-tester/etcd-agent/agent.go b/github.com/coreos/etcd/tools/functional-tester/etcd-agent/agent.go index 
faa067f450..b7be290967 100644 --- a/github.com/coreos/etcd/tools/functional-tester/etcd-agent/agent.go +++ b/github.com/coreos/etcd/tools/functional-tester/etcd-agent/agent.go @@ -15,7 +15,6 @@ package main import ( - "fmt" "os" "os/exec" "path/filepath" @@ -75,6 +74,7 @@ func newAgent(cfg AgentConfig) (*Agent, error) { // start starts a new etcd process with the given args. func (a *Agent) start(args ...string) error { + args = append(args, "--data-dir", a.dataDir()) a.cmd = exec.Command(a.cmd.Path, args...) a.cmd.Env = []string{"GOFAIL_HTTP=" + a.cfg.FailpointAddr} a.cmd.Stdout = a.logfile @@ -206,16 +206,7 @@ func (a *Agent) status() client.Status { } func (a *Agent) dataDir() string { - datadir := filepath.Join(a.cmd.Path, "*.etcd") - args := a.cmd.Args - // only parse the simple case like "--data-dir /var/lib/etcd" - for i, arg := range args { - if arg == "--data-dir" { - datadir = args[i+1] - break - } - } - return datadir + return filepath.Join(a.cfg.LogDir, "etcd.data") } func existDir(fpath string) bool { @@ -231,14 +222,14 @@ func existDir(fpath string) bool { } func archiveLogAndDataDir(logDir string, datadir string) error { - dir := filepath.Join("failure_archive", fmt.Sprint(time.Now().Format(time.RFC3339))) + dir := filepath.Join(logDir, "failure_archive", time.Now().Format(time.RFC3339)) if existDir(dir) { - dir = filepath.Join("failure_archive", fmt.Sprint(time.Now().Add(time.Second).Format(time.RFC3339))) + dir = filepath.Join(logDir, "failure_archive", time.Now().Add(time.Second).Format(time.RFC3339)) } if err := fileutil.TouchDirAll(dir); err != nil { return err } - if err := os.Rename(logDir, filepath.Join(dir, filepath.Base(logDir))); err != nil { + if err := os.Rename(filepath.Join(logDir, "etcd.log"), filepath.Join(dir, "etcd.log")); err != nil { if !os.IsNotExist(err) { return err } diff --git a/github.com/coreos/etcd/tools/functional-tester/etcd-agent/main.go b/github.com/coreos/etcd/tools/functional-tester/etcd-agent/main.go index 004b959b33..901750d8d6 100644 --- a/github.com/coreos/etcd/tools/functional-tester/etcd-agent/main.go +++ b/github.com/coreos/etcd/tools/functional-tester/etcd-agent/main.go @@ -27,7 +27,7 @@ var plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcd-agent") func main() { etcdPath := flag.String("etcd-path", filepath.Join(os.Getenv("GOPATH"), "bin/etcd"), "the path to etcd binary") - etcdLogDir := flag.String("etcd-log-dir", "etcd-log", "directory to store etcd logs") + etcdLogDir := flag.String("etcd-log-dir", "etcd-log", "directory to store etcd logs, data directories, failure archive") port := flag.String("port", ":9027", "port to serve agent server") useRoot := flag.Bool("use-root", true, "use root permissions") failpointAddr := flag.String("failpoint-addr", ":2381", "interface for gofail's HTTP server") diff --git a/github.com/coreos/etcd/tools/functional-tester/etcd-runner/command/election_command.go b/github.com/coreos/etcd/tools/functional-tester/etcd-runner/command/election_command.go index 2dd60de5e0..174670b80c 100644 --- a/github.com/coreos/etcd/tools/functional-tester/etcd-runner/command/election_command.go +++ b/github.com/coreos/etcd/tools/functional-tester/etcd-runner/command/election_command.go @@ -20,39 +20,44 @@ import ( "fmt" "github.com/coreos/etcd/clientv3/concurrency" + "github.com/spf13/cobra" ) // NewElectionCommand returns the cobra command for "election runner". 
func NewElectionCommand() *cobra.Command { cmd := &cobra.Command{ - Use: "election", + Use: "election [election name (defaults to 'elector')]", Short: "Performs election operation", Run: runElectionFunc, } - cmd.Flags().IntVar(&rounds, "rounds", 100, "number of rounds to run") cmd.Flags().IntVar(&totalClientConnections, "total-client-connections", 10, "total number of client connections") return cmd } func runElectionFunc(cmd *cobra.Command, args []string) { - if len(args) > 0 { - ExitWithError(ExitBadArgs, errors.New("election does not take any argument")) + election := "elector" + if len(args) == 1 { + election = args[0] } - - rcs := make([]roundClient, totalClientConnections) - validatec, releasec := make(chan struct{}, len(rcs)), make(chan struct{}, len(rcs)) - for range rcs { - releasec <- struct{}{} + if len(args) > 1 { + ExitWithError(ExitBadArgs, errors.New("election takes at most one argument")) } + rcs := make([]roundClient, totalClientConnections) + validatec := make(chan struct{}, len(rcs)) + // nextc closes when election is ready for next round. + nextc := make(chan struct{}) eps := endpointsFromFlag(cmd) - dialTimeout := dialTimeoutFromCmd(cmd) for i := range rcs { v := fmt.Sprintf("%d", i) observedLeader := "" validateWaiters := 0 + var rcNextc chan struct{} + setRcNextc := func() { + rcNextc = nextc + } rcs[i].c = newClient(eps, dialTimeout) var ( @@ -65,20 +70,26 @@ func runElectionFunc(cmd *cobra.Command, args []string) { break } } - e := concurrency.NewElection(s, "electors") - rcs[i].acquire = func() error { - <-releasec + e := concurrency.NewElection(s, election) + rcs[i].acquire = func() (err error) { ctx, cancel := context.WithCancel(context.Background()) + donec := make(chan struct{}) go func() { - if ol, ok := <-e.Observe(ctx); ok { - observedLeader = string(ol.Kvs[0].Value) - if observedLeader != v { - cancel() + defer close(donec) + for ctx.Err() == nil { + if ol, ok := <-e.Observe(ctx); ok { + observedLeader = string(ol.Kvs[0].Value) + break } } + if observedLeader != v { + cancel() + } }() err = e.Campaign(ctx, v) + cancel() + <-donec if err == nil { observedLeader = v } @@ -89,14 +100,18 @@ func runElectionFunc(cmd *cobra.Command, args []string) { case <-ctx.Done(): return nil default: - cancel() return err } } rcs[i].validate = func() error { - if l, err := e.Leader(context.TODO()); err == nil && string(l.Kvs[0].Value) != observedLeader { - return fmt.Errorf("expected leader %q, got %q", observedLeader, l) + l, err := e.Leader(context.TODO()) + if err == nil && string(l.Kvs[0].Value) != observedLeader { + return fmt.Errorf("expected leader %q, got %q", observedLeader, l.Kvs[0].Value) } + if err != nil { + return err + } + setRcNextc() validatec <- struct{}{} return nil } @@ -113,14 +128,17 @@ func runElectionFunc(cmd *cobra.Command, args []string) { return err } if observedLeader == v { - for range rcs { - releasec <- struct{}{} - } + oldNextc := nextc + nextc = make(chan struct{}) + close(oldNextc) + } + <-rcNextc observedLeader = "" return nil } } - - doRounds(rcs, rounds) + // each client creates 1 key from Campaign() and delete it from Resign() + // a round involves in 2*len(rcs) requests. 
+ doRounds(rcs, rounds, 2*len(rcs)) } diff --git a/github.com/coreos/etcd/tools/functional-tester/etcd-runner/command/global.go b/github.com/coreos/etcd/tools/functional-tester/etcd-runner/command/global.go index 38d6de38c8..02ae92dc27 100644 --- a/github.com/coreos/etcd/tools/functional-tester/etcd-runner/command/global.go +++ b/github.com/coreos/etcd/tools/functional-tester/etcd-runner/command/global.go @@ -15,6 +15,7 @@ package command import ( + "context" "fmt" "log" "sync" @@ -23,25 +24,18 @@ import ( "github.com/coreos/etcd/clientv3" "github.com/spf13/cobra" + "golang.org/x/time/rate" ) +// shared flags var ( - rounds int // total number of rounds the operation needs to be performed - totalClientConnections int // total number of client connections to be made with server - noOfPrefixes int // total number of prefixes which will be watched upon - watchPerPrefix int // number of watchers per prefix - reqRate int // put request per second - totalKeys int // total number of keys for operation - runningTime time.Duration // time for which operation should be performed + totalClientConnections int // total number of client connections to be made with server + endpoints []string + dialTimeout time.Duration + rounds int // total number of rounds to run; set to <= 0 to run forever. + reqRate int // maximum number of requests per second. ) -// GlobalFlags are flags that defined globally -// and are inherited to all sub-commands. -type GlobalFlags struct { - Endpoints []string - DialTimeout time.Duration -} - type roundClient struct { c *clientv3.Client progress int @@ -61,41 +55,39 @@ func newClient(eps []string, timeout time.Duration) *clientv3.Client { return c } -func doRounds(rcs []roundClient, rounds int) { - var mu sync.Mutex +func doRounds(rcs []roundClient, rounds int, requests int) { var wg sync.WaitGroup wg.Add(len(rcs)) finished := make(chan struct{}) + limiter := rate.NewLimiter(rate.Limit(reqRate), reqRate) for i := range rcs { go func(rc *roundClient) { defer wg.Done() - for rc.progress < rounds { + for rc.progress < rounds || rounds <= 0 { + if err := limiter.WaitN(context.Background(), requests/len(rcs)); err != nil { + log.Panicf("rate limiter error %v", err) + } + for rc.acquire() != nil { /* spin */ } - mu.Lock() if err := rc.validate(); err != nil { log.Fatal(err) } - mu.Unlock() time.Sleep(10 * time.Millisecond) rc.progress++ finished <- struct{}{} - mu.Lock() - for rc.release() != nil { - mu.Unlock() - mu.Lock() + for rc.release() != nil { /* spin */ } - mu.Unlock() } }(&rcs[i]) } start := time.Now() - for i := 1; i < len(rcs)*rounds+1; i++ { + for i := 1; i < len(rcs)*rounds+1 || rounds <= 0; i++ { select { case <-finished: if i%100 == 0 { @@ -120,11 +112,3 @@ func endpointsFromFlag(cmd *cobra.Command) []string { } return endpoints } - -func dialTimeoutFromCmd(cmd *cobra.Command) time.Duration { - dialTimeout, err := cmd.Flags().GetDuration("dial-timeout") - if err != nil { - ExitWithError(ExitError, err) - } - return dialTimeout -} diff --git a/github.com/coreos/etcd/tools/functional-tester/etcd-runner/help.go b/github.com/coreos/etcd/tools/functional-tester/etcd-runner/command/help.go similarity index 99% rename from github.com/coreos/etcd/tools/functional-tester/etcd-runner/help.go rename to github.com/coreos/etcd/tools/functional-tester/etcd-runner/command/help.go index c68f8de380..e7d7a4e890 100644 --- a/github.com/coreos/etcd/tools/functional-tester/etcd-runner/help.go +++ b/github.com/coreos/etcd/tools/functional-tester/etcd-runner/command/help.go @@ -14,7 +14,7 
@@ // copied from https://github.com/rkt/rkt/blob/master/rkt/help.go -package main +package command import ( "bytes" diff --git a/github.com/coreos/etcd/tools/functional-tester/etcd-runner/command/lease_renewer_command.go b/github.com/coreos/etcd/tools/functional-tester/etcd-runner/command/lease_renewer_command.go index e5257d4301..1e95958ce2 100644 --- a/github.com/coreos/etcd/tools/functional-tester/etcd-runner/command/lease_renewer_command.go +++ b/github.com/coreos/etcd/tools/functional-tester/etcd-runner/command/lease_renewer_command.go @@ -22,11 +22,16 @@ import ( "time" "github.com/coreos/etcd/clientv3" + "github.com/spf13/cobra" "google.golang.org/grpc" "google.golang.org/grpc/codes" ) +var ( + leaseTTL int64 +) + // NewLeaseRenewerCommand returns the cobra command for "lease-renewer runner". func NewLeaseRenewerCommand() *cobra.Command { cmd := &cobra.Command{ @@ -34,6 +39,7 @@ func NewLeaseRenewerCommand() *cobra.Command { Short: "Performs lease renew operation", Run: runLeaseRenewerFunc, } + cmd.Flags().Int64Var(&leaseTTL, "ttl", 5, "lease's ttl") return cmd } @@ -43,7 +49,6 @@ func runLeaseRenewerFunc(cmd *cobra.Command, args []string) { } eps := endpointsFromFlag(cmd) - dialTimeout := dialTimeoutFromCmd(cmd) c := newClient(eps, dialTimeout) ctx := context.Background() @@ -54,7 +59,7 @@ func runLeaseRenewerFunc(cmd *cobra.Command, args []string) { err error ) for { - l, err = c.Lease.Grant(ctx, 5) + l, err = c.Lease.Grant(ctx, leaseTTL) if err == nil { break } @@ -65,14 +70,14 @@ func runLeaseRenewerFunc(cmd *cobra.Command, args []string) { lk, err = c.Lease.KeepAliveOnce(ctx, l.ID) if grpc.Code(err) == codes.NotFound { if time.Since(expire) < 0 { - log.Printf("bad renew! exceeded: %v", time.Since(expire)) + log.Fatalf("bad renew! exceeded: %v", time.Since(expire)) for { lk, err = c.Lease.KeepAliveOnce(ctx, l.ID) fmt.Println(lk, err) time.Sleep(time.Second) } } - log.Printf("lost lease %d, expire: %v\n", l.ID, expire) + log.Fatalf("lost lease %d, expire: %v\n", l.ID, expire) break } if err != nil { diff --git a/github.com/coreos/etcd/tools/functional-tester/etcd-runner/command/lock_racer_command.go b/github.com/coreos/etcd/tools/functional-tester/etcd-runner/command/lock_racer_command.go index d9544263ae..6cd36d50b1 100644 --- a/github.com/coreos/etcd/tools/functional-tester/etcd-runner/command/lock_racer_command.go +++ b/github.com/coreos/etcd/tools/functional-tester/etcd-runner/command/lock_racer_command.go @@ -18,34 +18,41 @@ import ( "context" "errors" "fmt" + "sync" "github.com/coreos/etcd/clientv3/concurrency" + "github.com/spf13/cobra" ) // NewLockRacerCommand returns the cobra command for "lock-racer runner". func NewLockRacerCommand() *cobra.Command { cmd := &cobra.Command{ - Use: "lock-racer", + Use: "lock-racer [name of lock (defaults to 'racers')]", Short: "Performs lock race operation", Run: runRacerFunc, } - cmd.Flags().IntVar(&rounds, "rounds", 100, "number of rounds to run") cmd.Flags().IntVar(&totalClientConnections, "total-client-connections", 10, "total number of client connections") return cmd } func runRacerFunc(cmd *cobra.Command, args []string) { - if len(args) > 0 { - ExitWithError(ExitBadArgs, errors.New("lock-racer does not take any argument")) + racers := "racers" + if len(args) == 1 { + racers = args[0] + } + + if len(args) > 1 { + ExitWithError(ExitBadArgs, errors.New("lock-racer takes at most one argument")) } rcs := make([]roundClient, totalClientConnections) ctx := context.Background() + // mu ensures validate and release funcs are atomic. 
+ var mu sync.Mutex
 cnt := 0
 eps := endpointsFromFlag(cmd)
- dialTimeout := dialTimeoutFromCmd(cmd)
 for i := range rcs {
 var (
@@ -61,15 +68,19 @@ func runRacerFunc(cmd *cobra.Command, args []string) {
 break
 }
 }
- m := concurrency.NewMutex(s, "racers")
+ m := concurrency.NewMutex(s, racers)
 rcs[i].acquire = func() error { return m.Lock(ctx) }
 rcs[i].validate = func() error {
+ mu.Lock()
+ defer mu.Unlock()
 if cnt++; cnt != 1 {
 return fmt.Errorf("bad lock; count: %d", cnt)
 }
 return nil
 }
 rcs[i].release = func() error {
+ mu.Lock()
+ defer mu.Unlock()
 if err := m.Unlock(ctx); err != nil {
 return err
 }
@@ -77,5 +88,7 @@ func runRacerFunc(cmd *cobra.Command, args []string) {
 return nil
 }
 }
- doRounds(rcs, rounds)
+ // each client creates 1 key with NewMutex() and deletes it with Unlock();
+ // a round therefore involves 2*len(rcs) requests.
+ doRounds(rcs, rounds, 2*len(rcs))
 }
diff --git a/github.com/coreos/etcd/tools/functional-tester/etcd-runner/command/root.go b/github.com/coreos/etcd/tools/functional-tester/etcd-runner/command/root.go
new file mode 100644
index 0000000000..cc4347b886
--- /dev/null
+++ b/github.com/coreos/etcd/tools/functional-tester/etcd-runner/command/root.go
@@ -0,0 +1,70 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package command implements individual etcd-runner commands for the etcd-runner utility.
+package command
+
+import (
+ "log"
+ "math/rand"
+ "time"
+
+ "github.com/spf13/cobra"
+)
+
+const (
+ cliName = "etcd-runner"
+ cliDescription = "Stress tests using clientv3 functionality."
+
+ defaultDialTimeout = 2 * time.Second
+)
+
+var (
+ rootCmd = &cobra.Command{
+ Use: cliName,
+ Short: cliDescription,
+ SuggestFor: []string{"etcd-runner"},
+ }
+)
+
+func init() {
+ cobra.EnablePrefixMatching = true
+
+ rand.Seed(time.Now().UnixNano())
+
+ log.SetFlags(log.Lmicroseconds)
+
+ rootCmd.PersistentFlags().StringSliceVar(&endpoints, "endpoints", []string{"127.0.0.1:2379"}, "gRPC endpoints")
+ rootCmd.PersistentFlags().DurationVar(&dialTimeout, "dial-timeout", defaultDialTimeout, "dial timeout for client connections")
+ rootCmd.PersistentFlags().IntVar(&reqRate, "req-rate", 30, "maximum number of requests per second")
+ rootCmd.PersistentFlags().IntVar(&rounds, "rounds", 100, "number of rounds to run; 0 to run forever")
+
+ rootCmd.AddCommand(
+ NewElectionCommand(),
+ NewLeaseRenewerCommand(),
+ NewLockRacerCommand(),
+ NewWatchCommand(),
+ )
+}
+
+func Start() {
+ rootCmd.SetUsageFunc(usageFunc)
+
+ // Make help just show the usage
+ rootCmd.SetHelpTemplate(`{{.UsageString}}`)
+
+ if err := rootCmd.Execute(); err != nil {
+ ExitWithError(ExitError, err)
+ }
+}
diff --git a/github.com/coreos/etcd/tools/functional-tester/etcd-runner/command/watch_command.go b/github.com/coreos/etcd/tools/functional-tester/etcd-runner/command/watch_command.go
index fe9ab279be..c74bef397e 100644
--- a/github.com/coreos/etcd/tools/functional-tester/etcd-runner/command/watch_command.go
+++ b/github.com/coreos/etcd/tools/functional-tester/etcd-runner/command/watch_command.go
@@ -24,10 +24,19 @@ import (
 "github.com/coreos/etcd/clientv3"
 "github.com/coreos/etcd/pkg/stringutil"
+ "github.com/spf13/cobra"
 "golang.org/x/time/rate"
 )
+var (
+ runningTime time.Duration // time for which the operation should be performed
+ noOfPrefixes int // total number of prefixes to be watched
+ watchPerPrefix int // number of watchers per prefix
+ watchPrefix string // prefix appended to keys in watcher
+ totalKeys int // total number of keys for operation
+)
+
 // NewWatchCommand returns the cobra command for "watcher runner".
func NewWatchCommand() *cobra.Command { cmd := &cobra.Command{ @@ -35,12 +44,12 @@ func NewWatchCommand() *cobra.Command { Short: "Performs watch operation", Run: runWatcherFunc, } - cmd.Flags().IntVar(&rounds, "rounds", 100, "number of rounds to run") cmd.Flags().DurationVar(&runningTime, "running-time", 60, "number of seconds to run") + cmd.Flags().StringVar(&watchPrefix, "prefix", "", "the prefix to append on all keys") cmd.Flags().IntVar(&noOfPrefixes, "total-prefixes", 10, "total no of prefixes to use") cmd.Flags().IntVar(&watchPerPrefix, "watch-per-prefix", 10, "number of watchers per prefix") - cmd.Flags().IntVar(&reqRate, "req-rate", 30, "rate at which put request will be performed") cmd.Flags().IntVar(&totalKeys, "total-keys", 1000, "total number of keys to watch") + return cmd } @@ -50,7 +59,7 @@ func runWatcherFunc(cmd *cobra.Command, args []string) { } ctx := context.Background() - for round := 0; round < rounds; round++ { + for round := 0; round < rounds || rounds <= 0; round++ { fmt.Println("round", round) performWatchOnPrefixes(ctx, cmd, round) } @@ -64,7 +73,6 @@ func performWatchOnPrefixes(ctx context.Context, cmd *cobra.Command, round int) roundPrefix := fmt.Sprintf("%16x", round) eps := endpointsFromFlag(cmd) - dialTimeout := dialTimeoutFromCmd(cmd) var ( revision int64 @@ -94,7 +102,7 @@ func performWatchOnPrefixes(ctx context.Context, cmd *cobra.Command, round int) if err = limiter.Wait(ctxt); err != nil { return } - if err = putKeyAtMostOnce(ctxt, client, roundPrefix+"-"+prefix+"-"+key); err != nil { + if err = putKeyAtMostOnce(ctxt, client, watchPrefix+"-"+roundPrefix+"-"+prefix+"-"+key); err != nil { log.Fatalf("failed to put key: %v", err) return } @@ -112,15 +120,15 @@ func performWatchOnPrefixes(ctx context.Context, cmd *cobra.Command, round int) rc := newClient(eps, dialTimeout) rcs = append(rcs, rc) - watchPrefix := roundPrefix + "-" + prefix + wprefix := watchPrefix + "-" + roundPrefix + "-" + prefix - wc := rc.Watch(ctxc, watchPrefix, clientv3.WithPrefix(), clientv3.WithRev(revision)) + wc := rc.Watch(ctxc, wprefix, clientv3.WithPrefix(), clientv3.WithRev(revision)) wcs = append(wcs, wc) wg.Add(1) go func() { defer wg.Done() - checkWatchResponse(wc, watchPrefix, keys) + checkWatchResponse(wc, wprefix, keys) }() } } @@ -139,7 +147,7 @@ func performWatchOnPrefixes(ctx context.Context, cmd *cobra.Command, round int) rc.Close() } - if err = deletePrefix(ctx, client, roundPrefix); err != nil { + if err = deletePrefix(ctx, client, watchPrefix); err != nil { log.Fatalf("failed to clean up keys after test: %v", err) } } @@ -148,7 +156,7 @@ func checkWatchResponse(wc clientv3.WatchChan, prefix string, keys []string) { for n := 0; n < len(keys); { wr, more := <-wc if !more { - log.Fatalf("expect more keys (received %d/%d) for %s", len(keys), n, prefix) + log.Fatalf("expect more keys (received %d/%d) for %s", n, len(keys), prefix) } for _, event := range wr.Events { expectedKey := prefix + "-" + keys[n] diff --git a/github.com/coreos/etcd/tools/functional-tester/etcd-runner/main.go b/github.com/coreos/etcd/tools/functional-tester/etcd-runner/main.go index 82dbc309f3..04fede098a 100644 --- a/github.com/coreos/etcd/tools/functional-tester/etcd-runner/main.go +++ b/github.com/coreos/etcd/tools/functional-tester/etcd-runner/main.go @@ -15,61 +15,8 @@ // etcd-runner is a command line application that performs tests on etcd. 
package main -import ( - "log" - "time" - - "github.com/coreos/etcd/tools/functional-tester/etcd-runner/command" - "github.com/spf13/cobra" -) - -const ( - cliName = "etcd-runner" - cliDescription = "Stress tests using clientv3 functionality.." - - defaultDialTimeout = 2 * time.Second -) - -var ( - globalFlags = command.GlobalFlags{} -) - -var ( - rootCmd = &cobra.Command{ - Use: cliName, - Short: cliDescription, - SuggestFor: []string{"etcd-runner"}, - } -) - -func init() { - log.SetFlags(log.Lmicroseconds) - rootCmd.PersistentFlags().StringSliceVar(&globalFlags.Endpoints, "endpoints", []string{"127.0.0.1:2379"}, "gRPC endpoints") - rootCmd.PersistentFlags().DurationVar(&globalFlags.DialTimeout, "dial-timeout", defaultDialTimeout, "dial timeout for client connections") - - rootCmd.AddCommand( - command.NewElectionCommand(), - command.NewLeaseRenewerCommand(), - command.NewLockRacerCommand(), - command.NewWatchCommand(), - ) -} - -func init() { - cobra.EnablePrefixMatching = true -} - -func Start() { - rootCmd.SetUsageFunc(usageFunc) - - // Make help just show the usage - rootCmd.SetHelpTemplate(`{{.UsageString}}`) - - if err := rootCmd.Execute(); err != nil { - command.ExitWithError(command.ExitError, err) - } -} +import "github.com/coreos/etcd/tools/functional-tester/etcd-runner/command" func main() { - Start() + command.Start() } diff --git a/github.com/coreos/etcd/tools/functional-tester/etcd-tester/checks.go b/github.com/coreos/etcd/tools/functional-tester/etcd-tester/checks.go index 7cff67c57c..f3c5de9b46 100644 --- a/github.com/coreos/etcd/tools/functional-tester/etcd-tester/checks.go +++ b/github.com/coreos/etcd/tools/functional-tester/etcd-tester/checks.go @@ -245,6 +245,19 @@ func (cchecker *compositeChecker) Check() error { return errsToError(errs) } +type runnerChecker struct { + errc chan error +} + +func (rc *runnerChecker) Check() error { + select { + case err := <-rc.errc: + return err + default: + return nil + } +} + type noChecker struct{} func newNoChecker() Checker { return &noChecker{} } diff --git a/github.com/coreos/etcd/tools/functional-tester/etcd-tester/cluster.go b/github.com/coreos/etcd/tools/functional-tester/etcd-tester/cluster.go index 3a03244e24..61f36f0c99 100644 --- a/github.com/coreos/etcd/tools/functional-tester/etcd-tester/cluster.go +++ b/github.com/coreos/etcd/tools/functional-tester/etcd-tester/cluster.go @@ -34,8 +34,6 @@ type agentConfig struct { clientPort int peerPort int failpointPort int - - datadir string } type cluster struct { @@ -78,7 +76,6 @@ func (c *cluster) bootstrap() error { for i, m := range members { flags := append( m.Flags(), - "--data-dir", c.agents[i].datadir, "--initial-cluster-token", token, "--initial-cluster", clusterStr, "--snapshot-count", "10000") diff --git a/github.com/coreos/etcd/tools/functional-tester/etcd-tester/etcd_runner_stresser.go b/github.com/coreos/etcd/tools/functional-tester/etcd-tester/etcd_runner_stresser.go new file mode 100644 index 0000000000..23636bf5a0 --- /dev/null +++ b/github.com/coreos/etcd/tools/functional-tester/etcd-tester/etcd_runner_stresser.go @@ -0,0 +1,97 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "fmt" + "io/ioutil" + "os/exec" + "syscall" + + "golang.org/x/time/rate" +) + +type runnerStresser struct { + cmd *exec.Cmd + cmdStr string + args []string + rl *rate.Limiter + reqRate int + + errc chan error + donec chan struct{} +} + +func newRunnerStresser(cmdStr string, args []string, rl *rate.Limiter, reqRate int) *runnerStresser { + rl.SetLimit(rl.Limit() - rate.Limit(reqRate)) + return &runnerStresser{ + cmdStr: cmdStr, + args: args, + rl: rl, + reqRate: reqRate, + errc: make(chan error, 1), + donec: make(chan struct{}), + } +} + +func (rs *runnerStresser) setupOnce() (err error) { + if rs.cmd != nil { + return nil + } + + rs.cmd = exec.Command(rs.cmdStr, rs.args...) + stderr, err := rs.cmd.StderrPipe() + if err != nil { + return err + } + + go func() { + defer close(rs.donec) + out, err := ioutil.ReadAll(stderr) + if err != nil { + rs.errc <- err + } else { + rs.errc <- fmt.Errorf("(%v %v) stderr %v", rs.cmdStr, rs.args, string(out)) + } + }() + + return rs.cmd.Start() +} + +func (rs *runnerStresser) Stress() (err error) { + if err = rs.setupOnce(); err != nil { + return err + } + return syscall.Kill(rs.cmd.Process.Pid, syscall.SIGCONT) +} + +func (rs *runnerStresser) Pause() { + syscall.Kill(rs.cmd.Process.Pid, syscall.SIGSTOP) +} + +func (rs *runnerStresser) Close() { + syscall.Kill(rs.cmd.Process.Pid, syscall.SIGINT) + rs.cmd.Wait() + <-rs.donec + rs.rl.SetLimit(rs.rl.Limit() + rate.Limit(rs.reqRate)) +} + +func (rs *runnerStresser) ModifiedKeys() int64 { + return 1 +} + +func (rs *runnerStresser) Checker() Checker { + return &runnerChecker{rs.errc} +} diff --git a/github.com/coreos/etcd/tools/functional-tester/etcd-tester/failpoint.go b/github.com/coreos/etcd/tools/functional-tester/etcd-tester/failpoint.go index 1080dbeceb..bfb9374364 100644 --- a/github.com/coreos/etcd/tools/functional-tester/etcd-tester/failpoint.go +++ b/github.com/coreos/etcd/tools/functional-tester/etcd-tester/failpoint.go @@ -32,7 +32,7 @@ type failpointStats struct { var fpStats failpointStats -func failpointFailures(c *cluster) (ret []failure, err error) { +func failpointFailures(c *cluster, failpoints []string) (ret []failure, err error) { var fps []string fps, err = failpointPaths(c.Members[0].FailpointURL) if err != nil { @@ -43,7 +43,7 @@ func failpointFailures(c *cluster) (ret []failure, err error) { if len(fp) == 0 { continue } - fpFails := failuresFromFailpoint(fp) + fpFails := failuresFromFailpoint(fp, failpoints) // wrap in delays so failpoint has time to trigger for i, fpf := range fpFails { if strings.Contains(fp, "Snap") { @@ -77,34 +77,39 @@ func failpointPaths(endpoint string) ([]string, error) { return fps, nil } -func failuresFromFailpoint(fp string) []failure { - inject := makeInjectFailpoint(fp, `panic("etcd-tester")`) +// failpoints follows FreeBSD KFAIL_POINT syntax. +// e.g. 
panic("etcd-tester"),1*sleep(1000)->panic("etcd-tester") +func failuresFromFailpoint(fp string, failpoints []string) (fs []failure) { recov := makeRecoverFailpoint(fp) - return []failure{ - &failureOne{ - description: description("failpoint " + fp + " panic one"), - injectMember: inject, - recoverMember: recov, - }, - &failureAll{ - description: description("failpoint " + fp + " panic all"), - injectMember: inject, - recoverMember: recov, - }, - &failureMajority{ - description: description("failpoint " + fp + " panic majority"), - injectMember: inject, - recoverMember: recov, - }, - &failureLeader{ - failureByFunc{ - description: description("failpoint " + fp + " panic leader"), + for _, failpoint := range failpoints { + inject := makeInjectFailpoint(fp, failpoint) + fs = append(fs, []failure{ + &failureOne{ + description: description(fmt.Sprintf("failpoint %s (one: %s)", fp, failpoint)), injectMember: inject, recoverMember: recov, }, - 0, - }, + &failureAll{ + description: description(fmt.Sprintf("failpoint %s (all: %s)", fp, failpoint)), + injectMember: inject, + recoverMember: recov, + }, + &failureMajority{ + description: description(fmt.Sprintf("failpoint %s (majority: %s)", fp, failpoint)), + injectMember: inject, + recoverMember: recov, + }, + &failureLeader{ + failureByFunc{ + description: description(fmt.Sprintf("failpoint %s (leader: %s)", fp, failpoint)), + injectMember: inject, + recoverMember: recov, + }, + 0, + }, + }...) } + return fs } func makeInjectFailpoint(fp, val string) injectMemberFunc { diff --git a/github.com/coreos/etcd/tools/functional-tester/etcd-tester/key_stresser.go b/github.com/coreos/etcd/tools/functional-tester/etcd-tester/key_stresser.go index e049a62c91..1e351b7e10 100644 --- a/github.com/coreos/etcd/tools/functional-tester/etcd-tester/key_stresser.go +++ b/github.com/coreos/etcd/tools/functional-tester/etcd-tester/key_stresser.go @@ -140,11 +140,16 @@ func (s *keyStresser) run(ctx context.Context) { } } -func (s *keyStresser) Cancel() { +func (s *keyStresser) Pause() { + s.Close() +} + +func (s *keyStresser) Close() { s.cancel() s.conn.Close() s.wg.Wait() - plog.Infof("keyStresser %q is canceled", s.Endpoint) + plog.Infof("keyStresser %q is closed", s.Endpoint) + } func (s *keyStresser) ModifiedKeys() int64 { diff --git a/github.com/coreos/etcd/tools/functional-tester/etcd-tester/lease_stresser.go b/github.com/coreos/etcd/tools/functional-tester/etcd-tester/lease_stresser.go index fe334952d7..0767ccc2b5 100644 --- a/github.com/coreos/etcd/tools/functional-tester/etcd-tester/lease_stresser.go +++ b/github.com/coreos/etcd/tools/functional-tester/etcd-tester/lease_stresser.go @@ -361,13 +361,17 @@ func (ls *leaseStresser) randomlyDropLease(leaseID int64) (bool, error) { return false, ls.ctx.Err() } -func (ls *leaseStresser) Cancel() { - plog.Debugf("lease stresser %q is canceling...", ls.endpoint) +func (ls *leaseStresser) Pause() { + ls.Close() +} + +func (ls *leaseStresser) Close() { + plog.Debugf("lease stresser %q is closing...", ls.endpoint) ls.cancel() ls.runWg.Wait() ls.aliveWg.Wait() ls.conn.Close() - plog.Infof("lease stresser %q is canceled", ls.endpoint) + plog.Infof("lease stresser %q is closed", ls.endpoint) } func (ls *leaseStresser) ModifiedKeys() int64 { diff --git a/github.com/coreos/etcd/tools/functional-tester/etcd-tester/main.go b/github.com/coreos/etcd/tools/functional-tester/etcd-tester/main.go index 265157c721..16d55bbfb1 100644 --- a/github.com/coreos/etcd/tools/functional-tester/etcd-tester/main.go +++ 
b/github.com/coreos/etcd/tools/functional-tester/etcd-tester/main.go @@ -18,10 +18,11 @@ import ( "flag" "fmt" "net/http" - "net/http/pprof" "os" "strings" + "github.com/coreos/etcd/pkg/debugutil" + "github.com/coreos/pkg/capnslog" "github.com/prometheus/client_golang/prometheus" "golang.org/x/time/rate" @@ -35,24 +36,24 @@ const ( defaultFailpointPort = 2381 ) -const pprofPrefix = "/debug/pprof-tester" - func main() { endpointStr := flag.String("agent-endpoints", "localhost:9027", "HTTP RPC endpoints of agents. Do not specify the schema.") clientPorts := flag.String("client-ports", "", "etcd client port for each agent endpoint") peerPorts := flag.String("peer-ports", "", "etcd peer port for each agent endpoint") failpointPorts := flag.String("failpoint-ports", "", "etcd failpoint port for each agent endpoint") - datadir := flag.String("data-dir", "agent.etcd", "etcd data directory location on agent machine.") stressKeyLargeSize := flag.Uint("stress-key-large-size", 32*1024+1, "the size of each large key written into etcd.") stressKeySize := flag.Uint("stress-key-size", 100, "the size of each small key written into etcd.") stressKeySuffixRange := flag.Uint("stress-key-count", 250000, "the count of key range written into etcd.") limit := flag.Int("limit", -1, "the limit of rounds to run failure set (-1 to run without limits).") + exitOnFailure := flag.Bool("exit-on-failure", false, "exit tester on first failure") stressQPS := flag.Int("stress-qps", 10000, "maximum number of stresser requests per second.") schedCases := flag.String("schedule-cases", "", "test case schedule") consistencyCheck := flag.Bool("consistency-check", true, "true to check consistency (revision, hash)") - stresserType := flag.String("stresser", "keys,lease", "comma separated list of stressers (keys, lease, v2keys, nop).") + stresserType := flag.String("stresser", "keys,lease", "comma separated list of stressers (keys, lease, v2keys, nop, election-runner, watch-runner, lock-racer-runner, lease-runner).") + etcdRunnerPath := flag.String("etcd-runner", "", "specify a path of etcd runner binary") failureTypes := flag.String("failures", "default,failpoints", "specify failures (concat of \"default\" and \"failpoints\").") + failpoints := flag.String("failpoints", `panic("etcd-tester")`, `comma separated list of failpoint terms to inject (e.g. 
'panic("etcd-tester"),1*sleep(1000)')`) externalFailures := flag.String("external-failures", "", "specify a path of script for enabling/disabling an external fault injector") enablePprof := flag.Bool("enable-pprof", false, "true to enable pprof") flag.Parse() @@ -68,7 +69,6 @@ func main() { agents[i].clientPort = cports[i] agents[i].peerPort = pports[i] agents[i].failpointPort = fports[i] - agents[i].datadir = *datadir } c := &cluster{agents: agents} @@ -83,7 +83,8 @@ func main() { var failures []failure if failureTypes != nil && *failureTypes != "" { - failures = makeFailures(*failureTypes, c) + types, failpoints := strings.Split(*failureTypes, ","), strings.Split(*failpoints, ",") + failures = makeFailures(types, failpoints, c) } if externalFailures != nil && *externalFailures != "" { @@ -120,12 +121,15 @@ func main() { keySuffixRange: int(*stressKeySuffixRange), numLeases: 10, keysPerLease: 10, + + etcdRunnerPath: *etcdRunnerPath, } t := &tester{ - failures: schedule, - cluster: c, - limit: *limit, + failures: schedule, + cluster: c, + limit: *limit, + exitOnFailure: *exitOnFailure, scfg: scfg, stresserType: *stresserType, @@ -137,15 +141,9 @@ func main() { http.Handle("/metrics", prometheus.Handler()) if *enablePprof { - http.Handle(pprofPrefix+"/", http.HandlerFunc(pprof.Index)) - http.Handle(pprofPrefix+"/profile", http.HandlerFunc(pprof.Profile)) - http.Handle(pprofPrefix+"/symbol", http.HandlerFunc(pprof.Symbol)) - http.Handle(pprofPrefix+"/cmdline", http.HandlerFunc(pprof.Cmdline)) - http.Handle(pprofPrefix+"/trace", http.HandlerFunc(pprof.Trace)) - http.Handle(pprofPrefix+"/heap", pprof.Handler("heap")) - http.Handle(pprofPrefix+"/goroutine", pprof.Handler("goroutine")) - http.Handle(pprofPrefix+"/threadcreate", pprof.Handler("threadcreate")) - http.Handle(pprofPrefix+"/block", pprof.Handler("block")) + for p, h := range debugutil.PProfHandlers() { + http.Handle(p, h) + } } go func() { plog.Fatal(http.ListenAndServe(":9028", nil)) }() @@ -176,12 +174,10 @@ func portsFromArg(arg string, n, defaultPort int) []int { return ret } -func makeFailures(types string, c *cluster) []failure { +func makeFailures(types, failpoints []string, c *cluster) []failure { var failures []failure - - fails := strings.Split(types, ",") - for i := range fails { - switch fails[i] { + for i := range types { + switch types[i] { case "default": defaultFailures := []failure{ newFailureKillAll(), @@ -199,14 +195,14 @@ func makeFailures(types string, c *cluster) []failure { failures = append(failures, defaultFailures...) case "failpoints": - fpFailures, fperr := failpointFailures(c) + fpFailures, fperr := failpointFailures(c, failpoints) if len(fpFailures) == 0 { plog.Infof("no failpoints found (%v)", fperr) } failures = append(failures, fpFailures...) 
 default:
- plog.Errorf("unknown failure: %s\n", fails[i])
+ plog.Errorf("unknown failure: %s\n", types[i])
 os.Exit(1)
 }
 }
diff --git a/github.com/coreos/etcd/tools/functional-tester/etcd-tester/stresser.go b/github.com/coreos/etcd/tools/functional-tester/etcd-tester/stresser.go
index ea8968d58c..30e8d47d70 100644
--- a/github.com/coreos/etcd/tools/functional-tester/etcd-tester/stresser.go
+++ b/github.com/coreos/etcd/tools/functional-tester/etcd-tester/stresser.go
@@ -15,6 +15,7 @@ package main
 import (
+ "fmt"
 "strings"
 "sync"
 "time"
@@ -28,8 +29,10 @@ func init() { grpclog.SetLogger(plog) }
 type Stresser interface {
 // Stress starts to stress the etcd cluster
 Stress() error
- // Cancel cancels the stress test on the etcd cluster
- Cancel()
+ // Pause stops the stresser from sending requests to etcd. Resume by calling Stress.
+ Pause()
+ // Close releases all of the Stresser's resources.
+ Close()
 // ModifiedKeys reports the number of keys created and deleted by stresser
 ModifiedKeys() int64
 // Checker returns an invariant checker for after the stresser is canceled.
@@ -43,7 +46,8 @@ type nopStresser struct {
 }
 func (s *nopStresser) Stress() error { return nil }
-func (s *nopStresser) Cancel() {}
+func (s *nopStresser) Pause() {}
+func (s *nopStresser) Close() {}
 func (s *nopStresser) ModifiedKeys() int64 {
 return 0
 }
@@ -59,7 +63,7 @@ func (cs *compositeStresser) Stress() error {
 for i, s := range cs.stressers {
 if err := s.Stress(); err != nil {
 for j := 0; j < i; j++ {
- cs.stressers[i].Cancel()
+ cs.stressers[j].Close()
 }
 return err
 }
@@ -67,13 +71,25 @@ func (cs *compositeStresser) Stress() error {
 return nil
 }
-func (cs *compositeStresser) Cancel() {
+func (cs *compositeStresser) Pause() {
 var wg sync.WaitGroup
 wg.Add(len(cs.stressers))
 for i := range cs.stressers {
 go func(s Stresser) {
 defer wg.Done()
- s.Cancel()
+ s.Pause()
+ }(cs.stressers[i])
+ }
+ wg.Wait()
+}
+
+func (cs *compositeStresser) Close() {
+ var wg sync.WaitGroup
+ wg.Add(len(cs.stressers))
+ for i := range cs.stressers {
+ go func(s Stresser) {
+ defer wg.Done()
+ s.Close()
 }(cs.stressers[i])
 }
 wg.Wait()
@@ -108,6 +124,8 @@ type stressConfig struct {
 keysPerLease int
 rateLimiter *rate.Limiter
+
+ etcdRunnerPath string
 }
 // NewStresser creates stresser from a comma separated list of stresser types.
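The stresser.go hunks above split the old Cancel method into Pause and Close, and the hunk that follows registers the new election-runner, watch-runner, lock-racer-runner, and lease-runner stresser types. Those runners are driven by runnerStresser (added earlier in this diff), which keeps one external etcd-runner process alive across rounds: Stress resumes it with SIGCONT, Pause suspends it with SIGSTOP, and only Close interrupts and reaps it. The sketch below is a minimal, standalone illustration of that signal-driven pause/resume pattern; it is not etcd code, the childRunner and start names are invented for illustration, and it assumes a Unix platform (syscall.Kill), as the functional tester itself does.

```go
package main

import (
	"os/exec"
	"syscall"
	"time"
)

// childRunner is an illustrative wrapper (not part of etcd) around a
// long-running child process that is paused and resumed with job-control
// signals instead of being torn down and restarted.
type childRunner struct {
	cmd *exec.Cmd
}

// start launches the child; the caller keeps it running for the lifetime
// of the test, the way the tester keeps the etcd-runner binary alive.
func start(path string, args ...string) (*childRunner, error) {
	cmd := exec.Command(path, args...)
	if err := cmd.Start(); err != nil {
		return nil, err
	}
	return &childRunner{cmd: cmd}, nil
}

// Pause suspends the child without losing its in-memory state.
func (c *childRunner) Pause() error {
	return syscall.Kill(c.cmd.Process.Pid, syscall.SIGSTOP)
}

// Resume lets the suspended child pick up where it left off.
func (c *childRunner) Resume() error {
	return syscall.Kill(c.cmd.Process.Pid, syscall.SIGCONT)
}

// Close interrupts the child and reaps it.
func (c *childRunner) Close() error {
	if err := syscall.Kill(c.cmd.Process.Pid, syscall.SIGINT); err != nil {
		return err
	}
	return c.cmd.Wait()
}

func main() {
	// "sleep 60" stands in for the etcd-runner binary in this sketch.
	r, err := start("sleep", "60")
	if err != nil {
		panic(err)
	}
	r.Pause()
	time.Sleep(100 * time.Millisecond)
	r.Resume()
	r.Close() // sleep exits with "signal: interrupt"; ignored here
}
```

Suspending rather than killing the child preserves its in-process client state, so resuming after a failure round is cheap; the rate-limiter bookkeeping in newRunnerStresser (subtracting reqRate from the shared limiter and adding it back in Close) keeps the combined stress load within the tester's --stress-qps budget.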
@@ -149,6 +167,49 @@ func NewStresser(s string, sc *stressConfig, m *member) Stresser { keysPerLease: sc.keysPerLease, rateLimiter: sc.rateLimiter, } + case "election-runner": + reqRate := 100 + args := []string{ + "election", + fmt.Sprintf("%v", time.Now().UnixNano()), // election name as current nano time + "--dial-timeout=10s", + "--endpoints", m.grpcAddr(), + "--total-client-connections=10", + "--rounds=0", // runs forever + "--req-rate", fmt.Sprintf("%v", reqRate), + } + return newRunnerStresser(sc.etcdRunnerPath, args, sc.rateLimiter, reqRate) + case "watch-runner": + reqRate := 100 + args := []string{ + "watcher", + "--prefix", fmt.Sprintf("%v", time.Now().UnixNano()), // prefix all keys with nano time + "--total-keys=1", + "--total-prefixes=1", + "--watch-per-prefix=1", + "--endpoints", m.grpcAddr(), + "--rounds=0", // runs forever + "--req-rate", fmt.Sprintf("%v", reqRate), + } + return newRunnerStresser(sc.etcdRunnerPath, args, sc.rateLimiter, reqRate) + case "lock-racer-runner": + reqRate := 100 + args := []string{ + "lock-racer", + fmt.Sprintf("%v", time.Now().UnixNano()), // locker name as current nano time + "--endpoints", m.grpcAddr(), + "--total-client-connections=10", + "--rounds=0", // runs forever + "--req-rate", fmt.Sprintf("%v", reqRate), + } + return newRunnerStresser(sc.etcdRunnerPath, args, sc.rateLimiter, reqRate) + case "lease-runner": + args := []string{ + "lease-renewer", + "--ttl=30", + "--endpoints", m.grpcAddr(), + } + return newRunnerStresser(sc.etcdRunnerPath, args, sc.rateLimiter, 0) default: plog.Panicf("unknown stresser type: %s\n", s) } diff --git a/github.com/coreos/etcd/tools/functional-tester/etcd-tester/tester.go b/github.com/coreos/etcd/tools/functional-tester/etcd-tester/tester.go index 5eecf292de..6a0c72af55 100644 --- a/github.com/coreos/etcd/tools/functional-tester/etcd-tester/tester.go +++ b/github.com/coreos/etcd/tools/functional-tester/etcd-tester/tester.go @@ -16,12 +16,14 @@ package main import ( "fmt" + "os" "time" ) type tester struct { - cluster *cluster - limit int + cluster *cluster + limit int + exitOnFailure bool failures []failure status Status @@ -49,6 +51,7 @@ func (tt *tester) runLoop() { if err := tt.resetStressCheck(); err != nil { plog.Errorf("%s failed to start stresser (%v)", tt.logPrefix(), err) + tt.failed() return } @@ -87,6 +90,7 @@ func (tt *tester) runLoop() { if round > 0 && round%500 == 0 { // every 500 rounds if err := tt.defrag(); err != nil { plog.Warningf("%s functional-tester returning with error (%v)", tt.logPrefix(), err) + tt.failed() return } } @@ -114,7 +118,7 @@ func (tt *tester) doRound(round int) error { return fmt.Errorf("recovery error: %v", err) } plog.Infof("%s recovered failure", tt.logPrefix()) - tt.cancelStresser() + tt.pauseStresser() plog.Infof("%s wait until cluster is healthy", tt.logPrefix()) if err := tt.cluster.WaitHealth(); err != nil { return fmt.Errorf("wait full health error: %v", err) @@ -161,7 +165,7 @@ func (tt *tester) checkConsistency() (err error) { } func (tt *tester) compact(rev int64, timeout time.Duration) (err error) { - tt.cancelStresser() + tt.pauseStresser() defer func() { if err == nil { err = tt.startStresser() @@ -209,7 +213,18 @@ func (tt *tester) logPrefix() string { return prefix } +func (tt *tester) failed() { + if !tt.exitOnFailure { + return + } + plog.Warningf("%s exiting on failure", tt.logPrefix()) + tt.cluster.Terminate() + os.Exit(2) +} + func (tt *tester) cleanup() error { + defer tt.failed() + roundFailedTotalCounter.Inc() desc := "compact/defrag" if 
tt.status.Case != -1 { @@ -217,7 +232,7 @@ func (tt *tester) cleanup() error { } caseFailedTotalCounter.WithLabelValues(desc).Inc() - tt.cancelStresser() + tt.closeStresser() if err := tt.cluster.Cleanup(); err != nil { plog.Warningf("%s cleanup error: %v", tt.logPrefix(), err) return err @@ -229,10 +244,10 @@ func (tt *tester) cleanup() error { return tt.resetStressCheck() } -func (tt *tester) cancelStresser() { - plog.Infof("%s canceling the stressers...", tt.logPrefix()) - tt.stresser.Cancel() - plog.Infof("%s canceled stressers", tt.logPrefix()) +func (tt *tester) pauseStresser() { + plog.Infof("%s pausing the stressers...", tt.logPrefix()) + tt.stresser.Pause() + plog.Infof("%s paused stressers", tt.logPrefix()) } func (tt *tester) startStresser() (err error) { @@ -242,6 +257,12 @@ func (tt *tester) startStresser() (err error) { return err } +func (tt *tester) closeStresser() { + plog.Infof("%s closing the stressers...", tt.logPrefix()) + tt.stresser.Close() + plog.Infof("%s closed stressers", tt.logPrefix()) +} + func (tt *tester) resetStressCheck() error { plog.Infof("%s resetting stressers and checkers...", tt.logPrefix()) cs := &compositeStresser{} diff --git a/github.com/coreos/etcd/tools/functional-tester/etcd-tester/v2_stresser.go b/github.com/coreos/etcd/tools/functional-tester/etcd-tester/v2_stresser.go index 39fbd722c0..620532e0c7 100644 --- a/github.com/coreos/etcd/tools/functional-tester/etcd-tester/v2_stresser.go +++ b/github.com/coreos/etcd/tools/functional-tester/etcd-tester/v2_stresser.go @@ -93,11 +93,15 @@ func (s *v2Stresser) run(ctx context.Context, kv clientV2.KeysAPI) { } } -func (s *v2Stresser) Cancel() { +func (s *v2Stresser) Pause() { s.cancel() s.wg.Wait() } +func (s *v2Stresser) Close() { + s.Pause() +} + func (s *v2Stresser) ModifiedKeys() int64 { return atomic.LoadInt64(&s.atomicModifiedKey) } diff --git a/github.com/coreos/etcd/tools/local-tester/Procfile b/github.com/coreos/etcd/tools/local-tester/Procfile index ba150278a0..2b7a6d6028 100644 --- a/github.com/coreos/etcd/tools/local-tester/Procfile +++ b/github.com/coreos/etcd/tools/local-tester/Procfile @@ -1,14 +1,14 @@ # Use goreman to run `go get github.com/mattn/goreman` # peer bridges -pbridge1: tools/local-tester/bridge/bridge 127.0.0.1:11111 127.0.0.1:12380 -pbridge2: tools/local-tester/bridge/bridge 127.0.0.1:22222 127.0.0.1:22380 -pbridge3: tools/local-tester/bridge/bridge 127.0.0.1:33333 127.0.0.1:32380 +pbridge1: tools/local-tester/bridge.sh 127.0.0.1:11111 127.0.0.1:12380 +pbridge2: tools/local-tester/bridge.sh 127.0.0.1:22222 127.0.0.1:22380 +pbridge3: tools/local-tester/bridge.sh 127.0.0.1:33333 127.0.0.1:32380 # client bridges -cbridge1: tools/local-tester/bridge/bridge 127.0.0.1:2379 127.0.0.1:11119 -cbridge2: tools/local-tester/bridge/bridge 127.0.0.1:22379 127.0.0.1:22229 -cbridge3: tools/local-tester/bridge/bridge 127.0.0.1:32379 127.0.0.1:33339 +cbridge1: tools/local-tester/bridge.sh 127.0.0.1:2379 127.0.0.1:11119 +cbridge2: tools/local-tester/bridge.sh 127.0.0.1:22379 127.0.0.1:22229 +cbridge3: tools/local-tester/bridge.sh 127.0.0.1:32379 127.0.0.1:33339 faults: tools/local-tester/faults.sh diff --git a/github.com/coreos/etcd/tools/local-tester/bridge.sh b/github.com/coreos/etcd/tools/local-tester/bridge.sh new file mode 100755 index 0000000000..3c2b5cb3fc --- /dev/null +++ b/github.com/coreos/etcd/tools/local-tester/bridge.sh @@ -0,0 +1,16 @@ +#!/bin/sh + +exec tools/local-tester/bridge/bridge \ + -delay-accept \ + -reset-listen \ + -conn-fault-rate=0.25 \ + -immediate-close \ + 
-blackhole \ + -time-close \ + -write-remote-only \ + -read-remote-only \ + -random-blackhole \ + -corrupt-receive \ + -corrupt-send \ + -reorder \ + $@ diff --git a/github.com/coreos/etcd/tools/local-tester/bridge/bridge.go b/github.com/coreos/etcd/tools/local-tester/bridge/bridge.go index 86c90857b7..77dd0e2861 100644 --- a/github.com/coreos/etcd/tools/local-tester/bridge/bridge.go +++ b/github.com/coreos/etcd/tools/local-tester/bridge/bridge.go @@ -57,6 +57,11 @@ func bridge(b *bridgeConn) { b.d.Copy(b.in, makeFetch(b.out)) } +func delayBridge(b *bridgeConn, txDelay, rxDelay time.Duration) { + go b.d.Copy(b.out, makeFetchDelay(makeFetch(b.in), txDelay)) + b.d.Copy(b.in, makeFetchDelay(makeFetch(b.out), rxDelay)) +} + func timeBridge(b *bridgeConn) { go func() { t := time.Duration(rand.Intn(5)+1) * time.Second @@ -135,6 +140,17 @@ func makeFetchRand(f func() ([]byte, error)) fetchFunc { } } +func makeFetchDelay(f fetchFunc, delay time.Duration) fetchFunc { + return func() ([]byte, error) { + b, err := f() + if err != nil { + return nil, err + } + time.Sleep(delay) + return b, nil + } +} + func randomBlackhole(b *bridgeConn) { log.Println("random blackhole: connection", b.String()) @@ -166,6 +182,9 @@ type config struct { corruptSend bool corruptReceive bool reorder bool + + txDelay string + rxDelay string } type acceptFaultFunc func() @@ -174,19 +193,23 @@ type connFaultFunc func(*bridgeConn) func main() { var cfg config - flag.BoolVar(&cfg.delayAccept, "delay-accept", true, "delays accepting new connections") - flag.BoolVar(&cfg.resetListen, "reset-listen", true, "resets the listening port") - - flag.Float64Var(&cfg.connFaultRate, "conn-fault-rate", 0.25, "rate of faulty connections") - flag.BoolVar(&cfg.immediateClose, "immediate-close", true, "close after accept") - flag.BoolVar(&cfg.blackhole, "blackhole", true, "reads nothing, writes go nowhere") - flag.BoolVar(&cfg.timeClose, "time-close", true, "close after random time") - flag.BoolVar(&cfg.writeRemoteOnly, "write-remote-only", true, "only write, no read") - flag.BoolVar(&cfg.readRemoteOnly, "read-remote-only", true, "only read, no write") - flag.BoolVar(&cfg.randomBlackhole, "random-blackhole", true, "blackhole after data xfer") - flag.BoolVar(&cfg.corruptReceive, "corrupt-receive", true, "corrupt packets received from destination") - flag.BoolVar(&cfg.corruptSend, "corrupt-send", true, "corrupt packets sent to destination") - flag.BoolVar(&cfg.reorder, "reorder", true, "reorder packet delivery") + flag.BoolVar(&cfg.delayAccept, "delay-accept", false, "delays accepting new connections") + flag.BoolVar(&cfg.resetListen, "reset-listen", false, "resets the listening port") + + flag.Float64Var(&cfg.connFaultRate, "conn-fault-rate", 0.0, "rate of faulty connections") + flag.BoolVar(&cfg.immediateClose, "immediate-close", false, "close after accept") + flag.BoolVar(&cfg.blackhole, "blackhole", false, "reads nothing, writes go nowhere") + flag.BoolVar(&cfg.timeClose, "time-close", false, "close after random time") + flag.BoolVar(&cfg.writeRemoteOnly, "write-remote-only", false, "only write, no read") + flag.BoolVar(&cfg.readRemoteOnly, "read-remote-only", false, "only read, no write") + flag.BoolVar(&cfg.randomBlackhole, "random-blackhole", false, "blackhole after data xfer") + flag.BoolVar(&cfg.corruptReceive, "corrupt-receive", false, "corrupt packets received from destination") + flag.BoolVar(&cfg.corruptSend, "corrupt-send", false, "corrupt packets sent to destination") + flag.BoolVar(&cfg.reorder, "reorder", false, "reorder packet 
delivery") + + flag.StringVar(&cfg.txDelay, "tx-delay", "0", "duration to delay client transmission to server") + flag.StringVar(&cfg.rxDelay, "rx-delay", "0", "duration to delay client receive from server") + flag.Parse() lAddr := flag.Args()[0] @@ -251,6 +274,23 @@ func main() { connFaults = append(connFaults, corruptReceive) } + txd, txdErr := time.ParseDuration(cfg.txDelay) + if txdErr != nil { + log.Fatal(txdErr) + } + rxd, rxdErr := time.ParseDuration(cfg.rxDelay) + if rxdErr != nil { + log.Fatal(rxdErr) + } + if txd != 0 || rxd != 0 { + f := func(b *bridgeConn) { delayBridge(b, txd, rxd) } + connFaults = append(connFaults, f) + } + + if len(connFaults) > 1 && cfg.connFaultRate == 0 { + log.Fatal("connection faults defined but conn-fault-rate=0") + } + var disp dispatcher if cfg.reorder { disp = newDispatcherPool() @@ -266,7 +306,7 @@ func main() { } r := rand.Intn(len(connFaults)) - if rand.Intn(100) > int(100.0*cfg.connFaultRate) { + if rand.Intn(100) >= int(100.0*cfg.connFaultRate) { r = 0 } diff --git a/github.com/coreos/etcd/wal/wal.go b/github.com/coreos/etcd/wal/wal.go index 2cac25c1c9..8f11411668 100644 --- a/github.com/coreos/etcd/wal/wal.go +++ b/github.com/coreos/etcd/wal/wal.go @@ -157,6 +157,48 @@ func Create(dirpath string, metadata []byte) (*WAL, error) { return w, nil } +func (w *WAL) renameWal(tmpdirpath string) (*WAL, error) { + if err := os.RemoveAll(w.dir); err != nil { + return nil, err + } + // On non-Windows platforms, hold the lock while renaming. Releasing + // the lock and trying to reacquire it quickly can be flaky because + // it's possible the process will fork to spawn a process while this is + // happening. The fds are set up as close-on-exec by the Go runtime, + // but there is a window between the fork and the exec where another + // process holds the lock. + if err := os.Rename(tmpdirpath, w.dir); err != nil { + if _, ok := err.(*os.LinkError); ok { + return w.renameWalUnlock(tmpdirpath) + } + return nil, err + } + w.fp = newFilePipeline(w.dir, SegmentSizeBytes) + df, err := fileutil.OpenDir(w.dir) + w.dirFile = df + return w, err +} + +func (w *WAL) renameWalUnlock(tmpdirpath string) (*WAL, error) { + // rename of directory with locked files doesn't work on windows/cifs; + // close the WAL to release the locks so the directory can be renamed. + plog.Infof("releasing file lock to rename %q to %q", tmpdirpath, w.dir) + w.Close() + if err := os.Rename(tmpdirpath, w.dir); err != nil { + return nil, err + } + // reopen and relock + newWAL, oerr := Open(w.dir, walpb.Snapshot{}) + if oerr != nil { + return nil, oerr + } + if _, _, _, err := newWAL.ReadAll(); err != nil { + newWAL.Close() + return nil, err + } + return newWAL, nil +} + // Open opens the WAL at the given snap. // The snap SHOULD have been previously saved to the WAL, or the following // ReadAll will fail. diff --git a/github.com/coreos/etcd/wal/wal_unix.go b/github.com/coreos/etcd/wal/wal_unix.go deleted file mode 100644 index 82fd6a17a7..0000000000 --- a/github.com/coreos/etcd/wal/wal_unix.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !windows - -package wal - -import ( - "os" - - "github.com/coreos/etcd/pkg/fileutil" -) - -func (w *WAL) renameWal(tmpdirpath string) (*WAL, error) { - // On non-Windows platforms, hold the lock while renaming. Releasing - // the lock and trying to reacquire it quickly can be flaky because - // it's possible the process will fork to spawn a process while this is - // happening. The fds are set up as close-on-exec by the Go runtime, - // but there is a window between the fork and the exec where another - // process holds the lock. - - if err := os.RemoveAll(w.dir); err != nil { - return nil, err - } - if err := os.Rename(tmpdirpath, w.dir); err != nil { - return nil, err - } - - w.fp = newFilePipeline(w.dir, SegmentSizeBytes) - df, err := fileutil.OpenDir(w.dir) - w.dirFile = df - return w, err -}
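The WAL changes above collapse the platform-specific wal_unix.go into a single renameWal in wal.go: the WAL first tries to rename its temporary directory into place while the file locks are still held, and only if os.Rename fails with an *os.LinkError (as happens on windows/cifs when the directory still contains locked files) does renameWalUnlock close the WAL to release the locks, rename, and then reopen and re-read it. The sketch below shows the same try-then-fallback shape in isolation; moveDir, releaseLocks, and relock are illustrative placeholders (standing in for w.Close() and the fresh Open/ReadAll), not etcd APIs.

```go
package main

import (
	"fmt"
	"os"
)

// moveDir sketches the fallback strategy used by renameWal: try to move a
// temporary directory into place while its files are still locked, and only
// if the platform refuses (os.Rename returns an *os.LinkError) release the
// locks, rename, and take the locks again. releaseLocks and relock are
// supplied by the caller; in the WAL they correspond to w.Close() and a
// fresh Open followed by ReadAll.
func moveDir(tmpdir, dir string, releaseLocks, relock func() error) error {
	if err := os.RemoveAll(dir); err != nil {
		return err
	}
	err := os.Rename(tmpdir, dir)
	if err == nil {
		return nil // fast path: rename succeeded with the locks still held
	}
	if _, ok := err.(*os.LinkError); !ok {
		return err // unrelated failure; nothing more to try
	}
	// Slow path: drop the locks so the directory can be renamed, then
	// reacquire them on the renamed path.
	if rerr := releaseLocks(); rerr != nil {
		return rerr
	}
	if rerr := os.Rename(tmpdir, dir); rerr != nil {
		return rerr
	}
	return relock()
}

func main() {
	if err := os.MkdirAll("wal.tmp", 0700); err != nil {
		panic(err)
	}
	err := moveDir("wal.tmp", "wal",
		func() error { return nil }, // stand-in for closing/unlocking the WAL
		func() error { return nil }) // stand-in for reopening and relocking it
	fmt.Println("renamed:", err)
}
```

On Linux the fast path almost always wins, so the extra close/reopen cost is only paid on filesystems that refuse to rename directories containing locked files.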