diff --git a/README.textile b/README.textile index c964e31655dc8..ce7b3b7d34476 100644 --- a/README.textile +++ b/README.textile @@ -27,7 +27,6 @@ Elasticsearch is a distributed RESTful search engine built for the cloud. Featur ** All the power of Lucene easily exposed through simple configuration / plugins. * Per operation consistency ** Single document level operations are atomic, consistent, isolated and durable. -* Open Source under the Apache License, version 2 ("ALv2") h2. Getting Started @@ -217,23 +216,3 @@ Elasticsearch (1.x), it is required to perform a full cluster restart. Please see the "setup reference": https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-upgrade.html for more details on the upgrade process. - -h1. License - -
-This software is licensed under the Apache License, version 2 ("ALv2"), quoted below.
-
-Copyright 2009-2016 Elasticsearch 
-
-Licensed under the Apache License, Version 2.0 (the "License"); you may not
-use this file except in compliance with the License. You may obtain a copy of
-the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-License for the specific language governing permissions and limitations under
-the License.
-
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index 6043ce210906a..5eb82c12616fc 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -475,14 +475,18 @@ class BuildPlugin implements Plugin { } static void configureJavadoc(Project project) { - project.tasks.withType(Javadoc) { - executable = new File(project.compilerJavaHome, 'bin/javadoc') + // remove compiled classes from the Javadoc classpath: http://mail.openjdk.java.net/pipermail/javadoc-dev/2018-January/000400.html + final List classes = new ArrayList<>() + project.tasks.withType(JavaCompile) { javaCompile -> + classes.add(javaCompile.destinationDir) } - configureJavadocJar(project) - if (project.compilerJavaVersion == JavaVersion.VERSION_1_10) { - project.tasks.withType(Javadoc) { it.enabled = false } - project.tasks.getByName('javadocJar').each { it.enabled = false } + project.tasks.withType(Javadoc) { javadoc -> + javadoc.executable = new File(project.compilerJavaHome, 'bin/javadoc') + javadoc.classpath = javadoc.getClasspath().filter { f -> + return classes.contains(f) == false + } } + configureJavadocJar(project) } /** Adds a javadocJar task to generate a jar containing javadocs. */ diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java index 0b366aa99e188..f5b46a6a53192 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java @@ -34,15 +34,17 @@ import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.admin.indices.flush.FlushResponse; +import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; +import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.admin.indices.get.GetIndexRequest; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse; import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; import org.elasticsearch.action.admin.indices.open.OpenIndexResponse; -import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; -import org.elasticsearch.action.admin.indices.rollover.RolloverResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; +import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; +import org.elasticsearch.action.admin.indices.rollover.RolloverResponse; import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; import org.elasticsearch.action.admin.indices.shrink.ResizeResponse; @@ -261,6 +263,28 @@ public void flushAsync(FlushRequest flushRequest, ActionListener listener, emptySet(), headers); } + /** + * Force merge one or more indices using the Force Merge API + *
<p>
+ * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-forcemerge.html"> + * Force Merge API on elastic.co</a> + */ + public ForceMergeResponse forceMerge(ForceMergeRequest forceMergeRequest, Header... headers) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(forceMergeRequest, Request::forceMerge, ForceMergeResponse::fromXContent, + emptySet(), headers); + } + + /** + * Asynchronously force merge one or more indices using the Force Merge API + *
<p>
+ * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-forcemerge.html"> + * Force Merge API on elastic.co</a> + */ + public void forceMergeAsync(ForceMergeRequest forceMergeRequest, ActionListener<ForceMergeResponse> listener, Header... headers) { + restHighLevelClient.performRequestAsyncAndParseEntity(forceMergeRequest, Request::forceMerge, ForceMergeResponse::fromXContent, + listener, emptySet(), headers); + } + + /** * Clears the cache of one or more indices using the Clear Cache API *
<p>
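A minimal sketch (not part of the change itself) of how the two methods added above might be called, assuming an already-built `RestHighLevelClient` named `client` and a hypothetical index name:

--------------------------------------------------
ForceMergeRequest request = new ForceMergeRequest("my-index"); // hypothetical index name
request.maxNumSegments(1);         // merge each shard down to a single segment
request.onlyExpungeDeletes(false); // full merge rather than only expunging deleted docs
request.flush(true);               // flush after the merge

// synchronous variant added above; throws IOException on connection failure
ForceMergeResponse response = client.indices().forceMerge(request);
int failedShards = response.getFailedShards();

// asynchronous variant added above; the listener is called when the response arrives
client.indices().forceMergeAsync(request, new ActionListener<ForceMergeResponse>() {
    @Override
    public void onResponse(ForceMergeResponse forceMergeResponse) {
        // inspect shard counts as in the synchronous case
    }

    @Override
    public void onFailure(Exception e) {
        // handle the failure
    }
});
--------------------------------------------------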
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java index b212112f781c9..fbb9cf4c05795 100755 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java @@ -37,6 +37,7 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.action.admin.indices.flush.FlushRequest; +import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.elasticsearch.action.admin.indices.get.GetIndexRequest; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; @@ -235,6 +236,17 @@ static Request flush(FlushRequest flushRequest) { return new Request(HttpPost.METHOD_NAME, endpoint, parameters.getParams(), null); } + static Request forceMerge(ForceMergeRequest forceMergeRequest) { + String[] indices = forceMergeRequest.indices() == null ? Strings.EMPTY_ARRAY : forceMergeRequest.indices(); + String endpoint = endpoint(indices, "_forcemerge"); + Params parameters = Params.builder(); + parameters.withIndicesOptions(forceMergeRequest.indicesOptions()); + parameters.putParam("max_num_segments", Integer.toString(forceMergeRequest.maxNumSegments())); + parameters.putParam("only_expunge_deletes", Boolean.toString(forceMergeRequest.onlyExpungeDeletes())); + parameters.putParam("flush", Boolean.toString(forceMergeRequest.flush())); + return new Request(HttpPost.METHOD_NAME, endpoint, parameters.getParams(), null); + } + static Request clearCache(ClearIndicesCacheRequest clearIndicesCacheRequest) { String[] indices = clearIndicesCacheRequest.indices() == null ? 
Strings.EMPTY_ARRAY :clearIndicesCacheRequest.indices(); String endpoint = endpoint(indices, "_cache/clear"); @@ -533,7 +545,7 @@ static Request existsAlias(GetAliasesRequest getAliasesRequest) { } static Request rankEval(RankEvalRequest rankEvalRequest) throws IOException { - String endpoint = endpoint(rankEvalRequest.getIndices(), Strings.EMPTY_ARRAY, "_rank_eval"); + String endpoint = endpoint(rankEvalRequest.indices(), Strings.EMPTY_ARRAY, "_rank_eval"); HttpEntity entity = createEntity(rankEvalRequest.getRankEvalSpec(), REQUEST_BODY_CONTENT_TYPE); return new Request(HttpGet.METHOD_NAME, endpoint, Collections.emptyMap(), entity); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java index d48f09b67abbc..9d71236af1621 100755 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java @@ -38,6 +38,8 @@ import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.admin.indices.flush.FlushResponse; +import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; +import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.admin.indices.get.GetIndexRequest; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse; @@ -469,6 +471,32 @@ public void testClearCache() throws IOException { } } + public void testForceMerge() throws IOException { + { + String index = "index"; + Settings settings = Settings.builder() + .put("number_of_shards", 1) + .put("number_of_replicas", 0) + .build(); + createIndex(index, settings); + ForceMergeRequest forceMergeRequest = new ForceMergeRequest(index); + ForceMergeResponse forceMergeResponse = + execute(forceMergeRequest, highLevelClient().indices()::forceMerge, highLevelClient().indices()::forceMergeAsync); + assertThat(forceMergeResponse.getTotalShards(), equalTo(1)); + assertThat(forceMergeResponse.getSuccessfulShards(), equalTo(1)); + assertThat(forceMergeResponse.getFailedShards(), equalTo(0)); + assertThat(forceMergeResponse.getShardFailures(), equalTo(BroadcastResponse.EMPTY)); + } + { + String nonExistentIndex = "non_existent_index"; + assertFalse(indexExists(nonExistentIndex)); + ForceMergeRequest forceMergeRequest = new ForceMergeRequest(nonExistentIndex); + ElasticsearchException exception = expectThrows(ElasticsearchException.class, + () -> execute(forceMergeRequest, highLevelClient().indices()::forceMerge, highLevelClient().indices()::forceMergeAsync)); + assertEquals(RestStatus.NOT_FOUND, exception.status()); + } + } + public void testExistsAlias() throws IOException { GetAliasesRequest getAliasesRequest = new GetAliasesRequest("alias"); assertFalse(execute(getAliasesRequest, highLevelClient().indices()::existsAlias, highLevelClient().indices()::existsAliasAsync)); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java index b753d40ee5c3e..05effff57befb 100755 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java +++ 
b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java @@ -40,6 +40,7 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.action.admin.indices.flush.FlushRequest; +import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.elasticsearch.action.admin.indices.get.GetIndexRequest; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; @@ -623,6 +624,43 @@ public void testFlush() { assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME)); } + public void testForceMerge() { + String[] indices = randomBoolean() ? null : randomIndicesNames(0, 5); + ForceMergeRequest forceMergeRequest; + if (randomBoolean()) { + forceMergeRequest = new ForceMergeRequest(indices); + } else { + forceMergeRequest = new ForceMergeRequest(); + forceMergeRequest.indices(indices); + } + + Map expectedParams = new HashMap<>(); + setRandomIndicesOptions(forceMergeRequest::indicesOptions, forceMergeRequest::indicesOptions, expectedParams); + if (randomBoolean()) { + forceMergeRequest.maxNumSegments(randomInt()); + } + expectedParams.put("max_num_segments", Integer.toString(forceMergeRequest.maxNumSegments())); + if (randomBoolean()) { + forceMergeRequest.onlyExpungeDeletes(randomBoolean()); + } + expectedParams.put("only_expunge_deletes", Boolean.toString(forceMergeRequest.onlyExpungeDeletes())); + if (randomBoolean()) { + forceMergeRequest.flush(randomBoolean()); + } + expectedParams.put("flush", Boolean.toString(forceMergeRequest.flush())); + + Request request = Request.forceMerge(forceMergeRequest); + StringJoiner endpoint = new StringJoiner("/", "/", ""); + if (indices != null && indices.length > 0) { + endpoint.add(String.join(",", indices)); + } + endpoint.add("_forcemerge"); + assertThat(request.getEndpoint(), equalTo(endpoint.toString())); + assertThat(request.getParameters(), equalTo(expectedParams)); + assertThat(request.getEntity(), nullValue()); + assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME)); + } + public void testClearCache() { String[] indices = randomBoolean() ? 
null : randomIndicesNames(0, 5); ClearIndicesCacheRequest clearIndicesCacheRequest; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java index a69bdea133b72..1c87ae6acddbb 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java @@ -37,6 +37,8 @@ import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.admin.indices.flush.FlushResponse; +import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; +import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.admin.indices.get.GetIndexRequest; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse; @@ -771,6 +773,79 @@ public void onFailure(Exception e) { } } + public void testForceMergeIndex() throws Exception { + RestHighLevelClient client = highLevelClient(); + + { + createIndex("index", Settings.EMPTY); + } + + { + // tag::force-merge-request + ForceMergeRequest request = new ForceMergeRequest("index1"); // <1> + ForceMergeRequest requestMultiple = new ForceMergeRequest("index1", "index2"); // <2> + ForceMergeRequest requestAll = new ForceMergeRequest(); // <3> + // end::force-merge-request + + // tag::force-merge-request-indicesOptions + request.indicesOptions(IndicesOptions.lenientExpandOpen()); // <1> + // end::force-merge-request-indicesOptions + + // tag::force-merge-request-segments-num + request.maxNumSegments(1); // <1> + // end::force-merge-request-segments-num + + // tag::force-merge-request-only-expunge-deletes + request.onlyExpungeDeletes(true); // <1> + // end::force-merge-request-only-expunge-deletes + + // tag::force-merge-request-flush + request.flush(true); // <1> + // end::force-merge-request-flush + + // tag::force-merge-execute + ForceMergeResponse forceMergeResponse = client.indices().forceMerge(request); + // end::force-merge-execute + + // tag::force-merge-response + int totalShards = forceMergeResponse.getTotalShards(); // <1> + int successfulShards = forceMergeResponse.getSuccessfulShards(); // <2> + int failedShards = forceMergeResponse.getFailedShards(); // <3> + DefaultShardOperationFailedException[] failures = forceMergeResponse.getShardFailures(); // <4> + // end::force-merge-response + + // tag::force-merge-execute-listener + ActionListener listener = new ActionListener() { + @Override + public void onResponse(ForceMergeResponse forceMergeResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::force-merge-execute-listener + + // tag::force-merge-execute-async + client.indices().forceMergeAsync(request, listener); // <1> + // end::force-merge-execute-async + } + { + // tag::force-merge-notfound + try { + ForceMergeRequest request = new ForceMergeRequest("does_not_exist"); + client.indices().forceMerge(request); + } catch (ElasticsearchException exception) { + if (exception.status() == RestStatus.NOT_FOUND) { + // <1> + } + } + // end::force-merge-notfound + } + } + public void testClearCache() throws Exception { 
RestHighLevelClient client = highLevelClient(); @@ -855,7 +930,6 @@ public void onFailure(Exception e) { } } - public void testCloseIndex() throws Exception { RestHighLevelClient client = highLevelClient(); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java index 22421dec6d9b9..96d962c3ac553 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java @@ -27,6 +27,8 @@ import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.search.ClearScrollRequest; import org.elasticsearch.action.search.ClearScrollResponse; +import org.elasticsearch.action.search.MultiSearchRequest; +import org.elasticsearch.action.search.MultiSearchResponse; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchScrollRequest; @@ -85,45 +87,15 @@ import static org.hamcrest.Matchers.greaterThan; /** - * This class is used to generate the Java High Level REST Client Search API documentation. - *
<p>
- * You need to wrap your code between two tags like: - * // tag::example - * // end::example - *
<p>
- * Where example is your tag name. - *
<p>
- * Then in the documentation, you can extract what is between tag and end tags with - * ["source","java",subs="attributes,callouts,macros"] - * -------------------------------------------------- - * include-tagged::{doc-tests}/SearchDocumentationIT.java[example] - * -------------------------------------------------- - *
<p>
- * The column width of the code block is 84. If the code contains a line longer - * than 84, the line will be cut and a horizontal scroll bar will be displayed. - * (the code indentation of the tag is not included in the width) + * Documentation for search APIs in the high level java client. + * Code wrapped in {@code tag} and {@code end} tags is included in the docs. */ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase { @SuppressWarnings({"unused", "unchecked"}) public void testSearch() throws Exception { + indexSearchTestData(); RestHighLevelClient client = highLevelClient(); - { - BulkRequest request = new BulkRequest(); - request.add(new IndexRequest("posts", "doc", "1") - .source(XContentType.JSON, "title", "In which order are my Elasticsearch queries executed?", "user", - Arrays.asList("kimchy", "luca"), "innerObject", Collections.singletonMap("key", "value"))); - request.add(new IndexRequest("posts", "doc", "2") - .source(XContentType.JSON, "title", "Current status and upcoming changes in Elasticsearch", "user", - Arrays.asList("kimchy", "christoph"), "innerObject", Collections.singletonMap("key", "value"))); - request.add(new IndexRequest("posts", "doc", "3") - .source(XContentType.JSON, "title", "The Future of Federated Search in Elasticsearch", "user", - Arrays.asList("kimchy", "tanguy"), "innerObject", Collections.singletonMap("key", "value"))); - request.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); - BulkResponse bulkResponse = client.bulk(request); - assertSame(RestStatus.OK, bulkResponse.status()); - assertFalse(bulkResponse.hasFailures()); - } { // tag::search-request-basic SearchRequest searchRequest = new SearchRequest(); // <1> @@ -715,4 +687,90 @@ public void onFailure(Exception e) { assertTrue(succeeded); } } + + public void testMultiSearch() throws Exception { + indexSearchTestData(); + RestHighLevelClient client = highLevelClient(); + { + // tag::multi-search-request-basic + MultiSearchRequest request = new MultiSearchRequest(); // <1> + SearchRequest firstSearchRequest = new SearchRequest(); // <2> + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + searchSourceBuilder.query(QueryBuilders.matchQuery("user", "kimchy")); + firstSearchRequest.source(searchSourceBuilder); + request.add(firstSearchRequest); // <3> + SearchRequest secondSearchRequest = new SearchRequest(); // <4> + searchSourceBuilder = new SearchSourceBuilder(); + searchSourceBuilder.query(QueryBuilders.matchQuery("user", "luca")); + secondSearchRequest.source(searchSourceBuilder); + request.add(secondSearchRequest); + // end::multi-search-request-basic + // tag::multi-search-execute + MultiSearchResponse response = client.multiSearch(request); + // end::multi-search-execute + // tag::multi-search-response + MultiSearchResponse.Item firstResponse = response.getResponses()[0]; // <1> + assertNull(firstResponse.getFailure()); // <2> + SearchResponse searchResponse = firstResponse.getResponse(); // <3> + assertEquals(3, searchResponse.getHits().getTotalHits()); + MultiSearchResponse.Item secondResponse = response.getResponses()[1]; // <4> + assertNull(secondResponse.getFailure()); + searchResponse = secondResponse.getResponse(); + assertEquals(1, searchResponse.getHits().getTotalHits()); + // end::multi-search-response + + // tag::multi-search-execute-listener + ActionListener listener = new ActionListener() { + @Override + public void onResponse(MultiSearchResponse response) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> 
+ } + }; + // end::multi-search-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::multi-search-execute-async + client.multiSearchAsync(request, listener); // <1> + // end::multi-search-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + { + // tag::multi-search-request-index + MultiSearchRequest request = new MultiSearchRequest(); + request.add(new SearchRequest("posts") // <1> + .types("doc")); // <2> + // end::multi-search-request-index + MultiSearchResponse response = client.multiSearch(request); + MultiSearchResponse.Item firstResponse = response.getResponses()[0]; + assertNull(firstResponse.getFailure()); + SearchResponse searchResponse = firstResponse.getResponse(); + assertEquals(3, searchResponse.getHits().getTotalHits()); + } + } + + private void indexSearchTestData() throws IOException { + BulkRequest request = new BulkRequest(); + request.add(new IndexRequest("posts", "doc", "1") + .source(XContentType.JSON, "title", "In which order are my Elasticsearch queries executed?", "user", + Arrays.asList("kimchy", "luca"), "innerObject", Collections.singletonMap("key", "value"))); + request.add(new IndexRequest("posts", "doc", "2") + .source(XContentType.JSON, "title", "Current status and upcoming changes in Elasticsearch", "user", + Arrays.asList("kimchy", "christoph"), "innerObject", Collections.singletonMap("key", "value"))); + request.add(new IndexRequest("posts", "doc", "3") + .source(XContentType.JSON, "title", "The Future of Federated Search in Elasticsearch", "user", + Arrays.asList("kimchy", "tanguy"), "innerObject", Collections.singletonMap("key", "value"))); + request.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + BulkResponse bulkResponse = highLevelClient().bulk(request); + assertSame(RestStatus.OK, bulkResponse.status()); + assertFalse(bulkResponse.hasFailures()); + } } diff --git a/docs/community-clients/index.asciidoc b/docs/community-clients/index.asciidoc index ca37c42fb0202..76312f39345e7 100644 --- a/docs/community-clients/index.asciidoc +++ b/docs/community-clients/index.asciidoc @@ -8,6 +8,7 @@ Besides the link:/guide[officially supported Elasticsearch clients], there are a number of clients that have been contributed by the community for various languages: * <> +* <> * <> * <> * <> @@ -35,6 +36,10 @@ a number of clients that have been contributed by the community for various lang * https://www.b4x.com/android/forum/threads/server-jelasticsearch-search-and-text-analytics.73335/ B4J client based on the official Java REST client. 
+[[cpp]] +== C++ +* https://github.com/seznam/elasticlient[elasticlient]: simple library for simplified work with Elasticsearch in C++ + [[clojure]] == Clojure diff --git a/docs/java-rest/high-level/indices/force_merge.asciidoc b/docs/java-rest/high-level/indices/force_merge.asciidoc new file mode 100644 index 0000000000000..6fe1fcd82b749 --- /dev/null +++ b/docs/java-rest/high-level/indices/force_merge.asciidoc @@ -0,0 +1,102 @@ +[[java-rest-high-force-merge]] +=== Force Merge API + +[[java-rest-high-force-merge-request]] +==== Force merge Request + +A `ForceMergeRequest` can be applied to one or more indices, or even on `_all` the indices: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[force-merge-request] +-------------------------------------------------- +<1> Force merge one index +<2> Force merge multiple indices +<3> Force merge all the indices + +==== Optional arguments + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[force-merge-request-indicesOptions] +-------------------------------------------------- +<1> Setting `IndicesOptions` controls how unavailable indices are resolved and +how wildcard expressions are expanded + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[force-merge-request-segments-num] +-------------------------------------------------- +<1> Set `max_num_segments` to control the number of segments to merge down to. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[force-merge-request-only-expunge-deletes] +-------------------------------------------------- +<1> Set the `only_expunge_deletes` flag to `true` + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[force-merge-request-flush] +-------------------------------------------------- +<1> Set the `flush` flag to `true` + +[[java-rest-high-force-merge-sync]] +==== Synchronous Execution + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[force-merge-execute] +-------------------------------------------------- + +[[java-rest-high-force-merge-async]] +==== Asynchronous Execution + +The asynchronous execution of a force merge request requires both the `ForceMergeRequest` +instance and an `ActionListener` instance to be passed to the asynchronous +method: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[force-merge-execute-async] +-------------------------------------------------- +<1> The `ForceMergeRequest` to execute and the `ActionListener` to use when +the execution completes + +The asynchronous method does not block and returns immediately. Once it is +completed the `ActionListener` is called back using the `onResponse` method +if the execution successfully completed or using the `onFailure` method if +it failed. 
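The documentation tests later in this change verify such asynchronous snippets by wrapping the listener in a `LatchedActionListener` so the test can block until the callback has run; a condensed sketch of that pattern, reusing the `request` and `client` names from the snippets above:

--------------------------------------------------
final CountDownLatch latch = new CountDownLatch(1);
ActionListener<ForceMergeResponse> listener = new ActionListener<ForceMergeResponse>() {
    @Override
    public void onResponse(ForceMergeResponse forceMergeResponse) {
        // inspect the response
    }

    @Override
    public void onFailure(Exception e) {
        // handle the failure
    }
};
// the wrapper counts the latch down after either callback has run
client.indices().forceMergeAsync(request, new LatchedActionListener<>(listener, latch));
latch.await(30L, TimeUnit.SECONDS); // blocks; await throws InterruptedException
--------------------------------------------------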
+ +A typical listener for `ForceMergeResponse` looks like: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[force-merge-execute-listener] +-------------------------------------------------- +<1> Called when the execution is successfully completed. The response is +provided as an argument +<2> Called in case of failure. The raised exception is provided as an argument + +[[java-rest-high-force-merge-response]] +==== Force Merge Response + +The returned `ForceMergeResponse` allows to retrieve information about the +executed operation as follows: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[force-merge-response] +-------------------------------------------------- +<1> Total number of shards hit by the force merge request +<2> Number of shards where the force merge has succeeded +<3> Number of shards where the force merge has failed +<4> A list of failures if the operation failed on one or more shards + +By default, if the indices were not found, an `ElasticsearchException` will be thrown: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[force-merge-notfound] +-------------------------------------------------- +<1> Do something if the indices to be force merged were not found \ No newline at end of file diff --git a/docs/java-rest/high-level/search/multi-search.asciidoc b/docs/java-rest/high-level/search/multi-search.asciidoc new file mode 100644 index 0000000000000..1b76f8976666a --- /dev/null +++ b/docs/java-rest/high-level/search/multi-search.asciidoc @@ -0,0 +1,90 @@ +[[java-rest-high-multi-search]] +=== Multi-Search API + +The `multiSearch` API executes multiple <> +requests in a single http request in parallel. + +[[java-rest-high-multi-search-request]] +==== Multi-Search Request + +The `MultiSearchRequest` is built empty and you add all of the searches that +you wish to execute to it: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[multi-search-request-basic] +-------------------------------------------------- +<1> Create an empty `MultiSearchRequest`. +<2> Create an empty `SearchRequest` and populate it just like you +would for a regular <>. +<3> Add the `SearchRequest` to the `MultiSearchRequest`. +<4> Build a second `SearchRequest` and add it to the `MultiSearchRequest`. + +===== Optional arguments + +The `SearchRequest`s inside of `MultiSearchRequest` support all of +<>'s optional arguments. 
+For example: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[search-request-indices-types] +-------------------------------------------------- +<1> Restricts the request to an index +<2> Limits the request to a type + +[[java-rest-high-multi-search-sync]] +==== Synchronous Execution + +The `multiSearch` method executes `MultiSearchRequest`s synchronously: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[multi-search-execute] +-------------------------------------------------- + +[[java-rest-high-multi-search-async]] +==== Asynchronous Execution + +The `multiSearchAsync` method executes `MultiSearchRequest`s asynchronously, +calling the provided `ActionListener` when the response is ready. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[search-execute-async] +-------------------------------------------------- +<1> The `MultiSearchRequest` to execute and the `ActionListener` to use when +the execution completes + +The asynchronous method does not block and returns immediately. Once it is +completed the `ActionListener` is called back using the `onResponse` method +if the execution successfully completed or using the `onFailure` method if +it failed. + +A typical listener for `MultiSearchResponse` looks like: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[multi-search-execute-listener] +-------------------------------------------------- +<1> Called when the execution is successfully completed. +<2> Called when the whole `SearchRequest` fails. + +==== MultiSearchResponse + +The `MultiSearchResponse` that is returned by executing the `multiSearch` +method contains a `MultiSearchResponse.Item` for each `SearchRequest` in the +`MultiSearchRequest`. Each `MultiSearchResponse.Item` contains an +exception in `getFailure` if the request failed or a +<<java-rest-high-search-response,`SearchResponse`>> in `getResponse` if +the request succeeded: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[multi-search-response] +-------------------------------------------------- +<1> The item for the first search. +<2> It succeeded so `getFailure` returns null. +<3> And there is a <<java-rest-high-search-response,`SearchResponse`>> in +`getResponse`. +<4> The item for the second search. diff --git a/docs/java-rest/high-level/search/search.asciidoc b/docs/java-rest/high-level/search/search.asciidoc index 2e8dda64286f4..af81775a90072 100644 --- a/docs/java-rest/high-level/search/search.asciidoc +++ b/docs/java-rest/high-level/search/search.asciidoc @@ -20,6 +20,7 @@ include-tagged::{doc-tests}/SearchDocumentationIT.java[search-request-basic] <3> Add a `match_all` query to the `SearchSourceBuilder`. <4> Add the `SearchSourceBuilder` to the `SearchRequest`. +[[java-rest-high-search-request-optional]] ===== Optional arguments Let's first look at some of the optional arguments of a `SearchRequest`: @@ -140,7 +141,7 @@ The `SearchSourceBuilder` allows to add one or more `SortBuilder` instances. 
The include-tagged::{doc-tests}/SearchDocumentationIT.java[search-source-sorting] -------------------------------------------------- <1> Sort descending by `_score` (the default) -<2> Also sort ascending by `_id` field +<2> Also sort ascending by `_id` field ===== Source filtering @@ -268,6 +269,7 @@ include-tagged::{doc-tests}/SearchDocumentationIT.java[search-execute-listener] <1> Called when the execution is successfully completed. <2> Called when the whole `SearchRequest` fails. +[[java-rest-high-search-response]] ==== SearchResponse The `SearchResponse` that is returned by executing the search provides details diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index bea30690fe183..0330b1903c5bf 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -31,9 +31,11 @@ The Java High Level REST Client supports the following Search APIs: * <> * <> * <> +* <<java-rest-high-multi-search>> include::search/search.asciidoc[] include::search/scroll.asciidoc[] +include::search/multi-search.asciidoc[] == Miscellaneous APIs @@ -60,6 +62,7 @@ Index Management:: * <> * <> * <> +* <<java-rest-high-force-merge>> * <> Mapping Management:: @@ -79,6 +82,7 @@ include::indices/split_index.asciidoc[] include::indices/refresh.asciidoc[] include::indices/flush.asciidoc[] include::indices/clear_cache.asciidoc[] +include::indices/force_merge.asciidoc[] include::indices/rollover.asciidoc[] include::indices/put_mapping.asciidoc[] include::indices/update_aliases.asciidoc[] diff --git a/docs/java-rest/low-level/configuration.asciidoc b/docs/java-rest/low-level/configuration.asciidoc index 54f7cd2817354..b0753496558bb 100644 --- a/docs/java-rest/low-level/configuration.asciidoc +++ b/docs/java-rest/low-level/configuration.asciidoc @@ -86,3 +86,16 @@ will be used. For any other required configuration needed, the Apache HttpAsyncClient docs should be consulted: https://hc.apache.org/httpcomponents-asyncclient-4.1.x/ . + +NOTE: If your application runs under the security manager you might be subject +to the JVM default policies of caching positive hostname resolutions +indefinitely and negative hostname resolutions for ten seconds. If the resolved +addresses of the hosts to which you are connecting the client vary with time +then you might want to modify the default JVM behavior. These can be modified by +adding +http://docs.oracle.com/javase/8/docs/technotes/guides/net/properties.html[`networkaddress.cache.ttl=<timeout>`] +and +http://docs.oracle.com/javase/8/docs/technotes/guides/net/properties.html[`networkaddress.cache.negative.ttl=<timeout>`] +to your +http://docs.oracle.com/javase/8/docs/technotes/guides/security/PolicyFiles.html[Java +security policy]. diff --git a/docs/reference/docs/index_.asciidoc b/docs/reference/docs/index_.asciidoc index faecc08f7c503..07fbe952c98bf 100644 --- a/docs/reference/docs/index_.asciidoc +++ b/docs/reference/docs/index_.asciidoc @@ -249,7 +249,7 @@ on a per-operation basis using the `routing` parameter.
For example: [source,js] -------------------------------------------------- -POST twitter/tweet?routing=kimchy +POST twitter/_doc?routing=kimchy { "user" : "kimchy", "post_date" : "2009-11-15T14:12:12", diff --git a/docs/reference/mapping/types/geo-point.asciidoc b/docs/reference/mapping/types/geo-point.asciidoc index 7d92bb3b2e7c7..ae81773e6a0a2 100644 --- a/docs/reference/mapping/types/geo-point.asciidoc +++ b/docs/reference/mapping/types/geo-point.asciidoc @@ -105,6 +105,13 @@ The following parameters are accepted by `geo_point` fields: If `true`, malformed geo-points are ignored. If `false` (default), malformed geo-points throw an exception and reject the whole document. +`ignore_z_value`:: + + If `true` (default) three dimension points will be accepted (stored in source) + but only latitude and longitude values will be indexed; the third dimension is + ignored. If `false`, geo-points containing any more than latitude and longitude + (two dimensions) values throw an exception and reject the whole document. + ==== Using geo-points in scripts When accessing the value of a geo-point in a script, the value is returned as diff --git a/docs/reference/mapping/types/geo-shape.asciidoc b/docs/reference/mapping/types/geo-shape.asciidoc index 23caaf6a8ec5c..26974f1f867de 100644 --- a/docs/reference/mapping/types/geo-shape.asciidoc +++ b/docs/reference/mapping/types/geo-shape.asciidoc @@ -91,6 +91,12 @@ false (default), malformed GeoJSON and WKT shapes throw an exception and reject entire document. | `false` +|`ignore_z_value` |If `true` (default) three dimension points will be accepted (stored in source) +but only latitude and longitude values will be indexed; the third dimension is ignored. If `false`, +geo-points containing any more than latitude and longitude (two dimensions) values throw an exception +and reject the whole document. +| `true` + |======================================================================= diff --git a/docs/reference/modules/cluster/misc.asciidoc b/docs/reference/modules/cluster/misc.asciidoc index 3963312c0f4ea..837cfcc43ebf7 100644 --- a/docs/reference/modules/cluster/misc.asciidoc +++ b/docs/reference/modules/cluster/misc.asciidoc @@ -56,3 +56,30 @@ PUT /_cluster/settings } ------------------------------- // CONSOLE + + +[[persistent-tasks-allocation]] +==== Persistent Tasks Allocations + +Plugins can create a kind of task called a persistent task. Those tasks are +usually long-lived tasks and are stored in the cluster state, allowing the +tasks to be revived after a full cluster restart. + +Every time a persistent task is created, the master node takes care of +assigning the task to a node of the cluster, and the assigned node will then +pick up the task and execute it locally. The process of assigning persistent +tasks to nodes is controlled by the following property, which can be updated +dynamically: + +`cluster.persistent_tasks.allocation.enable`:: ++ +-- +Enable or disable allocation for persistent tasks: + +* `all` - (default) Allows persistent tasks to be assigned to nodes +* `none` - No allocations are allowed for any type of persistent task + +This setting does not affect the persistent tasks that are already being executed. +Only newly created persistent tasks, or tasks that must be reassigned (after a node +left the cluster, for example), are impacted by this setting. 
+-- diff --git a/docs/reference/modules/snapshots.asciidoc b/docs/reference/modules/snapshots.asciidoc index b8883173b9890..ea3f99debb94e 100644 --- a/docs/reference/modules/snapshots.asciidoc +++ b/docs/reference/modules/snapshots.asciidoc @@ -401,7 +401,7 @@ created the snapshotting process will be aborted and all files created as part o cleaned. Therefore, the delete snapshot operation can be used to cancel long running snapshot operations that were started by mistake. -A repository can be deleted using the following command: +A repository can be unregistered using the following command: [source,sh] ----------------------------------- DELETE /_snapshot/my_fs_backup // CONSOLE // TEST[continued] -When a repository is deleted, Elasticsearch only removes the reference to the location where the repository is storing +When a repository is unregistered, Elasticsearch only removes the reference to the location where the repository is storing the snapshots. The snapshots themselves are left untouched and in place. [float] diff --git a/docs/reference/search/rank-eval.asciidoc b/docs/reference/search/rank-eval.asciidoc index 76143c726167e..cb1914a70768a 100644 --- a/docs/reference/search/rank-eval.asciidoc +++ b/docs/reference/search/rank-eval.asciidoc @@ -1,14 +1,16 @@ [[search-rank-eval]] == Ranking Evaluation API +experimental[The ranking evaluation API is experimental and may be changed or removed completely in a future release, +as well as change in non-backwards compatible ways on minor version updates. Elastic will take a best effort +approach to fix any issues, but experimental features are not subject to the support SLA of official GA features.] + The ranking evaluation API allows to evaluate the quality of ranked search results over a set of typical search queries. Given this set of queries and a list of manually rated documents, the `_rank_eval` endpoint calculates and returns typical information retrieval metrics like _mean reciprocal rank_, _precision_ or _discounted cumulative gain_. -experimental[The ranking evaluation API is new and may change in non-backwards compatible ways in the future, even on minor versions updates.] - [float] === Overview @@ -41,7 +43,7 @@ GET /my_index/_rank_eval { "requests": [ ... ], <1> "metric": { <2> - "reciprocal_rank": { ... } <3> + "mean_reciprocal_rank": { ... } <3> } } ------------------------------ @@ -85,7 +87,7 @@ The request section contains several search requests typical to your application <3> a list of document ratings, each entry containing the document's `_index` and `_id` together with the rating of the document's relevance with regards to this search request -A document `rating` can be any integer value that expresses the relevance of the document on a user defined scale. For some of the metrics, just giving a binary rating (e.g. `0` for irrelevant and `1` for relevant) will be sufficient, other metrics can use a more fine grained scale. +A document `rating` can be any integer value that expresses the relevance of the document on a user defined scale. For some of the metrics, just giving a binary rating (e.g. `0` for irrelevant and `1` for relevant) will be sufficient, other metrics can use a more fine grained scale. NOTE: To use the ranking evaluation API with indices that use multiple types, you should add a filter on the `_type` field to the query in the request. 
Otherwise, if your index uses multiple types with the same id, the provided @@ -162,6 +164,7 @@ GET /twitter/_rank_eval }], "metric": { "precision": { + "k" : 20, "relevant_rating_threshold": 1, "ignore_unlabeled": false } @@ -176,7 +179,9 @@ The `precision` metric takes the following optional parameters [cols="<,<",options="header",] |======================================================================= |Parameter |Description -|`relevant_rating_threshold` |Sets the rating threshold above which documents are considered to be +|`k` |sets the maximum number of documents retrieved per query. This value will act in place of the usual `size` parameter +in the query. Defaults to 10. +|`relevant_rating_threshold` |sets the rating threshold above which documents are considered to be "relevant". Defaults to `1`. |`ignore_unlabeled` |controls how unlabeled documents in the search results are counted. If set to 'true', unlabeled documents are ignored and neither count as relevant or irrelevant. Set to 'false' (the default), they are treated as irrelevant. @@ -202,6 +207,7 @@ GET /twitter/_rank_eval }], "metric": { "mean_reciprocal_rank": { + "k" : 20, "relevant_rating_threshold" : 1 } } @@ -215,6 +221,8 @@ The `mean_reciprocal_rank` metric takes the following optional parameters [cols="<,<",options="header",] |======================================================================= |Parameter |Description +|`k` |sets the maximum number of documents retrieved per query. This value will act in place of the usual `size` parameter +in the query. Defaults to 10. |`relevant_rating_threshold` |Sets the rating threshold above which documents are considered to be "relevant". Defaults to `1`. |======================================================================= @@ -238,6 +246,7 @@ GET /twitter/_rank_eval }], "metric": { "dcg": { + "k" : 20, "normalize": false } } @@ -251,6 +260,8 @@ The `dcg` metric takes the following optional parameters: [cols="<,<",options="header",] |======================================================================= |Parameter |Description +|`k` |sets the maximum number of documents retrieved per query. This value will act in place of the usual `size` parameter +in the query. Defaults to 10. |`normalize` | If set to `true`, this metric will calculate the https://en.wikipedia.org/wiki/Discounted_cumulative_gain#Normalized_DCG[Normalized DCG]. 
|======================================================================= diff --git a/docs/reference/setup/important-settings.asciidoc b/docs/reference/setup/important-settings.asciidoc index 997f267a7e29f..b9b99b708031e 100644 --- a/docs/reference/setup/important-settings.asciidoc +++ b/docs/reference/setup/important-settings.asciidoc @@ -30,3 +30,5 @@ include::important-settings/heap-size.asciidoc[] include::important-settings/heap-dump-path.asciidoc[] include::important-settings/gc-logging.asciidoc[] + +include::important-settings/error-file.asciidoc[] diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalRequest.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalRequest.java index 58fd3b0a694ae..7d3ec94811c5a 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalRequest.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalRequest.java @@ -22,24 +22,47 @@ import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import java.io.IOException; +import java.util.Arrays; import java.util.Objects; /** * Request to perform a search ranking evaluation. */ -public class RankEvalRequest extends ActionRequest { +public class RankEvalRequest extends ActionRequest implements IndicesRequest.Replaceable { private RankEvalSpec rankingEvaluationSpec; + + private IndicesOptions indicesOptions = SearchRequest.DEFAULT_INDICES_OPTIONS; private String[] indices = Strings.EMPTY_ARRAY; public RankEvalRequest(RankEvalSpec rankingEvaluationSpec, String[] indices) { - this.rankingEvaluationSpec = rankingEvaluationSpec; - setIndices(indices); + this.rankingEvaluationSpec = Objects.requireNonNull(rankingEvaluationSpec, "ranking evaluation specification must not be null"); + indices(indices); + } + + RankEvalRequest(StreamInput in) throws IOException { + super.readFrom(in); + rankingEvaluationSpec = new RankEvalSpec(in); + if (in.getVersion().onOrAfter(Version.V_6_3_0)) { + indices = in.readStringArray(); + indicesOptions = IndicesOptions.readIndicesOptions(in); + } else { + // readStringArray uses readVInt for size, we used readInt in 6.2 + int indicesSize = in.readInt(); + String[] indices = new String[indicesSize]; + for (int i = 0; i < indicesSize; i++) { + indices[i] = in.readString(); + } + // no indices options yet + } } RankEvalRequest() { @@ -72,7 +95,8 @@ public void setRankEvalSpec(RankEvalSpec task) { /** * Sets the indices the search will be executed on. */ - public RankEvalRequest setIndices(String... indices) { + @Override + public RankEvalRequest indices(String... indices) { Objects.requireNonNull(indices, "indices must not be null"); for (String index : indices) { Objects.requireNonNull(index, "index must not be null"); @@ -84,24 +108,23 @@ public RankEvalRequest setIndices(String... 
indices) { /** * @return the indices for this request */ - public String[] getIndices() { + @Override + public String[] indices() { return indices; } + @Override + public IndicesOptions indicesOptions() { + return indicesOptions; + } + + public void indicesOptions(IndicesOptions indicesOptions) { + this.indicesOptions = Objects.requireNonNull(indicesOptions, "indicesOptions must not be null"); + } + @Override public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - rankingEvaluationSpec = new RankEvalSpec(in); - if (in.getVersion().onOrAfter(Version.V_6_3_0)) { - indices = in.readStringArray(); - } else { - // readStringArray uses readVInt for size, we used readInt in 6.2 - int indicesSize = in.readInt(); - String[] indices = new String[indicesSize]; - for (int i = 0; i < indicesSize; i++) { - indices[i] = in.readString(); - } - } + throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); } @Override @@ -110,12 +133,33 @@ public void writeTo(StreamOutput out) throws IOException { rankingEvaluationSpec.writeTo(out); if (out.getVersion().onOrAfter(Version.V_6_3_0)) { out.writeStringArray(indices); + indicesOptions.writeIndicesOptions(out); } else { // writeStringArray uses writeVInt for size, we used writeInt in 6.2 out.writeInt(indices.length); for (String index : indices) { out.writeString(index); } + // no indices options yet + } + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; } + RankEvalRequest that = (RankEvalRequest) o; + return Objects.equals(indicesOptions, that.indicesOptions) && + Arrays.equals(indices, that.indices) && + Objects.equals(rankingEvaluationSpec, that.rankingEvaluationSpec); + } + + @Override + public int hashCode() { + return Objects.hash(indicesOptions, Arrays.hashCode(indices), rankingEvaluationSpec); } } diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RestRankEvalAction.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RestRankEvalAction.java index a596caf4f5c7b..34cf953ea50b7 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RestRankEvalAction.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RestRankEvalAction.java @@ -108,7 +108,7 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli } private static void parseRankEvalRequest(RankEvalRequest rankEvalRequest, RestRequest request, XContentParser parser) { - rankEvalRequest.setIndices(Strings.splitStringByCommaToArray(request.param("index"))); + rankEvalRequest.indices(Strings.splitStringByCommaToArray(request.param("index"))); RankEvalSpec spec = RankEvalSpec.parse(parser); rankEvalRequest.setRankEvalSpec(spec); } diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/TransportRankEvalAction.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/TransportRankEvalAction.java index a4ce4c7ee92e7..d24a779fd61ce 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/TransportRankEvalAction.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/TransportRankEvalAction.java @@ -75,8 +75,8 @@ public class TransportRankEvalAction extends HandledTransportAction { + + private static RankEvalPlugin rankEvalPlugin = new RankEvalPlugin(); + + @AfterClass + public static void releasePluginResources() throws IOException { + 
+        rankEvalPlugin.close();
+    }
+
+    @Override
+    protected NamedXContentRegistry xContentRegistry() {
+        return new NamedXContentRegistry(rankEvalPlugin.getNamedXContent());
+    }
+
+    @Override
+    protected NamedWriteableRegistry getNamedWriteableRegistry() {
+        return new NamedWriteableRegistry(rankEvalPlugin.getNamedWriteables());
+    }
+
+    @Override
+    protected RankEvalRequest createTestInstance() {
+        int numberOfIndices = randomInt(3);
+        String[] indices = new String[numberOfIndices];
+        for (int i = 0; i < numberOfIndices; i++) {
+            indices[i] = randomAlphaOfLengthBetween(5, 10);
+        }
+        RankEvalRequest rankEvalRequest = new RankEvalRequest(RankEvalSpecTests.createTestItem(), indices);
+        IndicesOptions indicesOptions = IndicesOptions.fromOptions(
+            randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean());
+        rankEvalRequest.indicesOptions(indicesOptions);
+        return rankEvalRequest;
+    }
+
+    @Override
+    protected Reader<RankEvalRequest> instanceReader() {
+        return RankEvalRequest::new;
+    }
+
+    @Override
+    protected RankEvalRequest mutateInstance(RankEvalRequest instance) throws IOException {
+        RankEvalRequest mutation = copyInstance(instance);
+        List<Runnable> mutators = new ArrayList<>();
+        mutators.add(() -> mutation.indices(ArrayUtils.concat(instance.indices(), new String[] { randomAlphaOfLength(10) })));
+        mutators.add(() -> mutation.indicesOptions(randomValueOtherThan(instance.indicesOptions(),
+            () -> IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()))));
+        mutators.add(() -> mutation.setRankEvalSpec(RankEvalSpecTests.mutateTestItem(instance.getRankEvalSpec())));
+        randomFrom(mutators).run();
+        return mutation;
+    }
+}
diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalSpecTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalSpecTests.java
index 26611679f3494..94338e570a5d2 100644
--- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalSpecTests.java
+++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalSpecTests.java
@@ -70,7 +70,7 @@ private static <T> List<T> randomList(Supplier<T> randomSupplier) {
         return result;
     }
 
-    private static RankEvalSpec createTestItem() throws IOException {
+    static RankEvalSpec createTestItem() {
         Supplier<EvaluationMetric> metric = randomFrom(Arrays.asList(
                 () -> PrecisionAtKTests.createTestItem(),
                 () -> MeanReciprocalRankTests.createTestItem(),
@@ -87,6 +87,9 @@ private static RankEvalSpec createTestItem() throws IOException {
             builder.field("field", randomAlphaOfLengthBetween(1, 5));
             builder.endObject();
             script = Strings.toString(builder);
+        } catch (IOException e) {
+            // this should not happen in tests; re-throw rather than silently swallow it
+            throw new RuntimeException(e);
         }
 
         templates = new HashSet<>();
@@ -156,7 +159,7 @@ public void testEqualsAndHash() throws IOException {
         checkEqualsAndHashCode(createTestItem(), RankEvalSpecTests::copy, RankEvalSpecTests::mutateTestItem);
     }
 
-    private static RankEvalSpec mutateTestItem(RankEvalSpec original) {
+    static RankEvalSpec mutateTestItem(RankEvalSpec original) {
         List<RatedRequest> ratedRequests = new ArrayList<>(original.getRatedRequests());
         EvaluationMetric metric = original.getMetric();
         Map<String, Script> templates = new HashMap<>(original.getTemplates());
diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java
index 5e17769398c49..49d7be69d4a8f 100644
--- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java
+++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java
@@ -430,6 +430,20 @@ public void testShrinkAfterUpgrade() throws IOException {
         if (runningAgainstOldCluster) {
             XContentBuilder mappingsAndSettings = jsonBuilder();
             mappingsAndSettings.startObject();
+            // single type was added in 5.5.0 (see #24317)
+            if (oldClusterVersion.onOrAfter(Version.V_5_5_0) &&
+                oldClusterVersion.before(Version.V_6_0_0_beta1) &&
+                randomBoolean()) {
+                {
+                    // test that mapping.single_type is correctly propagated to the shrunken index;
+                    // if it is not, the search below will fail
+                    mappingsAndSettings.startObject("settings");
+                    mappingsAndSettings.startObject("mapping");
+                    mappingsAndSettings.field("single_type", true);
+                    mappingsAndSettings.endObject();
+                    mappingsAndSettings.endObject();
+                }
+            }
             {
                 mappingsAndSettings.startObject("mappings");
                 mappingsAndSettings.startObject("doc");
diff --git a/qa/smoke-test-rank-eval-with-mustache/src/test/java/org/elasticsearch/index/rankeval/SmokeMultipleTemplatesIT.java b/qa/smoke-test-rank-eval-with-mustache/src/test/java/org/elasticsearch/index/rankeval/SmokeMultipleTemplatesIT.java
index 178d429ca9ffd..50860ddd87b21 100644
--- a/qa/smoke-test-rank-eval-with-mustache/src/test/java/org/elasticsearch/index/rankeval/SmokeMultipleTemplatesIT.java
+++ b/qa/smoke-test-rank-eval-with-mustache/src/test/java/org/elasticsearch/index/rankeval/SmokeMultipleTemplatesIT.java
@@ -102,7 +102,7 @@ public void testPrecisionAtRequest() throws IOException {
         RankEvalRequestBuilder builder = new RankEvalRequestBuilder(client(), RankEvalAction.INSTANCE, new RankEvalRequest());
         builder.setRankEvalSpec(task);
 
-        RankEvalResponse response = client().execute(RankEvalAction.INSTANCE, builder.request().setIndices("test")).actionGet();
+        RankEvalResponse response = client().execute(RankEvalAction.INSTANCE, builder.request().indices("test")).actionGet();
         assertEquals(0.9, response.getEvaluationResult(), Double.MIN_VALUE);
     }
 
diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java
index 1d632ae967dff..d23847c700de4 100644
--- a/server/src/main/java/org/elasticsearch/Version.java
+++ b/server/src/main/java/org/elasticsearch/Version.java
@@ -25,6 +25,8 @@
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.ToXContentFragment;
+import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.monitor.jvm.JvmInfo;
 
 import java.io.IOException;
@@ -34,7 +36,7 @@
 import java.util.Collections;
 import java.util.List;
 
-public class Version implements Comparable<Version> {
+public class Version implements Comparable<Version>, ToXContentFragment {
     /*
      * The logic for ID is: XXYYZZAA, where XX is major version, YY is minor version, ZZ is revision, and AA is alpha/beta/rc indicator AA
      * values below 25 are for alpha builder (since 5.0), and above 25 and below 50 are beta builds, and below 99 are RC builds, with 99
@@ -145,8 +147,6 @@ public class Version implements Comparable<Version> {
     public static final Version V_6_1_3 = new Version(V_6_1_3_ID, org.apache.lucene.util.Version.LUCENE_7_1_0);
     public static final int V_6_1_4_ID = 6010499;
     public static final Version V_6_1_4 = new Version(V_6_1_4_ID, org.apache.lucene.util.Version.LUCENE_7_1_0);
-    public static final int V_6_1_5_ID = 6010599;
-    public static final Version V_6_1_5 = new Version(V_6_1_5_ID, org.apache.lucene.util.Version.LUCENE_7_1_0);
     public static final int V_6_2_0_ID = 6020099;
     public static final Version V_6_2_0 = new Version(V_6_2_0_ID, org.apache.lucene.util.Version.LUCENE_7_2_1);
     public static final int V_6_2_1_ID = 6020199;
@@ -184,8 +184,6 @@ public static Version fromId(int id) {
                 return V_6_2_1;
             case V_6_2_0_ID:
                 return V_6_2_0;
-            case V_6_1_5_ID:
-                return V_6_1_5;
             case V_6_1_4_ID:
                 return V_6_1_4;
             case V_6_1_3_ID:
@@ -413,6 +411,11 @@ public int compareTo(Version other) {
         return Integer.compare(this.id, other.id);
     }
 
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        return builder.value(toString());
+    }
+
     /*
      * We need the declared versions when computing the minimum compatibility version. As computing the declared versions uses reflection it
      * is not cheap. Since computing the minimum compatibility version can occur often, we use this holder to compute the declared versions
diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java
index 51abf6b0222e1..60ba0a43396e4 100644
--- a/server/src/main/java/org/elasticsearch/action/ActionModule.java
+++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java
@@ -275,6 +275,7 @@
 import org.elasticsearch.rest.action.admin.indices.RestSyncedFlushAction;
 import org.elasticsearch.rest.action.admin.indices.RestUpdateSettingsAction;
 import org.elasticsearch.rest.action.admin.indices.RestUpgradeAction;
+import org.elasticsearch.rest.action.admin.indices.RestUpgradeStatusAction;
 import org.elasticsearch.rest.action.admin.indices.RestValidateQueryAction;
 import org.elasticsearch.rest.action.cat.AbstractCatAction;
 import org.elasticsearch.rest.action.cat.RestAliasAction;
@@ -592,6 +593,7 @@ public void initRestHandlers(Supplier<DiscoveryNodes> nodesInCluster) {
         registerHandler.accept(new RestSyncedFlushAction(settings, restController));
         registerHandler.accept(new RestForceMergeAction(settings, restController));
         registerHandler.accept(new RestUpgradeAction(settings, restController));
+        registerHandler.accept(new RestUpgradeStatusAction(settings, restController));
         registerHandler.accept(new RestClearIndicesCacheAction(settings, restController));
         registerHandler.accept(new RestIndexAction(settings, restController));
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java
index 598b1a526779e..bd5912b9853ec 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java
@@ -20,7 +20,6 @@
 package org.elasticsearch.action.admin.cluster.health;
 
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.ActiveShardCount;
@@ -104,7 +103,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
 
             @Override
             public void onFailure(String source, Exception e) {
-                logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
+                logger.error(() -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
                 listener.onFailure(e);
             }
         });
@@ -132,7 +131,7 @@ public void onNoLongerMaster(String source) {
 
             @Override
             public void onFailure(String source, Exception e) {
-                logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
+                logger.error(() -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
                 listener.onFailure(e);
             }
         });
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java
index 6e4d628ea5fc3..108ce586573d7 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java
@@ -21,7 +21,6 @@
 
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeAction;
@@ -112,7 +111,7 @@ public void onAckTimeout() {
 
             @Override
             public void onFailure(String source, Exception e) {
-                logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to perform [{}]", source), e);
+                logger.debug(() -> new ParameterizedMessage("failed to perform [{}]", source), e);
                 super.onFailure(source, e);
             }
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java
index edc30bd3c35fd..4cf74fbf865cc 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java
@@ -20,7 +20,6 @@
 package org.elasticsearch.action.admin.cluster.settings;
 
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
@@ -160,7 +159,7 @@ public void onNoLongerMaster(String source) {
                 @Override
                 public void onFailure(String source, Exception e) {
                     //if the reroute fails we only log
-                    logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to perform [{}]", source), e);
+                    logger.debug(() -> new ParameterizedMessage("failed to perform [{}]", source), e);
                     listener.onFailure(new ElasticsearchException("reroute after update settings failed", e));
                 }
@@ -174,7 +173,7 @@ public ClusterState execute(final ClusterState currentState) {
 
             @Override
             public void onFailure(String source, Exception e) {
-                logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to perform [{}]", source), e);
+                logger.debug(() -> new ParameterizedMessage("failed to perform [{}]", source), e);
                 super.onFailure(source, e);
             }
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java
index 362f54b74ab36..0bd6370e88a57 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java
@@ -20,7 +20,6 @@
 package org.elasticsearch.action.admin.indices.close;
 
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.DestructiveOperations;
@@ -114,7 +113,7 @@ public void onResponse(ClusterStateUpdateResponse response) {
 
             @Override
             public void onFailure(Exception t) {
-                logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to close indices [{}]", (Object) concreteIndices), t);
+                logger.debug(() -> new ParameterizedMessage("failed to close indices [{}]", (Object) concreteIndices), t);
                 listener.onFailure(t);
             }
         });
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java
index f5c63bd470d40..a2e102e0689c5 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java
@@ -20,7 +20,6 @@
 package org.elasticsearch.action.admin.indices.delete;
 
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.DestructiveOperations;
@@ -102,7 +101,7 @@ public void onResponse(ClusterStateUpdateResponse response) {
 
             @Override
             public void onFailure(Exception t) {
-                logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to delete indices [{}]", concreteIndices), t);
+                logger.debug(() -> new ParameterizedMessage("failed to delete indices [{}]", concreteIndices), t);
                 listener.onFailure(t);
             }
         });
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeResponse.java
index f77bb5d6a57de..6ebbbbd34cd5b 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeResponse.java
@@ -21,7 +21,10 @@
 
 import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.action.support.broadcast.BroadcastResponse;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.XContentParser;
 
+import java.util.Arrays;
 import java.util.List;
 
 /**
@@ -29,10 +32,25 @@
  */
 public class ForceMergeResponse extends BroadcastResponse {
 
+    private static final ConstructingObjectParser<ForceMergeResponse, Void> PARSER = new ConstructingObjectParser<>("force_merge",
+        true, arg -> {
+            BroadcastResponse response = (BroadcastResponse) arg[0];
+            return new ForceMergeResponse(response.getTotalShards(), response.getSuccessfulShards(), response.getFailedShards(),
+                Arrays.asList(response.getShardFailures()));
+        });
+
+    static {
+        declareBroadcastFields(PARSER);
+    }
+
     ForceMergeResponse() {
     }
 
     ForceMergeResponse(int totalShards, int successfulShards, int failedShards, List<DefaultShardOperationFailedException> shardFailures) {
         super(totalShards, successfulShards, failedShards, shardFailures);
     }
+
+    public static ForceMergeResponse fromXContent(XContentParser parser) {
+        return PARSER.apply(parser, null);
+    }
 }
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java
index d9ebf88fda6d7..f69670adf166d 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java
@@ -20,7 +20,6 @@
 package org.elasticsearch.action.admin.indices.mapping.put;
 
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeAction;
@@ -94,12 +93,12 @@ public void onResponse(ClusterStateUpdateResponse response) {
 
                 @Override
                 public void onFailure(Exception t) {
-                    logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to put mappings on indices [{}], type [{}]", concreteIndices, request.type()), t);
+                    logger.debug(() -> new ParameterizedMessage("failed to put mappings on indices [{}], type [{}]", concreteIndices, request.type()), t);
                     listener.onFailure(t);
                 }
             });
         } catch (IndexNotFoundException ex) {
-            logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to put mappings on indices [{}], type [{}]", request.indices(), request.type()), ex);
+            logger.debug(() -> new ParameterizedMessage("failed to put mappings on indices [{}], type [{}]", request.indices(), request.type()), ex);
             throw ex;
         }
     }
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java
index 795e11c228839..1e89244b67644 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java
@@ -20,7 +20,6 @@
 package org.elasticsearch.action.admin.indices.open;
 
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.DestructiveOperations;
@@ -99,7 +98,7 @@ public void onResponse(OpenIndexClusterStateUpdateResponse response) {
 
             @Override
             public void onFailure(Exception t) {
-                logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to open indices [{}]", (Object) concreteIndices), t);
+                logger.debug(() -> new ParameterizedMessage("failed to open indices [{}]", (Object) concreteIndices), t);
                 listener.onFailure(t);
             }
         });
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/RecoveryResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/RecoveryResponse.java
index 1a9c86049f8c6..7c51edc4d957e 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/RecoveryResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/RecoveryResponse.java
@@ -24,7 +24,6 @@
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.xcontent.ToXContentFragment;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.indices.recovery.RecoveryState;
 
@@ -37,9 +36,8 @@
 /**
  * Information regarding the recovery state of indices and their associated shards.
  */
-public class RecoveryResponse extends BroadcastResponse implements ToXContentFragment {
+public class RecoveryResponse extends BroadcastResponse {
 
-    private boolean detailed = false;
     private Map<String, List<RecoveryState>> shardRecoveryStates = new HashMap<>();
 
     public RecoveryResponse() { }
@@ -51,36 +49,26 @@ public RecoveryResponse() { }
      * @param totalShards       Total count of shards seen
      * @param successfulShards  Count of shards successfully processed
      * @param failedShards      Count of shards which failed to process
-     * @param detailed          Display detailed metrics
      * @param shardRecoveryStates    Map of indices to shard recovery information
      * @param shardFailures     List of failures processing shards
      */
-    public RecoveryResponse(int totalShards, int successfulShards, int failedShards, boolean detailed,
-                            Map<String, List<RecoveryState>> shardRecoveryStates,
+    public RecoveryResponse(int totalShards, int successfulShards, int failedShards, Map<String, List<RecoveryState>> shardRecoveryStates,
                             List<DefaultShardOperationFailedException> shardFailures) {
         super(totalShards, successfulShards, failedShards, shardFailures);
         this.shardRecoveryStates = shardRecoveryStates;
-        this.detailed = detailed;
     }
 
     public boolean hasRecoveries() {
         return shardRecoveryStates.size() > 0;
     }
 
-    public boolean detailed() {
-        return detailed;
-    }
-
-    public void detailed(boolean detailed) {
-        this.detailed = detailed;
-    }
-
     public Map<String, List<RecoveryState>> shardRecoveryStates() {
         return shardRecoveryStates;
     }
 
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
         if (hasRecoveries()) {
             for (String index : shardRecoveryStates.keySet()) {
                 List<RecoveryState> recoveryStates = shardRecoveryStates.get(index);
@@ -98,6 +86,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
                 builder.endObject();
             }
         }
+        builder.endObject();
         return builder;
     }
 
@@ -133,4 +122,4 @@ public void readFrom(StreamInput in) throws IOException {
     public String toString() {
         return Strings.toString(this, true, true);
     }
-}
\ No newline at end of file
+}
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java
index 0e11aed9d24fd..c67f5040cdd66 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java
@@ -87,7 +87,7 @@ protected RecoveryResponse newResponse(RecoveryRequest request, int totalShards,
                 shardResponses.get(indexName).add(recoveryState);
             }
         }
-        return new RecoveryResponse(totalShards, successfulShards, failedShards, request.detailed(), shardResponses, shardFailures);
+        return new RecoveryResponse(totalShards, successfulShards, failedShards, shardResponses, shardFailures);
     }
 
     @Override
@@ -118,4 +118,4 @@ protected ClusterBlockException checkGlobalBlock(ClusterState state, RecoveryRequest request) {
     protected ClusterBlockException checkRequestBlock(ClusterState state, RecoveryRequest request, String[] concreteIndices) {
         return state.blocks().indicesBlockedException(ClusterBlockLevel.READ, concreteIndices);
     }
-}
\ No newline at end of file
+}
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java
index e8e2f5376cd24..7faf24329dadd 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentResponse.java
@@ -29,7 +29,6 @@
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.unit.ByteSizeValue;
-import org.elasticsearch.common.xcontent.ToXContentFragment;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.index.engine.Segment;
 
@@ -43,7 +42,7 @@
 import java.util.Map;
 import java.util.Set;
 
-public class IndicesSegmentResponse extends BroadcastResponse implements ToXContentFragment {
+public class IndicesSegmentResponse extends BroadcastResponse {
 
     private ShardSegments[] shards;
 
@@ -103,7 +102,7 @@ public void writeTo(StreamOutput out) throws IOException {
     }
 
     @Override
-    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+    protected void addCustomXContentFields(XContentBuilder builder, Params params) throws IOException {
         builder.startObject(Fields.INDICES);
 
         for (IndexSegments indexSegments : getIndices().values()) {
@@ -173,10 +172,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         }
 
         builder.endObject();
-        return builder;
     }
 
-    static void toXContent(XContentBuilder builder, Sort sort) throws IOException {
+    private static void toXContent(XContentBuilder builder, Sort sort) throws IOException {
         builder.startArray("sort");
         for (SortField field : sort.getSort()) {
             builder.startObject();
@@ -195,7 +193,7 @@ static void toXContent(XContentBuilder builder, Sort sort) throws IOException {
         builder.endArray();
     }
 
-    static void toXContent(XContentBuilder builder, Accountable tree) throws IOException {
+    private static void toXContent(XContentBuilder builder, Accountable tree) throws IOException {
         builder.startObject();
         builder.field(Fields.DESCRIPTION, tree.toString());
         builder.humanReadableField(Fields.SIZE_IN_BYTES, Fields.SIZE, new ByteSizeValue(tree.ramBytesUsed()));
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java
index d20957c4bd29b..83eca83310339 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java
@@ -20,7 +20,6 @@
 package org.elasticsearch.action.admin.indices.settings.put;
 
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeAction;
@@ -94,7 +93,7 @@ public void onResponse(ClusterStateUpdateResponse response) {
 
             @Override
             public void onFailure(Exception t) {
-                logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to update settings on indices [{}]", (Object) concreteIndices), t);
+                logger.debug(() -> new ParameterizedMessage("failed to update settings on indices [{}]", (Object) concreteIndices), t);
                 listener.onFailure(t);
             }
         });
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java
index 46aef007e6bab..7406dc4f2d12c 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java
@@ -25,9 +25,7 @@
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.xcontent.ToXContentFragment;
 import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.common.xcontent.XContentFactory;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -39,7 +37,7 @@
 
 import static java.util.Collections.unmodifiableMap;
 
-public class IndicesStatsResponse extends BroadcastResponse implements ToXContentFragment {
+public class IndicesStatsResponse extends BroadcastResponse {
 
     private ShardStats[] shards;
 
@@ -147,7 +145,7 @@ public void writeTo(StreamOutput out) throws IOException {
     }
 
     @Override
-    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+    protected void addCustomXContentFields(XContentBuilder builder, Params params) throws IOException {
         final String level = params.param("level", "indices");
         final boolean isLevelValid =
             "cluster".equalsIgnoreCase(level) || "indices".equalsIgnoreCase(level) || "shards".equalsIgnoreCase(level);
@@ -155,7 +153,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
             throw new IllegalArgumentException("level parameter must be one of [cluster] or [indices] or [shards] but was [" + level + "]");
         }
-
         builder.startObject("_all");
 
         builder.startObject("primaries");
@@ -198,8 +195,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
             }
             builder.endObject();
         }
-
-        return builder;
     }
 
     static final class Fields {
@@ -209,14 +204,6 @@ static final class Fields {
 
     @Override
     public String toString() {
-        try {
-            XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint();
-            builder.startObject();
-            toXContent(builder, EMPTY_PARAMS);
-            builder.endObject();
-            return Strings.toString(builder);
-        } catch (IOException e) {
-            return "{ \"error\" : \"" + e.getMessage() + "\"}";
-        }
+        return Strings.toString(this, true, false);
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java
index ad9f73b55b0cb..db5ddd326d736 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java
@@ -19,7 +19,6 @@
 package org.elasticsearch.action.admin.indices.template.delete;
 
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeAction;
@@ -75,7 +74,7 @@ public void onResponse(MetaDataIndexTemplateService.RemoveResponse response) {
 
             @Override
             public void onFailure(Exception e) {
-                logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to delete templates [{}]", request.name()), e);
+                logger.debug(() -> new ParameterizedMessage("failed to delete templates [{}]", request.name()), e);
                 listener.onFailure(e);
             }
         });
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java
index 1624c7950e7f2..7b46dc602d0ce 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java
@@ -19,7 +19,6 @@
 package org.elasticsearch.action.admin.indices.template.put;
 
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeAction;
@@ -97,7 +96,7 @@ public void onResponse(MetaDataIndexTemplateService.PutResponse response) {
 
             @Override
             public void onFailure(Exception e) {
-                logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to put template [{}]", request.name()), e);
+                logger.debug(() -> new ParameterizedMessage("failed to put template [{}]", request.name()), e);
                 listener.onFailure(e);
             }
         });
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusResponse.java
index 71110f18b875c..a45b8feda89ce 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusResponse.java
@@ -23,7 +23,6 @@
 import org.elasticsearch.action.support.broadcast.BroadcastResponse;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.xcontent.ToXContentFragment;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 
 import java.io.IOException;
@@ -34,7 +33,7 @@
 import java.util.Map;
 import java.util.Set;
 
-public class UpgradeStatusResponse extends BroadcastResponse implements ToXContentFragment {
+public class UpgradeStatusResponse extends BroadcastResponse {
 
     private ShardUpgradeStatus[] shards;
 
     private Map<String, IndexUpgradeStatus> indicesUpgradeStatus;
@@ -116,6 +115,7 @@ public long getToUpgradeBytesAncient() {
 
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
         builder.byteSizeField(Fields.SIZE_IN_BYTES, Fields.SIZE, getTotalBytes());
         builder.byteSizeField(Fields.SIZE_TO_UPGRADE_IN_BYTES, Fields.SIZE_TO_UPGRADE, getToUpgradeBytes());
         builder.byteSizeField(Fields.SIZE_TO_UPGRADE_ANCIENT_IN_BYTES, Fields.SIZE_TO_UPGRADE_ANCIENT, getToUpgradeBytesAncient());
@@ -161,6 +161,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
             }
             builder.endObject();
         }
+        builder.endObject();
         return builder;
     }
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java
index 02d58a9db7ece..2e428e85efc23 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java
@@ -20,7 +20,6 @@
 package org.elasticsearch.action.admin.indices.upgrade.post;
 
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeAction;
@@ -78,7 +77,7 @@ public void onResponse(ClusterStateUpdateResponse response) {
 
             @Override
             public void onFailure(Exception t) {
-                logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to upgrade minimum compatibility version settings on indices [{}]", request.versions().keySet()), t);
+                logger.debug(() -> new ParameterizedMessage("failed to upgrade minimum compatibility version settings on indices [{}]", request.versions().keySet()), t);
                 listener.onFailure(t);
             }
         });
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeResponse.java
index db49921d43532..4a760e273a0fa 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeResponse.java
@@ -25,6 +25,7 @@
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.XContentBuilder;
 
 import java.io.IOException;
 import java.util.HashMap;
@@ -74,6 +75,18 @@ public void writeTo(StreamOutput out) throws IOException {
         }
     }
 
+    @Override
+    protected void addCustomXContentFields(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject("upgraded_indices");
+        for (Map.Entry<String, Tuple<Version, String>> entry : versions.entrySet()) {
+            builder.startObject(entry.getKey());
+            builder.field("upgrade_version", entry.getValue().v1());
+            builder.field("oldest_lucene_segment_version", entry.getValue().v2());
+            builder.endObject();
+        }
+        builder.endObject();
+    }
+
     /**
     * Returns the highest upgrade version of the node that performed metadata upgrade and
     * the version of the oldest lucene segment for each index that was upgraded.
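UpgradeResponse above and ValidateQueryResponse below plug into the same new extension point: BroadcastResponse (changed near the end of this diff) now writes the enclosing JSON object and the common `_shards` header itself, and subclasses only append their own fields via the addCustomXContentFields hook. The following is a self-contained, plain-Java sketch of that template-method shape; all names are hypothetical stand-ins, not the real Elasticsearch classes.

// Plain-Java model of the BroadcastResponse change in this diff. The base class
// owns the object braces and the shared "_shards" header; subclasses contribute
// fields through a hook. All names below are illustrative stand-ins.
class BroadcastXContentSketch {

    /** Tiny stand-in for XContentBuilder, just enough to show the shape. */
    static class Builder {
        private final StringBuilder json = new StringBuilder();
        Builder startObject() { json.append('{'); return this; }
        Builder endObject() { json.append('}'); return this; }
        Builder field(String name, Object value) {
            if (json.charAt(json.length() - 1) != '{') {
                json.append(',');
            }
            json.append('"').append(name).append("\":").append(value);
            return this;
        }
        @Override public String toString() { return json.toString(); }
    }

    static class BroadcastResponse {
        final int totalShards;
        BroadcastResponse(int totalShards) { this.totalShards = totalShards; }

        // Template method: always produces a complete JSON object (ToXContentObject
        // style), so callers no longer need to wrap the output themselves.
        final Builder toXContent(Builder builder) {
            builder.startObject();
            builder.field("_shards", "{\"total\":" + totalShards + "}"); // common header
            addCustomXContentFields(builder);                            // subclass hook
            return builder.endObject();
        }

        // Override to add fields after the common header; the default adds nothing.
        protected void addCustomXContentFields(Builder builder) {
        }
    }

    static class ValidateResponse extends BroadcastResponse {
        final boolean valid;
        ValidateResponse(boolean valid) { super(1); this.valid = valid; }

        @Override
        protected void addCustomXContentFields(Builder builder) {
            builder.field("valid", valid);
        }
    }

    public static void main(String[] args) {
        // Prints: {"_shards":{"total":1},"valid":true}
        System.out.println(new ValidateResponse(true).toXContent(new Builder()));
    }
}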
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponse.java index eff37ff4b0cb4..5bb11dd56e00b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponse.java @@ -23,6 +23,7 @@ import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; import java.util.ArrayList; @@ -38,8 +39,15 @@ */ public class ValidateQueryResponse extends BroadcastResponse { + public static final String INDEX_FIELD = "index"; + public static final String SHARD_FIELD = "shard"; + public static final String VALID_FIELD = "valid"; + public static final String EXPLANATIONS_FIELD = "explanations"; + public static final String ERROR_FIELD = "error"; + public static final String EXPLANATION_FIELD = "explanation"; + private boolean valid; - + private List queryExplanations; ValidateQueryResponse() { @@ -96,4 +104,30 @@ public void writeTo(StreamOutput out) throws IOException { } } + + @Override + protected void addCustomXContentFields(XContentBuilder builder, Params params) throws IOException { + builder.field(VALID_FIELD, isValid()); + if (getQueryExplanation() != null && !getQueryExplanation().isEmpty()) { + builder.startArray(EXPLANATIONS_FIELD); + for (QueryExplanation explanation : getQueryExplanation()) { + builder.startObject(); + if (explanation.getIndex() != null) { + builder.field(INDEX_FIELD, explanation.getIndex()); + } + if(explanation.getShard() >= 0) { + builder.field(SHARD_FIELD, explanation.getShard()); + } + builder.field(VALID_FIELD, explanation.isValid()); + if (explanation.getError() != null) { + builder.field(ERROR_FIELD, explanation.getError()); + } + if (explanation.getExplanation() != null) { + builder.field(EXPLANATION_FIELD, explanation.getExplanation()); + } + builder.endObject(); + } + builder.endArray(); + } + } } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java index 423648bbb7105..adb1d32161fe1 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java @@ -20,7 +20,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; @@ -89,10 +88,10 @@ public void onFailure(Exception e) { } } catch (InterruptedException e) { Thread.currentThread().interrupt(); - logger.info((Supplier) () -> new ParameterizedMessage("Bulk request {} has been cancelled.", executionId), e); + logger.info(() -> new ParameterizedMessage("Bulk request {} has been cancelled.", executionId), e); listener.afterBulk(executionId, bulkRequest, e); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("Failed to execute bulk request {}.", executionId), e); + logger.warn(() -> new ParameterizedMessage("Failed to execute bulk request 
{}.", executionId), e); listener.afterBulk(executionId, bulkRequest, e); } finally { if (bulkRequestSetupSuccessful == false) { // if we fail on client.bulk() release the semaphore diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index f756c629b9832..5a3544377155c 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.bulk; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.util.SparseFixedBitSet; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ExceptionsHelper; @@ -494,7 +493,7 @@ void processBulkIndexIngestRequest(Task task, BulkRequest original, ActionListen long ingestStartTimeInNanos = System.nanoTime(); BulkRequestModifier bulkRequestModifier = new BulkRequestModifier(original); ingestService.getPipelineExecutionService().executeBulkRequest(() -> bulkRequestModifier, (indexRequest, exception) -> { - logger.debug((Supplier) () -> new ParameterizedMessage("failed to execute pipeline [{}] for document [{}/{}/{}]", + logger.debug(() -> new ParameterizedMessage("failed to execute pipeline [{}] for document [{}/{}/{}]", indexRequest.getPipeline(), indexRequest.index(), indexRequest.type(), indexRequest.id()), exception); bulkRequestModifier.markCurrentItemAsFailed(exception); }, (exception) -> { diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index e66df2b0d9267..7221118d2ef50 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.DocWriteResponse; @@ -197,10 +196,10 @@ static BulkItemResponse createPrimaryResponse(BulkItemResultHolder bulkItemResul DocWriteRequest docWriteRequest = replicaRequest.request(); Exception failure = operationResult.getFailure(); if (isConflictException(failure)) { - logger.trace((Supplier) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", + logger.trace(() -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", request.shardId(), docWriteRequest.opType().getLowercase(), request), failure); } else { - logger.debug((Supplier) () -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", + logger.debug(() -> new ParameterizedMessage("{} failed to execute bulk item ({}) {}", request.shardId(), docWriteRequest.opType().getLowercase(), request), failure); } diff --git a/server/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java b/server/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java index 8353c5dc389d9..d15b7b92d62aa 100644 --- a/server/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java +++ b/server/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java @@ -20,7 +20,6 @@ package 
org.elasticsearch.action.get; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.TransportActions; @@ -95,7 +94,7 @@ protected MultiGetShardResponse shardOperation(MultiGetShardRequest request, Sha if (TransportActions.isShardNotAvailableException(e)) { throw (ElasticsearchException) e; } else { - logger.debug((Supplier) () -> new ParameterizedMessage("{} failed to execute multi_get for [{}]/[{}]", shardId, + logger.debug(() -> new ParameterizedMessage("{} failed to execute multi_get for [{}]/[{}]", shardId, item.type(), item.id()), e); response.add(request.locations.get(i), new MultiGetResponse.Failure(request.index(), item.type(), item.id(), e)); } diff --git a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java index d6d7cea7704fc..aad2638bd9de3 100644 --- a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.util.SetOnce; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; @@ -125,10 +124,7 @@ public final void executeNextPhase(SearchPhase currentPhase, SearchPhase nextPha final ShardOperationFailedException[] shardSearchFailures = ExceptionsHelper.groupBy(buildShardFailures()); Throwable cause = shardSearchFailures.length == 0 ? null : ElasticsearchException.guessRootCauses(shardSearchFailures[0].getCause())[0]; - if (logger.isDebugEnabled()) { - logger.debug((Supplier) () -> new ParameterizedMessage("All shards failed for phase: [{}]", getName()), - cause); - } + logger.debug(() -> new ParameterizedMessage("All shards failed for phase: [{}]", getName()), cause); onPhaseFailure(currentPhase, "all shards failed", cause); } else { Boolean allowPartialResults = request.allowPartialSearchResults(); @@ -138,9 +134,8 @@ public final void executeNextPhase(SearchPhase currentPhase, SearchPhase nextPha final ShardOperationFailedException[] shardSearchFailures = ExceptionsHelper.groupBy(buildShardFailures()); Throwable cause = shardSearchFailures.length == 0 ? 
null : ElasticsearchException.guessRootCauses(shardSearchFailures[0].getCause())[0]; - logger.debug((Supplier) () -> new ParameterizedMessage("{} shards failed for phase: [{}]", - shardSearchFailures.length, getName()), - cause); + logger.debug(() -> new ParameterizedMessage("{} shards failed for phase: [{}]", + shardSearchFailures.length, getName()), cause); } onPhaseFailure(currentPhase, "Partial shards failure", null); } else { @@ -160,10 +155,7 @@ private void executePhase(SearchPhase phase) { phase.run(); } catch (Exception e) { if (logger.isDebugEnabled()) { - logger.debug( - (Supplier) () -> new ParameterizedMessage( - "Failed to execute [{}] while moving to [{}] phase", request, phase.getName()), - e); + logger.debug(new ParameterizedMessage("Failed to execute [{}] while moving to [{}] phase", request, phase.getName()), e); } onPhaseFailure(phase, "", e); } diff --git a/server/src/main/java/org/elasticsearch/action/search/ClearScrollController.java b/server/src/main/java/org/elasticsearch/action/search/ClearScrollController.java index ac708d9b6b0c7..9b98691dc9005 100644 --- a/server/src/main/java/org/elasticsearch/action/search/ClearScrollController.java +++ b/server/src/main/java/org/elasticsearch/action/search/ClearScrollController.java @@ -20,7 +20,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; @@ -133,7 +132,7 @@ private void onFreedContext(boolean freed) { } private void onFailedFreedContext(Throwable e, DiscoveryNode node) { - logger.warn((Supplier) () -> new ParameterizedMessage("Clear SC failed on node[{}]", node), e); + logger.warn(() -> new ParameterizedMessage("Clear SC failed on node[{}]", node), e); if (expectedOps.countDown()) { listener.onResponse(new ClearScrollResponse(false, freedSearchContexts.get())); } else { diff --git a/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java b/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java index db0425db7c320..1d8d702520e4c 100644 --- a/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.search; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; @@ -87,10 +86,8 @@ protected void innerOnResponse(QuerySearchResult response) { @Override public void onFailure(Exception exception) { try { - if (context.getLogger().isDebugEnabled()) { - context.getLogger().debug((Supplier) () -> new ParameterizedMessage("[{}] Failed to execute query phase", - querySearchRequest.id()), exception); - } + context.getLogger().debug(() -> new ParameterizedMessage("[{}] Failed to execute query phase", + querySearchRequest.id()), exception); counter.onFailure(shardIndex, searchShardTarget, exception); } finally { // the query might not have been executed at all (for example because thread pool rejected diff --git a/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java index 
4712496bc37ec..920353abcf808 100644 --- a/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java @@ -21,7 +21,6 @@ import com.carrotsearch.hppc.IntArrayList; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.search.ScoreDoc; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.OriginalIndices; @@ -169,10 +168,7 @@ public void innerOnResponse(FetchSearchResult result) { @Override public void onFailure(Exception e) { try { - if (logger.isDebugEnabled()) { - logger.debug((Supplier) () -> new ParameterizedMessage("[{}] Failed to execute fetch phase", - fetchSearchRequest.id()), e); - } + logger.debug(() -> new ParameterizedMessage("[{}] Failed to execute fetch phase", fetchSearchRequest.id()), e); counter.onFailure(shardIndex, shardTarget, e); } finally { // the search context might not be cleared on the node where the fetch was executed for example diff --git a/server/src/main/java/org/elasticsearch/action/search/InitialSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/InitialSearchPhase.java index 01f31d4c7439f..559c7ca102e6b 100644 --- a/server/src/main/java/org/elasticsearch/action/search/InitialSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/InitialSearchPhase.java @@ -20,7 +20,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.NoShardAvailableActionException; import org.elasticsearch.action.support.TransportActions; import org.elasticsearch.cluster.routing.GroupShardsIterator; @@ -93,15 +92,10 @@ private void onShardFailure(final int shardIndex, @Nullable ShardRouting shard, if (totalOps.incrementAndGet() == expectedTotalOps) { if (logger.isDebugEnabled()) { if (e != null && !TransportActions.isShardNotAvailableException(e)) { - logger.debug( - (Supplier) () -> new ParameterizedMessage( - "{}: Failed to execute [{}]", - shard != null ? shard.shortSummary() : - shardIt.shardId(), - request), - e); + logger.debug(new ParameterizedMessage( + "{}: Failed to execute [{}]", shard != null ? shard.shortSummary() : shardIt.shardId(), request), e); } else if (logger.isTraceEnabled()) { - logger.trace((Supplier) () -> new ParameterizedMessage("{}: Failed to execute [{}]", shard, request), e); + logger.trace(new ParameterizedMessage("{}: Failed to execute [{}]", shard, request), e); } } onPhaseDone(); @@ -109,13 +103,9 @@ private void onShardFailure(final int shardIndex, @Nullable ShardRouting shard, final ShardRouting nextShard = shardIt.nextOrNull(); final boolean lastShard = nextShard == null; // trace log this exception - logger.trace( - (Supplier) () -> new ParameterizedMessage( - "{}: Failed to execute [{}] lastShard [{}]", - shard != null ? shard.shortSummary() : shardIt.shardId(), - request, - lastShard), - e); + logger.trace(() -> new ParameterizedMessage( + "{}: Failed to execute [{}] lastShard [{}]", + shard != null ? 
shard.shortSummary() : shardIt.shardId(), request, lastShard), e); if (!lastShard) { performPhaseOnShard(shardIndex, shardIt, nextShard); } else { @@ -123,14 +113,9 @@ private void onShardFailure(final int shardIndex, @Nullable ShardRouting shard, // no more shards active, add a failure if (logger.isDebugEnabled() && !logger.isTraceEnabled()) { // do not double log this exception if (e != null && !TransportActions.isShardNotAvailableException(e)) { - logger.debug( - (Supplier) () -> new ParameterizedMessage( - "{}: Failed to execute [{}] lastShard [{}]", - shard != null ? shard.shortSummary() : - shardIt.shardId(), - request, - lastShard), - e); + logger.debug(new ParameterizedMessage( + "{}: Failed to execute [{}] lastShard [{}]", + shard != null ? shard.shortSummary() : shardIt.shardId(), request, lastShard), e); } } } diff --git a/server/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java b/server/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java index 10719fcb91c6a..c584db106992c 100644 --- a/server/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java @@ -91,13 +91,8 @@ public void onFailure(Exception e) { try { channel.sendResponse(e); } catch (Exception e1) { - logger.warn( - (org.apache.logging.log4j.util.Supplier) - () -> new ParameterizedMessage( - "Failed to send error response for action [{}] and request [{}]", - actionName, - request), - e1); + logger.warn(() -> new ParameterizedMessage( + "Failed to send error response for action [{}] and request [{}]", actionName, request), e1); } } }); diff --git a/server/src/main/java/org/elasticsearch/action/support/ThreadedActionListener.java b/server/src/main/java/org/elasticsearch/action/support/ThreadedActionListener.java index 759693e550e1e..dfcf6445abf7d 100644 --- a/server/src/main/java/org/elasticsearch/action/support/ThreadedActionListener.java +++ b/server/src/main/java/org/elasticsearch/action/support/ThreadedActionListener.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.client.Client; import org.elasticsearch.client.transport.TransportClient; @@ -120,8 +119,7 @@ protected void doRun() throws Exception { @Override public void onFailure(Exception e) { - logger.warn( - (Supplier) () -> new ParameterizedMessage("failed to execute failure callback on [{}]", listener), e); + logger.warn(() -> new ParameterizedMessage("failed to execute failure callback on [{}]", listener), e); } }); } diff --git a/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastResponse.java b/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastResponse.java index ce812644faea6..47bc50be330b6 100644 --- a/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastResponse.java +++ b/server/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastResponse.java @@ -25,7 +25,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ConstructingObjectParser; -import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import 
org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.action.RestActions; @@ -40,7 +40,7 @@ /** * Base class for all broadcast operation based responses. */ -public class BroadcastResponse extends ActionResponse implements ToXContentFragment { +public class BroadcastResponse extends ActionResponse implements ToXContentObject { public static final DefaultShardOperationFailedException[] EMPTY = new DefaultShardOperationFailedException[0]; @@ -149,7 +149,16 @@ public void writeTo(StreamOutput out) throws IOException { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); RestActions.buildBroadcastShardsHeader(builder, params, this); + addCustomXContentFields(builder, params); + builder.endObject(); return builder; } + + /** + * Override in subclass to add custom fields following the common `_shards` field + */ + protected void addCustomXContentFields(XContentBuilder builder, Params params) throws IOException { + } } diff --git a/server/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java b/server/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java index 53764f4ee88d6..0961ab74c4703 100644 --- a/server/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java @@ -222,13 +222,8 @@ void onOperation(@Nullable ShardRouting shard, final ShardIterator shardIt, int if (e != null) { if (logger.isTraceEnabled()) { if (!TransportActions.isShardNotAvailableException(e)) { - logger.trace( - (org.apache.logging.log4j.util.Supplier) - () -> new ParameterizedMessage( - "{}: failed to execute [{}]", - shard != null ? shard.shortSummary() : shardIt.shardId(), - request), - e); + logger.trace(new ParameterizedMessage( + "{}: failed to execute [{}]", shard != null ? shard.shortSummary() : shardIt.shardId(), request), e); } } } @@ -237,13 +232,8 @@ void onOperation(@Nullable ShardRouting shard, final ShardIterator shardIt, int if (logger.isDebugEnabled()) { if (e != null) { if (!TransportActions.isShardNotAvailableException(e)) { - logger.debug( - (org.apache.logging.log4j.util.Supplier) - () -> new ParameterizedMessage( - "{}: failed to execute [{}]", - shard != null ? shard.shortSummary() : shardIt.shardId(), - request), - e); + logger.debug(new ParameterizedMessage( + "{}: failed to execute [{}]", shard != null ? 
diff --git a/server/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java b/server/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java
index 53764f4ee88d6..0961ab74c4703 100644
--- a/server/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java
+++ b/server/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java
@@ -222,13 +222,8 @@ void onOperation(@Nullable ShardRouting shard, final ShardIterator shardIt, int
             if (e != null) {
                 if (logger.isTraceEnabled()) {
                     if (!TransportActions.isShardNotAvailableException(e)) {
-                        logger.trace(
-                            (org.apache.logging.log4j.util.Supplier<?>)
-                                () -> new ParameterizedMessage(
-                                    "{}: failed to execute [{}]",
-                                    shard != null ? shard.shortSummary() : shardIt.shardId(),
-                                    request),
-                            e);
+                        logger.trace(new ParameterizedMessage(
+                            "{}: failed to execute [{}]", shard != null ? shard.shortSummary() : shardIt.shardId(), request), e);
                     }
                 }
             }
@@ -237,13 +232,8 @@ void onOperation(@Nullable ShardRouting shard, final ShardIterator shardIt, int
             if (logger.isDebugEnabled()) {
                 if (e != null) {
                     if (!TransportActions.isShardNotAvailableException(e)) {
-                        logger.debug(
-                            (org.apache.logging.log4j.util.Supplier<?>)
-                                () -> new ParameterizedMessage(
-                                    "{}: failed to execute [{}]",
-                                    shard != null ? shard.shortSummary() : shardIt.shardId(),
-                                    request),
-                            e);
+                        logger.debug(new ParameterizedMessage(
+                            "{}: failed to execute [{}]", shard != null ? shard.shortSummary() : shardIt.shardId(), request), e);
                     }
                 }
             }
diff --git a/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java b/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java
index b6eaa5163c865..ff4e73acc1877 100644
--- a/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java
+++ b/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java
@@ -362,9 +362,7 @@ protected void onNodeResponse(DiscoveryNode node, int nodeIndex, NodeResponse re
         protected void onNodeFailure(DiscoveryNode node, int nodeIndex, Throwable t) {
             String nodeId = node.getId();
             if (logger.isDebugEnabled() && !(t instanceof NodeShouldNotConnectException)) {
-                logger.debug(
-                    (org.apache.logging.log4j.util.Supplier<?>)
-                        () -> new ParameterizedMessage("failed to execute [{}] on node [{}]", actionName, nodeId), t);
+                logger.debug(new ParameterizedMessage("failed to execute [{}] on node [{}]", actionName, nodeId), t);
             }
 
             // this is defensive to protect against the possibility of double invocation
@@ -441,23 +439,13 @@ private void onShardOperation(final NodeRequest request, final Object[] shardRes
                 shardResults[shardIndex] = failure;
                 if (TransportActions.isShardNotAvailableException(e)) {
                     if (logger.isTraceEnabled()) {
-                        logger.trace(
-                            (org.apache.logging.log4j.util.Supplier<?>)
-                                () -> new ParameterizedMessage(
-                                    "[{}] failed to execute operation for shard [{}]",
-                                    actionName,
-                                    shardRouting.shortSummary()),
-                            e);
+                        logger.trace(new ParameterizedMessage(
+                            "[{}] failed to execute operation for shard [{}]", actionName, shardRouting.shortSummary()), e);
                     }
                 } else {
                     if (logger.isDebugEnabled()) {
-                        logger.debug(
-                            (org.apache.logging.log4j.util.Supplier<?>)
-                                () -> new ParameterizedMessage(
-                                    "[{}] failed to execute operation for shard [{}]",
-                                    actionName,
-                                    shardRouting.shortSummary()),
-                            e);
+                        logger.debug(new ParameterizedMessage(
+                            "[{}] failed to execute operation for shard [{}]", actionName, shardRouting.shortSummary()), e);
                     }
                 }
             }
diff --git a/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java b/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java
index 3f72f41345a3d..4f24a523f762d 100644
--- a/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java
+++ b/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java
@@ -154,7 +154,7 @@ public void onResponse(Response response) {
                 public void onFailure(Exception t) {
                     if (t instanceof Discovery.FailedToCommitClusterStateException
                             || (t instanceof NotMasterException)) {
-                        logger.debug((org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage("master could not publish cluster state or stepped down before publishing action [{}], scheduling a retry", actionName), t);
+                        logger.debug(() -> new ParameterizedMessage("master could not publish cluster state or stepped down before publishing action [{}], scheduling a retry", actionName), t);
                         retry(t, masterChangePredicate);
                     } else {
                         listener.onFailure(t);
@@ -209,7 +209,7 @@ public void onClusterServiceClose() {
 
                 @Override
                 public void onTimeout(TimeValue timeout) {
-                    logger.debug((org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage("timed out while retrying [{}] after failure (timeout [{}])", actionName, timeout), failure);
+                    logger.debug(() -> new ParameterizedMessage("timed out while retrying [{}] after failure (timeout [{}])", actionName, timeout), failure);
                     listener.onFailure(new MasterNotDiscoveredException(failure));
                 }
             }, statePredicate
diff --git a/server/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java b/server/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java
index 4583e47bc1db7..0b61c7ed71247 100644
--- a/server/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java
+++ b/server/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java
@@ -232,9 +232,7 @@ private void onOperation(int idx, NodeResponse nodeResponse) {
 
         private void onFailure(int idx, String nodeId, Throwable t) {
             if (logger.isDebugEnabled() && !(t instanceof NodeShouldNotConnectException)) {
-                logger.debug(
-                    (org.apache.logging.log4j.util.Supplier<?>)
-                        () -> new ParameterizedMessage("failed to execute on node [{}]", nodeId), t);
+                logger.debug(new ParameterizedMessage("failed to execute on node [{}]", nodeId), t);
             }
             responses.set(idx, new FailedNodeException(nodeId, "Failed node [" + nodeId + "]", t));
             if (counter.incrementAndGet() == responses.length()) {
diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java
index c29ca5c1d0853..340496ca35363 100644
--- a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java
+++ b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java
@@ -178,7 +178,7 @@ public void onResponse(ReplicaResponse response) {
 
             @Override
             public void onFailure(Exception replicaException) {
-                logger.trace((org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
+                logger.trace(() -> new ParameterizedMessage(
                     "[{}] failure while performing [{}] on replica {}, request [{}]",
                     shard.shardId(), opType, shard, replicaRequest), replicaException);
                 // Only report "critical" exceptions - TODO: Reach out to the master node to get the latest shard state then report.
diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java
index 2cd5f7a5f13ac..aca8ed4973263 100644
--- a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java
+++ b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java
@@ -265,9 +265,7 @@ public void onFailure(Exception e) {
                     channel.sendResponse(e);
                 } catch (Exception inner) {
                     inner.addSuppressed(e);
-                    logger.warn(
-                        (org.apache.logging.log4j.util.Supplier<?>)
-                            () -> new ParameterizedMessage("Failed to send response for {}", actionName), inner);
+                    logger.warn(() -> new ParameterizedMessage("Failed to send response for {}", actionName), inner);
                 }
             }
         });
@@ -579,7 +577,6 @@ public void onResponse(Releasable releasable) {
         public void onFailure(Exception e) {
             if (e instanceof RetryOnReplicaException) {
                 logger.trace(
-                    (org.apache.logging.log4j.util.Supplier<?>)
                         () -> new ParameterizedMessage(
                             "Retrying operation on replica, action [{}], request [{}]",
                             transportReplicaAction,
@@ -621,12 +618,8 @@ protected void responseWithFailure(Exception e) {
                 channel.sendResponse(e);
             } catch (IOException responseException) {
                 responseException.addSuppressed(e);
-                logger.warn(
-                    (org.apache.logging.log4j.util.Supplier<?>)
-                        () -> new ParameterizedMessage(
-                            "failed to send error message back to client for action [{}]",
-                            transportReplicaAction),
-                    responseException);
+                logger.warn(() -> new ParameterizedMessage(
+                    "failed to send error message back to client for action [{}]", transportReplicaAction), responseException);
             }
         }
 
@@ -854,12 +847,9 @@ public void handleException(TransportException exp) {
                 final Throwable cause = exp.unwrapCause();
                 if (cause instanceof ConnectTransportException || cause instanceof NodeClosedException ||
                     (isPrimaryAction && retryPrimaryException(cause))) {
-                    logger.trace(
-                        (org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
+                    logger.trace(() -> new ParameterizedMessage(
                             "received an error from node [{}] for request [{}], scheduling a retry",
-                            node.getId(),
-                            requestToPerform),
-                        exp);
+                            node.getId(), requestToPerform), exp);
                     retry(exp);
                 } else {
                     finishAsFailed(exp);
@@ -903,9 +893,7 @@ public void onTimeout(TimeValue timeout) {
         void finishAsFailed(Exception failure) {
             if (finished.compareAndSet(false, true)) {
                 setPhase(task, "failed");
-                logger.trace(
-                    (org.apache.logging.log4j.util.Supplier<?>)
-                        () -> new ParameterizedMessage("operation failed. action [{}], request [{}]", actionName, request), failure);
+                logger.trace(() -> new ParameterizedMessage("operation failed. action [{}], request [{}]", actionName, request), failure);
                 listener.onFailure(failure);
             } else {
                 assert false : "finishAsFailed called but operation is already finished";
@@ -913,13 +901,9 @@ void finishAsFailed(Exception failure) {
         }
 
         void finishWithUnexpectedFailure(Exception failure) {
-            logger.warn(
-                (org.apache.logging.log4j.util.Supplier<?>)
-                    () -> new ParameterizedMessage(
+            logger.warn(() -> new ParameterizedMessage(
                     "unexpected error during the primary phase for action [{}], request [{}]",
-                    actionName,
-                    request),
-                failure);
+                    actionName, request), failure);
             if (finished.compareAndSet(false, true)) {
                 setPhase(task, "failed");
                 listener.onFailure(failure);
diff --git a/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java b/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java
index 0b9069e9608f6..eae8ede4e1f38 100644
--- a/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java
+++ b/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java
@@ -188,10 +188,8 @@ public void handleException(TransportException exp) {
         }
 
         private void onFailure(ShardRouting shardRouting, Exception e) {
-            if (logger.isTraceEnabled() && e != null) {
-                logger.trace(
-                    (org.apache.logging.log4j.util.Supplier<?>)
-                        () -> new ParameterizedMessage("{}: failed to execute [{}]", shardRouting, internalRequest.request()), e);
+            if (e != null) {
+                logger.trace(() -> new ParameterizedMessage("{}: failed to execute [{}]", shardRouting, internalRequest.request()), e);
             }
             perform(e);
         }
@@ -208,11 +206,7 @@ private void perform(@Nullable final Exception currentFailure) {
                 if (failure == null || isShardNotAvailableException(failure)) {
                     failure = new NoShardAvailableActionException(null, LoggerMessageFormat.format("No shard available for [{}]", internalRequest.request()), failure);
                 } else {
-                    if (logger.isDebugEnabled()) {
-                        logger.debug(
-                            (org.apache.logging.log4j.util.Supplier<?>)
-                                () -> new ParameterizedMessage("{}: failed to execute [{}]", null, internalRequest.request()), failure);
-                    }
+                    logger.debug(() -> new ParameterizedMessage("{}: failed to execute [{}]", null, internalRequest.request()), failure);
                 }
                 listener.onFailure(failure);
                 return;
diff --git a/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java b/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java
index 35b2b41dfda6e..aad7d20073c3b 100644
--- a/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java
+++ b/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java
@@ -315,9 +315,7 @@ private void onOperation(int idx, NodeTasksResponse nodeResponse) {
 
         private void onFailure(int idx, String nodeId, Throwable t) {
             if (logger.isDebugEnabled() && !(t instanceof NodeShouldNotConnectException)) {
-                logger.debug(
-                    (org.apache.logging.log4j.util.Supplier<?>)
-                        () -> new ParameterizedMessage("failed to execute on node [{}]", nodeId), t);
+                logger.debug(new ParameterizedMessage("failed to execute on node [{}]", nodeId), t);
             }
 
             responses.set(idx, new FailedNodeException(nodeId, "Failed node [" + nodeId + "]", t));
diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java b/server/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java
index 8c1d06113d684..b83ac3881fda5 100644
--- a/server/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java
+++ b/server/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java
@@ -20,7 +20,6 @@
 package org.elasticsearch.action.termvectors;
 
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.TransportActions;
@@ -89,7 +88,7 @@ protected MultiTermVectorsShardResponse shardOperation(MultiTermVectorsShardRequ
                 if (TransportActions.isShardNotAvailableException(t)) {
                     throw (ElasticsearchException) t;
                 } else {
-                    logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} failed to execute multi term vectors for [{}]/[{}]", shardId, termVectorsRequest.type(), termVectorsRequest.id()), t);
+                    logger.debug(() -> new ParameterizedMessage("{} failed to execute multi term vectors for [{}]/[{}]", shardId, termVectorsRequest.type(), termVectorsRequest.id()), t);
                     response.add(request.locations.get(i),
                         new MultiTermVectorsResponse.Failure(request.index(), termVectorsRequest.type(), termVectorsRequest.id(), t));
                 }
diff --git a/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java b/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java
index 5335b4be8b4e2..19fdb8837d69b 100644
--- a/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java
+++ b/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java
@@ -21,7 +21,6 @@
 
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.apache.lucene.util.Constants;
 import org.elasticsearch.common.SuppressForbidden;
 import org.elasticsearch.common.io.PathUtils;
@@ -428,15 +427,11 @@ long getMaxMapCount(Logger logger) {
                     try {
                         return parseProcSysVmMaxMapCount(rawProcSysVmMaxMapCount);
                     } catch (final NumberFormatException e) {
-                        logger.warn(
-                            (Supplier<?>) () -> new ParameterizedMessage(
-                                "unable to parse vm.max_map_count [{}]",
-                                rawProcSysVmMaxMapCount),
-                            e);
+                        logger.warn(() -> new ParameterizedMessage("unable to parse vm.max_map_count [{}]", rawProcSysVmMaxMapCount), e);
                     }
                 }
             } catch (final IOException e) {
-                logger.warn((Supplier<?>) () -> new ParameterizedMessage("I/O exception while trying to read [{}]", path), e);
+                logger.warn(() -> new ParameterizedMessage("I/O exception while trying to read [{}]", path), e);
             }
             return -1;
         }
diff --git a/server/src/main/java/org/elasticsearch/bootstrap/ElasticsearchUncaughtExceptionHandler.java b/server/src/main/java/org/elasticsearch/bootstrap/ElasticsearchUncaughtExceptionHandler.java
index 6869a6abb710f..857ff65b6c2b8 100644
--- a/server/src/main/java/org/elasticsearch/bootstrap/ElasticsearchUncaughtExceptionHandler.java
+++ b/server/src/main/java/org/elasticsearch/bootstrap/ElasticsearchUncaughtExceptionHandler.java
@@ -71,15 +71,12 @@ static boolean isFatalUncaught(Throwable e) {
 
     void onFatalUncaught(final String threadName, final Throwable t) {
         final Logger logger = Loggers.getLogger(ElasticsearchUncaughtExceptionHandler.class, loggingPrefixSupplier.get());
-        logger.error(
-            (org.apache.logging.log4j.util.Supplier<?>)
-                () -> new ParameterizedMessage("fatal error in thread [{}], exiting", threadName), t);
+        logger.error(() -> new ParameterizedMessage("fatal error in thread [{}], exiting", threadName), t);
     }
 
     void onNonFatalUncaught(final String threadName, final Throwable t) {
         final Logger logger = Loggers.getLogger(ElasticsearchUncaughtExceptionHandler.class, loggingPrefixSupplier.get());
-        logger.warn((org.apache.logging.log4j.util.Supplier<?>)
-            () -> new ParameterizedMessage("uncaught exception in thread [{}]", threadName), t);
+        logger.warn(() -> new ParameterizedMessage("uncaught exception in thread [{}]", threadName), t);
     }
 
     void halt(int status) {
diff --git a/server/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java b/server/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java
index 5d31e74bef621..109efb400bc93 100644
--- a/server/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java
+++ b/server/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java
@@ -373,7 +373,7 @@ protected List<DiscoveryNode> validateNewNodes(Set<DiscoveryNode> nodes) {
                     transportService.connectToNode(node);
                 } catch (Exception e) {
                     it.remove();
-                    logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to connect to discovered node [{}]", node), e);
+                    logger.debug(() -> new ParameterizedMessage("failed to connect to discovered node [{}]", node), e);
                 }
             }
         }
@@ -428,13 +428,10 @@ public LivenessResponse newInstance() {
                             nodeWithInfo.getAttributes(), nodeWithInfo.getRoles(), nodeWithInfo.getVersion()));
                     }
                 } catch (ConnectTransportException e) {
-                    logger.debug(
-                        (Supplier<?>)
-                            () -> new ParameterizedMessage("failed to connect to node [{}], ignoring...", listedNode), e);
+                    logger.debug(() -> new ParameterizedMessage("failed to connect to node [{}], ignoring...", listedNode), e);
                     hostFailureListener.onNodeDisconnected(listedNode, e);
                 } catch (Exception e) {
-                    logger.info(
-                        (Supplier<?>) () -> new ParameterizedMessage("failed to get node info for {}, disconnecting...", listedNode), e);
+                    logger.info(() -> new ParameterizedMessage("failed to get node info for {}, disconnecting...", listedNode), e);
                 }
             }
 
@@ -481,12 +478,10 @@ void onDone() {
                 public void onFailure(Exception e) {
                     onDone();
                     if (e instanceof ConnectTransportException) {
-                        logger.debug((Supplier<?>)
-                            () -> new ParameterizedMessage("failed to connect to node [{}], ignoring...", nodeToPing), e);
+                        logger.debug(() -> new ParameterizedMessage("failed to connect to node [{}], ignoring...", nodeToPing), e);
                         hostFailureListener.onNodeDisconnected(nodeToPing, e);
                     } else {
-                        logger.info(
-                            (Supplier<?>) () -> new ParameterizedMessage(
+                        logger.info(() -> new ParameterizedMessage(
                                 "failed to get local cluster state info for {}, disconnecting...", nodeToPing), e);
                     }
                 }
@@ -530,8 +525,7 @@ public void handleResponse(ClusterStateResponse response) {
 
                     @Override
                     public void handleException(TransportException e) {
-                        logger.info(
-                            (Supplier<?>) () -> new ParameterizedMessage(
+                        logger.info(() -> new ParameterizedMessage(
                                 "failed to get local cluster state for {}, disconnecting...", nodeToPing), e);
                         try {
                             hostFailureListener.onNodeDisconnected(nodeToPing, e);
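Reviewer note: the uncaught-exception handler above is a standard use of Thread.UncaughtExceptionHandler. A self-contained sketch of the same pattern (class name and the "fatal == Error" test are simplifications of what ElasticsearchUncaughtExceptionHandler actually checks):

public class LoggingExceptionHandler implements Thread.UncaughtExceptionHandler {
    @Override
    public void uncaughtException(Thread t, Throwable e) {
        if (e instanceof Error) {
            System.err.println("fatal error in thread [" + t.getName() + "], exiting");
            e.printStackTrace();
            Runtime.getRuntime().halt(1); // skip shutdown hooks; state may be corrupt
        } else {
            System.err.println("uncaught exception in thread [" + t.getName() + "]");
            e.printStackTrace();
        }
    }

    public static void main(String[] args) {
        Thread.setDefaultUncaughtExceptionHandler(new LoggingExceptionHandler());
        throw new RuntimeException("demo"); // routed to the handler above
    }
}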
diff --git a/server/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java b/server/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java
index aab75eb2aad7b..998cd5ba0a870 100644
--- a/server/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java
+++ b/server/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java
@@ -19,7 +19,6 @@
 package org.elasticsearch.cluster;
 
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.node.DiscoveryNodes;
 import org.elasticsearch.common.component.AbstractLifecycleComponent;
@@ -98,7 +97,7 @@ public void onFailure(Exception e) {
                     // will try again after `cluster.nodes.reconnect_interval` on all nodes but the current master.
                     // On the master, node fault detection will remove these nodes from the cluster as they are not
                     // connected. Note that it is very rare that we end up here on the master.
-                    logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to connect to {}", node), e);
+                    logger.warn(() -> new ParameterizedMessage("failed to connect to {}", node), e);
                 }
 
                 @Override
@@ -137,7 +136,7 @@ public void disconnectFromNodesExcept(DiscoveryNodes nodesToKeep) {
                 try {
                     transportService.disconnectFromNode(node);
                 } catch (Exception e) {
-                    logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to disconnect to node [{}]", node), e);
+                    logger.warn(() -> new ParameterizedMessage("failed to disconnect to node [{}]", node), e);
                 }
             }
         }
@@ -160,9 +159,7 @@ void validateAndConnectIfNeeded(DiscoveryNode node) {
                 // log every 6th failure
                 if ((nodeFailureCount % 6) == 1) {
                     final int finalNodeFailureCount = nodeFailureCount;
-                    logger.warn(
-                        (Supplier<?>)
-                            () -> new ParameterizedMessage(
+                    logger.warn(() -> new ParameterizedMessage(
                             "failed to connect to node {} (tried [{}] times)", node, finalNodeFailureCount), e);
                 }
                 nodes.put(node, nodeFailureCount);
diff --git a/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java
index f29841e3744a9..915e900b9ddf1 100644
--- a/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java
+++ b/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java
@@ -21,7 +21,6 @@
 
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.Version;
@@ -205,7 +204,7 @@ private static class ShardFailedTransportHandler implements TransportRequestHand
 
         @Override
         public void messageReceived(FailedShardEntry request, TransportChannel channel) throws Exception {
-            logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} received shard failed for {}", request.shardId, request), request.failure);
+            logger.debug(() -> new ParameterizedMessage("{} received shard failed for {}", request.shardId, request), request.failure);
             clusterService.submitStateUpdateTask(
                 "shard-failed",
                 request,
@@ -214,12 +213,12 @@ public void messageReceived(FailedShardEntry request, TransportChannel channel)
                 new ClusterStateTaskListener() {
                     @Override
                     public void onFailure(String source, Exception e) {
-                        logger.error((Supplier<?>) () -> new ParameterizedMessage("{} unexpected failure while failing shard [{}]", request.shardId, request), e);
+                        logger.error(() -> new ParameterizedMessage("{} unexpected failure while failing shard [{}]", request.shardId, request), e);
                         try {
                             channel.sendResponse(e);
                         } catch (Exception channelException) {
                             channelException.addSuppressed(e);
-                            logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} failed to send failure [{}] while failing shard [{}]", request.shardId, e, request), channelException);
+                            logger.warn(() -> new ParameterizedMessage("{} failed to send failure [{}] while failing shard [{}]", request.shardId, e, request), channelException);
                         }
                     }
 
@@ -229,7 +228,7 @@ public void onNoLongerMaster(String source) {
                         try {
                             channel.sendResponse(new NotMasterException(source));
                         } catch (Exception channelException) {
-                            logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} failed to send no longer master while failing shard [{}]", request.shardId, request), channelException);
+                            logger.warn(() -> new ParameterizedMessage("{} failed to send no longer master while failing shard [{}]", request.shardId, request), channelException);
                         }
                     }
 
@@ -238,7 +237,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS
                         try {
                             channel.sendResponse(TransportResponse.Empty.INSTANCE);
                         } catch (Exception channelException) {
                            logger.warn(() -> new ParameterizedMessage("{} failed to send response while failing shard [{}]", request.shardId, request), channelException);
                         }
                     }
                 }
@@ -323,7 +322,7 @@ public ClusterTasksResult execute(ClusterState currentState, L
                 maybeUpdatedState = applyFailedShards(currentState, failedShardsToBeApplied, staleShardsToBeApplied);
                 batchResultBuilder.successes(tasksToBeApplied);
             } catch (Exception e) {
-                logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to apply failed shards {}", failedShardsToBeApplied), e);
+                logger.warn(() -> new ParameterizedMessage("failed to apply failed shards {}", failedShardsToBeApplied), e);
                 // failures are communicated back to the requester
                 // cluster state will not be updated in this case
                 batchResultBuilder.failures(tasksToBeApplied, e);
@@ -501,7 +500,7 @@ public ClusterTasksResult execute(ClusterState currentState,
                 maybeUpdatedState = allocationService.applyStartedShards(currentState, shardRoutingsToBeApplied);
                 builder.successes(tasksToBeApplied);
             } catch (Exception e) {
-                logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to apply started shards {}", shardRoutingsToBeApplied), e);
+                logger.warn(() -> new ParameterizedMessage("failed to apply started shards {}", shardRoutingsToBeApplied), e);
                 builder.failures(tasksToBeApplied, e);
             }
 
@@ -510,7 +509,7 @@ public ClusterTasksResult execute(ClusterState currentState,
 
         @Override
         public void onFailure(String source, Exception e) {
-            logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
+            logger.error(() -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
         }
     }
diff --git a/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java b/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java
index 9e05d50831882..ee4779bc8c514 100644
--- a/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java
+++ b/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java
@@ -20,6 +20,7 @@
 package org.elasticsearch.cluster.block;
 
 import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+
 import org.elasticsearch.cluster.AbstractDiffable;
 import org.elasticsearch.cluster.Diff;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
@@ -30,6 +31,7 @@
 import org.elasticsearch.rest.RestStatus;
 
 import java.io.IOException;
+import java.util.EnumMap;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
@@ -53,7 +55,7 @@ public class ClusterBlocks extends AbstractDiffable<ClusterBlocks> {
 
     private final ImmutableOpenMap<String, Set<ClusterBlock>> indicesBlocks;
 
-    private final ImmutableLevelHolder[] levelHolders;
+    private final EnumMap<ClusterBlockLevel, ImmutableLevelHolder> levelHolders;
 
     ClusterBlocks(Set<ClusterBlock> global, ImmutableOpenMap<String, Set<ClusterBlock>> indicesBlocks) {
         this.global = global;
@@ -70,20 +72,20 @@ public ImmutableOpenMap<String, Set<ClusterBlock>> indices() {
     }
 
     public Set<ClusterBlock> global(ClusterBlockLevel level) {
-        return levelHolders[level.ordinal()].global();
+        return levelHolders.get(level).global();
     }
 
     public ImmutableOpenMap<String, Set<ClusterBlock>> indices(ClusterBlockLevel level) {
-        return levelHolders[level.ordinal()].indices();
+        return levelHolders.get(level).indices();
     }
 
     private Set<ClusterBlock> blocksForIndex(ClusterBlockLevel level, String index) {
        return indices(level).getOrDefault(index, emptySet());
     }
 
-    private static ImmutableLevelHolder[] generateLevelHolders(Set<ClusterBlock> global,
-                                                               ImmutableOpenMap<String, Set<ClusterBlock>> indicesBlocks) {
-        ImmutableLevelHolder[] levelHolders = new ImmutableLevelHolder[ClusterBlockLevel.values().length];
+    private static EnumMap<ClusterBlockLevel, ImmutableLevelHolder> generateLevelHolders(Set<ClusterBlock> global,
+            ImmutableOpenMap<String, Set<ClusterBlock>> indicesBlocks) {
+        EnumMap<ClusterBlockLevel, ImmutableLevelHolder> levelHolders = new EnumMap<>(ClusterBlockLevel.class);
         for (final ClusterBlockLevel level : ClusterBlockLevel.values()) {
             Predicate<ClusterBlock> containsLevel = block -> block.contains(level);
             Set<ClusterBlock> newGlobal = unmodifiableSet(global.stream()
@@ -96,8 +98,7 @@ private static ImmutableLevelHolder[] generateLevelHolders(Set<ClusterBlock> glo
                     .filter(containsLevel)
                     .collect(toSet())));
             }
-
-            levelHolders[level.ordinal()] = new ImmutableLevelHolder(newGlobal, indicesBuilder.build());
+            levelHolders.put(level, new ImmutableLevelHolder(newGlobal, indicesBuilder.build()));
         }
         return levelHolders;
     }
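Reviewer note: replacing the ordinal-indexed array with an EnumMap keeps constant-time lookups (EnumMap is array-backed internally) while keying directly on the enum constants, so the code no longer depends on ordinal() and cannot silently break if constants are reordered. A minimal standalone sketch (the Level enum here is hypothetical):

import java.util.EnumMap;

public class EnumMapExample {
    enum Level { READ, WRITE, METADATA_READ, METADATA_WRITE }

    public static void main(String[] args) {
        // Before: holders[level.ordinal()] — correctness tied to declaration order.
        // After: the map keys on the constant itself.
        EnumMap<Level, String> holders = new EnumMap<>(Level.class);
        for (Level level : Level.values()) {
            holders.put(level, "holder for " + level);
        }
        System.out.println(holders.get(Level.WRITE));
    }
}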
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java
index d394c2c7d1479..4f5dade2219df 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java
@@ -23,7 +23,6 @@
 import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.apache.lucene.util.CollectionUtil;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.ResourceAlreadyExistsException;
@@ -555,9 +554,9 @@ public ClusterState execute(ClusterState currentState) throws Exception {
         @Override
         public void onFailure(String source, Exception e) {
             if (e instanceof ResourceAlreadyExistsException) {
-                logger.trace((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to create", request.index()), e);
+                logger.trace(() -> new ParameterizedMessage("[{}] failed to create", request.index()), e);
             } else {
-                logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to create", request.index()), e);
+                logger.debug(() -> new ParameterizedMessage("[{}] failed to create", request.index()), e);
             }
             super.onFailure(source, e);
         }
@@ -695,9 +694,9 @@ static void prepareResizeIndexSettings(ClusterState currentState, Set<String> ma
         }
 
         final Predicate<String> sourceSettingsPredicate = (s) -> s.startsWith("index.similarity.")
-            || s.startsWith("index.analysis.") || s.startsWith("index.sort.");
+            || s.startsWith("index.analysis.") || s.startsWith("index.sort.") || s.equals("index.mapping.single_type");
         indexSettingsBuilder
-            // now copy all similarity / analysis / sort settings - this overrides all settings from the user unless they
+            // now copy all similarity / analysis / sort / single_type settings - this overrides all settings from the user unless they
            // wanna add extra settings
            .put(IndexMetaData.SETTING_VERSION_CREATED, sourceMetaData.getCreationVersion())
            .put(IndexMetaData.SETTING_VERSION_UPGRADED, sourceMetaData.getUpgradedVersion())
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java
index ea1dea06b6875..7230c44906d77 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java
@@ -19,7 +19,6 @@
 package org.elasticsearch.cluster.metadata;
 
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.apache.lucene.analysis.Analyzer;
 import org.elasticsearch.Version;
 import org.elasticsearch.common.component.AbstractComponent;
@@ -207,7 +206,7 @@ IndexMetaData archiveBrokenIndexSettings(IndexMetaData indexMetaData) {
         final Settings upgrade = indexScopedSettings.archiveUnknownOrInvalidSettings(
             settings,
             e -> logger.warn("{} ignoring unknown index setting: [{}] with value [{}]; archiving", indexMetaData.getIndex(), e.getKey(), e.getValue()),
-            (e, ex) -> logger.warn((Supplier<?>) () -> new ParameterizedMessage("{} ignoring invalid index setting: [{}] with value [{}]; archiving", indexMetaData.getIndex(), e.getKey(), e.getValue()), ex));
+            (e, ex) -> logger.warn(() -> new ParameterizedMessage("{} ignoring invalid index setting: [{}] with value [{}]; archiving", indexMetaData.getIndex(), e.getKey(), e.getValue()), ex));
         if (upgrade != settings) {
             return IndexMetaData.builder(indexMetaData).settings(upgrade).build();
         } else {
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
index 2255081522891..16695488d2626 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
@@ -191,7 +191,7 @@ private boolean refreshIndexMapping(IndexService indexService, IndexMetaData.Bui
                 }
             }
         } catch (Exception e) {
-            logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to refresh-mapping in cluster state", index), e);
+            logger.warn(() -> new ParameterizedMessage("[{}] failed to refresh-mapping in cluster state", index), e);
         }
         return dirty;
     }
@@ -205,7 +205,7 @@ public void refreshMapping(final String index, final String indexUUID) {
             refreshTask,
             ClusterStateTaskConfig.build(Priority.HIGH),
             refreshExecutor,
-            (source, e) -> logger.warn((Supplier<?>) () -> new ParameterizedMessage("failure during [{}]", source), e)
+            (source, e) -> logger.warn(() -> new ParameterizedMessage("failure during [{}]", source), e)
         );
     }
diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java
index 1c3d629a72fea..0bcefa9fc7248 100644
--- a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java
+++ b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java
@@ -20,7 +20,6 @@
 package org.elasticsearch.cluster.routing;
 
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.cluster.ClusterChangedEvent;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.ClusterStateUpdateTask;
@@ -109,16 +108,16 @@ public void onFailure(String source, Exception e) {
                     rerouting.set(false);
                     ClusterState state = clusterService.state();
                     if (logger.isTraceEnabled()) {
-                        logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}], current state:\n{}", source, state), e);
+                        logger.error(() -> new ParameterizedMessage("unexpected failure during [{}], current state:\n{}", source, state), e);
                     } else {
-                        logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}], current state version [{}]", source, state.version()), e);
+                        logger.error(() -> new ParameterizedMessage("unexpected failure during [{}], current state version [{}]", source, state.version()), e);
                     }
                 }
             });
         } catch (Exception e) {
             rerouting.set(false);
             ClusterState state = clusterService.state();
-            logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to reroute routing table, current state:\n{}", state), e);
+            logger.warn(() -> new ParameterizedMessage("failed to reroute routing table, current state:\n{}", state), e);
         }
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java b/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java
index ae79b779045f4..01fa5837387c8 100644
--- a/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java
+++ b/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java
@@ -316,7 +316,7 @@ public void runOnApplierThread(final String source, Consumer<ClusterState> clust
     }
 
     @Override
-    public void onNewClusterState(final String source, final java.util.function.Supplier<ClusterState> clusterStateSupplier,
+    public void onNewClusterState(final String source, final Supplier<ClusterState> clusterStateSupplier,
                                   final ClusterStateTaskListener listener) {
         Function<ClusterState, ClusterState> applyFunction = currentState -> {
             ClusterState nextState = clusterStateSupplier.get();
@@ -401,7 +401,7 @@ protected void runTask(UpdateTask task) {
         } catch (Exception e) {
             TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS)));
             if (logger.isTraceEnabled()) {
-                logger.trace(new ParameterizedMessage(
+                logger.trace(() -> new ParameterizedMessage(
                     "failed to execute cluster state applier in [{}], state:\nversion [{}], source [{}]\n{}{}{}",
                     executionTime,
                     previousClusterState.version(),
@@ -439,8 +439,7 @@ protected void runTask(UpdateTask task) {
             final long version = newClusterState.version();
             final String stateUUID = newClusterState.stateUUID();
             final String fullState = newClusterState.toString();
-            logger.warn(
-                (org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
+            logger.warn(() -> new ParameterizedMessage(
                     "failed to apply updated cluster state in [{}]:\nversion [{}], uuid [{}], source [{}]\n{}",
                     executionTime,
                     version,
diff --git a/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java b/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java
index 6858866d2dc88..20a6602b5c5ad 100644
--- a/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java
+++ b/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java
@@ -21,7 +21,6 @@
 
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
-import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.Assertions;
 import org.elasticsearch.cluster.AckedClusterStateTaskListener;
 import org.elasticsearch.cluster.ClusterChangedEvent;
@@ -226,10 +225,8 @@ protected void runTasks(TaskInputs taskInputs) {
                 clusterStatePublisher.accept(clusterChangedEvent, taskOutputs.createAckListener(threadPool, newClusterState));
             } catch (Discovery.FailedToCommitClusterStateException t) {
                 final long version = newClusterState.version();
-                logger.warn(
-                    (Supplier<?>) () -> new ParameterizedMessage(
-                        "failing [{}]: failed to commit cluster state version [{}]", summary, version),
-                    t);
+                logger.warn(() -> new ParameterizedMessage(
+                    "failing [{}]: failed to commit cluster state version [{}]", summary, version), t);
                 taskOutputs.publishingFailed(t);
                 return;
             }
@@ -239,11 +236,9 @@ protected void runTasks(TaskInputs taskInputs) {
             try {
                 taskOutputs.clusterStatePublished(clusterChangedEvent);
             } catch (Exception e) {
-                logger.error(
-                    (Supplier<?>) () -> new ParameterizedMessage(
+                logger.error(() -> new ParameterizedMessage(
                         "exception thrown while notifying executor of new cluster state publication [{}]",
-                        summary),
-                    e);
+                        summary), e);
             }
             TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS)));
             logger.debug("processing [{}]: took [{}] done publishing updated cluster state (version: {}, uuid: {})", summary,
@@ -255,8 +250,7 @@ protected void runTasks(TaskInputs taskInputs) {
             final long version = newClusterState.version();
             final String stateUUID = newClusterState.stateUUID();
             final String fullState = newClusterState.toString();
-            logger.warn(
-                (Supplier<?>) () -> new ParameterizedMessage(
+            logger.warn(() -> new ParameterizedMessage(
                     "failed to publish updated cluster state in [{}]:\nversion [{}], uuid [{}], source [{}]\n{}",
                     executionTime,
                     version,
@@ -473,8 +467,7 @@ public void onFailure(String source, Exception e) {
                 listener.onFailure(source, e);
             } catch (Exception inner) {
                 inner.addSuppressed(e);
-                logger.error(
-                    (Supplier<?>) () -> new ParameterizedMessage(
+                logger.error(() -> new ParameterizedMessage(
                         "exception thrown by listener notifying of failure from [{}]", source), inner);
             }
         }
@@ -484,8 +477,7 @@ public void onNoLongerMaster(String source) {
             try {
                 listener.onNoLongerMaster(source);
             } catch (Exception e) {
-                logger.error(
-                    (Supplier<?>) () -> new ParameterizedMessage(
+                logger.error(() -> new ParameterizedMessage(
                         "exception thrown by listener while notifying no longer master from [{}]", source), e);
             }
         }
@@ -495,12 +487,9 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS
             try {
                 listener.clusterStateProcessed(source, oldState, newState);
             } catch (Exception e) {
-                logger.error(
-                    (Supplier<?>) () -> new ParameterizedMessage(
+                logger.error(() -> new ParameterizedMessage(
                         "exception thrown by listener while notifying of cluster state processed from [{}], old cluster state:\n" +
-                            "{}\nnew cluster state:\n{}",
-                        source, oldState, newState),
-                    e);
+                            "{}\nnew cluster state:\n{}", source, oldState, newState), e);
             }
         }
     }
@@ -614,10 +603,8 @@ public void onNodeAck(DiscoveryNode node, @Nullable Exception e) {
                 logger.trace("ack received from node [{}], cluster_state update (version: {})", node, clusterStateVersion);
             } else {
                 this.lastFailure = e;
-                logger.debug(
-                    (Supplier<?>) () -> new ParameterizedMessage(
-                        "ack received from node [{}], cluster_state update (version: {})", node, clusterStateVersion),
-                    e);
+                logger.debug(() -> new ParameterizedMessage(
+                    "ack received from node [{}], cluster_state update (version: {})", node, clusterStateVersion), e);
             }
 
             if (countDown.countDown()) {
@@ -650,7 +637,7 @@ protected ClusterTasksResult executeTasks(TaskInputs taskInputs, long st
             TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS)));
             if (logger.isTraceEnabled()) {
                 logger.trace(
-                    (Supplier<?>) () -> new ParameterizedMessage(
+                    () -> new ParameterizedMessage(
                         "failed to execute cluster state update in [{}], state:\nversion [{}], source [{}]\n{}{}{}",
                         executionTime,
                         previousClusterState.version(),
diff --git a/server/src/main/java/org/elasticsearch/common/bytes/BytesReference.java b/server/src/main/java/org/elasticsearch/common/bytes/BytesReference.java
index 2668a375d1dfa..806b153c803a6 100644
--- a/server/src/main/java/org/elasticsearch/common/bytes/BytesReference.java
+++ b/server/src/main/java/org/elasticsearch/common/bytes/BytesReference.java
@@ -23,6 +23,7 @@
 import org.apache.lucene.util.BytesRefIterator;
 import org.elasticsearch.common.io.stream.BytesStream;
 import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.xcontent.ToXContentFragment;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 
 import java.io.ByteArrayOutputStream;
@@ -35,7 +36,7 @@
 /**
  * A reference to bytes.
  */
-public abstract class BytesReference implements Accountable, Comparable<BytesReference> {
+public abstract class BytesReference implements Accountable, Comparable<BytesReference>, ToXContentFragment {
 
     private Integer hash = null; // we cache the hash of this reference since it can be quite costly to re-calculated it
 
@@ -301,4 +302,10 @@ public long skip(long n) throws IOException {
             return input.skip(n);
         }
     }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        BytesRef bytes = toBytesRef();
+        return builder.value(bytes.bytes, bytes.offset, bytes.length);
+    }
 }
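Reviewer note: making BytesReference a ToXContentFragment is what permits the DocumentField cleanup just below — any bytes value can now render itself through builder.value(...), so callers no longer special-case it. For readers unfamiliar with the split: a fragment writes bare content into a structure the caller has opened, while an object (as BroadcastResponse is now) opens and closes its own. A rough, hypothetical illustration (only ToXContentFragment and XContentBuilder are real ES types here):

import java.io.IOException;
import org.elasticsearch.common.xcontent.ToXContentFragment;
import org.elasticsearch.common.xcontent.XContentBuilder;

// Hypothetical fragment: contributes a bare value; the surrounding array or
// object is the caller's responsibility.
public class TemperatureReading implements ToXContentFragment {
    private final double celsius;

    public TemperatureReading(double celsius) {
        this.celsius = celsius;
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        return builder.value(celsius);
    }
}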
diff --git a/server/src/main/java/org/elasticsearch/common/document/DocumentField.java b/server/src/main/java/org/elasticsearch/common/document/DocumentField.java
index c9236ea7840b1..f7747c9da254d 100644
--- a/server/src/main/java/org/elasticsearch/common/document/DocumentField.java
+++ b/server/src/main/java/org/elasticsearch/common/document/DocumentField.java
@@ -19,6 +19,7 @@
 
 package org.elasticsearch.common.document;
 
+import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -127,11 +128,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
             // Stored fields values are converted using MappedFieldType#valueForDisplay.
             // As a result they can either be Strings, Numbers, or Booleans, that's
             // all.
-            if (value instanceof BytesReference) {
-                builder.binaryValue(((BytesReference) value).toBytesRef());
-            } else {
-                builder.value(value);
-            }
+            builder.value(value);
         }
         builder.endArray();
         return builder;
diff --git a/server/src/main/java/org/elasticsearch/common/geo/GeoPoint.java b/server/src/main/java/org/elasticsearch/common/geo/GeoPoint.java
index 5905695fb73fe..e43c9e9a8e3cc 100644
--- a/server/src/main/java/org/elasticsearch/common/geo/GeoPoint.java
+++ b/server/src/main/java/org/elasticsearch/common/geo/GeoPoint.java
@@ -25,15 +25,17 @@
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.util.BitUtil;
 import org.apache.lucene.util.BytesRef;
-import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.ToXContentFragment;
 import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.common.Strings;
 
 import java.io.IOException;
 import java.util.Arrays;
 
 import static org.elasticsearch.common.geo.GeoHashUtils.mortonEncode;
 import static org.elasticsearch.common.geo.GeoHashUtils.stringEncode;
+import static org.elasticsearch.index.mapper.GeoPointFieldMapper.Names.IGNORE_Z_VALUE;
 
 public final class GeoPoint implements ToXContentFragment {
 
@@ -79,14 +81,24 @@ public GeoPoint resetLon(double lon) {
     }
 
     public GeoPoint resetFromString(String value) {
-        int comma = value.indexOf(',');
-        if (comma != -1) {
-            lat = Double.parseDouble(value.substring(0, comma).trim());
-            lon = Double.parseDouble(value.substring(comma + 1).trim());
-        } else {
-            resetFromGeoHash(value);
+        return resetFromString(value, false);
+    }
+
+    public GeoPoint resetFromString(String value, final boolean ignoreZValue) {
+        if (value.contains(",")) {
+            String[] vals = value.split(",");
+            if (vals.length > 3) {
+                throw new ElasticsearchParseException("failed to parse [{}], expected 2 or 3 coordinates "
+                    + "but found: [{}]", value, vals.length);
+            }
+            double lat = Double.parseDouble(vals[0].trim());
+            double lon = Double.parseDouble(vals[1].trim());
+            if (vals.length > 2) {
+                GeoPoint.assertZValue(ignoreZValue, Double.parseDouble(vals[2].trim()));
+            }
+            return reset(lat, lon);
         }
-        return this;
+        return resetFromGeoHash(value);
     }
 
     public GeoPoint resetFromIndexHash(long hash) {
@@ -193,4 +205,12 @@ public static GeoPoint fromGeohash(long geohashLong) {
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         return builder.latlon(lat, lon);
     }
+
+    public static double assertZValue(final boolean ignoreZValue, double zValue) {
+        if (ignoreZValue == false) {
+            throw new ElasticsearchParseException("Exception parsing coordinates: found Z value [{}] but [{}] "
+                + "parameter is [{}]", zValue, IGNORE_Z_VALUE, ignoreZValue);
+        }
+        return zValue;
+    }
 }
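Reviewer note: the new string form accepts "lat,lon" or "lat,lon,z", and a z coordinate is only tolerated when the mapper opts in (ignore_z_value); the z is validated and then discarded. A self-contained sketch of just that parsing rule (the LatLonStringParser class is illustrative, not ES code):

public class LatLonStringParser {
    public static double[] parse(String value, boolean ignoreZValue) {
        String[] vals = value.split(",");
        if (vals.length < 2 || vals.length > 3) {
            throw new IllegalArgumentException("expected 2 or 3 coordinates but found: [" + vals.length + "]");
        }
        if (vals.length == 3 && ignoreZValue == false) {
            // mirrors GeoPoint.assertZValue: reject z unless explicitly ignored
            throw new IllegalArgumentException("found Z value [" + vals[2].trim() + "] but ignore_z_value is false");
        }
        return new double[] { Double.parseDouble(vals[0].trim()), Double.parseDouble(vals[1].trim()) };
    }

    public static void main(String[] args) {
        double[] latLon = parse("41.12,-71.34,250.0", true); // z accepted, then dropped
        System.out.println(latLon[0] + ", " + latLon[1]);
    }
}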
diff --git a/server/src/main/java/org/elasticsearch/common/geo/GeoUtils.java b/server/src/main/java/org/elasticsearch/common/geo/GeoUtils.java
index aed72f502bfe9..655b259c81074 100644
--- a/server/src/main/java/org/elasticsearch/common/geo/GeoUtils.java
+++ b/server/src/main/java/org/elasticsearch/common/geo/GeoUtils.java
@@ -24,6 +24,7 @@
 import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree;
 import org.apache.lucene.util.SloppyMath;
 import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.unit.DistanceUnit;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentParser.Token;
@@ -345,6 +346,11 @@ public static GeoPoint parseGeoPoint(XContentParser parser) throws IOException,
         return parseGeoPoint(parser, new GeoPoint());
     }
 
+    public static GeoPoint parseGeoPoint(XContentParser parser, GeoPoint point) throws IOException, ElasticsearchParseException {
+        return parseGeoPoint(parser, point, false);
+    }
+
     /**
      * Parse a {@link GeoPoint} with a {@link XContentParser}. A geopoint has one of the following forms:
      *
@@ -359,7 +365,8 @@ public static GeoPoint parseGeoPoint(XContentParser parser) throws IOException,
      * @param point A {@link GeoPoint} that will be reset by the values parsed
      * @return new {@link GeoPoint} parsed from the parse
      */
-    public static GeoPoint parseGeoPoint(XContentParser parser, GeoPoint point) throws IOException, ElasticsearchParseException {
+    public static GeoPoint parseGeoPoint(XContentParser parser, GeoPoint point, final boolean ignoreZValue)
+            throws IOException, ElasticsearchParseException {
         double lat = Double.NaN;
         double lon = Double.NaN;
         String geohash = null;
@@ -438,7 +445,7 @@ public static GeoPoint parseGeoPoint(XContentParser parser, GeoPoint point) thro
                 } else if(element == 2) {
                     lat = parser.doubleValue();
                 } else {
-                    throw new ElasticsearchParseException("only two values allowed");
+                    GeoPoint.assertZValue(ignoreZValue, parser.doubleValue());
                 }
             } else {
                 throw new ElasticsearchParseException("numeric value expected");
@@ -446,25 +453,12 @@ public static GeoPoint parseGeoPoint(XContentParser parser, GeoPoint point) thro
             }
             return point.reset(lat, lon);
         } else if(parser.currentToken() == Token.VALUE_STRING) {
-            String data = parser.text();
-            return parseGeoPoint(data, point);
+            return point.resetFromString(parser.text(), ignoreZValue);
         } else {
             throw new ElasticsearchParseException("geo_point expected");
         }
     }
 
-    /** parse a {@link GeoPoint} from a String */
-    public static GeoPoint parseGeoPoint(String data, GeoPoint point) {
-        int comma = data.indexOf(',');
-        if(comma > 0) {
-            double lat = Double.parseDouble(data.substring(0, comma).trim());
-            double lon = Double.parseDouble(data.substring(comma + 1).trim());
-            return point.reset(lat, lon);
-        } else {
-            return point.resetFromGeoHash(data);
-        }
-    }
-
     /** Returns the maximum distance/radius (in meters) from the point 'center' before overlapping */
     public static double maxRadialDistanceMeters(final double centerLat, final double centerLon) {
         if (Math.abs(centerLat) == MAX_LAT) {
diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java
index ecc33b94ae4eb..024ec91e88765 100644
--- a/server/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java
+++ b/server/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java
@@ -173,6 +173,10 @@ public String toWKT() {
         throw new UnsupportedOperationException("The WKT spec does not support CIRCLE geometry");
     }
 
+    public int numDimensions() {
+        return Double.isNaN(center.z) ? 2 : 3;
+    }
+
     @Override
     public int hashCode() {
         return Objects.hash(center, radius, unit.ordinal());
diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/CoordinatesBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/CoordinatesBuilder.java
index 43393d5e08630..2eaf5f26dc78b 100644
--- a/server/src/main/java/org/elasticsearch/common/geo/builders/CoordinatesBuilder.java
+++ b/server/src/main/java/org/elasticsearch/common/geo/builders/CoordinatesBuilder.java
@@ -20,6 +20,7 @@
 package org.elasticsearch.common.geo.builders;
 
 import com.vividsolutions.jts.geom.Coordinate;
+import org.elasticsearch.ElasticsearchException;
 
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -41,7 +42,16 @@ public class CoordinatesBuilder {
      * @return this
      */
     public CoordinatesBuilder coordinate(Coordinate coordinate) {
-        this.points.add(coordinate);
+        int expectedDims;
+        int actualDims;
+        if (points.isEmpty() == false
+                && (expectedDims = Double.isNaN(points.get(0).z) ? 2 : 3) != (actualDims = Double.isNaN(coordinate.z) ? 2 : 3)) {
+            throw new ElasticsearchException("unable to add coordinate to CoordinatesBuilder: "
+                + "coordinate dimensions do not match. Expected [{}] but found [{}]", expectedDims, actualDims);
+
+        } else {
+            this.points.add(coordinate);
+        }
         return this;
     }
diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java
index 4949c3633470d..34da7e7fc2f6c 100644
--- a/server/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java
+++ b/server/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java
@@ -45,6 +45,9 @@ public class EnvelopeBuilder extends ShapeBuilder {
     public EnvelopeBuilder(Coordinate topLeft, Coordinate bottomRight) {
         Objects.requireNonNull(topLeft, "topLeft of envelope cannot be null");
         Objects.requireNonNull(bottomRight, "bottomRight of envelope cannot be null");
+        if (Double.isNaN(topLeft.z) != Double.isNaN(bottomRight.z)) {
+            throw new IllegalArgumentException("expected same number of dimensions for topLeft and bottomRight");
+        }
         this.topLeft = topLeft;
         this.bottomRight = bottomRight;
     }
@@ -114,6 +117,11 @@ public GeoShapeType type() {
         return TYPE;
     }
 
+    @Override
+    public int numDimensions() {
+        return Double.isNaN(topLeft.z) ? 2 : 3;
+    }
+
     @Override
     public int hashCode() {
         return Objects.hash(topLeft, bottomRight);
diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java
index 84052939da48b..b9c23842a5a8c 100644
--- a/server/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java
+++ b/server/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java
@@ -159,6 +159,15 @@ public GeoShapeType type() {
         return TYPE;
     }
 
+    @Override
+    public int numDimensions() {
+        if (shapes == null || shapes.isEmpty()) {
+            throw new IllegalStateException("unable to get number of dimensions, "
+                + "GeometryCollection has not yet been initialized");
+        }
+        return shapes.get(0).numDimensions();
+    }
+
     @Override
     public Shape build() {
diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java
index c595c126f7a62..a888ee0867cb2 100644
--- a/server/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java
+++ b/server/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java
@@ -91,6 +91,15 @@ public GeoShapeType type() {
         return TYPE;
     }
 
+    @Override
+    public int numDimensions() {
+        if (coordinates == null || coordinates.isEmpty()) {
+            throw new IllegalStateException("unable to get number of dimensions, "
+                + "LineString has not yet been initialized");
+        }
+        return Double.isNaN(coordinates.get(0).z) ? 2 : 3;
+    }
+
     @Override
     public JtsGeometry build() {
         Coordinate[] coordinates = this.coordinates.toArray(new Coordinate[this.coordinates.size()]);
diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java
index 34a8960f69c53..13f9968864c32 100644
--- a/server/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java
+++ b/server/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java
@@ -101,6 +101,14 @@ protected StringBuilder contentToWKT() {
         return sb;
     }
 
+    public int numDimensions() {
+        if (lines == null || lines.isEmpty()) {
+            throw new IllegalStateException("unable to get number of dimensions, "
+                + "LineStrings have not yet been initialized");
+        }
+        return lines.get(0).numDimensions();
+    }
+
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         builder.startObject();
diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java
index ae38126f87bac..03d7683c8e113 100644
--- a/server/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java
+++ b/server/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java
@@ -80,4 +80,13 @@ public XShapeCollection<Point> build() {
 
     public GeoShapeType type() {
         return TYPE;
     }
+
+    @Override
+    public int numDimensions() {
+        if (coordinates == null || coordinates.isEmpty()) {
+            throw new IllegalStateException("unable to get number of dimensions, "
+                + "MultiPoint has not yet been initialized");
+        }
+        return Double.isNaN(coordinates.get(0).z) ? 2 : 3;
+    }
 }
diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java
index aa577887e00d2..168d57c1764a7 100644
--- a/server/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java
+++ b/server/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java
@@ -153,6 +153,15 @@ public GeoShapeType type() {
         return TYPE;
     }
 
+    @Override
+    public int numDimensions() {
+        if (polygons == null || polygons.isEmpty()) {
+            throw new IllegalStateException("unable to get number of dimensions, "
+                + "Polygons have not yet been initialized");
+        }
+        return polygons.get(0).numDimensions();
+    }
+
     @Override
     public Shape build() {
diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java
index fd4178a9c7679..8c37227a808b0 100644
--- a/server/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java
+++ b/server/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java
@@ -82,4 +82,9 @@ public Point build() {
     public GeoShapeType type() {
         return TYPE;
     }
+
+    @Override
+    public int numDimensions() {
+        return Double.isNaN(coordinates.get(0).z) ? 2 : 3;
+    }
 }
diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java
index b0b37dbafa9a3..dade127456c8c 100644
--- a/server/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java
+++ b/server/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java
@@ -283,6 +283,15 @@ public GeoShapeType type() {
         return TYPE;
     }
 
+    @Override
+    public int numDimensions() {
+        if (shell == null) {
+            throw new IllegalStateException("unable to get number of dimensions, "
+                + "Polygon has not yet been initialized");
+        }
+        return shell.numDimensions();
+    }
+
     protected static Polygon polygon(GeometryFactory factory, Coordinate[][] polygon) {
         LinearRing shell = factory.createLinearRing(polygon[0]);
         LinearRing[] holes;
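Reviewer note: every builder now reports its dimensionality through the same convention — a JTS Coordinate constructed without a z ordinate defaults z to NaN, so "z is NaN" doubles as the 2D marker, and composite builders delegate to their first element. A standalone illustration of that convention:

import com.vividsolutions.jts.geom.Coordinate;

public class DimensionCheck {
    // Same rule the builders above apply to their first coordinate.
    static int numDimensions(Coordinate c) {
        return Double.isNaN(c.z) ? 2 : 3;
    }

    public static void main(String[] args) {
        Coordinate flat = new Coordinate(-71.34, 41.12);         // z left as NaN -> 2D
        Coordinate withAlt = new Coordinate(-71.34, 41.12, 250); // explicit z -> 3D
        System.out.println(numDimensions(flat) + " vs " + numDimensions(withAlt));
    }
}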
diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java
index c3d728dcd5525..866a8cfeacae9 100644
--- a/server/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java
+++ b/server/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java
@@ -25,6 +25,7 @@
 
 import org.apache.logging.log4j.Logger;
 import org.elasticsearch.Assertions;
+import org.elasticsearch.Version;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.geo.GeoShapeType;
 import org.elasticsearch.common.geo.parsers.ShapeParser;
@@ -111,7 +112,13 @@ protected ShapeBuilder(StreamInput in) throws IOException {
     }
 
     protected static Coordinate readFromStream(StreamInput in) throws IOException {
-        return new Coordinate(in.readDouble(), in.readDouble());
+        double x = in.readDouble();
+        double y = in.readDouble();
+        Double z = null;
+        if (in.getVersion().onOrAfter(Version.V_6_3_0)) {
+            z = in.readOptionalDouble();
+        }
+        return z == null ? new Coordinate(x, y) : new Coordinate(x, y, z);
     }
 
     @Override
@@ -125,6 +132,9 @@ public void writeTo(StreamOutput out) throws IOException {
 
     protected static void writeCoordinateTo(Coordinate coordinate, StreamOutput out) throws IOException {
         out.writeDouble(coordinate.x);
         out.writeDouble(coordinate.y);
+        if (out.getVersion().onOrAfter(Version.V_6_3_0)) {
+            out.writeOptionalDouble(Double.isNaN(coordinate.z) ? null : coordinate.z);
+        }
     }
 
     @SuppressWarnings("unchecked")
@@ -219,6 +229,9 @@ protected static Coordinate shift(Coordinate coordinate, double dateline) {
      */
     public abstract GeoShapeType type();
 
+    /** tracks number of dimensions for this shape */
+    public abstract int numDimensions();
+
     /**
     * Calculate the intersection of a line segment and a vertical dateline.
     *
@@ -437,7 +450,11 @@ public static ShapeBuilder parse(XContentParser parser) throws IOException {
     }
 
     protected static XContentBuilder toXContent(XContentBuilder builder, Coordinate coordinate) throws IOException {
-        return builder.startArray().value(coordinate.x).value(coordinate.y).endArray();
+        builder.startArray().value(coordinate.x).value(coordinate.y);
+        if (Double.isNaN(coordinate.z) == false) {
+            builder.value(coordinate.z);
+        }
+        return builder.endArray();
     }
 
     /**
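Reviewer note: the stream changes above are the usual wire-compatibility dance — the optional z is only read or written when the other side is on 6.3.0 or later, so mixed-version clusters keep exchanging the old two-double encoding. A rough standalone sketch of the idea (Wire types and the version constant are stand-ins for StreamOutput/Version, and the boolean-prefixed optional mirrors how writeOptionalDouble encodes absence):

import java.io.DataOutputStream;
import java.io.IOException;

public class VersionGatedWriter {
    static final int V_6_3_0 = 60300; // hypothetical numeric version id

    static void writeCoordinate(DataOutputStream out, int receiverVersion,
                                double x, double y, double z) throws IOException {
        out.writeDouble(x);
        out.writeDouble(y);
        if (receiverVersion >= V_6_3_0) {
            boolean hasZ = Double.isNaN(z) == false; // NaN marks "no z", as in the diff
            out.writeBoolean(hasZ);                  // presence flag for the optional value
            if (hasZ) {
                out.writeDouble(z);
            }
        }
        // older receivers never see the extra bytes, so the old format is preserved
    }
}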
GeoShapeFieldMapper.Defaults.COERCE : shapeMapper.coerce(); + Explicit ignoreZValue = (shapeMapper == null) ? GeoShapeFieldMapper.Defaults.IGNORE_Z_VALUE : shapeMapper.ignoreZValue(); String malformedException = null; @@ -68,7 +70,12 @@ protected static ShapeBuilder parse(XContentParser parser, GeoShapeFieldMapper s } } else if (ShapeParser.FIELD_COORDINATES.match(fieldName, parser.getDeprecationHandler())) { parser.nextToken(); - coordinateNode = parseCoordinates(parser); + CoordinateNode tempNode = parseCoordinates(parser, ignoreZValue.value()); + if (coordinateNode != null && tempNode.numDimensions() != coordinateNode.numDimensions()) { + throw new ElasticsearchParseException("Exception parsing coordinates: " + + "number of dimensions do not match"); + } + coordinateNode = tempNode; } else if (ShapeParser.FIELD_GEOMETRIES.match(fieldName, parser.getDeprecationHandler())) { if (shapeType == null) { shapeType = GeoShapeType.GEOMETRYCOLLECTION; @@ -136,36 +143,46 @@ protected static ShapeBuilder parse(XContentParser parser, GeoShapeFieldMapper s * Thrown if an error occurs while reading from the * XContentParser */ - private static CoordinateNode parseCoordinates(XContentParser parser) throws IOException { + private static CoordinateNode parseCoordinates(XContentParser parser, boolean ignoreZValue) throws IOException { XContentParser.Token token = parser.nextToken(); // Base cases if (token != XContentParser.Token.START_ARRAY && token != XContentParser.Token.END_ARRAY && token != XContentParser.Token.VALUE_NULL) { - return new CoordinateNode(parseCoordinate(parser)); + return new CoordinateNode(parseCoordinate(parser, ignoreZValue)); } else if (token == XContentParser.Token.VALUE_NULL) { throw new IllegalArgumentException("coordinates cannot contain NULL values)"); } List nodes = new ArrayList<>(); while (token != XContentParser.Token.END_ARRAY) { - nodes.add(parseCoordinates(parser)); + CoordinateNode node = parseCoordinates(parser, ignoreZValue); + if (nodes.isEmpty() == false && nodes.get(0).numDimensions() != node.numDimensions()) { + throw new ElasticsearchParseException("Exception parsing coordinates: number of dimensions do not match"); + } + nodes.add(node); token = parser.nextToken(); } return new CoordinateNode(nodes); } - private static Coordinate parseCoordinate(XContentParser parser) throws IOException { + private static Coordinate parseCoordinate(XContentParser parser, boolean ignoreZValue) throws IOException { double lon = parser.doubleValue(); parser.nextToken(); double lat = parser.doubleValue(); XContentParser.Token token = parser.nextToken(); - while (token == XContentParser.Token.VALUE_NUMBER) { - token = parser.nextToken(); + // alt (for storing purposes only - future use includes 3d shapes) + double alt = Double.NaN; + if (token == XContentParser.Token.VALUE_NUMBER) { + alt = GeoPoint.assertZValue(ignoreZValue, parser.doubleValue()); + parser.nextToken(); + } + // do not support > 3 dimensions + if (parser.currentToken() == XContentParser.Token.VALUE_NUMBER) { + throw new ElasticsearchParseException("geo coordinates greater than 3 dimensions are not supported"); } - // todo support z/alt - return new Coordinate(lon, lat); + return new Coordinate(lon, lat, alt); } /** diff --git a/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoWKTParser.java b/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoWKTParser.java index 2a8110c5f4dc2..74e463c723a5a 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoWKTParser.java +++ 
b/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoWKTParser.java @@ -20,6 +20,7 @@ import com.vividsolutions.jts.geom.Coordinate; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoShapeType; import java.io.StringReader; @@ -35,6 +36,7 @@ import org.elasticsearch.common.geo.builders.ShapeBuilder; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.mapper.GeoShapeFieldMapper; import java.io.IOException; import java.io.StreamTokenizer; @@ -52,7 +54,7 @@ public class GeoWKTParser { public static final String LPAREN = "("; public static final String RPAREN = ")"; public static final String COMMA = ","; - private static final String NAN = "NaN"; + public static final String NAN = "NaN"; private static final String NUMBER = ""; private static final String EOF = "END-OF-STREAM"; @@ -61,16 +63,23 @@ public class GeoWKTParser { // no instance private GeoWKTParser() {} - public static ShapeBuilder parse(XContentParser parser) + public static ShapeBuilder parse(XContentParser parser, final GeoShapeFieldMapper shapeMapper) throws IOException, ElasticsearchParseException { - return parseExpectedType(parser, null); + return parseExpectedType(parser, null, shapeMapper); } - /** throws an exception if the parsed geometry type does not match the expected shape type */ public static ShapeBuilder parseExpectedType(XContentParser parser, final GeoShapeType shapeType) throws IOException, ElasticsearchParseException { + return parseExpectedType(parser, shapeType, null); + } + + /** throws an exception if the parsed geometry type does not match the expected shape type */ + public static ShapeBuilder parseExpectedType(XContentParser parser, final GeoShapeType shapeType, + final GeoShapeFieldMapper shapeMapper) + throws IOException, ElasticsearchParseException { StringReader reader = new StringReader(parser.text()); try { + boolean ignoreZValue = (shapeMapper != null && shapeMapper.ignoreZValue().value() == true); // setup the tokenizer; configured to read words w/o numbers StreamTokenizer tokenizer = new StreamTokenizer(reader); tokenizer.resetSyntax(); @@ -83,7 +92,7 @@ public static ShapeBuilder parseExpectedType(XContentParser parser, final GeoSha tokenizer.wordChars('.', '.'); tokenizer.whitespaceChars(0, ' '); tokenizer.commentChar('#'); - ShapeBuilder builder = parseGeometry(tokenizer, shapeType); + ShapeBuilder builder = parseGeometry(tokenizer, shapeType, ignoreZValue); checkEOF(tokenizer); return builder; } finally { @@ -92,7 +101,7 @@ public static ShapeBuilder parseExpectedType(XContentParser parser, final GeoSha } /** parse geometry from the stream tokenizer */ - private static ShapeBuilder parseGeometry(StreamTokenizer stream, GeoShapeType shapeType) + private static ShapeBuilder parseGeometry(StreamTokenizer stream, GeoShapeType shapeType, final boolean ignoreZValue) throws IOException, ElasticsearchParseException { final GeoShapeType type = GeoShapeType.forName(nextWord(stream)); if (shapeType != null && shapeType != GeoShapeType.GEOMETRYCOLLECTION) { @@ -102,21 +111,21 @@ private static ShapeBuilder parseGeometry(StreamTokenizer stream, GeoShapeType s } switch (type) { case POINT: - return parsePoint(stream); + return parsePoint(stream, ignoreZValue); case MULTIPOINT: - return parseMultiPoint(stream); + return parseMultiPoint(stream, ignoreZValue); case LINESTRING: - return parseLine(stream); + return 
parseLine(stream, ignoreZValue); case MULTILINESTRING: - return parseMultiLine(stream); + return parseMultiLine(stream, ignoreZValue); case POLYGON: - return parsePolygon(stream); + return parsePolygon(stream, ignoreZValue); case MULTIPOLYGON: - return parseMultiPolygon(stream); + return parseMultiPolygon(stream, ignoreZValue); case ENVELOPE: return parseBBox(stream); case GEOMETRYCOLLECTION: - return parseGeometryCollection(stream); + return parseGeometryCollection(stream, ignoreZValue); default: throw new IllegalArgumentException("Unknown geometry type: " + type); } @@ -137,24 +146,25 @@ private static EnvelopeBuilder parseBBox(StreamTokenizer stream) throws IOExcept return new EnvelopeBuilder(new Coordinate(minLon, maxLat), new Coordinate(maxLon, minLat)); } - private static PointBuilder parsePoint(StreamTokenizer stream) throws IOException, ElasticsearchParseException { + private static PointBuilder parsePoint(StreamTokenizer stream, final boolean ignoreZValue) + throws IOException, ElasticsearchParseException { if (nextEmptyOrOpen(stream).equals(EMPTY)) { return null; } PointBuilder pt = new PointBuilder(nextNumber(stream), nextNumber(stream)); if (isNumberNext(stream) == true) { - nextNumber(stream); + GeoPoint.assertZValue(ignoreZValue, nextNumber(stream)); } nextCloser(stream); return pt; } - private static List parseCoordinateList(StreamTokenizer stream) + private static List parseCoordinateList(StreamTokenizer stream, final boolean ignoreZValue) throws IOException, ElasticsearchParseException { CoordinatesBuilder coordinates = new CoordinatesBuilder(); boolean isOpenParen = false; if (isNumberNext(stream) || (isOpenParen = nextWord(stream).equals(LPAREN))) { - coordinates.coordinate(parseCoordinate(stream)); + coordinates.coordinate(parseCoordinate(stream, ignoreZValue)); } if (isOpenParen && nextCloser(stream).equals(RPAREN) == false) { @@ -164,7 +174,7 @@ private static List parseCoordinateList(StreamTokenizer stream) while (nextCloserOrComma(stream).equals(COMMA)) { isOpenParen = false; if (isNumberNext(stream) || (isOpenParen = nextWord(stream).equals(LPAREN))) { - coordinates.coordinate(parseCoordinate(stream)); + coordinates.coordinate(parseCoordinate(stream, ignoreZValue)); } if (isOpenParen && nextCloser(stream).equals(RPAREN) == false) { throw new ElasticsearchParseException("expected: " + RPAREN + " but found: " + tokenString(stream), stream.lineno()); @@ -173,77 +183,82 @@ private static List parseCoordinateList(StreamTokenizer stream) return coordinates.build(); } - private static Coordinate parseCoordinate(StreamTokenizer stream) + private static Coordinate parseCoordinate(StreamTokenizer stream, final boolean ignoreZValue) throws IOException, ElasticsearchParseException { final double lon = nextNumber(stream); final double lat = nextNumber(stream); Double z = null; if (isNumberNext(stream)) { - z = nextNumber(stream); + z = GeoPoint.assertZValue(ignoreZValue, nextNumber(stream)); } return z == null ? 
new Coordinate(lon, lat) : new Coordinate(lon, lat, z); } - private static MultiPointBuilder parseMultiPoint(StreamTokenizer stream) throws IOException, ElasticsearchParseException { + private static MultiPointBuilder parseMultiPoint(StreamTokenizer stream, final boolean ignoreZValue) + throws IOException, ElasticsearchParseException { String token = nextEmptyOrOpen(stream); if (token.equals(EMPTY)) { return null; } - return new MultiPointBuilder(parseCoordinateList(stream)); + return new MultiPointBuilder(parseCoordinateList(stream, ignoreZValue)); } - private static LineStringBuilder parseLine(StreamTokenizer stream) throws IOException, ElasticsearchParseException { + private static LineStringBuilder parseLine(StreamTokenizer stream, final boolean ignoreZValue) + throws IOException, ElasticsearchParseException { String token = nextEmptyOrOpen(stream); if (token.equals(EMPTY)) { return null; } - return new LineStringBuilder(parseCoordinateList(stream)); + return new LineStringBuilder(parseCoordinateList(stream, ignoreZValue)); } - private static MultiLineStringBuilder parseMultiLine(StreamTokenizer stream) throws IOException, ElasticsearchParseException { + private static MultiLineStringBuilder parseMultiLine(StreamTokenizer stream, final boolean ignoreZValue) + throws IOException, ElasticsearchParseException { String token = nextEmptyOrOpen(stream); if (token.equals(EMPTY)) { return null; } MultiLineStringBuilder builder = new MultiLineStringBuilder(); - builder.linestring(parseLine(stream)); + builder.linestring(parseLine(stream, ignoreZValue)); while (nextCloserOrComma(stream).equals(COMMA)) { - builder.linestring(parseLine(stream)); + builder.linestring(parseLine(stream, ignoreZValue)); } return builder; } - private static PolygonBuilder parsePolygon(StreamTokenizer stream) throws IOException, ElasticsearchParseException { + private static PolygonBuilder parsePolygon(StreamTokenizer stream, final boolean ignoreZValue) + throws IOException, ElasticsearchParseException { if (nextEmptyOrOpen(stream).equals(EMPTY)) { return null; } - PolygonBuilder builder = new PolygonBuilder(parseLine(stream), ShapeBuilder.Orientation.RIGHT); + PolygonBuilder builder = new PolygonBuilder(parseLine(stream, ignoreZValue), ShapeBuilder.Orientation.RIGHT); while (nextCloserOrComma(stream).equals(COMMA)) { - builder.hole(parseLine(stream)); + builder.hole(parseLine(stream, ignoreZValue)); } return builder; } - private static MultiPolygonBuilder parseMultiPolygon(StreamTokenizer stream) throws IOException, ElasticsearchParseException { + private static MultiPolygonBuilder parseMultiPolygon(StreamTokenizer stream, final boolean ignoreZValue) + throws IOException, ElasticsearchParseException { if (nextEmptyOrOpen(stream).equals(EMPTY)) { return null; } - MultiPolygonBuilder builder = new MultiPolygonBuilder().polygon(parsePolygon(stream)); + MultiPolygonBuilder builder = new MultiPolygonBuilder().polygon(parsePolygon(stream, ignoreZValue)); while (nextCloserOrComma(stream).equals(COMMA)) { - builder.polygon(parsePolygon(stream)); + builder.polygon(parsePolygon(stream, ignoreZValue)); } return builder; } - private static GeometryCollectionBuilder parseGeometryCollection(StreamTokenizer stream) + private static GeometryCollectionBuilder parseGeometryCollection(StreamTokenizer stream, final boolean ignoreZValue) throws IOException, ElasticsearchParseException { if (nextEmptyOrOpen(stream).equals(EMPTY)) { return null; } GeometryCollectionBuilder builder = new GeometryCollectionBuilder().shape( - 
parseGeometry(stream, GeoShapeType.GEOMETRYCOLLECTION)); + parseGeometry(stream, GeoShapeType.GEOMETRYCOLLECTION, ignoreZValue)); while (nextCloserOrComma(stream).equals(COMMA)) { - builder.shape(parseGeometry(stream, null)); + builder.shape(parseGeometry(stream, null, ignoreZValue)); } return builder; } diff --git a/server/src/main/java/org/elasticsearch/common/geo/parsers/ShapeParser.java b/server/src/main/java/org/elasticsearch/common/geo/parsers/ShapeParser.java index 0ee3333c4802c..e7ec489191762 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/parsers/ShapeParser.java +++ b/server/src/main/java/org/elasticsearch/common/geo/parsers/ShapeParser.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.geo.builders.ShapeBuilder; import org.elasticsearch.common.xcontent.XContent; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.mapper.GeoPointFieldMapper; import org.elasticsearch.index.mapper.GeoShapeFieldMapper; import java.io.IOException; @@ -52,7 +53,7 @@ static ShapeBuilder parse(XContentParser parser, GeoShapeFieldMapper shapeMapper } if (parser.currentToken() == XContentParser.Token.START_OBJECT) { return GeoJsonParser.parse(parser, shapeMapper); } else if (parser.currentToken() == XContentParser.Token.VALUE_STRING) { - return GeoWKTParser.parse(parser); + return GeoWKTParser.parse(parser, shapeMapper); } throw new ElasticsearchParseException("shape must be an object consisting of type and coordinates"); } diff --git a/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java b/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java index ba1450d1fb83c..5bef7bee4f10b 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.analysis.core.KeywordAnalyzer; import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.codecs.CodecUtil; @@ -111,7 +110,7 @@ public static Version parseVersion(@Nullable String version, Version defaultVers try { return Version.parse(version); } catch (ParseException e) { - logger.warn((Supplier) () -> new ParameterizedMessage("no version match {}, default to {}", version, defaultVersion), e); + logger.warn(() -> new ParameterizedMessage("no version match {}, default to {}", version, defaultVersion), e); return defaultVersion; } } diff --git a/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java b/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java index c3c6de5355af4..e8bb946c8a795 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java @@ -20,7 +20,6 @@ package org.elasticsearch.common.settings; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.search.spell.LevensteinDistance; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.ExceptionsHelper; @@ -135,7 +134,7 @@ public synchronized Settings validateUpdate(Settings settings) { settingUpdater.getValue(current, previous); } catch (RuntimeException ex) { exceptions.add(ex); - logger.debug((Supplier) () -> new ParameterizedMessage("failed to 
prepareCommit settings for [{}]", settingUpdater), ex); + logger.debug(() -> new ParameterizedMessage("failed to prepareCommit settings for [{}]", settingUpdater), ex); } } // here we are exhaustive and record all settings that failed. @@ -163,8 +162,7 @@ public synchronized Settings applySettings(Settings newSettings) { try { applyRunnables.add(settingUpdater.updater(current, previous)); } catch (Exception ex) { - logger.warn( - (Supplier) () -> new ParameterizedMessage("failed to prepareCommit settings for [{}]", settingUpdater), ex); + logger.warn(() -> new ParameterizedMessage("failed to prepareCommit settings for [{}]", settingUpdater), ex); throw ex; } } diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index 804340d63ed11..bcfed3388e9f2 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -79,6 +79,7 @@ import org.elasticsearch.monitor.os.OsService; import org.elasticsearch.monitor.process.ProcessService; import org.elasticsearch.node.Node; +import org.elasticsearch.persistent.decider.EnableAssignmentDecider; import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.repositories.fs.FsRepository; import org.elasticsearch.rest.BaseRestHandler; @@ -420,6 +421,7 @@ public void apply(Settings value, Settings current, Settings previous) { FastVectorHighlighter.SETTING_TV_HIGHLIGHT_MULTI_VALUE, Node.BREAKER_TYPE_KEY, OperationRouting.USE_ADAPTIVE_REPLICA_SELECTION_SETTING, - IndexGraveyard.SETTING_MAX_TOMBSTONES + IndexGraveyard.SETTING_MAX_TOMBSTONES, + EnableAssignmentDecider.CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING ))); } diff --git a/server/src/main/java/org/elasticsearch/common/text/Text.java b/server/src/main/java/org/elasticsearch/common/text/Text.java index 45a1c2d630672..bc0674d0b33c2 100644 --- a/server/src/main/java/org/elasticsearch/common/text/Text.java +++ b/server/src/main/java/org/elasticsearch/common/text/Text.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.common.text; +import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.ToXContent; @@ -125,7 +126,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } else { // TODO: TextBytesOptimization we can use a buffer here to convert it? maybe add a // request to jackson to support InputStream as well? 
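The two lines that follow unwrap the BytesRef into its raw array, offset, and length before handing it to XContentBuilder, since the BytesRef-taking overloads are removed later in this diff. A minimal sketch of the resulting caller pattern, assuming only the utf8Value(byte[], int, int) overload added below; the class and method names here are illustrative, not part of the change:

import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.xcontent.XContentBuilder;

import java.io.IOException;

class Utf8ValueSketch {
    // Writes UTF-8 bytes held in a BytesRef through the (byte[], int, int)
    // overload, passing the slice explicitly instead of the wrapper object.
    static XContentBuilder writeUtf8(XContentBuilder builder, BytesRef ref) throws IOException {
        return builder.utf8Value(ref.bytes, ref.offset, ref.length);
    }
}

Passing the slice directly keeps XContentBuilder itself free of the Lucene BytesRef type, which fits the broader move in this diff of pushing Lucene-specific writers into the XContentBuilderExtension SPI.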
- return builder.utf8Value(this.bytes().toBytesRef()); + BytesRef br = this.bytes().toBytesRef(); + return builder.utf8Value(br.bytes, br.offset, br.length); } } } diff --git a/server/src/main/java/org/elasticsearch/common/transport/TransportAddress.java b/server/src/main/java/org/elasticsearch/common/transport/TransportAddress.java index 965811f42ac51..f486bdd926bdf 100644 --- a/server/src/main/java/org/elasticsearch/common/transport/TransportAddress.java +++ b/server/src/main/java/org/elasticsearch/common/transport/TransportAddress.java @@ -25,6 +25,9 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.network.NetworkAddress; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; import java.net.InetAddress; @@ -34,7 +37,7 @@ /** * A transport address used for IP socket address (wraps {@link java.net.InetSocketAddress}). */ -public final class TransportAddress implements Writeable { +public final class TransportAddress implements Writeable, ToXContentFragment { /** * A non-routeable v4 meta transport address that can be used for @@ -155,4 +158,9 @@ public int hashCode() { public String toString() { return NetworkAddress.format(address); } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.value(toString()); + } } diff --git a/server/src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java b/server/src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java index db7678fa0f84c..6e05c576d4d89 100644 --- a/server/src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java +++ b/server/src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java @@ -27,12 +27,15 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; import java.util.Locale; import java.util.Objects; -public class ByteSizeValue implements Writeable, Comparable { +public class ByteSizeValue implements Writeable, Comparable, ToXContentFragment { private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(ByteSizeValue.class)); private final long size; @@ -271,4 +274,9 @@ public int compareTo(ByteSizeValue other) { long otherValue = other.size * other.unit.toBytes(1); return Long.compare(thisValue, otherValue); } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.value(toString()); + } } diff --git a/server/src/main/java/org/elasticsearch/common/unit/TimeValue.java b/server/src/main/java/org/elasticsearch/common/unit/TimeValue.java index 0f6eabed1e3de..abd62adaa0e3e 100644 --- a/server/src/main/java/org/elasticsearch/common/unit/TimeValue.java +++ b/server/src/main/java/org/elasticsearch/common/unit/TimeValue.java @@ -24,6 +24,9 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContent; +import 
org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.joda.time.Period; import org.joda.time.PeriodType; import org.joda.time.format.PeriodFormat; @@ -40,7 +43,7 @@ import java.util.Set; import java.util.concurrent.TimeUnit; -public class TimeValue implements Writeable, Comparable { +public class TimeValue implements Writeable, Comparable, ToXContentFragment { /** How many nano-seconds in one milli-second */ public static final long NSEC_PER_MSEC = TimeUnit.NANOSECONDS.convert(1, TimeUnit.MILLISECONDS); @@ -398,4 +401,9 @@ public int compareTo(TimeValue timeValue) { double otherValue = ((double) timeValue.duration) * timeValue.timeUnit.toNanos(1); return Double.compare(thisValue, otherValue); } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.value(toString()); + } } diff --git a/server/src/main/java/org/elasticsearch/common/util/IndexFolderUpgrader.java b/server/src/main/java/org/elasticsearch/common/util/IndexFolderUpgrader.java index 3ee7d1f23add2..b709c48d8c26c 100644 --- a/server/src/main/java/org/elasticsearch/common/util/IndexFolderUpgrader.java +++ b/server/src/main/java/org/elasticsearch/common/util/IndexFolderUpgrader.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.logging.Loggers; @@ -67,7 +66,7 @@ void upgrade(final Index index, final Path source, final Path target) throws IOE } catch (NoSuchFileException | FileNotFoundException exception) { // thrown when the source is non-existent because the folder was renamed // by another node (shared FS) after we checked if the target exists - logger.error((Supplier) () -> new ParameterizedMessage("multiple nodes trying to upgrade [{}] in parallel, retry " + + logger.error(() -> new ParameterizedMessage("multiple nodes trying to upgrade [{}] in parallel, retry " + "upgrading with single node", target), exception); throw exception; } finally { diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/LoggingRunnable.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/LoggingRunnable.java index 825d18b7e63cb..2dc3f6677f332 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/LoggingRunnable.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/LoggingRunnable.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; public class LoggingRunnable implements Runnable { @@ -38,7 +37,7 @@ public void run() { try { runnable.run(); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("failed to execute [{}]", runnable.toString()), e); + logger.warn(() -> new ParameterizedMessage("failed to execute [{}]", runnable.toString()), e); } } diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java b/server/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java index b51add28bf539..a02733e551e2d 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java @@ -19,10 +19,7 @@ package 
org.elasticsearch.common.xcontent; -import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.CollectionUtils; import org.joda.time.DateTimeZone; import org.joda.time.ReadableInstant; @@ -30,11 +27,13 @@ import org.joda.time.format.ISODateTimeFormat; import java.io.ByteArrayOutputStream; +import java.io.Closeable; import java.io.Flushable; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.nio.file.Path; +import java.time.ZonedDateTime; import java.util.Arrays; import java.util.Calendar; import java.util.Collections; @@ -49,7 +48,7 @@ /** * A utility to build XContent (ie json). */ -public final class XContentBuilder implements Releasable, Flushable { +public final class XContentBuilder implements Closeable, Flushable { /** * Create a new {@link XContentBuilder} using the given {@link XContent} content. @@ -91,7 +90,6 @@ public static XContentBuilder builder(XContent xContent, Set includes, S writers.put(Boolean.class, (b, v) -> b.value((Boolean) v)); writers.put(Byte.class, (b, v) -> b.value((Byte) v)); writers.put(byte[].class, (b, v) -> b.value((byte[]) v)); - writers.put(BytesRef.class, (b, v) -> b.binaryValue((BytesRef) v)); writers.put(Date.class, (b, v) -> b.value((Date) v)); writers.put(Double.class, (b, v) -> b.value((Double) v)); writers.put(double[].class, (b, v) -> b.values((double[]) v)); @@ -105,12 +103,12 @@ public static XContentBuilder builder(XContent xContent, Set includes, S writers.put(short[].class, (b, v) -> b.values((short[]) v)); writers.put(String.class, (b, v) -> b.value((String) v)); writers.put(String[].class, (b, v) -> b.values((String[]) v)); + writers.put(Locale.class, (b, v) -> b.value(v.toString())); + writers.put(Class.class, (b, v) -> b.value(v.toString())); + writers.put(ZonedDateTime.class, (b, v) -> b.value(v.toString())); Map, HumanReadableTransformer> humanReadableTransformer = new HashMap<>(); - // These will be moved to a different class at a later time to decouple them from XContentBuilder - humanReadableTransformer.put(TimeValue.class, v -> ((TimeValue) v).millis()); - humanReadableTransformer.put(ByteSizeValue.class, v -> ((ByteSizeValue) v).getBytes()); // Load pluggable extensions for (XContentBuilderExtension service : ServiceLoader.load(XContentBuilderExtension.class)) { @@ -613,49 +611,25 @@ public XContentBuilder value(byte[] value, int offset, int length) throws IOExce } /** - * Writes the binary content of the given {@link BytesRef}. - * - * Use {@link org.elasticsearch.common.xcontent.XContentParser#binaryValue()} to read the value back - */ - public XContentBuilder field(String name, BytesRef value) throws IOException { - return field(name).binaryValue(value); - } - - /** - * Writes the binary content of the given {@link BytesRef} as UTF-8 bytes. + * Writes the binary content of the given byte array as UTF-8 bytes. * * Use {@link XContentParser#charBuffer()} to read the value back */ - public XContentBuilder utf8Field(String name, BytesRef value) throws IOException { - return field(name).utf8Value(value); - } - - /** - * Writes the binary content of the given {@link BytesRef}. 
- * - * Use {@link org.elasticsearch.common.xcontent.XContentParser#binaryValue()} to read the value back - */ - public XContentBuilder binaryValue(BytesRef value) throws IOException { - if (value == null) { - return nullValue(); - } - value(value.bytes, value.offset, value.length); - return this; + public XContentBuilder utf8Field(String name, byte[] bytes, int offset, int length) throws IOException { + return field(name).utf8Value(bytes, offset, length); } /** - * Writes the binary content of the given {@link BytesRef} as UTF-8 bytes. + * Writes the binary content of the given byte array as UTF-8 bytes. * * Use {@link XContentParser#charBuffer()} to read the value back */ - public XContentBuilder utf8Value(BytesRef value) throws IOException { - if (value == null) { - return nullValue(); - } - generator.writeUTF8String(value.bytes, value.offset, value.length); + public XContentBuilder utf8Value(byte[] bytes, int offset, int length) throws IOException { + generator.writeUTF8String(bytes, offset, length); return this; } + //////////////////////////////////////////////////////////////////////////// // Date ////////////////////////////////// @@ -793,10 +767,11 @@ private void unknownValue(Object value, boolean ensureNoSelfReferences) throws I value((ReadableInstant) value); } else if (value instanceof ToXContent) { value((ToXContent) value); - } else { - // This is a "value" object (like enum, DistanceUnit, etc) just toString() it - // (yes, it can be misleading when toString a Java class, but really, jackson should be used in that case) + } else if (value instanceof Enum) { + // Write out the Enum toString value(Objects.toString(value)); + } else { + throw new IllegalArgumentException("cannot write xcontent for unknown value of type " + value.getClass()); } } diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentElasticsearchExtension.java b/server/src/main/java/org/elasticsearch/common/xcontent/XContentElasticsearchExtension.java new file mode 100644 index 0000000000000..1c852c68960a7 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/xcontent/XContentElasticsearchExtension.java @@ -0,0 +1,78 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.common.xcontent; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.joda.time.DateTimeZone; +import org.joda.time.tz.CachedDateTimeZone; +import org.joda.time.tz.FixedDateTimeZone; + +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +/** + * SPI extensions for Elasticsearch-specific classes (like the Lucene or Joda + * dependency classes) that need to be encoded by {@link XContentBuilder} in a + * specific way. + */ +public class XContentElasticsearchExtension implements XContentBuilderExtension { + + @Override + public Map, XContentBuilder.Writer> getXContentWriters() { + Map, XContentBuilder.Writer> writers = new HashMap<>(); + + // Fully-qualified here to reduce ambiguity around our (ES') Version class + writers.put(org.apache.lucene.util.Version.class, (b, v) -> b.value(Objects.toString(v))); + writers.put(DateTimeZone.class, (b, v) -> b.value(Objects.toString(v))); + writers.put(CachedDateTimeZone.class, (b, v) -> b.value(Objects.toString(v))); + writers.put(FixedDateTimeZone.class, (b, v) -> b.value(Objects.toString(v))); + + writers.put(BytesReference.class, (b, v) -> { + if (v == null) { + b.nullValue(); + } else { + BytesRef bytes = ((BytesReference) v).toBytesRef(); + b.value(bytes.bytes, bytes.offset, bytes.length); + } + }); + + writers.put(BytesRef.class, (b, v) -> { + if (v == null) { + b.nullValue(); + } else { + BytesRef bytes = (BytesRef) v; + b.value(bytes.bytes, bytes.offset, bytes.length); + } + }); + return writers; + } + + @Override + public Map, XContentBuilder.HumanReadableTransformer> getXContentHumanReadableTransformers() { + Map, XContentBuilder.HumanReadableTransformer> transformers = new HashMap<>(); + transformers.put(TimeValue.class, v -> ((TimeValue) v).millis()); + transformers.put(ByteSizeValue.class, v -> ((ByteSizeValue) v).getBytes()); + return transformers; + } +} diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java b/server/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java index a645bf81da343..06cc10713bec5 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java +++ b/server/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java @@ -228,7 +228,6 @@ enum NumberType { * Reads a plain binary value that was written via one of the following methods: * *
<ul>
-    *     <li>{@link XContentBuilder#field(String, org.apache.lucene.util.BytesRef)}</li>
*     <li>{@link XContentBuilder#field(String, byte[], int, int)}}</li>
*     <li>{@link XContentBuilder#field(String, byte[])}}</li>
* </ul>
@@ -236,8 +235,7 @@ enum NumberType { * as well as via their String variants of the separated value methods. * Note: Do not use this method to read values written with: * <ul>
-    *     <li>{@link XContentBuilder#utf8Field(String, org.apache.lucene.util.BytesRef)}</li>
-    *     <li>{@link XContentBuilder#utf8Field(String, org.apache.lucene.util.BytesRef)}</li>
+    *     <li>{@link XContentBuilder#utf8Field(String, byte[], int, int)}</li>
* </ul>
* * these methods write UTF-8 encoded strings and must be read through: diff --git a/server/src/main/java/org/elasticsearch/discovery/AckClusterStatePublishResponseHandler.java b/server/src/main/java/org/elasticsearch/discovery/AckClusterStatePublishResponseHandler.java index b432d0538c985..fd47fd0e86d51 100644 --- a/server/src/main/java/org/elasticsearch/discovery/AckClusterStatePublishResponseHandler.java +++ b/server/src/main/java/org/elasticsearch/discovery/AckClusterStatePublishResponseHandler.java @@ -20,7 +20,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.logging.ESLoggerFactory; @@ -70,7 +69,7 @@ private void onNodeAck(final Discovery.AckListener ackListener, DiscoveryNode no ackListener.onNodeAck(node, e); } catch (Exception inner) { inner.addSuppressed(e); - logger.debug((Supplier) () -> new ParameterizedMessage("error while processing ack for node [{}]", node), inner); + logger.debug(() -> new ParameterizedMessage("error while processing ack for node [{}]", node), inner); } } } diff --git a/server/src/main/java/org/elasticsearch/discovery/single/SingleNodeDiscovery.java b/server/src/main/java/org/elasticsearch/discovery/single/SingleNodeDiscovery.java index 2a32caabc77a4..94ea33d1a16ab 100644 --- a/server/src/main/java/org/elasticsearch/discovery/single/SingleNodeDiscovery.java +++ b/server/src/main/java/org/elasticsearch/discovery/single/SingleNodeDiscovery.java @@ -76,11 +76,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS public void onFailure(String source, Exception e) { latch.countDown(); ackListener.onNodeAck(transportService.getLocalNode(), e); - logger.warn( - (org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage( - "failed while applying cluster state locally [{}]", - event.source()), - e); + logger.warn(() -> new ParameterizedMessage("failed while applying cluster state locally [{}]", event.source()), e); } }; clusterApplier.onNewClusterState("apply-locally-on-node[" + event.source() + "]", () -> clusterState, listener); diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/MasterFaultDetection.java b/server/src/main/java/org/elasticsearch/discovery/zen/MasterFaultDetection.java index fff5e7cb5c983..c38cfe88619ee 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/MasterFaultDetection.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/MasterFaultDetection.java @@ -20,7 +20,6 @@ package org.elasticsearch.discovery.zen; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; @@ -270,13 +269,9 @@ public void handleException(TransportException exp) { } int retryCount = ++MasterFaultDetection.this.retryCount; - logger.trace( - (Supplier) () -> new ParameterizedMessage( + logger.trace(() -> new ParameterizedMessage( "[master] failed to ping [{}], retry [{}] out of [{}]", - masterNode, - retryCount, - pingRetryCount), - exp); + masterNode, retryCount, pingRetryCount), exp); if (retryCount >= pingRetryCount) { logger.debug("[master] failed to ping [{}], tried [{}] times, each with maximum [{}] timeout", masterNode, pingRetryCount, pingRetryTimeout); diff --git 
a/server/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java b/server/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java index 7d10466b638a8..e36497d09164f 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java @@ -20,7 +20,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterChangedEvent; @@ -364,7 +363,7 @@ public void onFailure(String source, Exception e) { try { callback.onFailure(e); } catch (Exception inner) { - logger.error((Supplier) () -> new ParameterizedMessage("error handling task failure [{}]", e), inner); + logger.error(() -> new ParameterizedMessage("error handling task failure [{}]", e), inner); } } } @@ -375,7 +374,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS try { callback.onSuccess(); } catch (Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("unexpected error during [{}]", source), e); + logger.error(() -> new ParameterizedMessage("unexpected error during [{}]", source), e); } } } diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/NodesFaultDetection.java b/server/src/main/java/org/elasticsearch/discovery/zen/NodesFaultDetection.java index 5cd02a52504f5..218e6e3f63f95 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/NodesFaultDetection.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/NodesFaultDetection.java @@ -20,7 +20,6 @@ package org.elasticsearch.discovery.zen; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -177,12 +176,8 @@ public void run() { } }); } catch (EsRejectedExecutionException ex) { - logger.trace( - (Supplier) () -> new ParameterizedMessage( - "[node ] [{}] ignoring node failure (reason [{}]). Local node is shutting down", - node, - reason), - ex); + logger.trace(() -> new ParameterizedMessage( + "[node ] [{}] ignoring node failure (reason [{}]). 
Local node is shutting down", node, reason), ex); } } @@ -247,13 +242,8 @@ public void handleException(TransportException exp) { } retryCount++; - logger.trace( - (Supplier) () -> new ParameterizedMessage( - "[node ] failed to ping [{}], retry [{}] out of [{}]", - node, - retryCount, - pingRetryCount), - exp); + logger.trace( () -> new ParameterizedMessage( + "[node ] failed to ping [{}], retry [{}] out of [{}]", node, retryCount, pingRetryCount), exp); if (retryCount >= pingRetryCount) { logger.debug("[node ] failed to ping [{}], tried [{}] times, each with maximum [{}] timeout", node, pingRetryCount, pingRetryTimeout); diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/PublishClusterStateAction.java b/server/src/main/java/org/elasticsearch/discovery/zen/PublishClusterStateAction.java index 13bcf1f15f56a..382a42141d83a 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/PublishClusterStateAction.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/PublishClusterStateAction.java @@ -247,9 +247,7 @@ private void sendFullClusterState(ClusterState clusterState, Map) () -> - new ParameterizedMessage("failed to serialize cluster_state before publishing it to node {}", node), e); + logger.warn(() -> new ParameterizedMessage("failed to serialize cluster_state before publishing it to node {}", node), e); sendingController.onNodeSendFailed(node, e); return; } @@ -297,16 +295,13 @@ public void handleException(TransportException exp) { logger.debug("resending full cluster state to node {} reason {}", node, exp.getDetailedMessage()); sendFullClusterState(clusterState, serializedStates, node, publishTimeout, sendingController); } else { - logger.debug((org.apache.logging.log4j.util.Supplier) () -> - new ParameterizedMessage("failed to send cluster state to {}", node), exp); + logger.debug(() -> new ParameterizedMessage("failed to send cluster state to {}", node), exp); sendingController.onNodeSendFailed(node, exp); } } }); } catch (Exception e) { - logger.warn( - (org.apache.logging.log4j.util.Supplier) () -> - new ParameterizedMessage("error sending cluster state to {}", node), e); + logger.warn(() -> new ParameterizedMessage("error sending cluster state to {}", node), e); sendingController.onNodeSendFailed(node, e); } } @@ -333,15 +328,13 @@ public void handleResponse(TransportResponse.Empty response) { @Override public void handleException(TransportException exp) { - logger.debug((org.apache.logging.log4j.util.Supplier) () -> - new ParameterizedMessage("failed to commit cluster state (uuid [{}], version [{}]) to {}", + logger.debug(() -> new ParameterizedMessage("failed to commit cluster state (uuid [{}], version [{}]) to {}", clusterState.stateUUID(), clusterState.version(), node), exp); sendingController.getPublishResponseHandler().onFailure(node, exp); } }); } catch (Exception t) { - logger.warn((org.apache.logging.log4j.util.Supplier) () -> - new ParameterizedMessage("error sending cluster state commit (uuid [{}], version [{}]) to {}", + logger.warn(() -> new ParameterizedMessage("error sending cluster state commit (uuid [{}], version [{}]) to {}", clusterState.stateUUID(), clusterState.version(), node), t); sendingController.getPublishResponseHandler().onFailure(node, t); } @@ -616,7 +609,7 @@ private synchronized boolean markAsFailed(String details, Exception reason) { if (committedOrFailed()) { return committed == false; } - logger.trace((org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage("failed to commit version [{}]. 
{}", + logger.trace(() -> new ParameterizedMessage("failed to commit version [{}]. {}", clusterState.version(), details), reason); committed = false; committedOrFailedLatch.countDown(); diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java b/server/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java index 312c954cf6484..64d51c2b5c4b3 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java @@ -22,7 +22,6 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.Version; @@ -513,18 +512,13 @@ protected void doRun() throws Exception { public void onFailure(Exception e) { if (e instanceof ConnectTransportException || e instanceof AlreadyClosedException) { // can't connect to the node - this is more common path! - logger.trace( - (Supplier) () -> new ParameterizedMessage( - "[{}] failed to ping {}", pingingRound.id(), node), e); + logger.trace(() -> new ParameterizedMessage("[{}] failed to ping {}", pingingRound.id(), node), e); } else if (e instanceof RemoteTransportException) { // something went wrong on the other side - logger.debug( - (Supplier) () -> new ParameterizedMessage( + logger.debug(() -> new ParameterizedMessage( "[{}] received a remote error as a response to ping {}", pingingRound.id(), node), e); } else { - logger.warn( - (Supplier) () -> new ParameterizedMessage( - "[{}] failed send ping to {}", pingingRound.id(), node), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed send ping to {}", pingingRound.id(), node), e); } } @@ -574,9 +568,9 @@ public void handleException(TransportException exp) { if (exp instanceof ConnectTransportException || exp.getCause() instanceof ConnectTransportException || exp.getCause() instanceof AlreadyClosedException) { // ok, not connected... 
- logger.trace((Supplier) () -> new ParameterizedMessage("failed to connect to {}", node), exp); + logger.trace(() -> new ParameterizedMessage("failed to connect to {}", node), exp); } else if (closed == false) { - logger.warn((Supplier) () -> new ParameterizedMessage("failed to send ping to [{}]", node), exp); + logger.warn(() -> new ParameterizedMessage("failed to send ping to [{}]", node), exp); } } }; diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java b/server/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java index 066299d07fbd1..7d8485fee09d4 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java @@ -291,7 +291,7 @@ protected void doStop() { try { membership.sendLeaveRequestBlocking(nodes.getMasterNode(), nodes.getLocalNode(), TimeValue.timeValueSeconds(1)); } catch (Exception e) { - logger.debug((Supplier) () -> new ParameterizedMessage("failed to send leave request to master [{}]", nodes.getMasterNode()), e); + logger.debug(() -> new ParameterizedMessage("failed to send leave request to master [{}]", nodes.getMasterNode()), e); } } else { // we're master -> let other potential master we left and start a master election now rather then wait for masterFD @@ -303,7 +303,7 @@ protected void doStop() { try { membership.sendLeaveRequest(nodes.getLocalNode(), possibleMaster); } catch (Exception e) { - logger.debug((Supplier) () -> new ParameterizedMessage("failed to send leave request from master [{}] to possible master [{}]", nodes.getMasterNode(), possibleMaster), e); + logger.debug(() -> new ParameterizedMessage("failed to send leave request from master [{}] to possible master [{}]", nodes.getMasterNode(), possibleMaster), e); } } } @@ -367,11 +367,8 @@ public void onNewClusterStateFailed(Exception e) { processedOrFailed.set(true); latch.countDown(); ackListener.onNodeAck(localNode, e); - logger.warn( - (org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage( - "failed while applying cluster state locally [{}]", - clusterChangedEvent.source()), - e); + logger.warn(() -> new ParameterizedMessage( + "failed while applying cluster state locally [{}]", clusterChangedEvent.source()), e); } }); @@ -393,11 +390,8 @@ public void onNewClusterStateFailed(Exception e) { try { latch.await(); } catch (InterruptedException e) { - logger.debug( - (org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage( - "interrupted while applying cluster state locally [{}]", - clusterChangedEvent.source()), - e); + logger.debug(() -> new ParameterizedMessage( + "interrupted while applying cluster state locally [{}]", clusterChangedEvent.source()), e); Thread.currentThread().interrupt(); } } @@ -514,7 +508,7 @@ private boolean joinElectedMaster(DiscoveryNode masterNode) { // first, make sure we can connect to the master transportService.connectToNode(masterNode); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("failed to connect to master [{}], retrying...", masterNode), e); + logger.warn(() -> new ParameterizedMessage("failed to connect to master [{}], retrying...", masterNode), e); return false; } int joinAttempt = 0; // we retry on illegal state if the master is not yet ready @@ -534,7 +528,7 @@ private boolean joinElectedMaster(DiscoveryNode masterNode) { } } else { if (logger.isTraceEnabled()) { - logger.trace((Supplier) () -> new ParameterizedMessage("failed to send join request to master [{}]", 
masterNode), e); + logger.trace(() -> new ParameterizedMessage("failed to send join request to master [{}]", masterNode), e); } else { logger.info("failed to send join request to master [{}], reason [{}]", masterNode, ExceptionsHelper.detailedMessage(e)); } @@ -646,7 +640,7 @@ ClusterState remainingNodesClusterState(final ClusterState currentState, Discove @Override public void onFailure(final String source, final Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e); + logger.error(() -> new ParameterizedMessage("unexpected failure during [{}]", source), e); } @Override @@ -718,7 +712,7 @@ private void handleMasterGone(final DiscoveryNode masterNode, final Throwable ca return; } - logger.info((Supplier) () -> new ParameterizedMessage("master_left [{}], reason [{}]", masterNode, reason), cause); + logger.info(() -> new ParameterizedMessage("master_left [{}], reason [{}]", masterNode, reason), cause); synchronized (stateMutex) { if (localNodeMaster() == false && masterNode.equals(committedState.get().nodes().getMasterNode())) { @@ -764,7 +758,7 @@ boolean processNextCommittedClusterState(String reason) { pendingStatesQueue.markAsFailed(newClusterState, e); } catch (Exception inner) { inner.addSuppressed(e); - logger.error((Supplier) () -> new ParameterizedMessage("unexpected exception while failing [{}]", reason), inner); + logger.error(() -> new ParameterizedMessage("unexpected exception while failing [{}]", reason), inner); } return false; } @@ -807,14 +801,14 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS @Override public void onFailure(String source, Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure applying [{}]", reason), e); + logger.error(() -> new ParameterizedMessage("unexpected failure applying [{}]", reason), e); try { // TODO: use cluster state uuid instead of full cluster state so that we don't keep reference to CS around // for too long. 
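The catch blocks around this hunk follow the standard suppressed-exception idiom: when failing the pending cluster state itself throws, the primary exception is attached via addSuppressed so neither stack trace is lost. A self-contained sketch of the idiom; the names are hypothetical:

class SuppressedFailureSketch {
    static void failSafely(Runnable markAsFailed, Exception primary) {
        try {
            markAsFailed.run();
        } catch (RuntimeException inner) {
            // Attach the original failure so it appears under "Suppressed:" in
            // inner's stack trace instead of being silently replaced.
            inner.addSuppressed(primary);
            inner.printStackTrace(); // stand-in for the surrounding logger.error(...) calls
        }
    }
}

Without addSuppressed, the secondary exception would mask the root cause that triggered the failure handling in the first place.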
pendingStatesQueue.markAsFailed(newClusterState, e); } catch (Exception inner) { inner.addSuppressed(e); - logger.error((Supplier) () -> new ParameterizedMessage("unexpected exception while failing [{}]", reason), inner); + logger.error(() -> new ParameterizedMessage("unexpected exception while failing [{}]", reason), inner); } } }); @@ -880,7 +874,7 @@ void handleJoinRequest(final DiscoveryNode node, final ClusterState state, final try { membership.sendValidateJoinRequestBlocking(node, state, joinTimeout); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("failed to validate incoming join request from node [{}]", node), + logger.warn(() -> new ParameterizedMessage("failed to validate incoming join request from node [{}]", node), e); callback.onFailure(new IllegalStateException("failure when sending a validation request to node", e)); return; @@ -1029,11 +1023,11 @@ private void handleAnotherMaster(ClusterState localClusterState, final Discovery @Override public void handleException(TransportException exp) { - logger.warn((Supplier) () -> new ParameterizedMessage("failed to send rejoin request to [{}]", otherMaster), exp); + logger.warn(() -> new ParameterizedMessage("failed to send rejoin request to [{}]", otherMaster), exp); } }); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("failed to send rejoin request to [{}]", otherMaster), e); + logger.warn(() -> new ParameterizedMessage("failed to send rejoin request to [{}]", otherMaster), e); } } } diff --git a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java index 326393ac84ee3..fc1d0872e53a3 100644 --- a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java +++ b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.store.Directory; @@ -218,8 +217,8 @@ public NodeEnvironment(Settings settings, Environment environment) throws IOExce } } catch (IOException e) { - startupTraceLogger.trace( - (Supplier) () -> new ParameterizedMessage("failed to obtain node lock on {}", dir.toAbsolutePath()), e); + startupTraceLogger.trace(() -> new ParameterizedMessage( + "failed to obtain node lock on {}", dir.toAbsolutePath()), e); lastException = new IOException("failed to obtain lock on " + dir.toAbsolutePath(), e); // release all the ones that were obtained up until now releaseAndNullLocks(locks); @@ -905,7 +904,7 @@ public void close() { logger.trace("releasing lock [{}]", lock); lock.close(); } catch (IOException e) { - logger.trace((Supplier) () -> new ParameterizedMessage("failed to release lock [{}]", lock), e); + logger.trace(() -> new ParameterizedMessage("failed to release lock [{}]", lock), e); } } } diff --git a/server/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java b/server/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java index e2bbae775e5d7..0a91ba81443ed 100644 --- a/server/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java +++ b/server/src/main/java/org/elasticsearch/gateway/AsyncShardFetch.java @@ -21,7 +21,6 @@ import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; 
-import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; @@ -217,7 +216,7 @@ protected synchronized void processAsyncFetch(List responses, List) () -> new ParameterizedMessage("{}: failed to list shard for {} on node [{}]", + logger.warn(() -> new ParameterizedMessage("{}: failed to list shard for {} on node [{}]", shardId, type, failure.nodeId()), failure); nodeEntry.doneFetching(failure.getCause()); } diff --git a/server/src/main/java/org/elasticsearch/gateway/Gateway.java b/server/src/main/java/org/elasticsearch/gateway/Gateway.java index f4d191ac28a8a..ae8f5a85def44 100644 --- a/server/src/main/java/org/elasticsearch/gateway/Gateway.java +++ b/server/src/main/java/org/elasticsearch/gateway/Gateway.java @@ -128,9 +128,7 @@ public void performStateRecovery(final GatewayStateRecoveredListener listener) t } } catch (Exception e) { final Index electedIndex = electedIndexMetaData.getIndex(); - logger.warn( - (org.apache.logging.log4j.util.Supplier) - () -> new ParameterizedMessage("recovering index {} failed - recovering as closed", electedIndex), e); + logger.warn(() -> new ParameterizedMessage("recovering index {} failed - recovering as closed", electedIndex), e); electedIndexMetaData = IndexMetaData.builder(electedIndexMetaData).state(IndexMetaData.State.CLOSE).build(); } @@ -159,13 +157,8 @@ private void logUnknownSetting(String settingType, Map.Entry e) } private void logInvalidSetting(String settingType, Map.Entry e, IllegalArgumentException ex) { - logger.warn( - (org.apache.logging.log4j.util.Supplier) - () -> new ParameterizedMessage("ignoring invalid {} setting: [{}] with value [{}]; archiving", - settingType, - e.getKey(), - e.getValue()), - ex); + logger.warn(() -> new ParameterizedMessage("ignoring invalid {} setting: [{}] with value [{}]; archiving", + settingType, e.getKey(), e.getValue()), ex); } public interface GatewayStateRecoveredListener { diff --git a/server/src/main/java/org/elasticsearch/gateway/GatewayService.java b/server/src/main/java/org/elasticsearch/gateway/GatewayService.java index 91ce90bd8b58c..d77031218179c 100644 --- a/server/src/main/java/org/elasticsearch/gateway/GatewayService.java +++ b/server/src/main/java/org/elasticsearch/gateway/GatewayService.java @@ -21,7 +21,6 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; @@ -283,7 +282,7 @@ public ClusterState execute(ClusterState currentState) { @Override public void onFailure(String source, Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e); + logger.error(() -> new ParameterizedMessage("unexpected failure during [{}]", source), e); GatewayRecoveryListener.this.onFailure("failed to updated cluster state"); } diff --git a/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java b/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java index 5f75771e9e63f..116d181ccd3a2 100644 --- a/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java +++ b/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java @@ -20,7 +20,6 @@ package 
org.elasticsearch.gateway; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; @@ -158,7 +157,7 @@ public ClusterState execute(ClusterState currentState) { minIndexCompatibilityVersion); } catch (Exception ex) { // upgrade failed - adding index as closed - logger.warn((Supplier) () -> new ParameterizedMessage("found dangled index [{}] on node [{}]. This index cannot be upgraded to the latest version, adding as closed", indexMetaData.getIndex(), request.fromNode), ex); + logger.warn(() -> new ParameterizedMessage("found dangled index [{}] on node [{}]. This index cannot be upgraded to the latest version, adding as closed", indexMetaData.getIndex(), request.fromNode), ex); upgradedIndexMetaData = IndexMetaData.builder(indexMetaData).state(IndexMetaData.State.CLOSE).version(indexMetaData.getVersion() + 1).build(); } metaData.put(upgradedIndexMetaData, false); @@ -183,7 +182,7 @@ public ClusterState execute(ClusterState currentState) { @Override public void onFailure(String source, Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e); + logger.error(() -> new ParameterizedMessage("unexpected failure during [{}]", source), e); try { channel.sendResponse(e); } catch (Exception inner) { diff --git a/server/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java b/server/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java index 0ac421b699faa..b6c8d411474c9 100644 --- a/server/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java +++ b/server/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java @@ -20,7 +20,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.IndexFormatTooNewException; @@ -323,8 +322,7 @@ public T loadLatestState(Logger logger, NamedXContentRegistry namedXContentRegi return state; } catch (Exception e) { exceptions.add(new IOException("failed to read " + pathAndStateId.toString(), e)); - logger.debug( - (Supplier) () -> new ParameterizedMessage( + logger.debug(() -> new ParameterizedMessage( "{}: failed to read [{}], ignoring...", pathAndStateId.file.toAbsolutePath(), prefix), e); } } diff --git a/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java b/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java index 7fab7acc5f22d..00b981175f228 100644 --- a/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java +++ b/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java @@ -20,7 +20,6 @@ package org.elasticsearch.gateway; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.Nullable; @@ -125,7 +124,7 @@ public void writeIndex(String reason, IndexMetaData indexMetaData) throws IOExce IndexMetaData.FORMAT.write(indexMetaData, nodeEnv.indexPaths(indexMetaData.getIndex())); } catch (Exception ex) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}]: failed to write index state", index), ex); + 
logger.warn(() -> new ParameterizedMessage("[{}]: failed to write index state", index), ex); throw new IOException("failed to write state for [" + index + "]", ex); } } diff --git a/server/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java b/server/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java index c66c00728a715..f9344186c5753 100644 --- a/server/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java +++ b/server/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.RecoverySource; @@ -259,9 +258,9 @@ protected static NodeShardsResult buildNodeShardsResult(ShardRouting shard, bool } else { final String finalAllocationId = allocationId; if (nodeShardState.storeException() instanceof ShardLockObtainFailedException) { - logger.trace((Supplier) () -> new ParameterizedMessage("[{}] on node [{}] has allocation id [{}] but the store can not be opened as it's locked, treating as valid shard", shard, nodeShardState.getNode(), finalAllocationId), nodeShardState.storeException()); + logger.trace(() -> new ParameterizedMessage("[{}] on node [{}] has allocation id [{}] but the store can not be opened as it's locked, treating as valid shard", shard, nodeShardState.getNode(), finalAllocationId), nodeShardState.storeException()); } else { - logger.trace((Supplier) () -> new ParameterizedMessage("[{}] on node [{}] has allocation id [{}] but the store can not be opened, treating as no allocation id", shard, nodeShardState.getNode(), finalAllocationId), nodeShardState.storeException()); + logger.trace(() -> new ParameterizedMessage("[{}] on node [{}] has allocation id [{}] but the store can not be opened, treating as no allocation id", shard, nodeShardState.getNode(), finalAllocationId), nodeShardState.storeException()); allocationId = null; } } diff --git a/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java b/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java index 11df875d4dd99..e854584b150d8 100644 --- a/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java +++ b/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java @@ -20,7 +20,6 @@ package org.elasticsearch.gateway; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; @@ -146,8 +145,7 @@ protected NodeGatewayStartedShards nodeOperation(NodeRequest request) { Store.tryOpenIndex(shardPath.resolveIndex(), shardId, nodeEnv::shardLock, logger); } catch (Exception exception) { final ShardPath finalShardPath = shardPath; - logger.trace( - (Supplier) () -> new ParameterizedMessage( + logger.trace(() -> new ParameterizedMessage( "{} can't open index for shard [{}] in path [{}]", shardId, shardStateMetaData, diff --git a/server/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java b/server/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java index 90d8a205e8b57..1bdec683bfbd0 100644 --- 
a/server/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java +++ b/server/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.logging.Loggers; @@ -61,7 +60,7 @@ public void shardRoutingChanged(IndexShard indexShard, @Nullable ShardRouting ol try { listener.shardRoutingChanged(indexShard, oldRouting, newRouting); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to invoke shard touring changed callback", indexShard.shardId().getId()), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke shard routing changed callback", indexShard.shardId().getId()), e); } } } @@ -72,7 +71,7 @@ public void afterIndexShardCreated(IndexShard indexShard) { try { listener.afterIndexShardCreated(indexShard); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to invoke after shard created callback", indexShard.shardId().getId()), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke after shard created callback", indexShard.shardId().getId()), e); throw e; } } @@ -84,7 +83,7 @@ public void afterIndexShardStarted(IndexShard indexShard) { try { listener.afterIndexShardStarted(indexShard); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to invoke after shard started callback", indexShard.shardId().getId()), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke after shard started callback", indexShard.shardId().getId()), e); throw e; } } @@ -97,7 +96,7 @@ public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexSh try { listener.beforeIndexShardClosed(shardId, indexShard, indexSettings); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to invoke before shard closed callback", shardId.getId()), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke before shard closed callback", shardId.getId()), e); throw e; } } @@ -110,7 +109,7 @@ public void afterIndexShardClosed(ShardId shardId, @Nullable IndexShard indexSha try { listener.afterIndexShardClosed(shardId, indexShard, indexSettings); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to invoke after shard closed callback", shardId.getId()), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke after shard closed callback", shardId.getId()), e); throw e; } } @@ -122,7 +121,7 @@ public void onShardInactive(IndexShard indexShard) { try { listener.onShardInactive(indexShard); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to invoke on shard inactive callback", indexShard.shardId().getId()), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke on shard inactive callback", indexShard.shardId().getId()), e); throw e; } } @@ -134,7 +133,7 @@ public void indexShardStateChanged(IndexShard indexShard, @Nullable IndexShardSt try { listener.indexShardStateChanged(indexShard, previousState, indexShard.state(), reason); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to invoke index shard state changed callback", 
indexShard.shardId().getId()), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke index shard state changed callback", indexShard.shardId().getId()), e); throw e; } } @@ -170,7 +169,7 @@ public void beforeIndexShardCreated(ShardId shardId, Settings indexSettings) { try { listener.beforeIndexShardCreated(shardId, indexSettings); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to invoke before shard created callback", shardId), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke before shard created callback", shardId), e); throw e; } } @@ -207,7 +206,7 @@ public void beforeIndexShardDeleted(ShardId shardId, try { listener.beforeIndexShardDeleted(shardId, indexSettings); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to invoke before shard deleted callback", shardId.getId()), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke before shard deleted callback", shardId.getId()), e); throw e; } } @@ -220,7 +219,7 @@ public void afterIndexShardDeleted(ShardId shardId, try { listener.afterIndexShardDeleted(shardId, indexSettings); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to invoke after shard deleted callback", shardId.getId()), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke after shard deleted callback", shardId.getId()), e); throw e; } } diff --git a/server/src/main/java/org/elasticsearch/index/IndexService.java b/server/src/main/java/org/elasticsearch/index/IndexService.java index 680617d6498dd..13f76baa27695 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexService.java +++ b/server/src/main/java/org/elasticsearch/index/IndexService.java @@ -430,8 +430,7 @@ private void closeShard(String reason, ShardId sId, IndexShard indexShard, Store final boolean flushEngine = deleted.get() == false && closed.get(); indexShard.close(reason, flushEngine); } catch (Exception e) { - logger.debug((org.apache.logging.log4j.util.Supplier) - () -> new ParameterizedMessage("[{}] failed to close index shard", shardId), e); + logger.debug(() -> new ParameterizedMessage("[{}] failed to close index shard", shardId), e); // ignore } } diff --git a/server/src/main/java/org/elasticsearch/index/IndexWarmer.java b/server/src/main/java/org/elasticsearch/index/IndexWarmer.java index 0c901cf65010b..f8b9d9d2ef805 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexWarmer.java +++ b/server/src/main/java/org/elasticsearch/index/IndexWarmer.java @@ -20,7 +20,6 @@ package org.elasticsearch.index; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.index.DirectoryReader; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; @@ -154,9 +153,7 @@ public TerminationHandle warmReader(final IndexShard indexShard, final Engine.Se indexShard .warmerService() .logger() - .warn( - (Supplier) () -> new ParameterizedMessage( - "failed to warm-up global ordinals for [{}]", fieldType.name()), e); + .warn(() -> new ParameterizedMessage("failed to warm-up global ordinals for [{}]", fieldType.name()), e); } finally { latch.countDown(); } diff --git a/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java b/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java index 6af9c5eeb6e51..a59af29036b7d 100644 --- 
a/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java +++ b/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.cache.bitset; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReaderContext; import org.apache.lucene.index.LeafReaderContext; @@ -263,7 +262,7 @@ public IndexWarmer.TerminationHandle warmReader(final IndexShard indexShard, fin indexShard.warmerService().logger().trace("warmed bitset for [{}], took [{}]", filterToWarm, TimeValue.timeValueNanos(System.nanoTime() - start)); } } catch (Exception e) { - indexShard.warmerService().logger().warn((Supplier) () -> new ParameterizedMessage("failed to load bitset for [{}]", filterToWarm), e); + indexShard.warmerService().logger().warn(() -> new ParameterizedMessage("failed to load bitset for [{}]", filterToWarm), e); } finally { latch.countDown(); } diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index 1452c5de49278..1ca4468539da1 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexFileNames; @@ -597,7 +596,7 @@ private ImmutableOpenMap getSegmentFileSizes(SegmentReader segment try { directory = engineConfig.getCodec().compoundFormat().getCompoundReader(segmentReader.directory(), segmentCommitInfo.info, IOContext.READ); } catch (IOException e) { - logger.warn((Supplier) () -> new ParameterizedMessage("Error when opening compound reader for Directory [{}] and SegmentCommitInfo [{}]", segmentReader.directory(), segmentCommitInfo), e); + logger.warn(() -> new ParameterizedMessage("Error when opening compound reader for Directory [{}] and SegmentCommitInfo [{}]", segmentReader.directory(), segmentCommitInfo), e); return ImmutableOpenMap.of(); } @@ -613,15 +612,14 @@ private ImmutableOpenMap getSegmentFileSizes(SegmentReader segment files = directory.listAll(); } catch (IOException e) { final Directory finalDirectory = directory; - logger.warn( - (Supplier) () -> new ParameterizedMessage("Couldn't list Compound Reader Directory [{}]", finalDirectory), e); + logger.warn(() -> new ParameterizedMessage("Couldn't list Compound Reader Directory [{}]", finalDirectory), e); return ImmutableOpenMap.of(); } } else { try { files = segmentReader.getSegmentInfo().files().toArray(new String[]{}); } catch (IOException e) { - logger.warn((Supplier) () -> new ParameterizedMessage("Couldn't list Directory from SegmentReader [{}] and SegmentInfo [{}]", segmentReader, segmentReader.getSegmentInfo()), e); + logger.warn(() -> new ParameterizedMessage("Couldn't list Directory from SegmentReader [{}] and SegmentInfo [{}]", segmentReader, segmentReader.getSegmentInfo()), e); return ImmutableOpenMap.of(); } } @@ -634,13 +632,10 @@ private ImmutableOpenMap getSegmentFileSizes(SegmentReader segment length = directory.fileLength(file); } catch (NoSuchFileException | FileNotFoundException e) { final Directory finalDirectory = directory; - 
logger.warn((Supplier) - () -> new ParameterizedMessage("Tried to query fileLength but file is gone [{}] [{}]", finalDirectory, file), e); + logger.warn(() -> new ParameterizedMessage("Tried to query fileLength but file is gone [{}] [{}]", finalDirectory, file), e); } catch (IOException e) { final Directory finalDirectory = directory; - logger.warn( - (Supplier) - () -> new ParameterizedMessage("Error when trying to query fileLength [{}] [{}]", finalDirectory, file), e); + logger.warn(() -> new ParameterizedMessage("Error when trying to query fileLength [{}] [{}]", finalDirectory, file), e); } if (length == 0L) { continue; @@ -653,9 +648,7 @@ private ImmutableOpenMap getSegmentFileSizes(SegmentReader segment directory.close(); } catch (IOException e) { final Directory finalDirectory = directory; - logger.warn( - (Supplier) - () -> new ParameterizedMessage("Error when closing compound reader on Directory [{}]", finalDirectory), e); + logger.warn(() -> new ParameterizedMessage("Error when closing compound reader on Directory [{}]", finalDirectory), e); } } @@ -706,7 +699,7 @@ protected Segment[] getSegmentInfo(SegmentInfos lastCommittedSegmentInfos, boole try { segment.sizeInBytes = info.sizeInBytes(); } catch (IOException e) { - logger.trace((Supplier) () -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e); + logger.trace(() -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e); } segments.put(info.info.name, segment); } else { @@ -732,7 +725,7 @@ private void fillSegmentInfo(SegmentReader segmentReader, boolean verbose, boole try { segment.sizeInBytes = info.sizeInBytes(); } catch (IOException e) { - logger.trace((Supplier) () -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e); + logger.trace(() -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e); } segment.memoryInBytes = segmentReader.ramBytesUsed(); segment.segmentSort = info.info.getIndexSort(); @@ -880,7 +873,7 @@ public void failEngine(String reason, @Nullable Exception failure) { store.incRef(); try { if (failedEngine.get() != null) { - logger.warn((Supplier) () -> new ParameterizedMessage("tried to fail engine but engine is already failed. ignoring. [{}]", reason), failure); + logger.warn(() -> new ParameterizedMessage("tried to fail engine but engine is already failed. ignoring. 
[{}]", reason), failure); return; } // this must happen before we close IW or Translog such that we can check this state to opt out of failing the engine @@ -890,7 +883,7 @@ public void failEngine(String reason, @Nullable Exception failure) { // we just go and close this engine - no way to recover closeNoLock("engine failed on: [" + reason + "]", closedLatch); } finally { - logger.warn((Supplier) () -> new ParameterizedMessage("failed engine [{}]", reason), failure); + logger.warn(() -> new ParameterizedMessage("failed engine [{}]", reason), failure); // we must set a failure exception, generate one if not supplied // we first mark the store as corrupted before we notify any listeners // this must happen first otherwise we might try to reallocate so quickly @@ -913,7 +906,7 @@ public void failEngine(String reason, @Nullable Exception failure) { store.decRef(); } } else { - logger.debug((Supplier) () -> new ParameterizedMessage("tried to fail engine but could not acquire lock - engine should be failed by now [{}]", reason), failure); + logger.debug(() -> new ParameterizedMessage("tried to fail engine but could not acquire lock - engine should be failed by now [{}]", reason), failure); } } diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 02582b9c771ef..6fde5d47d2e4d 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexFormatTooOldException; @@ -1527,7 +1526,8 @@ final boolean tryRenewSyncCommit() { ensureOpen(); ensureCanFlush(); String syncId = lastCommittedSegmentInfos.getUserData().get(SYNC_COMMIT_ID); - if (syncId != null && translog.uncommittedOperations() == 0 && indexWriter.hasUncommittedChanges()) { + long translogGenOfLastCommit = Long.parseLong(lastCommittedSegmentInfos.userData.get(Translog.TRANSLOG_GENERATION_KEY)); + if (syncId != null && indexWriter.hasUncommittedChanges() && translog.totalOperationsByMinGen(translogGenOfLastCommit) == 0) { logger.trace("start renewing sync commit [{}]", syncId); commitIndexWriter(indexWriter, translog, syncId); logger.debug("successfully sync committed. sync id [{}].", syncId); @@ -1549,19 +1549,30 @@ final boolean tryRenewSyncCommit() { @Override public boolean shouldPeriodicallyFlush() { ensureOpen(); + final long translogGenerationOfLastCommit = Long.parseLong(lastCommittedSegmentInfos.userData.get(Translog.TRANSLOG_GENERATION_KEY)); final long flushThreshold = config().getIndexSettings().getFlushThresholdSize().getBytes(); - final long uncommittedSizeOfCurrentCommit = translog.uncommittedSizeInBytes(); - if (uncommittedSizeOfCurrentCommit < flushThreshold) { + if (translog.sizeInBytesByMinGen(translogGenerationOfLastCommit) < flushThreshold) { return false; } /* - * We should only flush ony if the shouldFlush condition can become false after flushing. - * This condition will change if the `uncommittedSize` of the new commit is smaller than - * the `uncommittedSize` of the current commit. This method is to maintain translog only, - * thus the IndexWriter#hasUncommittedChanges condition is not considered. 
+ * We flush to reduce the size of the uncommitted translog, but strictly speaking the uncommitted size won't always be + * below the flush-threshold after a flush. To avoid getting into an endless loop of flushing, we only enable the + * periodic flush condition if this condition is disabled after a flush. The condition will change if the new + * commit points to a later generation than the last commit's (i.e. gen-of-last-commit < gen-of-new-commit) [1]. + * + * When the local checkpoint equals max_seqno, and the translog-gen of the last commit equals the translog-gen of + * the new commit, we know that the last generation must contain operations because its size is above the flush + * threshold and the flush-threshold is guaranteed to be higher than an empty translog by the setting validation. + * This guarantees that the new commit will point to the newly rolled generation. In fact, this scenario only + * happens when the generation-threshold is close to or above the flush-threshold; otherwise we would have rolled + * generations as the generation-threshold was reached, and the first condition (i.e. [1]) would already be satisfied. + * + * This method maintains the translog only, thus the IndexWriter#hasUncommittedChanges condition is not considered. */ - final long uncommittedSizeOfNewCommit = translog.sizeOfGensAboveSeqNoInBytes(localCheckpointTracker.getCheckpoint() + 1); - return uncommittedSizeOfNewCommit < uncommittedSizeOfCurrentCommit; + final long translogGenerationOfNewCommit = + translog.getMinGenerationForSeqNo(localCheckpointTracker.getCheckpoint() + 1).translogFileGeneration; + return translogGenerationOfLastCommit < translogGenerationOfNewCommit + || localCheckpointTracker.getCheckpoint() == localCheckpointTracker.getMaxSeqNo(); } @Override @@ -1960,7 +1971,7 @@ public Searcher acquireSearcher(String source, SearcherScope scope) { throw ex; } catch (Exception ex) { ensureOpen(ex); // throw EngineCloseException here if we are already closed - logger.error((Supplier) () -> new ParameterizedMessage("failed to acquire searcher, source {}", source), ex); + logger.error(() -> new ParameterizedMessage("failed to acquire searcher, source {}", source), ex); throw new EngineException(shardId, "failed to acquire searcher, source " + source, ex); } finally { Releasables.close(releasable); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java index 36acc456ab741..a2a16596b0244 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/GeoPointFieldMapper.java @@ -29,6 +29,7 @@ import org.apache.lucene.search.TermQuery; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.Explicit; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoUtils; import org.elasticsearch.common.settings.Settings; @@ -57,11 +58,13 @@ public class GeoPointFieldMapper extends FieldMapper implements ArrayValueMapper public static class Names { public static final String IGNORE_MALFORMED = "ignore_malformed"; + public static final ParseField IGNORE_Z_VALUE = new ParseField("ignore_z_value"); } public static class Defaults { public static final Explicit IGNORE_MALFORMED = new Explicit<>(false, false); public static final GeoPointFieldType FIELD_TYPE = new GeoPointFieldType(); + public static final Explicit IGNORE_Z_VALUE = new 
Explicit<>(true, false); static { FIELD_TYPE.setTokenized(false); @@ -73,6 +76,7 @@ public static class Defaults { public static class Builder extends FieldMapper.Builder { protected Boolean ignoreMalformed; + private Boolean ignoreZValue; public Builder(String name) { super(name, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE); @@ -94,19 +98,32 @@ protected Explicit ignoreMalformed(BuilderContext context) { return GeoPointFieldMapper.Defaults.IGNORE_MALFORMED; } + protected Explicit ignoreZValue(BuilderContext context) { + if (ignoreZValue != null) { + return new Explicit<>(ignoreZValue, true); + } + return Defaults.IGNORE_Z_VALUE; + } + + public Builder ignoreZValue(final boolean ignoreZValue) { + this.ignoreZValue = ignoreZValue; + return this; + } + public GeoPointFieldMapper build(BuilderContext context, String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Settings indexSettings, MultiFields multiFields, Explicit ignoreMalformed, - CopyTo copyTo) { + Explicit ignoreZValue, CopyTo copyTo) { setupFieldType(context); return new GeoPointFieldMapper(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, - ignoreMalformed, copyTo); + ignoreMalformed, ignoreZValue, copyTo); } @Override public GeoPointFieldMapper build(BuilderContext context) { return build(context, name, fieldType, defaultFieldType, context.indexSettings(), - multiFieldsBuilder.build(this, context), ignoreMalformed(context), copyTo); + multiFieldsBuilder.build(this, context), ignoreMalformed(context), + ignoreZValue(context), copyTo); } } @@ -125,6 +142,10 @@ public Mapper.Builder parse(String name, Map node, ParserContext if (propName.equals(Names.IGNORE_MALFORMED)) { builder.ignoreMalformed(TypeParsers.nodeBooleanValue(name, Names.IGNORE_MALFORMED, propNode, parserContext)); iterator.remove(); + } else if (propName.equals(Names.IGNORE_Z_VALUE.getPreferredName())) { + builder.ignoreZValue(TypeParsers.nodeBooleanValue(propName, Names.IGNORE_Z_VALUE.getPreferredName(), + propNode, parserContext)); + iterator.remove(); } } @@ -133,12 +154,14 @@ public Mapper.Builder parse(String name, Map node, ParserContext } protected Explicit ignoreMalformed; + protected Explicit ignoreZValue; public GeoPointFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Settings indexSettings, MultiFields multiFields, Explicit ignoreMalformed, - CopyTo copyTo) { + Explicit ignoreZValue, CopyTo copyTo) { super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo); this.ignoreMalformed = ignoreMalformed; + this.ignoreZValue = ignoreZValue; } @Override @@ -148,6 +171,9 @@ protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { if (gpfmMergeWith.ignoreMalformed.explicit()) { this.ignoreMalformed = gpfmMergeWith.ignoreMalformed; } + if (gpfmMergeWith.ignoreZValue.explicit()) { + this.ignoreZValue = gpfmMergeWith.ignoreZValue; + } } @Override @@ -266,12 +292,18 @@ public Mapper parse(ParseContext context) throws IOException { double lon = context.parser().doubleValue(); token = context.parser().nextToken(); double lat = context.parser().doubleValue(); - while ((token = context.parser().nextToken()) != XContentParser.Token.END_ARRAY); + token = context.parser().nextToken(); + Double alt = Double.NaN; + if (token == XContentParser.Token.VALUE_NUMBER) { + alt = GeoPoint.assertZValue(ignoreZValue.value(), context.parser().doubleValue()); + } else if (token != XContentParser.Token.END_ARRAY) { + throw new ElasticsearchParseException("[{}] field type does 
not accept > 3 dimensions", CONTENT_TYPE); + } parse(context, sparse.reset(lat, lon)); } else { while (token != XContentParser.Token.END_ARRAY) { if (token == XContentParser.Token.VALUE_STRING) { - parsePointFromString(context, sparse, context.parser().text()); + parse(context, sparse.resetFromString(context.parser().text(), ignoreZValue.value())); } else { try { parse(context, GeoUtils.parseGeoPoint(context.parser(), sparse)); @@ -286,7 +318,7 @@ public Mapper parse(ParseContext context) throws IOException { } } } else if (token == XContentParser.Token.VALUE_STRING) { - parsePointFromString(context, sparse, context.parser().text()); + parse(context, sparse.resetFromString(context.parser().text(), ignoreZValue.value())); } else if (token != XContentParser.Token.VALUE_NULL) { try { parse(context, GeoUtils.parseGeoPoint(context.parser(), sparse)); @@ -302,19 +334,18 @@ public Mapper parse(ParseContext context) throws IOException { return null; } - private void parsePointFromString(ParseContext context, GeoPoint sparse, String point) throws IOException { - if (point.indexOf(',') < 0) { - parse(context, sparse.resetFromGeoHash(point)); - } else { - parse(context, sparse.resetFromString(point)); - } - } - @Override protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException { super.doXContentBody(builder, includeDefaults, params); if (includeDefaults || ignoreMalformed.explicit()) { builder.field(GeoPointFieldMapper.Names.IGNORE_MALFORMED, ignoreMalformed.value()); } + if (includeDefaults || ignoreZValue.explicit()) { + builder.field(Names.IGNORE_Z_VALUE.getPreferredName(), ignoreZValue.value()); + } + } + + public Explicit ignoreZValue() { + return ignoreZValue; } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java index 89ce0923606d7..d71b3c17e4e91 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java @@ -101,6 +101,7 @@ public static class Defaults { public static final double LEGACY_DISTANCE_ERROR_PCT = 0.025d; public static final Explicit COERCE = new Explicit<>(false, false); public static final Explicit IGNORE_MALFORMED = new Explicit<>(false, false); + public static final Explicit IGNORE_Z_VALUE = new Explicit<>(true, false); public static final MappedFieldType FIELD_TYPE = new GeoShapeFieldType(); @@ -121,6 +122,7 @@ public static class Builder extends FieldMapper.Builder ignoreMalformed(BuilderContext context) { return Defaults.IGNORE_MALFORMED; } + protected Explicit ignoreZValue(BuilderContext context) { + if (ignoreZValue != null) { + return new Explicit<>(ignoreZValue, true); + } + return Defaults.IGNORE_Z_VALUE; + } + + public Builder ignoreZValue(final boolean ignoreZValue) { + this.ignoreZValue = ignoreZValue; + return this; + } + @Override public GeoShapeFieldMapper build(BuilderContext context) { GeoShapeFieldType geoShapeFieldType = (GeoShapeFieldType)fieldType; @@ -175,8 +189,8 @@ public GeoShapeFieldMapper build(BuilderContext context) { } setupFieldType(context); - return new GeoShapeFieldMapper(name, fieldType, ignoreMalformed(context), coerce(context), context.indexSettings(), - multiFieldsBuilder.build(this, context), copyTo); + return new GeoShapeFieldMapper(name, fieldType, ignoreMalformed(context), coerce(context), ignoreZValue(context), + context.indexSettings(), 
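The GeoPointFieldMapper hunk above changes coordinate-array parsing: a third numeric value is now run through GeoPoint.assertZValue (accepted and discarded when ignore_z_value is true, rejected when it is false), and anything beyond three dimensions always fails. A rough sketch of that accept/reject rule; the method and messages are illustrative, as the real mapper parses token-by-token from XContent (the GeoShapeFieldMapper hunk resumes after this sketch):

    /** Illustrative rule for geo_point coordinate arrays under ignore_z_value. */
    static double[] toLatLon(double[] coords, boolean ignoreZValue) {
        if (coords.length < 2) {
            throw new IllegalArgumentException("geo_point requires at least [lon, lat]");
        }
        if (coords.length == 3 && ignoreZValue == false) {
            // mirrors the intent of GeoPoint.assertZValue: a z value is only
            // tolerated when ignore_z_value is true (the new default)
            throw new IllegalArgumentException("found Z value [" + coords[2] + "] but ignore_z_value is false");
        }
        if (coords.length > 3) {
            throw new IllegalArgumentException("geo_point does not accept more than 3 dimensions");
        }
        return new double[] { coords[1], coords[0] }; // indexed as [lat, lon]; z, if present, is dropped
    }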
multiFieldsBuilder.build(this, context), copyTo); } } @@ -213,6 +227,10 @@ public Mapper.Builder parse(String name, Map node, ParserContext } else if (Names.COERCE.equals(fieldName)) { builder.coerce(TypeParsers.nodeBooleanValue(fieldName, Names.COERCE, fieldNode, parserContext)); iterator.remove(); + } else if (GeoPointFieldMapper.Names.IGNORE_Z_VALUE.getPreferredName().equals(fieldName)) { + builder.ignoreZValue(TypeParsers.nodeBooleanValue(fieldName, GeoPointFieldMapper.Names.IGNORE_Z_VALUE.getPreferredName(), + fieldNode, parserContext)); + iterator.remove(); } else if (Names.STRATEGY_POINTS_ONLY.equals(fieldName) && builder.fieldType().strategyName.equals(SpatialStrategy.TERM.getStrategyName()) == false) { boolean pointsOnly = TypeParsers.nodeBooleanValue(fieldName, Names.STRATEGY_POINTS_ONLY, fieldNode, parserContext); @@ -453,12 +471,15 @@ public Query termQuery(Object value, QueryShardContext context) { protected Explicit coerce; protected Explicit ignoreMalformed; + protected Explicit ignoreZValue; public GeoShapeFieldMapper(String simpleName, MappedFieldType fieldType, Explicit ignoreMalformed, - Explicit coerce, Settings indexSettings, MultiFields multiFields, CopyTo copyTo) { + Explicit coerce, Explicit ignoreZValue, Settings indexSettings, + MultiFields multiFields, CopyTo copyTo) { super(simpleName, fieldType, Defaults.FIELD_TYPE, indexSettings, multiFields, copyTo); this.coerce = coerce; this.ignoreMalformed = ignoreMalformed; + this.ignoreZValue = ignoreZValue; } @Override @@ -522,6 +543,9 @@ protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { if (gsfm.ignoreMalformed.explicit()) { this.ignoreMalformed = gsfm.ignoreMalformed; } + if (gsfm.ignoreZValue.explicit()) { + this.ignoreZValue = gsfm.ignoreZValue; + } } @Override @@ -555,6 +579,9 @@ protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, if (includeDefaults || ignoreMalformed.explicit()) { builder.field(IGNORE_MALFORMED, ignoreMalformed.value()); } + if (includeDefaults || ignoreZValue.explicit()) { + builder.field(GeoPointFieldMapper.Names.IGNORE_Z_VALUE.getPreferredName(), ignoreZValue.value()); + } } public Explicit coerce() { @@ -565,6 +592,10 @@ public Explicit ignoreMalformed() { return ignoreMalformed; } + public Explicit ignoreZValue() { + return ignoreZValue; + } + @Override protected String contentType() { return CONTENT_TYPE; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java index a7410ea5efc89..e015d449cb081 100755 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -234,7 +234,7 @@ public boolean updateMapping(IndexMetaData indexMetaData) throws IOException { // only update entries if needed updatedEntries = internalMerge(indexMetaData, MergeReason.MAPPING_RECOVERY, true, true); } catch (Exception e) { - logger.warn((org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage("[{}] failed to apply mappings", index()), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to apply mappings", index()), e); throw e; } diff --git a/server/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java b/server/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java index 52c2b2ae2cf8a..67e0f5400b389 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java +++ 
b/server/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.bulk.BackoffPolicy; @@ -106,7 +105,7 @@ public void onResponse(ClearScrollResponse response) { @Override public void onFailure(Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("Failed to clear scroll [{}]", scrollId), e); + logger.warn(() -> new ParameterizedMessage("Failed to clear scroll [{}]", scrollId), e); onCompletion.run(); } }); @@ -155,12 +154,11 @@ public void onFailure(Exception e) { if (retries.hasNext()) { retryCount += 1; TimeValue delay = retries.next(); - logger.trace((Supplier) () -> new ParameterizedMessage("retrying rejected search after [{}]", delay), e); + logger.trace(() -> new ParameterizedMessage("retrying rejected search after [{}]", delay), e); countSearchRetry.run(); threadPool.schedule(delay, ThreadPool.Names.SAME, retryWithContext); } else { - logger.warn( - (Supplier) () -> new ParameterizedMessage( + logger.warn(() -> new ParameterizedMessage( "giving up on search because we retried [{}] times without success", retryCount), e); fail.accept(e); } diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 5ea08f421aa97..828dbbdc5c5a9 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -893,8 +893,7 @@ public DocsStats docStats() { try { sizeInBytes += info.sizeInBytes(); } catch (IOException e) { - logger.trace((org.apache.logging.log4j.util.Supplier) - () -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e); + logger.trace(() -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e); } } } diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexingOperationListener.java b/server/src/main/java/org/elasticsearch/index/shard/IndexingOperationListener.java index 335196fe68198..288832f1375c6 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexingOperationListener.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexingOperationListener.java @@ -20,7 +20,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.index.engine.Engine; import java.util.List; @@ -94,7 +93,7 @@ public Engine.Index preIndex(ShardId shardId, Engine.Index operation) { try { listener.preIndex(shardId, operation); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("preIndex listener [{}] failed", listener), e); + logger.warn(() -> new ParameterizedMessage("preIndex listener [{}] failed", listener), e); } } return operation; @@ -107,7 +106,7 @@ public void postIndex(ShardId shardId, Engine.Index index, Engine.IndexResult re try { listener.postIndex(shardId, index, result); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("postIndex listener [{}] failed", listener), e); + logger.warn(() -> new ParameterizedMessage("postIndex listener [{}] failed", listener), e); } } } @@ -120,7 +119,7 @@ public void postIndex(ShardId shardId, Engine.Index 
index, Exception ex) { listener.postIndex(shardId, index, ex); } catch (Exception inner) { inner.addSuppressed(ex); - logger.warn((Supplier) () -> new ParameterizedMessage("postIndex listener [{}] failed", listener), inner); + logger.warn(() -> new ParameterizedMessage("postIndex listener [{}] failed", listener), inner); } } } @@ -132,7 +131,7 @@ public Engine.Delete preDelete(ShardId shardId, Engine.Delete delete) { try { listener.preDelete(shardId, delete); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("preDelete listener [{}] failed", listener), e); + logger.warn(() -> new ParameterizedMessage("preDelete listener [{}] failed", listener), e); } } return delete; @@ -145,7 +144,7 @@ public void postDelete(ShardId shardId, Engine.Delete delete, Engine.DeleteResul try { listener.postDelete(shardId, delete, result); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("postDelete listener [{}] failed", listener), e); + logger.warn(() -> new ParameterizedMessage("postDelete listener [{}] failed", listener), e); } } } @@ -158,7 +157,7 @@ public void postDelete(ShardId shardId, Engine.Delete delete, Exception ex) { listener.postDelete(shardId, delete, ex); } catch (Exception inner) { inner.addSuppressed(ex); - logger.warn((Supplier) () -> new ParameterizedMessage("postDelete listener [{}] failed", listener), inner); + logger.warn(() -> new ParameterizedMessage("postDelete listener [{}] failed", listener), inner); } } } diff --git a/server/src/main/java/org/elasticsearch/index/shard/SearchOperationListener.java b/server/src/main/java/org/elasticsearch/index/shard/SearchOperationListener.java index 153a985ab0892..b148d1efba340 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/SearchOperationListener.java +++ b/server/src/main/java/org/elasticsearch/index/shard/SearchOperationListener.java @@ -20,7 +20,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.transport.TransportRequest; @@ -133,7 +132,7 @@ public void onPreQueryPhase(SearchContext searchContext) { try { listener.onPreQueryPhase(searchContext); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("onPreQueryPhase listener [{}] failed", listener), e); + logger.warn(() -> new ParameterizedMessage("onPreQueryPhase listener [{}] failed", listener), e); } } } @@ -144,7 +143,7 @@ public void onFailedQueryPhase(SearchContext searchContext) { try { listener.onFailedQueryPhase(searchContext); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("onFailedQueryPhase listener [{}] failed", listener), e); + logger.warn(() -> new ParameterizedMessage("onFailedQueryPhase listener [{}] failed", listener), e); } } } @@ -155,7 +154,7 @@ public void onQueryPhase(SearchContext searchContext, long tookInNanos) { try { listener.onQueryPhase(searchContext, tookInNanos); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("onQueryPhase listener [{}] failed", listener), e); + logger.warn(() -> new ParameterizedMessage("onQueryPhase listener [{}] failed", listener), e); } } } @@ -166,7 +165,7 @@ public void onPreFetchPhase(SearchContext searchContext) { try { listener.onPreFetchPhase(searchContext); } catch (Exception e) { - logger.warn((Supplier) () -> new 
ParameterizedMessage("onPreFetchPhase listener [{}] failed", listener), e); + logger.warn(() -> new ParameterizedMessage("onPreFetchPhase listener [{}] failed", listener), e); } } } @@ -177,7 +176,7 @@ public void onFailedFetchPhase(SearchContext searchContext) { try { listener.onFailedFetchPhase(searchContext); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("onFailedFetchPhase listener [{}] failed", listener), e); + logger.warn(() -> new ParameterizedMessage("onFailedFetchPhase listener [{}] failed", listener), e); } } } @@ -188,7 +187,7 @@ public void onFetchPhase(SearchContext searchContext, long tookInNanos) { try { listener.onFetchPhase(searchContext, tookInNanos); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("onFetchPhase listener [{}] failed", listener), e); + logger.warn(() -> new ParameterizedMessage("onFetchPhase listener [{}] failed", listener), e); } } } @@ -199,7 +198,7 @@ public void onNewContext(SearchContext context) { try { listener.onNewContext(context); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("onNewContext listener [{}] failed", listener), e); + logger.warn(() -> new ParameterizedMessage("onNewContext listener [{}] failed", listener), e); } } } @@ -210,7 +209,7 @@ public void onFreeContext(SearchContext context) { try { listener.onFreeContext(context); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("onFreeContext listener [{}] failed", listener), e); + logger.warn(() -> new ParameterizedMessage("onFreeContext listener [{}] failed", listener), e); } } } @@ -221,7 +220,7 @@ public void onNewScrollContext(SearchContext context) { try { listener.onNewScrollContext(context); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("onNewScrollContext listener [{}] failed", listener), e); + logger.warn(() -> new ParameterizedMessage("onNewScrollContext listener [{}] failed", listener), e); } } } @@ -232,7 +231,7 @@ public void onFreeScrollContext(SearchContext context) { try { listener.onFreeScrollContext(context); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("onFreeScrollContext listener [{}] failed", listener), e); + logger.warn(() -> new ParameterizedMessage("onFreeScrollContext listener [{}] failed", listener), e); } } } diff --git a/server/src/main/java/org/elasticsearch/index/shard/ShardId.java b/server/src/main/java/org/elasticsearch/index/shard/ShardId.java index a806c414e9aea..085fd6e339282 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/ShardId.java +++ b/server/src/main/java/org/elasticsearch/index/shard/ShardId.java @@ -23,6 +23,9 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.Index; import java.io.IOException; @@ -30,7 +33,7 @@ /** * Allows for shard level components to be injected with the shard id. 
*/ -public class ShardId implements Streamable, Comparable { +public class ShardId implements Streamable, Comparable, ToXContentFragment { private Index index; @@ -137,4 +140,9 @@ public int compareTo(ShardId o) { } return Integer.compare(shardId, o.getId()); } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.value(toString()); + } } diff --git a/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java b/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java index 124b538d3facf..ee285cc4f9569 100644 --- a/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java +++ b/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java @@ -266,7 +266,8 @@ public static void toXContent(FileInfo file, XContentBuilder builder, ToXContent } if (file.metadata.hash() != null && file.metadata().hash().length > 0) { - builder.field(META_HASH, file.metadata.hash()); + BytesRef br = file.metadata.hash(); + builder.field(META_HASH, br.bytes, br.offset, br.length); } builder.endObject(); } diff --git a/server/src/main/java/org/elasticsearch/index/store/Store.java b/server/src/main/java/org/elasticsearch/index/store/Store.java index 7b73a945d6e31..2a78bbe5ba046 100644 --- a/server/src/main/java/org/elasticsearch/index/store/Store.java +++ b/server/src/main/java/org/elasticsearch/index/store/Store.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.index.CheckIndex; import org.apache.lucene.index.CorruptIndexException; @@ -329,7 +328,7 @@ public int compare(Map.Entry o1, Map.Entry o2) { directory.deleteFile(origFile); } catch (FileNotFoundException | NoSuchFileException e) { } catch (Exception ex) { - logger.debug((Supplier) () -> new ParameterizedMessage("failed to delete file [{}]", origFile), ex); + logger.debug(() -> new ParameterizedMessage("failed to delete file [{}]", origFile), ex); } // now, rename the files... and fail it it won't work directory.rename(tempFile, origFile); @@ -462,7 +461,7 @@ public static MetadataSnapshot readMetadataSnapshot(Path indexLocation, ShardId } catch (FileNotFoundException | NoSuchFileException ex) { logger.info("Failed to open / find files while reading metadata snapshot"); } catch (ShardLockObtainFailedException ex) { - logger.info((Supplier) () -> new ParameterizedMessage("{}: failed to obtain shard lock", shardId), ex); + logger.info(() -> new ParameterizedMessage("{}: failed to obtain shard lock", shardId), ex); } return MetadataSnapshot.EMPTY; } @@ -476,7 +475,7 @@ public static boolean canOpenIndex(Logger logger, Path indexLocation, ShardId sh try { tryOpenIndex(indexLocation, shardId, shardLocker, logger); } catch (Exception ex) { - logger.trace((Supplier) () -> new ParameterizedMessage("Can't open index for path [{}]", indexLocation), ex); + logger.trace(() -> new ParameterizedMessage("Can't open index for path [{}]", indexLocation), ex); return false; } return true; @@ -676,7 +675,7 @@ public void cleanupAndVerify(String reason, MetadataSnapshot sourceMetaData) thr // if one of those files can't be deleted we better fail the cleanup otherwise we might leave an old commit point around? 
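With the ShardId hunk above, a ShardId can be embedded directly in XContent output, where it renders as its bracketed string form. A small usage sketch, assuming the org.elasticsearch.common.xcontent API shown in the imports of that hunk (the demo class itself is illustrative):

    import org.elasticsearch.common.xcontent.ToXContent;
    import org.elasticsearch.common.xcontent.XContentBuilder;
    import org.elasticsearch.common.xcontent.XContentFactory;
    import org.elasticsearch.index.Index;
    import org.elasticsearch.index.shard.ShardId;

    public class ShardIdXContentDemo {
        public static void main(String[] args) throws java.io.IOException {
            ShardId shardId = new ShardId(new Index("my-index", "_na_"), 0);
            XContentBuilder builder = XContentFactory.jsonBuilder();
            builder.startObject();
            builder.field("shard");
            shardId.toXContent(builder, ToXContent.EMPTY_PARAMS); // writes the string "[my-index][0]"
            builder.endObject();
            System.out.println(builder.string()); // {"shard":"[my-index][0]"} (string() per the 6.x builder API)
        }
    }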
throw new IllegalStateException("Can't delete " + existingFile + " - cleanup failed", ex); } - logger.debug((Supplier) () -> new ParameterizedMessage("failed to delete file [{}]", existingFile), ex); + logger.debug(() -> new ParameterizedMessage("failed to delete file [{}]", existingFile), ex); // ignore, we don't really care, will get deleted later on } } @@ -886,7 +885,7 @@ static LoadedMetadata loadMetadata(IndexCommit commit, Directory directory, Logg // Lucene checks the checksum after it tries to lookup the codec etc. // in that case we might get only IAE or similar exceptions while we are really corrupt... // TODO we should check the checksum in lucene if we hit an exception - logger.warn((Supplier) () -> new ParameterizedMessage("failed to build store metadata. checking segment info integrity (with commit [{}])", commit == null ? "no" : "yes"), ex); + logger.warn(() -> new ParameterizedMessage("failed to build store metadata. checking segment info integrity (with commit [{}])", commit == null ? "no" : "yes"), ex); Lucene.checkSegmentInfoIntegrity(directory); } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException cex) { cex.addSuppressed(ex); @@ -921,7 +920,7 @@ private static void checksumFromLuceneFile(Directory directory, String file, Map } } catch (Exception ex) { - logger.debug((Supplier) () -> new ParameterizedMessage("Can retrieve checksum from file [{}]", file), ex); + logger.debug(() -> new ParameterizedMessage("Can retrieve checksum from file [{}]", file), ex); throw ex; } builder.put(file, new StoreFileMetaData(file, length, checksum, version, fileHash.get())); diff --git a/server/src/main/java/org/elasticsearch/index/translog/Translog.java b/server/src/main/java/org/elasticsearch/index/translog/Translog.java index f4fa056376d24..b73ea783b2a25 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/server/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.translog; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.index.Term; import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.core.internal.io.IOUtils; @@ -262,7 +261,7 @@ private ArrayList recoverFromFiles(Checkpoint checkpoint) throws try { Files.delete(tempFile); } catch (IOException ex) { - logger.warn((Supplier) () -> new ParameterizedMessage("failed to delete temp file {}", tempFile), ex); + logger.warn(() -> new ParameterizedMessage("failed to delete temp file {}", tempFile), ex); } } } @@ -356,26 +355,11 @@ public long getMinFileGeneration() { } } - - /** - * Returns the number of operations in the translog files that aren't committed to lucene. - */ - public int uncommittedOperations() { - return totalOperations(deletionPolicy.getTranslogGenerationOfLastCommit()); - } - - /** - * Returns the size in bytes of the translog files that aren't committed to lucene. 
- */ - public long uncommittedSizeInBytes() { - return sizeInBytesByMinGen(deletionPolicy.getTranslogGenerationOfLastCommit()); - } - /** * Returns the number of operations in the translog files */ public int totalOperations() { - return totalOperations(-1); + return totalOperationsByMinGen(-1); } /** @@ -406,9 +390,9 @@ static long findEarliestLastModifiedAge(long currentTime, Iterable) () -> new ParameterizedMessage("failed to write indexing buffer for shard [{}]; ignoring", shard.shardId()), e); + logger.warn(() -> new ParameterizedMessage("failed to write indexing buffer for shard [{}]; ignoring", shard.shardId()), e); } }); } @@ -384,7 +383,7 @@ protected void checkIdle(IndexShard shard, long inactiveTimeNS) { try { shard.checkIdle(inactiveTimeNS); } catch (AlreadyClosedException e) { - logger.trace((Supplier) () -> new ParameterizedMessage("ignore exception while checking if shard {} is inactive", shard.shardId()), e); + logger.trace(() -> new ParameterizedMessage("ignore exception while checking if shard {} is inactive", shard.shardId()), e); } } } diff --git a/server/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java b/server/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java index 1712f90c206ec..4a55b86291e63 100644 --- a/server/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java +++ b/server/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java @@ -19,7 +19,6 @@ package org.elasticsearch.indices.analysis; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.analysis.hunspell.Dictionary; import org.apache.lucene.store.Directory; import org.apache.lucene.store.SimpleFSDirectory; @@ -140,8 +139,7 @@ private void scanAndLoadDictionaries() throws IOException { } catch (Exception e) { // The cache loader throws unchecked exception (see #loadDictionary()), // here we simply report the exception and continue loading the dictionaries - logger.error( - (Supplier) () -> new ParameterizedMessage( + logger.error(() -> new ParameterizedMessage( "exception while loading dictionary {}", file.getFileName()), e); } } @@ -200,7 +198,7 @@ private Dictionary loadDictionary(String locale, Settings nodeSettings, Environm } } catch (Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("Could not load hunspell dictionary [{}]", locale), e); + logger.error(() -> new ParameterizedMessage("Could not load hunspell dictionary [{}]", locale), e); throw e; } finally { IOUtils.close(affixStream); diff --git a/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java b/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java index d17740ed60004..472cb04936d64 100644 --- a/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java +++ b/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.store.LockObtainFailedException; import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.Version; @@ -307,8 +306,7 @@ private void deleteIndices(final ClusterChangedEvent event) { threadPool.generic().execute(new AbstractRunnable() { @Override public void onFailure(Exception e) { - logger.warn( - (Supplier) () -> new 
ParameterizedMessage("[{}] failed to complete pending deletion for index", index), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to complete pending deletion for index", index), e); } @Override @@ -670,8 +668,7 @@ private void failAndRemoveShard(ShardRouting shardRouting, boolean sendShardFail // the node got closed on us, ignore it } catch (Exception inner) { inner.addSuppressed(failure); - logger.warn( - (Supplier) () -> new ParameterizedMessage( + logger.warn(() -> new ParameterizedMessage( "[{}][{}] failed to remove shard after failure ([{}])", shardRouting.getIndexName(), shardRouting.getId(), @@ -685,15 +682,13 @@ private void failAndRemoveShard(ShardRouting shardRouting, boolean sendShardFail private void sendFailShard(ShardRouting shardRouting, String message, @Nullable Exception failure, ClusterState state) { try { - logger.warn( - (Supplier) () -> new ParameterizedMessage( + logger.warn(() -> new ParameterizedMessage( "[{}] marking and sending shard failed due to [{}]", shardRouting.shardId(), message), failure); failedShardsCache.put(shardRouting.shardId(), shardRouting); shardStateAction.localShardFailed(shardRouting, message, failure, SHARD_STATE_ACTION_LISTENER, state); } catch (Exception inner) { if (failure != null) inner.addSuppressed(failure); - logger.warn( - (Supplier) () -> new ParameterizedMessage( + logger.warn(() -> new ParameterizedMessage( "[{}][{}] failed to mark shard as failed (because of [{}])", shardRouting.getIndexName(), shardRouting.getId(), diff --git a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java b/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java index b8b294a90d422..553744e66ef04 100644 --- a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java +++ b/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java @@ -19,7 +19,6 @@ package org.elasticsearch.indices.flush; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; @@ -107,7 +106,7 @@ public void onResponse(ShardsSyncedFlushResult syncedFlushResult) { @Override public void onFailure(Exception e) { - logger.debug((Supplier) () -> new ParameterizedMessage("{} sync flush on inactive shard failed", indexShard.shardId()), e); + logger.debug(() -> new ParameterizedMessage("{} sync flush on inactive shard failed", indexShard.shardId()), e); } }); } @@ -397,7 +396,7 @@ public void handleResponse(ShardSyncedFlushResponse response) { @Override public void handleException(TransportException exp) { - logger.trace((Supplier) () -> new ParameterizedMessage("{} error while performing synced flush on [{}], skipping", shardId, shard), exp); + logger.trace(() -> new ParameterizedMessage("{} error while performing synced flush on [{}], skipping", shardId, shard), exp); results.put(shard, new ShardSyncedFlushResponse(exp.getMessage())); countDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results); } @@ -453,7 +452,7 @@ public void handleResponse(PreSyncedFlushResponse response) { @Override public void handleException(TransportException exp) { - logger.trace((Supplier) () -> new ParameterizedMessage("{} error while performing pre synced flush on [{}], skipping", shardId, shard), exp); + logger.trace(() -> new ParameterizedMessage("{} error while performing pre synced flush on [{}], 
skipping", shardId, shard), exp); if (countDown.countDown()) { listener.onResponse(presyncResponses); } @@ -561,11 +560,14 @@ static final class PreSyncedFlushResponse extends TransportResponse { } boolean includeNumDocs(Version version) { - return version.onOrAfter(Version.V_5_6_8); + if (version.major == Version.V_5_6_8.major) { + return version.onOrAfter(Version.V_5_6_8); + } + return version.onOrAfter(Version.V_6_2_2); } boolean includeExistingSyncId(Version version) { - return version.onOrAfter(Version.V_5_6_9); + return version.onOrAfter(Version.V_6_3_0); } @Override diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java index 392136184b4af..3b4f4a64e06db 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexCommit; import org.apache.lucene.store.AlreadyClosedException; @@ -144,8 +143,7 @@ public void startRecovery(final IndexShard indexShard, final DiscoveryNode sourc } protected void retryRecovery(final long recoveryId, final Throwable reason, TimeValue retryAfter, TimeValue activityTimeout) { - logger.trace( - (Supplier) () -> new ParameterizedMessage( + logger.trace(() -> new ParameterizedMessage( "will retry recovery with id [{}] in [{}]", recoveryId, retryAfter), reason); retryRecovery(recoveryId, retryAfter, activityTimeout); } @@ -229,12 +227,8 @@ public RecoveryResponse newInstance() { logger.trace("recovery cancelled", e); } catch (Exception e) { if (logger.isTraceEnabled()) { - logger.trace( - (Supplier) () -> new ParameterizedMessage( - "[{}][{}] Got exception on recovery", - request.shardId().getIndex().getName(), - request.shardId().id()), - e); + logger.trace(() -> new ParameterizedMessage( + "[{}][{}] Got exception on recovery", request.shardId().getIndex().getName(), request.shardId().id()), e); } Throwable cause = ExceptionsHelper.unwrapCause(e); if (cause instanceof CancellableThreads.ExecutionCancelledException) { @@ -532,12 +526,9 @@ public void onTimeout(TimeValue timeout) { long currentVersion = future.get(); logger.trace("successfully waited for cluster state with version {} (current: {})", clusterStateVersion, currentVersion); } catch (Exception e) { - logger.debug( - (Supplier) () -> new ParameterizedMessage( + logger.debug(() -> new ParameterizedMessage( "failed waiting for cluster state with version {} (current: {})", - clusterStateVersion, - clusterService.state().getVersion()), - e); + clusterStateVersion, clusterService.state().getVersion()), e); throw ExceptionsHelper.convertToRuntime(e); } } @@ -615,16 +606,13 @@ class RecoveryRunner extends AbstractRunnable { public void onFailure(Exception e) { try (RecoveryRef recoveryRef = onGoingRecoveries.getRecovery(recoveryId)) { if (recoveryRef != null) { - logger.error( - (Supplier) () -> new ParameterizedMessage( - "unexpected error during recovery [{}], failing shard", recoveryId), e); + logger.error(() -> new ParameterizedMessage("unexpected error during recovery [{}], failing shard", recoveryId), e); onGoingRecoveries.failRecovery(recoveryId, new 
RecoveryFailedException(recoveryRef.target().state(), "unexpected error", e), true // be safe ); } else { - logger.debug( - (Supplier) () -> new ParameterizedMessage( + logger.debug(() -> new ParameterizedMessage( "unexpected error during recovery, but recovery id [{}] is finished", recoveryId), e); } } diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java index 6b81d34ab5fe3..bbb02231e7a59 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.unit.TimeValue; @@ -269,7 +268,7 @@ private RecoveryMonitor(long recoveryId, long lastSeenAccessTime, TimeValue chec @Override public void onFailure(Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("unexpected error while monitoring recovery [{}]", recoveryId), e); + logger.error(() -> new ParameterizedMessage("unexpected error while monitoring recovery [{}]", recoveryId), e); } @Override diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index cb807600cf4e8..5e66aa53236bc 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -407,12 +407,9 @@ public void phase1(final IndexCommit snapshot, final Supplier translogO RemoteTransportException exception = new RemoteTransportException("File corruption occurred on recovery but " + "checksums are ok", null); exception.addSuppressed(targetException); - logger.warn( - (org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage( + logger.warn(() -> new ParameterizedMessage( "{} Remote file corruption during finalization of recovery on node {}. local checksum OK", - shard.shardId(), - request.targetNode()), - corruptIndexException); + shard.shardId(), request.targetNode()), corruptIndexException); throw exception; } else { throw targetException; @@ -681,13 +678,9 @@ void sendFiles(Store store, StoreFileMetaData[] files, Function) () -> new ParameterizedMessage( + logger.warn(() -> new ParameterizedMessage( "{} Remote file corruption on node {}, recovering {}. 
local checksum OK", - shardId, - request.targetNode(), - md), - corruptIndexException); + shardId, request.targetNode(), md), corruptIndexException); throw exception; } } else { diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java index 41df6ec73e020..d7e3d56fa813f 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.IndexFormatTooNewException; import org.apache.lucene.index.IndexFormatTooOldException; @@ -329,8 +328,7 @@ protected void closeInternal() { try { entry.getValue().close(); } catch (Exception e) { - logger.debug( - (Supplier) () -> new ParameterizedMessage("error while closing recovery output [{}]", entry.getValue()), e); + logger.debug(() -> new ParameterizedMessage("error while closing recovery output [{}]", entry.getValue()), e); } iterator.remove(); } diff --git a/server/src/main/java/org/elasticsearch/indices/store/IndicesStore.java b/server/src/main/java/org/elasticsearch/indices/store/IndicesStore.java index 294484c659863..37f67ddf102ac 100644 --- a/server/src/main/java/org/elasticsearch/indices/store/IndicesStore.java +++ b/server/src/main/java/org/elasticsearch/indices/store/IndicesStore.java @@ -20,7 +20,6 @@ package org.elasticsearch.indices.store; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; @@ -256,7 +255,7 @@ public void handleResponse(ShardActiveResponse response) { @Override public void handleException(TransportException exp) { - logger.debug((Supplier) () -> new ParameterizedMessage("shards active request failed for {}", shardId), exp); + logger.debug(() -> new ParameterizedMessage("shards active request failed for {}", shardId), exp); if (awaitingResponses.decrementAndGet() == 0) { allNodesResponded(); } @@ -288,10 +287,10 @@ private void allNodesResponded() { try { indicesService.deleteShardStore("no longer used", shardId, currentState); } catch (Exception ex) { - logger.debug((Supplier) () -> new ParameterizedMessage("{} failed to delete unallocated shard, ignoring", shardId), ex); + logger.debug(() -> new ParameterizedMessage("{} failed to delete unallocated shard, ignoring", shardId), ex); } }, - (source, e) -> logger.error((Supplier) () -> new ParameterizedMessage("{} unexpected error during deletion of unallocated shard", shardId), e) + (source, e) -> logger.error(() -> new ParameterizedMessage("{} unexpected error during deletion of unallocated shard", shardId), e) ); } @@ -340,9 +339,9 @@ public void sendResult(boolean shardActive) { try { channel.sendResponse(new ShardActiveResponse(shardActive, clusterService.localNode())); } catch (IOException e) { - logger.error((Supplier) () -> new ParameterizedMessage("failed send response for shard active while trying to delete shard {} - shard will probably not be removed", request.shardId), e); + logger.error(() -> new ParameterizedMessage("failed send response for shard active while trying to delete shard {} - shard will 
probably not be removed", request.shardId), e); } catch (EsRejectedExecutionException e) { - logger.error((Supplier) () -> new ParameterizedMessage("failed send response for shard active while trying to delete shard {} - shard will probably not be removed", request.shardId), e); + logger.error(() -> new ParameterizedMessage("failed send response for shard active while trying to delete shard {} - shard will probably not be removed", request.shardId), e); } } }, newState -> { diff --git a/server/src/main/java/org/elasticsearch/monitor/fs/FsProbe.java b/server/src/main/java/org/elasticsearch/monitor/fs/FsProbe.java index f88ddcf482530..cacba54d80ad4 100644 --- a/server/src/main/java/org/elasticsearch/monitor/fs/FsProbe.java +++ b/server/src/main/java/org/elasticsearch/monitor/fs/FsProbe.java @@ -20,7 +20,6 @@ package org.elasticsearch.monitor.fs; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.util.Constants; import org.elasticsearch.cluster.ClusterInfo; import org.elasticsearch.cluster.DiskUsage; @@ -123,8 +122,7 @@ final FsInfo.IoStats ioStats(final Set> devicesNumbers, } catch (Exception e) { // do not fail Elasticsearch if something unexpected // happens here - logger.debug( - (Supplier) () -> new ParameterizedMessage( + logger.debug(() -> new ParameterizedMessage( "unexpected exception processing /proc/diskstats for devices {}", devicesNumbers), e); return null; } diff --git a/server/src/main/java/org/elasticsearch/persistent/AllocatedPersistentTask.java b/server/src/main/java/org/elasticsearch/persistent/AllocatedPersistentTask.java index a0572f93e5e00..b311e559c6e91 100644 --- a/server/src/main/java/org/elasticsearch/persistent/AllocatedPersistentTask.java +++ b/server/src/main/java/org/elasticsearch/persistent/AllocatedPersistentTask.java @@ -20,7 +20,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; import org.elasticsearch.common.Nullable; @@ -148,8 +147,7 @@ private void completeAndNotifyIfNeeded(@Nullable Exception failure) { logger.warn("attempt to complete task [{}] with id [{}] in the [{}] state", getAction(), getPersistentTaskId(), prevState); } else { if (failure != null) { - logger.warn((Supplier) () -> new ParameterizedMessage( - "task {} failed with an exception", getPersistentTaskId()), failure); + logger.warn(() -> new ParameterizedMessage("task {} failed with an exception", getPersistentTaskId()), failure); } try { this.failure = failure; @@ -165,9 +163,8 @@ public void onResponse(PersistentTasksCustomMetaData.PersistentTask persisten @Override public void onFailure(Exception e) { - logger.warn((Supplier) () -> - new ParameterizedMessage("notification for task [{}] with id [{}] failed", - getAction(), getPersistentTaskId()), e); + logger.warn(() -> new ParameterizedMessage( + "notification for task [{}] with id [{}] failed", getAction(), getPersistentTaskId()), e); } }); } diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksClusterService.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksClusterService.java index 9e064c3d20924..cf44556ee5ddc 100644 --- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksClusterService.java +++ 
b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksClusterService.java @@ -34,6 +34,8 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.persistent.PersistentTasksCustomMetaData.Assignment; import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask; +import org.elasticsearch.persistent.decider.AssignmentDecision; +import org.elasticsearch.persistent.decider.EnableAssignmentDecider; import org.elasticsearch.tasks.Task; import java.util.Objects; @@ -45,12 +47,14 @@ public class PersistentTasksClusterService extends AbstractComponent implements ClusterStateListener { private final ClusterService clusterService; private final PersistentTasksExecutorRegistry registry; + private final EnableAssignmentDecider decider; public PersistentTasksClusterService(Settings settings, PersistentTasksExecutorRegistry registry, ClusterService clusterService) { super(settings); this.clusterService = clusterService; clusterService.addListener(this); this.registry = registry; + this.decider = new EnableAssignmentDecider(settings, clusterService.getClusterSettings()); } /** @@ -224,6 +228,12 @@ private <Params extends PersistentTaskParams> Assignment createAssignment(final String taskName, final @Nullable Params taskParams, final ClusterState currentState) { PersistentTasksExecutor<Params> persistentTasksExecutor = registry.getPersistentTaskExecutorSafe(taskName); + + AssignmentDecision decision = decider.canAssign(); + if (decision.getType() == AssignmentDecision.Type.NO) { + return new Assignment(null, "persistent task [" + taskName + "] cannot be assigned [" + decision.getReason() + "]"); + } + return persistentTasksExecutor.getAssignment(taskParams, currentState); } @@ -249,7 +259,8 @@ public void onFailure(String source, Exception e) { /** * Returns true if the cluster state change(s) require reassigning some persistent tasks. It can happen in the following - * situations: a node left or is added, the routing table changed, the master node changed or the persistent tasks changed. + * situations: a node left or is added, the routing table changed, the master node changed, the metadata changed or the + * persistent tasks changed.
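+ * (Dynamic cluster settings live in the cluster metadata, so the metadata check also covers updates to settings such as
+ * {@code cluster.persistent_tasks.allocation.enable}.)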
*/ boolean shouldReassignPersistentTasks(final ClusterChangedEvent event) { final PersistentTasksCustomMetaData tasks = event.state().getMetaData().custom(PersistentTasksCustomMetaData.TYPE); @@ -259,7 +270,12 @@ boolean shouldReassignPersistentTasks(final ClusterChangedEvent event) { boolean masterChanged = event.previousState().nodes().isLocalNodeElectedMaster() == false; - if (persistentTasksChanged(event) || event.nodesChanged() || event.routingTableChanged() || masterChanged) { + if (persistentTasksChanged(event) + || event.nodesChanged() + || event.routingTableChanged() + || event.metaDataChanged() + || masterChanged) { + for (PersistentTask task : tasks.tasks()) { if (needsReassignment(task.getAssignment(), event.state().nodes())) { Assignment assignment = createAssignment(task.getTaskName(), task.getParams(), event.state()); diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksNodeService.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksNodeService.java index e53834d6f4655..6c410bc41a220 100644 --- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksNodeService.java +++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksNodeService.java @@ -19,7 +19,6 @@ package org.elasticsearch.persistent; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; import org.elasticsearch.cluster.ClusterChangedEvent; @@ -207,9 +206,9 @@ public void onResponse(CancelTasksResponse cancelTasksResponse) { @Override public void onFailure(Exception e) { // There is really nothing we can do in case of failure here - logger.warn((Supplier) () -> - new ParameterizedMessage("failed to cancel task [{}] with id [{}] and allocation id [{}]", task.getAction(), - task.getPersistentTaskId(), task.getAllocationId()), e); + logger.warn(() -> new ParameterizedMessage( + "failed to cancel task [{}] with id [{}] and allocation id [{}]", + task.getAction(), task.getPersistentTaskId(), task.getAllocationId()), e); } }); } diff --git a/server/src/main/java/org/elasticsearch/persistent/decider/AssignmentDecision.java b/server/src/main/java/org/elasticsearch/persistent/decider/AssignmentDecision.java new file mode 100644 index 0000000000000..eb8f851a68dab --- /dev/null +++ b/server/src/main/java/org/elasticsearch/persistent/decider/AssignmentDecision.java @@ -0,0 +1,72 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.persistent.decider; + +import java.util.Locale; +import java.util.Objects; + +/** + * {@link AssignmentDecision} represents the decision made during the process of + * assigning a persistent task to a node of the cluster. + * + * @see EnableAssignmentDecider + */ +public final class AssignmentDecision { + + public static final AssignmentDecision YES = new AssignmentDecision(Type.YES, ""); + + private final Type type; + private final String reason; + + public AssignmentDecision(final Type type, final String reason) { + this.type = Objects.requireNonNull(type); + this.reason = Objects.requireNonNull(reason); + } + + public Type getType() { + return type; + } + + public String getReason() { + return reason; + } + + @Override + public String toString() { + return "assignment decision [type=" + type + ", reason=" + reason + "]"; + } + + public enum Type { + NO(0), YES(1); + + private final int id; + + Type(int id) { + this.id = id; + } + + public int getId() { + return id; + } + + public static Type resolve(final String s) { + return Type.valueOf(s.toUpperCase(Locale.ROOT)); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/persistent/decider/EnableAssignmentDecider.java b/server/src/main/java/org/elasticsearch/persistent/decider/EnableAssignmentDecider.java new file mode 100644 index 0000000000000..525e1379a4098 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/persistent/decider/EnableAssignmentDecider.java @@ -0,0 +1,101 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.persistent.decider; + +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; + +import java.util.Locale; + +import static org.elasticsearch.common.settings.Setting.Property.Dynamic; +import static org.elasticsearch.common.settings.Setting.Property.NodeScope; + +/** + * {@link EnableAssignmentDecider} is used to allow/disallow the persistent tasks + * to be assigned to cluster nodes. + *

+ * <p>
+ * Allocation settings can have the following values (case-insensitive):
+ * <ul>
+ *     <li><code>NONE</code> - no persistent tasks can be assigned</li>
+ *     <li><code>ALL</code> - all persistent tasks can be assigned to nodes</li>
+ * </ul>
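+ * <p>
+ * As a usage illustration (an assumed client-side snippet, not part of this change), the setting can be toggled at
+ * runtime through the cluster update settings API:
+ * <pre>
+ * // disable assignment of all persistent tasks
+ * client.admin().cluster().prepareUpdateSettings()
+ *       .setPersistentSettings(Settings.builder()
+ *           .put("cluster.persistent_tasks.allocation.enable", "none"))
+ *       .get();
+ * </pre>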
+ * + * @see Allocation + */ +public class EnableAssignmentDecider { + + public static final Setting<Allocation> CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING = + new Setting<>("cluster.persistent_tasks.allocation.enable", Allocation.ALL.toString(), Allocation::fromString, Dynamic, NodeScope); + + private volatile Allocation enableAssignment; + + public EnableAssignmentDecider(final Settings settings, final ClusterSettings clusterSettings) { + this.enableAssignment = CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING.get(settings); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING, this::setEnableAssignment); + } + + public void setEnableAssignment(final Allocation enableAssignment) { + this.enableAssignment = enableAssignment; + } + + /** + * Returns an {@link AssignmentDecision} indicating whether the given persistent task can be assigned + * to a node of the cluster. The decision depends on the current value of the setting + * {@link EnableAssignmentDecider#CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING}. + * + * @return the {@link AssignmentDecision} + */ + public AssignmentDecision canAssign() { + if (enableAssignment == Allocation.NONE) { + return new AssignmentDecision(AssignmentDecision.Type.NO, "no persistent task assignments are allowed due to cluster settings"); + } + return AssignmentDecision.YES; + } + + /** + * Allocation values, or rather their string representation, to be used with + * {@link EnableAssignmentDecider#CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING} + * via cluster settings. + */ + public enum Allocation { + + NONE, + ALL; + + public static Allocation fromString(final String strValue) { + if (strValue == null) { + return null; + } else { + String value = strValue.toUpperCase(Locale.ROOT); + try { + return valueOf(value); + } catch (IllegalArgumentException e) { + throw new IllegalArgumentException("Illegal value [" + value + "] for [" + + CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING.getKey() + "]"); + } + } + } + + @Override + public String toString() { + return name().toLowerCase(Locale.ROOT); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/persistent/package-info.java b/server/src/main/java/org/elasticsearch/persistent/package-info.java index f948e3ace448e..3e71716e60643 100644 --- a/server/src/main/java/org/elasticsearch/persistent/package-info.java +++ b/server/src/main/java/org/elasticsearch/persistent/package-info.java @@ -30,7 +30,7 @@ * task. * <p>

* 2. The master node updates the {@link org.elasticsearch.persistent.PersistentTasksCustomMetaData} in the cluster state to indicate - * that there is a new persistent task is running in the system. + * that there is a new persistent task running in the system. * <p>

* 3. The {@link org.elasticsearch.persistent.PersistentTasksNodeService} running on every node in the cluster monitors changes in * the cluster state and starts execution of all new tasks assigned to the node it is running on. diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java index 2efbae5961e9d..577ccc78de7b8 100644 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java @@ -20,7 +20,6 @@ package org.elasticsearch.repositories; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.AckedClusterStateUpdateTask; import org.elasticsearch.cluster.ClusterChangedEvent; @@ -142,7 +141,7 @@ public ClusterState execute(ClusterState currentState) throws IOException { @Override public void onFailure(String source, Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("failed to create repository [{}]", request.name), e); + logger.warn(() -> new ParameterizedMessage("failed to create repository [{}]", request.name), e); super.onFailure(source, e); } @@ -217,7 +216,7 @@ public void onResponse(VerifyResponse verifyResponse) { try { repository.endVerification(verificationToken); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to finish repository verification", repositoryName), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to finish repository verification", repositoryName), e); listener.onFailure(e); return; } @@ -234,7 +233,7 @@ public void onFailure(Exception e) { repository.endVerification(verificationToken); } catch (Exception inner) { inner.addSuppressed(e); - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to finish repository verification", repositoryName), inner); + logger.warn(() -> new ParameterizedMessage("[{}] failed to finish repository verification", repositoryName), inner); } listener.onFailure(e); } @@ -296,14 +295,14 @@ public void applyClusterState(ClusterChangedEvent event) { } catch (RepositoryException ex) { // TODO: this catch is bogus, it means the old repo is already closed, // but we have nothing to replace it - logger.warn((Supplier) () -> new ParameterizedMessage("failed to change repository [{}]", repositoryMetaData.name()), ex); + logger.warn(() -> new ParameterizedMessage("failed to change repository [{}]", repositoryMetaData.name()), ex); } } } else { try { repository = createRepository(repositoryMetaData); } catch (RepositoryException ex) { - logger.warn((Supplier) () -> new ParameterizedMessage("failed to create repository [{}]", repositoryMetaData.name()), ex); + logger.warn(() -> new ParameterizedMessage("failed to create repository [{}]", repositoryMetaData.name()), ex); } } if (repository != null) { @@ -385,7 +384,7 @@ private Repository createRepository(RepositoryMetaData repositoryMetaData) { repository.start(); return repository; } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("failed to create repository [{}][{}]", repositoryMetaData.type(), repositoryMetaData.name()), e); + logger.warn(() -> new ParameterizedMessage("failed to create repository [{}][{}]", repositoryMetaData.type(), repositoryMetaData.name()), e); throw new RepositoryException(repositoryMetaData.name(), "failed 
to create repository", e); } } diff --git a/server/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java b/server/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java index cc1170a4841a2..ba3f9c048d08a 100644 --- a/server/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java +++ b/server/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java @@ -22,7 +22,6 @@ import com.carrotsearch.hppc.ObjectContainer; import com.carrotsearch.hppc.cursors.ObjectCursor; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; @@ -81,7 +80,7 @@ public void verify(String repository, String verificationToken, final ActionList try { doVerify(repository, verificationToken, localNode); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to verify repository", repository), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to verify repository", repository), e); errors.add(new VerificationFailure(node.getId(), e)); } if (counter.decrementAndGet() == 0) { @@ -152,7 +151,7 @@ public void messageReceived(VerifyNodeRepositoryRequest request, TransportChanne try { doVerify(request.repository, request.verificationToken, localNode); } catch (Exception ex) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to verify repository", request.repository), ex); + logger.warn(() -> new ParameterizedMessage("[{}] failed to verify repository", request.repository), ex); throw ex; } channel.sendResponse(TransportResponse.Empty.INSTANCE); diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index b364b17295404..cb90e4ae13087 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -20,7 +20,6 @@ package org.elasticsearch.repositories.blobstore; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexFormatTooNewException; @@ -351,7 +350,7 @@ public void deleteSnapshot(SnapshotId snapshotId, long repositoryStateId) { } catch (SnapshotMissingException ex) { throw ex; } catch (IllegalStateException | SnapshotException | ElasticsearchParseException ex) { - logger.warn((Supplier) () -> new ParameterizedMessage("cannot read snapshot file [{}]", snapshotId), ex); + logger.warn(() -> new ParameterizedMessage("cannot read snapshot file [{}]", snapshotId), ex); } MetaData metaData = null; try { @@ -361,7 +360,7 @@ public void deleteSnapshot(SnapshotId snapshotId, long repositoryStateId) { metaData = readSnapshotMetaData(snapshotId, null, repositoryData.resolveIndices(indices), true); } } catch (IOException | SnapshotException ex) { - logger.warn((Supplier) () -> new ParameterizedMessage("cannot read metadata for snapshot [{}]", snapshotId), ex); + logger.warn(() -> new ParameterizedMessage("cannot read metadata for snapshot [{}]", snapshotId), ex); } try { // Delete snapshot from the index file, since it is the maintainer 
of truth of active snapshots @@ -381,7 +380,7 @@ public void deleteSnapshot(SnapshotId snapshotId, long repositoryStateId) { try { indexMetaDataFormat.delete(indexMetaDataBlobContainer, snapshotId.getUUID()); } catch (IOException ex) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to delete metadata for index [{}]", snapshotId, index), ex); + logger.warn(() -> new ParameterizedMessage("[{}] failed to delete metadata for index [{}]", snapshotId, index), ex); } if (metaData != null) { IndexMetaData indexMetaData = metaData.index(index); @@ -391,7 +390,7 @@ public void deleteSnapshot(SnapshotId snapshotId, long repositoryStateId) { delete(snapshotId, snapshot.version(), indexId, new ShardId(indexMetaData.getIndex(), shardId)); } catch (SnapshotException ex) { final int finalShardId = shardId; - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to delete shard data for shard [{}][{}]", snapshotId, index, finalShardId), ex); + logger.warn(() -> new ParameterizedMessage("[{}] failed to delete shard data for shard [{}][{}]", snapshotId, index, finalShardId), ex); } } } @@ -410,11 +409,11 @@ public void deleteSnapshot(SnapshotId snapshotId, long repositoryStateId) { // we'll ignore that and accept that cleanup didn't fully succeed. // since we are using UUIDs for path names, this won't be an issue for // snapshotting indices of the same name - logger.debug((Supplier) () -> new ParameterizedMessage("[{}] index [{}] no longer part of any snapshots in the repository, but failed to clean up " + + logger.debug(() -> new ParameterizedMessage("[{}] index [{}] no longer part of any snapshots in the repository, but failed to clean up " + "its index folder due to the directory not being empty.", metadata.name(), indexId), dnee); } catch (IOException ioe) { // a different IOException occurred while trying to delete - will just log the issue for now - logger.debug((Supplier) () -> new ParameterizedMessage("[{}] index [{}] no longer part of any snapshots in the repository, but failed to clean up " + + logger.debug(() -> new ParameterizedMessage("[{}] index [{}] no longer part of any snapshots in the repository, but failed to clean up " + "its index folder.", metadata.name(), indexId), ioe); } } @@ -428,10 +427,10 @@ private void deleteSnapshotBlobIgnoringErrors(final SnapshotInfo snapshotInfo, f snapshotFormat.delete(snapshotsBlobContainer, blobId); } catch (IOException e) { if (snapshotInfo != null) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] Unable to delete snapshot file [{}]", + logger.warn(() -> new ParameterizedMessage("[{}] Unable to delete snapshot file [{}]", snapshotInfo.snapshotId(), blobId), e); } else { - logger.warn((Supplier) () -> new ParameterizedMessage("Unable to delete snapshot file [{}]", blobId), e); + logger.warn(() -> new ParameterizedMessage("Unable to delete snapshot file [{}]", blobId), e); } } } @@ -441,10 +440,10 @@ private void deleteGlobalMetaDataBlobIgnoringErrors(final SnapshotInfo snapshotI globalMetaDataFormat.delete(snapshotsBlobContainer, blobId); } catch (IOException e) { if (snapshotInfo != null) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] Unable to delete global metadata file [{}]", + logger.warn(() -> new ParameterizedMessage("[{}] Unable to delete global metadata file [{}]", snapshotInfo.snapshotId(), blobId), e); } else { - logger.warn((Supplier) () -> new ParameterizedMessage("Unable to delete global metadata file [{}]", blobId), e); + logger.warn(() -> new ParameterizedMessage("Unable to 
delete global metadata file [{}]", blobId), e); } } } @@ -522,7 +521,7 @@ private MetaData readSnapshotMetaData(SnapshotId snapshotId, Version snapshotVer metaDataBuilder.put(indexMetaDataFormat.read(indexMetaDataBlobContainer, snapshotId.getUUID()), false); } catch (ElasticsearchParseException | IOException ex) { if (ignoreIndexErrors) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] [{}] failed to read metadata for index", snapshotId, index.getName()), ex); + logger.warn(() -> new ParameterizedMessage("[{}] [{}] failed to read metadata for index", snapshotId, index.getName()), ex); } else { throw ex; } @@ -983,7 +982,7 @@ protected void finalize(List snapshots, int fileListGeneration, M blobContainer.deleteBlob(blobName); } catch (IOException e) { // TODO: don't catch and let the user handle it? - logger.debug((Supplier) () -> new ParameterizedMessage("[{}] [{}] error deleting blob [{}] during cleanup", snapshotId, shardId, blobName), e); + logger.debug(() -> new ParameterizedMessage("[{}] [{}] error deleting blob [{}] during cleanup", snapshotId, shardId, blobName), e); } } } @@ -1062,7 +1061,7 @@ protected Tuple buildBlobStoreIndexShardS return new Tuple<>(shardSnapshots, latest); } catch (IOException e) { final String file = SNAPSHOT_INDEX_PREFIX + latest; - logger.warn((Supplier) () -> new ParameterizedMessage("failed to read index file [{}]", file), e); + logger.warn(() -> new ParameterizedMessage("failed to read index file [{}]", file), e); } } else if (blobKeys.isEmpty() == false) { logger.debug("Could not find a readable index-N file in a non-empty shard snapshot directory [{}]", blobContainer.path()); @@ -1080,7 +1079,7 @@ protected Tuple buildBlobStoreIndexShardS snapshots.add(new SnapshotFiles(snapshot.snapshot(), snapshot.indexFiles())); } } catch (IOException e) { - logger.warn((Supplier) () -> new ParameterizedMessage("failed to read commit point [{}]", name), e); + logger.warn(() -> new ParameterizedMessage("failed to read commit point [{}]", name), e); } } return new Tuple<>(new BlobStoreIndexShardSnapshots(snapshots), -1); @@ -1166,7 +1165,7 @@ public void snapshot(final IndexCommit snapshotIndexCommit) { // in a bwc compatible way. 
maybeRecalculateMetadataHash(blobContainer, fileInfo, metadata); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("{} Can't calculate hash from blob for file [{}] [{}]", shardId, fileInfo.physicalName(), fileInfo.metadata()), e); + logger.warn(() -> new ParameterizedMessage("{} Can't calculate hash from blob for file [{}] [{}]", shardId, fileInfo.physicalName(), fileInfo.metadata()), e); } if (fileInfo.isSame(md) && snapshotFileExistsInBlobs(fileInfo, blobs)) { // a commit point file with the same name, size and checksum was already copied to repository @@ -1441,7 +1440,7 @@ public void restore() throws IOException { logger.trace("[{}] [{}] restoring from to an empty shard", shardId, snapshotId); recoveryTargetMetadata = Store.MetadataSnapshot.EMPTY; } catch (IOException e) { - logger.warn((Supplier) () -> new ParameterizedMessage("{} Can't read metadata from store, will not reuse any local file while restoring", shardId), e); + logger.warn(() -> new ParameterizedMessage("{} Can't read metadata from store, will not reuse any local file while restoring", shardId), e); recoveryTargetMetadata = Store.MetadataSnapshot.EMPTY; } @@ -1457,7 +1456,7 @@ public void restore() throws IOException { maybeRecalculateMetadataHash(blobContainer, fileInfo, recoveryTargetMetadata); } catch (Exception e) { // if the index is broken we might not be able to read it - logger.warn((Supplier) () -> new ParameterizedMessage("{} Can't calculate hash from blog for file [{}] [{}]", shardId, fileInfo.physicalName(), fileInfo.metadata()), e); + logger.warn(() -> new ParameterizedMessage("{} Can't calculate hash from blog for file [{}] [{}]", shardId, fileInfo.physicalName(), fileInfo.metadata()), e); } snapshotMetaData.put(fileInfo.metadata().name(), fileInfo.metadata()); fileInfos.put(fileInfo.metadata().name(), fileInfo); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestClearIndicesCacheAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestClearIndicesCacheAction.java index c24456052622d..4b56e2b00ee74 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestClearIndicesCacheAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestClearIndicesCacheAction.java @@ -20,27 +20,22 @@ package org.elasticsearch.rest.action.admin.indices; import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest; -import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; -import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.RestResponse; -import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; import java.util.Map; import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestRequest.Method.POST; -import static org.elasticsearch.rest.RestStatus.OK; public class RestClearIndicesCacheAction 
extends BaseRestHandler { public RestClearIndicesCacheAction(Settings settings, RestController controller) { @@ -63,16 +58,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC Strings.splitStringByCommaToArray(request.param("index"))); clearIndicesCacheRequest.indicesOptions(IndicesOptions.fromRequest(request, clearIndicesCacheRequest.indicesOptions())); fromRequest(request, clearIndicesCacheRequest); - return channel -> - client.admin().indices().clearCache(clearIndicesCacheRequest, new RestBuilderListener(channel) { - @Override - public RestResponse buildResponse(ClearIndicesCacheResponse response, XContentBuilder builder) throws Exception { - builder.startObject(); - response.toXContent(builder, request); - builder.endObject(); - return new BytesRestResponse(OK, builder); - } - }); + return channel -> client.admin().indices().clearCache(clearIndicesCacheRequest, new RestToXContentListener<>(channel)); } @Override diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestFlushAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestFlushAction.java index 8eb318e660c60..4879a54f4feae 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestFlushAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestFlushAction.java @@ -20,24 +20,19 @@ package org.elasticsearch.rest.action.admin.indices; import org.elasticsearch.action.admin.indices.flush.FlushRequest; -import org.elasticsearch.action.admin.indices.flush.FlushResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.RestResponse; -import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestRequest.Method.POST; -import static org.elasticsearch.rest.RestStatus.OK; public class RestFlushAction extends BaseRestHandler { public RestFlushAction(Settings settings, RestController controller) { @@ -60,14 +55,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC flushRequest.indicesOptions(IndicesOptions.fromRequest(request, flushRequest.indicesOptions())); flushRequest.force(request.paramAsBoolean("force", flushRequest.force())); flushRequest.waitIfOngoing(request.paramAsBoolean("wait_if_ongoing", flushRequest.waitIfOngoing())); - return channel -> client.admin().indices().flush(flushRequest, new RestBuilderListener(channel) { - @Override - public RestResponse buildResponse(FlushResponse response, XContentBuilder builder) throws Exception { - builder.startObject(); - response.toXContent(builder, request); - builder.endObject(); - return new BytesRestResponse(OK, builder); - } - }); + return channel -> client.admin().indices().flush(flushRequest, new RestToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestForceMergeAction.java 
b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestForceMergeAction.java index 79beb66d40b1b..dcc397be14263 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestForceMergeAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestForceMergeAction.java @@ -20,24 +20,18 @@ package org.elasticsearch.rest.action.admin.indices; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; -import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.RestResponse; -import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; import static org.elasticsearch.rest.RestRequest.Method.POST; -import static org.elasticsearch.rest.RestStatus.OK; -import static org.elasticsearch.rest.action.RestActions.buildBroadcastShardsHeader; public class RestForceMergeAction extends BaseRestHandler { public RestForceMergeAction(Settings settings, RestController controller) { @@ -58,14 +52,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC mergeRequest.maxNumSegments(request.paramAsInt("max_num_segments", mergeRequest.maxNumSegments())); mergeRequest.onlyExpungeDeletes(request.paramAsBoolean("only_expunge_deletes", mergeRequest.onlyExpungeDeletes())); mergeRequest.flush(request.paramAsBoolean("flush", mergeRequest.flush())); - return channel -> client.admin().indices().forceMerge(mergeRequest, new RestBuilderListener(channel) { - @Override - public RestResponse buildResponse(ForceMergeResponse response, XContentBuilder builder) throws Exception { - builder.startObject(); - buildBroadcastShardsHeader(builder, request, response); - builder.endObject(); - return new BytesRestResponse(OK, builder); - } - }); + return channel -> client.admin().indices().forceMerge(mergeRequest, new RestToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesSegmentsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesSegmentsAction.java index a57a404baf2ef..1beec61e6dd37 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesSegmentsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesSegmentsAction.java @@ -19,25 +19,19 @@ package org.elasticsearch.rest.action.admin.indices; -import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse; import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import 
org.elasticsearch.rest.RestResponse; -import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; import static org.elasticsearch.rest.RestRequest.Method.GET; -import static org.elasticsearch.rest.RestStatus.OK; -import static org.elasticsearch.rest.action.RestActions.buildBroadcastShardsHeader; public class RestIndicesSegmentsAction extends BaseRestHandler { public RestIndicesSegmentsAction(Settings settings, RestController controller) { @@ -57,16 +51,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC Strings.splitStringByCommaToArray(request.param("index"))); indicesSegmentsRequest.verbose(request.paramAsBoolean("verbose", false)); indicesSegmentsRequest.indicesOptions(IndicesOptions.fromRequest(request, indicesSegmentsRequest.indicesOptions())); - return channel -> - client.admin().indices().segments(indicesSegmentsRequest, new RestBuilderListener(channel) { - @Override - public RestResponse buildResponse(IndicesSegmentResponse response, XContentBuilder builder) throws Exception { - builder.startObject(); - buildBroadcastShardsHeader(builder, request, response); - response.toXContent(builder, request); - builder.endObject(); - return new BytesRestResponse(OK, builder); - } - }); + return channel -> client.admin().indices().segments(indicesSegmentsRequest, new RestToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesStatsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesStatsAction.java index ca554301b937d..1dbbd6f1696db 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesStatsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesStatsAction.java @@ -20,18 +20,14 @@ package org.elasticsearch.rest.action.admin.indices; import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest; -import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.RestResponse; -import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; import java.util.Collections; @@ -43,8 +39,6 @@ import java.util.function.Consumer; import static org.elasticsearch.rest.RestRequest.Method.GET; -import static org.elasticsearch.rest.RestStatus.OK; -import static org.elasticsearch.rest.action.RestActions.buildBroadcastShardsHeader; public class RestIndicesStatsAction extends BaseRestHandler { public RestIndicesStatsAction(Settings settings, RestController controller) { @@ -141,16 +135,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC indicesStatsRequest.includeSegmentFileSizes(request.paramAsBoolean("include_segment_file_sizes", false)); } - return channel -> client.admin().indices().stats(indicesStatsRequest, new RestBuilderListener(channel) { - @Override - public RestResponse buildResponse(IndicesStatsResponse 
response, XContentBuilder builder) throws Exception { - builder.startObject(); - buildBroadcastShardsHeader(builder, request, response); - response.toXContent(builder, request); - builder.endObject(); - return new BytesRestResponse(OK, builder); - } - }); + return channel -> client.admin().indices().stats(indicesStatsRequest, new RestToXContentListener<>(channel)); } @Override diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRecoveryAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRecoveryAction.java index 4516ebeeb565d..b445cb3a6764a 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRecoveryAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRecoveryAction.java @@ -20,23 +20,18 @@ package org.elasticsearch.rest.action.admin.indices; import org.elasticsearch.action.admin.indices.recovery.RecoveryRequest; -import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.RestResponse; -import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; import static org.elasticsearch.rest.RestRequest.Method.GET; -import static org.elasticsearch.rest.RestStatus.OK; /** * REST handler to report on index recoveries. 
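Every handler change in this patch follows the same pattern: the broadcast response types now implement ToXContentObject (rendering the "_shards" header in their own toXContent), which is exactly what RestToXContentListener requires, so the hand-written RestBuilderListener subclasses can go. A condensed before/after of the pattern, taken from the stats handler above (generic type parameters written out; the enclosing handler boilerplate is elided, so this is a sketch rather than a compilable unit):

    // Before: subclass RestBuilderListener and assemble the body by hand,
    // including the broadcast shards header.
    return channel -> client.admin().indices().stats(indicesStatsRequest,
            new RestBuilderListener<IndicesStatsResponse>(channel) {
                @Override
                public RestResponse buildResponse(IndicesStatsResponse response,
                                                  XContentBuilder builder) throws Exception {
                    builder.startObject();
                    buildBroadcastShardsHeader(builder, request, response);
                    response.toXContent(builder, request);
                    builder.endObject();
                    return new BytesRestResponse(OK, builder);
                }
            });

    // After: the response serializes itself as a complete object, so the
    // generic listener is sufficient.
    return channel -> client.admin().indices().stats(indicesStatsRequest,
            new RestToXContentListener<>(channel));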
@@ -60,18 +55,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC recoveryRequest.detailed(request.paramAsBoolean("detailed", false)); recoveryRequest.activeOnly(request.paramAsBoolean("active_only", false)); recoveryRequest.indicesOptions(IndicesOptions.fromRequest(request, recoveryRequest.indicesOptions())); - - return channel -> client.admin().indices().recoveries(recoveryRequest, new RestBuilderListener(channel) { - @Override - public RestResponse buildResponse(RecoveryResponse response, XContentBuilder builder) throws Exception { - response.detailed(recoveryRequest.detailed()); - builder.startObject(); - response.toXContent(builder, request); - builder.endObject(); - return new BytesRestResponse(OK, builder); - } - }); - + return channel -> client.admin().indices().recoveries(recoveryRequest, new RestToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRefreshAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRefreshAction.java index 486d8664a49d2..1f0f81e0285ce 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRefreshAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRefreshAction.java @@ -25,13 +25,11 @@ import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.RestResponse; -import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; @@ -57,13 +55,10 @@ public String getName() { public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { RefreshRequest refreshRequest = new RefreshRequest(Strings.splitStringByCommaToArray(request.param("index"))); refreshRequest.indicesOptions(IndicesOptions.fromRequest(request, refreshRequest.indicesOptions())); - return channel -> client.admin().indices().refresh(refreshRequest, new RestBuilderListener(channel) { + return channel -> client.admin().indices().refresh(refreshRequest, new RestToXContentListener(channel) { @Override - public RestResponse buildResponse(RefreshResponse response, XContentBuilder builder) throws Exception { - builder.startObject(); - response.toXContent(builder, request); - builder.endObject(); - return new BytesRestResponse(response.getStatus(), builder); + protected RestStatus getStatus(RefreshResponse response) { + return response.getStatus(); } }); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpgradeAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpgradeAction.java index 1d32c14655ade..9201c4504823d 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpgradeAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpgradeAction.java @@ -19,40 +19,26 @@ package org.elasticsearch.rest.action.admin.indices; -import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.upgrade.get.UpgradeStatusRequest; -import 
org.elasticsearch.action.admin.indices.upgrade.get.UpgradeStatusResponse; import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeRequest; -import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.RestResponse; -import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; -import java.util.Map; -import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestRequest.Method.POST; -import static org.elasticsearch.rest.RestStatus.OK; -import static org.elasticsearch.rest.action.RestActions.buildBroadcastShardsHeader; public class RestUpgradeAction extends BaseRestHandler { public RestUpgradeAction(Settings settings, RestController controller) { super(settings); controller.registerHandler(POST, "/_upgrade", this); controller.registerHandler(POST, "/{index}/_upgrade", this); - - controller.registerHandler(GET, "/_upgrade", this); - controller.registerHandler(GET, "/{index}/_upgrade", this); } @Override @@ -62,50 +48,9 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { - if (request.method().equals(RestRequest.Method.GET)) { - return handleGet(request, client); - } else if (request.method().equals(RestRequest.Method.POST)) { - return handlePost(request, client); - } else { - throw new IllegalArgumentException("illegal method [" + request.method() + "] for request [" + request.path() + "]"); - } - } - - private RestChannelConsumer handleGet(final RestRequest request, NodeClient client) { - UpgradeStatusRequest statusRequest = new UpgradeStatusRequest(Strings.splitStringByCommaToArray(request.param("index"))); - statusRequest.indicesOptions(IndicesOptions.fromRequest(request, statusRequest.indicesOptions())); - return channel -> client.admin().indices().upgradeStatus(statusRequest, new RestBuilderListener(channel) { - @Override - public RestResponse buildResponse(UpgradeStatusResponse response, XContentBuilder builder) throws Exception { - builder.startObject(); - response.toXContent(builder, request); - builder.endObject(); - return new BytesRestResponse(OK, builder); - } - }); - } - - private RestChannelConsumer handlePost(final RestRequest request, NodeClient client) { UpgradeRequest upgradeReq = new UpgradeRequest(Strings.splitStringByCommaToArray(request.param("index"))); upgradeReq.indicesOptions(IndicesOptions.fromRequest(request, upgradeReq.indicesOptions())); upgradeReq.upgradeOnlyAncientSegments(request.paramAsBoolean("only_ancient_segments", false)); - return channel -> client.admin().indices().upgrade(upgradeReq, new RestBuilderListener(channel) { - @Override - public RestResponse buildResponse(UpgradeResponse response, XContentBuilder builder) throws Exception { - builder.startObject(); - buildBroadcastShardsHeader(builder, request, response); - builder.startObject("upgraded_indices"); - for (Map.Entry> entry : 
response.versions().entrySet()) { - builder.startObject(entry.getKey()); - builder.field("upgrade_version", entry.getValue().v1()); - builder.field("oldest_lucene_segment_version", entry.getValue().v2()); - builder.endObject(); - } - builder.endObject(); - builder.endObject(); - return new BytesRestResponse(OK, builder); - } - }); + return channel -> client.admin().indices().upgrade(upgradeReq, new RestToXContentListener<>(channel)); } - } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpgradeStatusAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpgradeStatusAction.java new file mode 100644 index 0000000000000..1b21e125cdc47 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpgradeStatusAction.java @@ -0,0 +1,55 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.rest.action.admin.indices; + +import org.elasticsearch.action.admin.indices.upgrade.get.UpgradeStatusRequest; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; + +import java.io.IOException; + +import static org.elasticsearch.rest.RestRequest.Method.GET; + +public class RestUpgradeStatusAction extends BaseRestHandler { + + public RestUpgradeStatusAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(GET, "/_upgrade", this); + controller.registerHandler(GET, "/{index}/_upgrade", this); + } + + @Override + public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + UpgradeStatusRequest statusRequest = new UpgradeStatusRequest(Strings.splitStringByCommaToArray(request.param("index"))); + statusRequest.indicesOptions(IndicesOptions.fromRequest(request, statusRequest.indicesOptions())); + return channel -> client.admin().indices().upgradeStatus(statusRequest, new RestToXContentListener<>(channel)); + } + + @Override + public String getName() { + return "upgrade_status_action"; + } +} diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestValidateQueryAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestValidateQueryAction.java index df1c14c480650..57486396f911b 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestValidateQueryAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestValidateQueryAction.java @@ 
-19,7 +19,6 @@ package org.elasticsearch.rest.action.admin.indices; -import org.elasticsearch.action.admin.indices.validate.query.QueryExplanation; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryResponse; import org.elasticsearch.action.support.IndicesOptions; @@ -33,16 +32,14 @@ import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.action.RestActions; -import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.rest.RestStatus.OK; -import static org.elasticsearch.rest.action.RestActions.buildBroadcastShardsHeader; public class RestValidateQueryAction extends BaseRestHandler { public RestValidateQueryAction(Settings settings, RestController controller) { @@ -91,37 +88,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC handleException(validateQueryRequest, finalBodyParsingException.getMessage(), channel); } } else { - client.admin().indices().validateQuery(validateQueryRequest, new RestBuilderListener(channel) { - @Override - public RestResponse buildResponse(ValidateQueryResponse response, XContentBuilder builder) throws Exception { - builder.startObject(); - builder.field(VALID_FIELD, response.isValid()); - buildBroadcastShardsHeader(builder, request, response); - if (response.getQueryExplanation() != null && !response.getQueryExplanation().isEmpty()) { - builder.startArray(EXPLANATIONS_FIELD); - for (QueryExplanation explanation : response.getQueryExplanation()) { - builder.startObject(); - if (explanation.getIndex() != null) { - builder.field(INDEX_FIELD, explanation.getIndex()); - } - if(explanation.getShard() >= 0) { - builder.field(SHARD_FIELD, explanation.getShard()); - } - builder.field(VALID_FIELD, explanation.isValid()); - if (explanation.getError() != null) { - builder.field(ERROR_FIELD, explanation.getError()); - } - if (explanation.getExplanation() != null) { - builder.field(EXPLANATION_FIELD, explanation.getExplanation()); - } - builder.endObject(); - } - builder.endArray(); - } - builder.endObject(); - return new BytesRestResponse(OK, builder); - } - }); + client.admin().indices().validateQuery(validateQueryRequest, new RestToXContentListener<>(channel)); } }; } @@ -132,18 +99,11 @@ private void handleException(final ValidateQueryRequest request, final String me private static BytesRestResponse buildErrorResponse(XContentBuilder builder, String error, boolean explain) throws IOException { builder.startObject(); - builder.field(VALID_FIELD, false); + builder.field(ValidateQueryResponse.VALID_FIELD, false); if (explain) { - builder.field(ERROR_FIELD, error); + builder.field(ValidateQueryResponse.ERROR_FIELD, error); } builder.endObject(); return new BytesRestResponse(OK, builder); } - - private static final String INDEX_FIELD = "index"; - private static final String SHARD_FIELD = "shard"; - private static final String VALID_FIELD = "valid"; - private static final String EXPLANATIONS_FIELD = "explanations"; - private static final String ERROR_FIELD = "error"; - private static final String EXPLANATION_FIELD = "explanation"; } diff --git 
a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java index b7abf82a58ea3..caa16b7a9d57f 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java @@ -120,7 +120,7 @@ protected void doXContentBody(XContentBuilder builder, Params params) throws IOE builder.field(Histogram.INTERVAL_FIELD.getPreferredName(), dateHistogramInterval.toString()); } if (timeZone != null) { - builder.field("time_zone", timeZone); + builder.field("time_zone", timeZone.toString()); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramInterval.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramInterval.java index 9310142aa9c41..9b34739b96d6e 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramInterval.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramInterval.java @@ -22,6 +22,9 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; import java.util.Objects; @@ -29,7 +32,7 @@ /** * The interval the date histogram is based on. 
*/ -public class DateHistogramInterval implements Writeable { +public class DateHistogramInterval implements Writeable, ToXContentFragment { public static final DateHistogramInterval SECOND = new DateHistogramInterval("1s"); public static final DateHistogramInterval MINUTE = new DateHistogramInterval("1m"); @@ -100,4 +103,9 @@ public boolean equals(Object obj) { DateHistogramInterval other = (DateHistogramInterval) obj; return Objects.equals(expression, other.expression); } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.value(toString()); + } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java index eb81d5a9b6b7e..81b6d23f3873d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java @@ -338,7 +338,7 @@ public final XContentBuilder internalXContent(XContentBuilder builder, Params pa builder.field("format", format); } if (timeZone != null) { - builder.field("time_zone", timeZone); + builder.field("time_zone", timeZone.toString()); } if (valueType != null) { builder.field("value_type", valueType.getPreferredName()); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java index c51bb83741ac4..d8414c7b31f94 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java @@ -263,7 +263,7 @@ public VS toValuesSource(QueryShardContext context) throws IOException { return (VS) MissingValues.replaceMissing((ValuesSource.Numeric) vs, missing); } else if (vs instanceof ValuesSource.GeoPoint) { // TODO: also support the structured formats of geo points - final GeoPoint missing = GeoUtils.parseGeoPoint(missing().toString(), new GeoPoint()); + final GeoPoint missing = new GeoPoint(missing().toString()); return (VS) MissingValues.replaceMissing((ValuesSource.GeoPoint) vs, missing); } else { // Should not happen diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java index b464d6069e79e..c4f7d8a500064 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java @@ -133,7 +133,7 @@ protected XContentBuilder toInnerXContent(XContentBuilder builder, Params params *

 *  <ul>
 *    <li>String/Object/Array: "GEO POINT"</li>
 *  </ul>
 *
- * see {@link GeoUtils#parseGeoPoint(String, GeoPoint)} for GEO POINT
+ * see {@code GeoPoint(String)} for GEO POINT
 */ @Override public Set parseContext(ParseContext parseContext, XContentParser parser) throws IOException, ElasticsearchParseException { @@ -249,7 +249,7 @@ protected GeoQueryContext fromXContent(XContentParser parser) throws IOException
 *
 *  <ul>
 *    <li>String: GEO POINT</li>
 *  </ul>
- * see {@link GeoUtils#parseGeoPoint(String, GeoPoint)} for GEO POINT
+ * see {@code GeoPoint(String)} for GEO POINT
 */ @Override public List toInternalQueryContexts(List queryContexts) {
diff --git a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java index 0a929cc8f0bc1..e6b54a20a1e07 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -24,7 +24,6 @@ import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.IndicesOptions; @@ -455,7 +454,7 @@ private void restoreGlobalStateIfRequested(MetaData.Builder mdBuilder) { @Override public void onFailure(String source, Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to restore snapshot", snapshotId), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to restore snapshot", snapshotId), e); listener.onFailure(e); } @@ -472,7 +471,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to restore snapshot", request.repositoryName + ":" + request.snapshotName), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to restore snapshot", request.repositoryName + ":" + request.snapshotName), e); listener.onFailure(e); } } @@ -679,7 +678,7 @@ public ClusterTasksResult execute(final ClusterState currentState, final L @Override public void onFailure(final String source, final Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e); + logger.error(() -> new ParameterizedMessage("unexpected failure during [{}]", source), e); } @Override
diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java index f737080fe5fa5..226aa973cb3e5 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java @@ -21,7 +21,6 @@ import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.util.SetOnce; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; @@ -357,8 +356,7 @@ public void doRun() { @Override public void onFailure(Exception e) { - logger.warn((Supplier) () -> - new ParameterizedMessage("[{}][{}] failed to snapshot shard", shardId, snapshot), e); + logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to snapshot shard", shardId, snapshot), e); failure.set(e); } @@ -567,7 +565,7 @@ void sendSnapshotShardUpdate(final Snapshot snapshot, transportService.sendRequest(masterNode, UPDATE_SNAPSHOT_STATUS_ACTION_NAME_V6, requestV6, INSTANCE_SAME); } } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] [{}] failed to update snapshot state", snapshot, status), e); + logger.warn(() -> new ParameterizedMessage("[{}] [{}] failed to update snapshot state", snapshot, status), e);
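// The hunks from here through the transport classes repeat a second
// mechanical cleanup: lazy logging calls drop the explicit
// org.apache.logging.log4j.util.Supplier cast, along with its now-unused
// import. The cast existed to pin overload resolution to the Supplier-taking
// warn/error/debug/trace variants; the bare lambda now resolves to them
// directly, and the ParameterizedMessage is still built lazily, only when the
// log level is enabled. Condensed before/after, from the hunk just above:
//
//   logger.warn((Supplier) () -> new ParameterizedMessage("[{}] [{}] failed to update snapshot state", snapshot, status), e);
//   logger.warn(() -> new ParameterizedMessage("[{}] [{}] failed to update snapshot state", snapshot, status), e);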
} } diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index 7230ac46e1306..7ab1ba43158ad 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -22,7 +22,6 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; @@ -192,7 +191,7 @@ public List snapshots(final String repositoryName, } } catch (Exception ex) { if (ignoreUnavailable) { - logger.warn((Supplier) () -> new ParameterizedMessage("failed to get snapshot [{}]", snapshotId), ex); + logger.warn(() -> new ParameterizedMessage("failed to get snapshot [{}]", snapshotId), ex); } else { throw new SnapshotException(repositoryName, snapshotId, "Snapshot could not be read", ex); } @@ -270,7 +269,7 @@ public ClusterState execute(ClusterState currentState) { @Override public void onFailure(String source, Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}][{}] failed to create snapshot", repositoryName, snapshotName), e); + logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to create snapshot", repositoryName, snapshotName), e); newSnapshot = null; listener.onFailure(e); } @@ -432,7 +431,7 @@ public ClusterState execute(ClusterState currentState) { @Override public void onFailure(String source, Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to create snapshot", snapshot.snapshot().getSnapshotId()), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to create snapshot", snapshot.snapshot().getSnapshotId()), e); removeSnapshotFromClusterState(snapshot.snapshot(), null, e, new CleanupAfterErrorListener(snapshot, true, userCreateSnapshotListener, e)); } @@ -463,7 +462,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS } }); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("failed to create snapshot [{}]", snapshot.snapshot().getSnapshotId()), e); + logger.warn(() -> new ParameterizedMessage("failed to create snapshot [{}]", snapshot.snapshot().getSnapshotId()), e); removeSnapshotFromClusterState(snapshot.snapshot(), null, e, new CleanupAfterErrorListener(snapshot, snapshotCreated, userCreateSnapshotListener, e)); } } @@ -511,7 +510,7 @@ private void cleanupAfterError(Exception exception) { snapshot.includeGlobalState()); } catch (Exception inner) { inner.addSuppressed(exception); - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to close snapshot in repository", snapshot.snapshot()), inner); + logger.warn(() -> new ParameterizedMessage("[{}] failed to close snapshot in repository", snapshot.snapshot()), inner); } } userCreateSnapshotListener.onFailure(e); @@ -824,7 +823,7 @@ public ClusterState execute(ClusterState currentState) throws Exception { @Override public void onFailure(String source, Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("failed to update snapshot state after shards started from [{}] ", source), e); + logger.warn(() -> new ParameterizedMessage("failed to update snapshot state after shards started from [{}] ", source), e); } }); } @@ 
-983,7 +982,7 @@ private void endSnapshot(final SnapshotsInProgress.Entry entry, final String fai removeSnapshotFromClusterState(snapshot, snapshotInfo, null); logger.info("snapshot [{}] completed with state [{}]", snapshot, snapshotInfo.state()); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to finalize snapshot", snapshot), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to finalize snapshot", snapshot), e); removeSnapshotFromClusterState(snapshot, null, e); } }); @@ -1032,7 +1031,7 @@ public ClusterState execute(ClusterState currentState) { @Override public void onFailure(String source, Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to remove snapshot metadata", snapshot), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to remove snapshot metadata", snapshot), e); if (listener != null) { listener.onFailure(e); } @@ -1055,7 +1054,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS listener.onSnapshotFailure(snapshot, failure); } } catch (Exception t) { - logger.warn((Supplier) () -> new ParameterizedMessage("failed to notify listener [{}]", listener), t); + logger.warn(() -> new ParameterizedMessage("failed to notify listener [{}]", listener), t); } } if (listener != null) { @@ -1224,8 +1223,7 @@ public void onSnapshotCompletion(Snapshot completedSnapshot, SnapshotInfo snapsh listener, true); } catch (Exception ex) { - logger.warn((Supplier) () -> - new ParameterizedMessage("[{}] failed to delete snapshot", snapshot), ex); + logger.warn(() -> new ParameterizedMessage("[{}] failed to delete snapshot", snapshot), ex); } } ); @@ -1244,7 +1242,7 @@ public void onSnapshotFailure(Snapshot failedSnapshot, Exception e) { listener, true); } catch (SnapshotMissingException smex) { - logger.info((Supplier) () -> new ParameterizedMessage( + logger.info(() -> new ParameterizedMessage( "Tried deleting in-progress snapshot [{}], but it " + "could not be found after failing to abort.", smex.getSnapshotName()), e); @@ -1339,7 +1337,7 @@ public ClusterState execute(ClusterState currentState) { @Override public void onFailure(String source, Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("[{}] failed to remove snapshot deletion metadata", snapshot), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to remove snapshot deletion metadata", snapshot), e); if (listener != null) { listener.onFailure(e); } diff --git a/server/src/main/java/org/elasticsearch/tasks/LoggingTaskListener.java b/server/src/main/java/org/elasticsearch/tasks/LoggingTaskListener.java index c3eeaa6ee8da5..632c085dbd773 100644 --- a/server/src/main/java/org/elasticsearch/tasks/LoggingTaskListener.java +++ b/server/src/main/java/org/elasticsearch/tasks/LoggingTaskListener.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.common.logging.Loggers; /** @@ -51,6 +50,6 @@ public void onResponse(Task task, Response response) { @Override public void onFailure(Task task, Throwable e) { - logger.warn((Supplier) () -> new ParameterizedMessage("{} failed with exception", task.getId()), e); + logger.warn(() -> new ParameterizedMessage("{} failed with exception", task.getId()), e); } } diff --git a/server/src/main/java/org/elasticsearch/tasks/TaskManager.java b/server/src/main/java/org/elasticsearch/tasks/TaskManager.java index 
16212e066bbff..80427b197239d 100644 --- a/server/src/main/java/org/elasticsearch/tasks/TaskManager.java +++ b/server/src/main/java/org/elasticsearch/tasks/TaskManager.java @@ -20,7 +20,6 @@ package org.elasticsearch.tasks; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.ExceptionsHelper; @@ -197,8 +196,7 @@ public void storeResult(Task task, Exception e try { taskResult = task.result(localNode, error); } catch (IOException ex) { - logger.warn( - (Supplier) () -> new ParameterizedMessage("couldn't store error {}", ExceptionsHelper.detailedMessage(error)), ex); + logger.warn(() -> new ParameterizedMessage("couldn't store error {}", ExceptionsHelper.detailedMessage(error)), ex); listener.onFailure(ex); return; } @@ -210,8 +208,7 @@ public void onResponse(Void aVoid) { @Override public void onFailure(Exception e) { - logger.warn( - (Supplier) () -> new ParameterizedMessage("couldn't store error {}", ExceptionsHelper.detailedMessage(error)), e); + logger.warn(() -> new ParameterizedMessage("couldn't store error {}", ExceptionsHelper.detailedMessage(error)), e); listener.onFailure(e); } }); @@ -232,7 +229,7 @@ public void storeResult(Task task, Response re try { taskResult = task.result(localNode, response); } catch (IOException ex) { - logger.warn((Supplier) () -> new ParameterizedMessage("couldn't store response {}", response), ex); + logger.warn(() -> new ParameterizedMessage("couldn't store response {}", response), ex); listener.onFailure(ex); return; } @@ -245,7 +242,7 @@ public void onResponse(Void aVoid) { @Override public void onFailure(Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("couldn't store response {}", response), e); + logger.warn(() -> new ParameterizedMessage("couldn't store response {}", response), e); listener.onFailure(e); } }); diff --git a/server/src/main/java/org/elasticsearch/tasks/TaskResultsService.java b/server/src/main/java/org/elasticsearch/tasks/TaskResultsService.java index 0c6c22671e8dc..de63994457a1f 100644 --- a/server/src/main/java/org/elasticsearch/tasks/TaskResultsService.java +++ b/server/src/main/java/org/elasticsearch/tasks/TaskResultsService.java @@ -185,8 +185,7 @@ public String taskResultIndexMapping() { Streams.copy(is, out); return out.toString(StandardCharsets.UTF_8.name()); } catch (Exception e) { - logger.error( - (Supplier) () -> new ParameterizedMessage( + logger.error(() -> new ParameterizedMessage( "failed to create tasks results index template [{}]", TASK_RESULT_INDEX_MAPPING_FILE), e); throw new IllegalStateException("failed to create tasks results index template [" + TASK_RESULT_INDEX_MAPPING_FILE + "]", e); } diff --git a/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java b/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java index c7d16d1979b20..f3d8745092a14 100644 --- a/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java +++ b/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java @@ -20,7 +20,6 @@ package org.elasticsearch.threadpool; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.util.Counter; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.Version; @@ -58,7 +57,6 @@ import java.util.concurrent.ScheduledThreadPoolExecutor; import 
java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; -import java.util.stream.Collectors; import static java.util.Collections.unmodifiableMap; @@ -139,9 +137,7 @@ public static ThreadPoolType fromType(String type) { THREAD_POOL_TYPES = Collections.unmodifiableMap(map); } - private final Map executors; - - private final ThreadPoolInfo threadPoolInfo; + private Map executors = new HashMap<>(); private final CachedTimeThread cachedTimeThread; @@ -210,15 +206,6 @@ public ThreadPool(final Settings settings, final ExecutorBuilder... customBui executors.put(Names.SAME, new ExecutorHolder(DIRECT_EXECUTOR, new Info(Names.SAME, ThreadPoolType.DIRECT))); this.executors = unmodifiableMap(executors); - - final List infos = - executors - .values() - .stream() - .filter(holder -> holder.info.getName().equals("same") == false) - .map(holder -> holder.info) - .collect(Collectors.toList()); - this.threadPoolInfo = new ThreadPoolInfo(infos); this.scheduler = Scheduler.initScheduler(settings); TimeValue estimatedTimeInterval = ESTIMATED_TIME_INTERVAL_SETTING.get(settings); this.cachedTimeThread = new CachedTimeThread(EsExecutors.threadName(settings, "[timer]"), estimatedTimeInterval.millis()); @@ -251,7 +238,16 @@ public Counter estimatedTimeInMillisCounter() { } public ThreadPoolInfo info() { - return threadPoolInfo; + List infos = new ArrayList<>(); + for (ExecutorHolder holder : executors.values()) { + String name = holder.info.getName(); + // no need to have info on "same" thread pool + if ("same".equals(name)) { + continue; + } + infos.add(holder.info); + } + return new ThreadPoolInfo(infos); } public Info info(String name) { @@ -351,11 +347,11 @@ public Cancellable scheduleWithFixedDelay(Runnable command, TimeValue interval, return new ReschedulingRunnable(command, interval, executor, this, (e) -> { if (logger.isDebugEnabled()) { - logger.debug((Supplier) () -> new ParameterizedMessage("scheduled task [{}] was rejected on thread pool [{}]", + logger.debug(() -> new ParameterizedMessage("scheduled task [{}] was rejected on thread pool [{}]", command, executor), e); } }, - (e) -> logger.warn((Supplier) () -> new ParameterizedMessage("failed to run scheduled task [{}] on thread pool [{}]", + (e) -> logger.warn(() -> new ParameterizedMessage("failed to run scheduled task [{}] on thread pool [{}]", command, executor), e)); } @@ -443,7 +439,7 @@ public void run() { try { runnable.run(); } catch (Exception e) { - logger.warn((Supplier) () -> new ParameterizedMessage("failed to run {}", runnable.toString()), e); + logger.warn(() -> new ParameterizedMessage("failed to run {}", runnable.toString()), e); throw e; } } @@ -658,29 +654,32 @@ public SizeValue getQueueSize() { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(name); - builder.field("type", type.getType()); - - if (type == ThreadPoolType.SCALING) { - assert min != -1; - builder.field("core", min); - assert max != -1; - builder.field("max", max); - } else { - assert max != -1; - builder.field("size", max); + builder.field(Fields.TYPE, type.getType()); + if (min != -1) { + builder.field(Fields.MIN, min); + } + if (max != -1) { + builder.field(Fields.MAX, max); } if (keepAlive != null) { - builder.field("keep_alive", keepAlive.toString()); + builder.field(Fields.KEEP_ALIVE, keepAlive.toString()); } if (queueSize == null) { - builder.field("queue_size", -1); + builder.field(Fields.QUEUE_SIZE, -1); } else { - builder.field("queue_size", queueSize.singles()); 
+ builder.field(Fields.QUEUE_SIZE, queueSize.singles()); } builder.endObject(); return builder; } + static final class Fields { + static final String TYPE = "type"; + static final String MIN = "min"; + static final String MAX = "max"; + static final String KEEP_ALIVE = "keep_alive"; + static final String QUEUE_SIZE = "queue_size"; + } } /** diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java index aa4dec48b46bd..fb4586d201bd7 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java @@ -19,7 +19,6 @@ package org.elasticsearch.transport; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.core.internal.io.IOUtils; import org.apache.lucene.util.SetOnce; @@ -65,6 +64,7 @@ import java.util.function.Consumer; import java.util.function.Function; import java.util.function.Predicate; +import java.util.function.Supplier; import java.util.stream.Collectors; /** @@ -433,7 +433,7 @@ void collectRemoteNodes(Iterator seedNodes, handshakeNode = transportService.handshake(connection, remoteProfile.getHandshakeTimeout().millis(), (c) -> remoteClusterName.get() == null ? true : c.equals(remoteClusterName.get())); } catch (IllegalStateException ex) { - logger.warn((Supplier) () -> new ParameterizedMessage("seed node {} cluster name mismatch expected " + + logger.warn(() -> new ParameterizedMessage("seed node {} cluster name mismatch expected " + "cluster name {}", connection.getNode(), remoteClusterName.get()), ex); throw ex; } @@ -475,8 +475,7 @@ void collectRemoteNodes(Iterator seedNodes, } catch (ConnectTransportException | IOException | IllegalStateException ex) { // ISE if we fail the handshake with an version incompatible node if (seedNodes.hasNext()) { - logger.debug((Supplier) () -> new ParameterizedMessage("fetching nodes from external cluster {} failed", - clusterAlias), ex); + logger.debug(() -> new ParameterizedMessage("fetching nodes from external cluster {} failed", clusterAlias), ex); collectRemoteNodes(seedNodes, transportService, listener); } else { listener.onFailure(ex); @@ -551,8 +550,7 @@ public void handleResponse(ClusterStateResponse response) { } catch (ConnectTransportException | IllegalStateException ex) { // ISE if we fail the handshake with an version incompatible node // fair enough we can't connect just move on - logger.debug((Supplier) - () -> new ParameterizedMessage("failed to connect to node {}", node), ex); + logger.debug(() -> new ParameterizedMessage("failed to connect to node {}", node), ex); } } } @@ -562,9 +560,7 @@ public void handleResponse(ClusterStateResponse response) { } catch (CancellableThreads.ExecutionCancelledException ex) { listener.onFailure(ex); // we got canceled - fail the listener and step out } catch (Exception ex) { - logger.warn((Supplier) - () -> new ParameterizedMessage("fetching nodes from external cluster {} failed", - clusterAlias), ex); + logger.warn(() -> new ParameterizedMessage("fetching nodes from external cluster {} failed", clusterAlias), ex); collectRemoteNodes(seedNodes, transportService, listener); } } @@ -572,9 +568,7 @@ public void handleResponse(ClusterStateResponse response) { @Override public void handleException(TransportException exp) { assert 
transportService.getThreadPool().getThreadContext().isSystemContext() == false : "context is a system context"; - logger.warn((Supplier) - () -> new ParameterizedMessage("fetching nodes from external cluster {} failed", clusterAlias), - exp); + logger.warn(() -> new ParameterizedMessage("fetching nodes from external cluster {} failed", clusterAlias), exp); try { IOUtils.closeWhileHandlingException(connection); } finally { diff --git a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java index c066cbc1136bc..292ccc6bab886 100644 --- a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java +++ b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java @@ -21,7 +21,6 @@ import com.carrotsearch.hppc.IntHashSet; import com.carrotsearch.hppc.IntSet; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; @@ -351,11 +350,10 @@ protected void innerInnerOnResponse(Void v) { @Override protected void innerOnFailure(Exception e) { if (channel.isOpen()) { - logger.debug( - (Supplier) () -> new ParameterizedMessage("[{}] failed to send ping transport message", node), e); + logger.debug(() -> new ParameterizedMessage("[{}] failed to send ping transport message", node), e); failedPings.inc(); } else { - logger.trace((Supplier) () -> + logger.trace(() -> new ParameterizedMessage("[{}] failed to send ping transport message (channel closed)", node), e); } @@ -542,9 +540,7 @@ public void connectToNode(DiscoveryNode node, ConnectionProfile connectionProfil throw new ConnectTransportException(node, "general node connection failure", e); } finally { if (success == false) { // close the connection if there is a failure - logger.trace( - (Supplier) () -> new ParameterizedMessage( - "failed to connect to [{}], cleaning dangling connections", node)); + logger.trace(() -> new ParameterizedMessage("failed to connect to [{}], cleaning dangling connections", node)); IOUtils.closeWhileHandlingException(nodeChannels); } } @@ -989,27 +985,21 @@ protected void onException(TcpChannel channel, Exception e) { } if (isCloseConnectionException(e)) { - logger.trace( - (Supplier) () -> new ParameterizedMessage( - "close connection exception caught on transport layer [{}], disconnecting from relevant node", - channel), - e); + logger.trace(() -> new ParameterizedMessage( + "close connection exception caught on transport layer [{}], disconnecting from relevant node", channel), e); // close the channel, which will cause a node to be disconnected if relevant TcpChannel.closeChannel(channel, false); } else if (isConnectException(e)) { - logger.trace((Supplier) () -> new ParameterizedMessage("connect exception caught on transport layer [{}]", channel), e); + logger.trace(() -> new ParameterizedMessage("connect exception caught on transport layer [{}]", channel), e); // close the channel as safe measure, which will cause a node to be disconnected if relevant TcpChannel.closeChannel(channel, false); } else if (e instanceof BindException) { - logger.trace((Supplier) () -> new ParameterizedMessage("bind exception caught on transport layer [{}]", channel), e); + logger.trace(() -> new ParameterizedMessage("bind exception caught on transport layer [{}]", channel), e); // close the channel as safe measure, which will cause a node to be disconnected if 
relevant TcpChannel.closeChannel(channel, false); } else if (e instanceof CancelledKeyException) { - logger.trace( - (Supplier) () -> new ParameterizedMessage( - "cancelled key exception caught on transport layer [{}], disconnecting from relevant node", - channel), - e); + logger.trace(() -> new ParameterizedMessage( + "cancelled key exception caught on transport layer [{}], disconnecting from relevant node", channel), e); // close the channel as safe measure, which will cause a node to be disconnected if relevant TcpChannel.closeChannel(channel, false); } else if (e instanceof TcpTransport.HttpOnTransportException) { @@ -1031,8 +1021,7 @@ protected void innerOnFailure(Exception e) { internalSendMessage(channel, message, closeChannel); } } else { - logger.warn( - (Supplier) () -> new ParameterizedMessage("exception caught on transport layer [{}], closing connection", channel), e); + logger.warn(() -> new ParameterizedMessage("exception caught on transport layer [{}], closing connection", channel), e); // close the channel, which will cause a node to be disconnected if relevant TcpChannel.closeChannel(channel, false); } @@ -1477,7 +1466,7 @@ private void handleException(final TransportResponseHandler handler, Throwable e try { handler.handleException(rtx); } catch (Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("failed to handle exception response [{}]", handler), e); + logger.error(() -> new ParameterizedMessage("failed to handle exception response [{}]", handler), e); } }); } @@ -1520,9 +1509,7 @@ protected String handleRequest(TcpChannel channel, String profileName, final Str transportChannel.sendResponse(e); } catch (IOException inner) { inner.addSuppressed(e); - logger.warn( - (Supplier) () -> new ParameterizedMessage( - "Failed to send error message back to client for action [{}]", action), inner); + logger.warn(() -> new ParameterizedMessage("Failed to send error message back to client for action [{}]", action), inner); } } return action; @@ -1568,8 +1555,7 @@ public void onFailure(Exception e) { transportChannel.sendResponse(e); } catch (Exception inner) { inner.addSuppressed(e); - logger.warn( - (Supplier) () -> new ParameterizedMessage( + logger.warn(() -> new ParameterizedMessage( "Failed to send error message back to client for action [{}]", reg.getAction()), inner); } } diff --git a/server/src/main/java/org/elasticsearch/transport/TransportChannelResponseHandler.java b/server/src/main/java/org/elasticsearch/transport/TransportChannelResponseHandler.java index 3d46c0853ec49..4ba2769edb4a2 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportChannelResponseHandler.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportChannelResponseHandler.java @@ -63,12 +63,8 @@ public void handleException(TransportException exp) { try { channel.sendResponse(exp); } catch (IOException e) { - logger.debug( - (org.apache.logging.log4j.util.Supplier) - () -> new ParameterizedMessage( - "failed to send failure {}", - extraInfoOnError == null ? "" : "(" + extraInfoOnError + ")"), - e); + logger.debug(() -> new ParameterizedMessage( + "failed to send failure {}", extraInfoOnError == null ? 
"" : "(" + extraInfoOnError + ")"), e); } } diff --git a/server/src/main/java/org/elasticsearch/transport/TransportService.java b/server/src/main/java/org/elasticsearch/transport/TransportService.java index 577b299944662..5d3ac517c7a31 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportService.java @@ -812,9 +812,7 @@ void onResponseSent(long requestId, String action, Exception e) { } protected void traceResponseSent(long requestId, String action, Exception e) { - tracerLog.trace( - (org.apache.logging.log4j.util.Supplier) - () -> new ParameterizedMessage("[{}][{}] sent error response", requestId, action), e); + tracerLog.trace(() -> new ParameterizedMessage("[{}][{}] sent error response", requestId, action), e); } /** diff --git a/server/src/main/resources/META-INF/services/org.elasticsearch.common.xcontent.XContentBuilderExtension b/server/src/main/resources/META-INF/services/org.elasticsearch.common.xcontent.XContentBuilderExtension new file mode 100644 index 0000000000000..841c2e60d3d82 --- /dev/null +++ b/server/src/main/resources/META-INF/services/org.elasticsearch.common.xcontent.XContentBuilderExtension @@ -0,0 +1 @@ +org.elasticsearch.common.xcontent.XContentElasticsearchExtension diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeResponseTests.java new file mode 100644 index 0000000000000..f5e86fdcdfe9b --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeResponseTests.java @@ -0,0 +1,39 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.indices.forcemerge; + +import org.elasticsearch.action.support.DefaultShardOperationFailedException; +import org.elasticsearch.action.support.broadcast.AbstractBroadcastResponseTestCase; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.util.List; + +public class ForceMergeResponseTests extends AbstractBroadcastResponseTestCase { + @Override + protected ForceMergeResponse createTestInstance(int totalShards, int successfulShards, int failedShards, + List failures) { + return new ForceMergeResponse(totalShards, successfulShards, failedShards, failures); + } + + @Override + protected ForceMergeResponse doParseInstance(XContentParser parser) { + return ForceMergeResponse.fromXContent(parser); + } +} diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponseTests.java index 0f24a520b84b7..a7e3ee57a08c3 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponseTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.admin.indices.stats; import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.test.ESTestCase; import java.util.Collections; @@ -34,7 +35,8 @@ public void testInvalidLevel() { final IndicesStatsResponse response = new IndicesStatsResponse(); final String level = randomAlphaOfLength(16); final ToXContent.Params params = new ToXContent.MapParams(Collections.singletonMap("level", level)); - final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> response.toXContent(null, params)); + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> response.toXContent(JsonXContent.contentBuilder(), params)); assertThat( e, hasToString(containsString("level parameter must be one of [cluster] or [indices] or [shards] but was [" + level + "]"))); diff --git a/server/src/test/java/org/elasticsearch/cluster/service/ClusterServiceIT.java b/server/src/test/java/org/elasticsearch/cluster/service/ClusterServiceIT.java index 8514cb4ac2e1b..6b33b7eb3e2a8 100644 --- a/server/src/test/java/org/elasticsearch/cluster/service/ClusterServiceIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/service/ClusterServiceIT.java @@ -19,7 +19,6 @@ package org.elasticsearch.cluster.service; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse; import org.elasticsearch.cluster.AckedClusterStateUpdateTask; import org.elasticsearch.cluster.ClusterState; @@ -104,7 +103,7 @@ public ClusterState execute(ClusterState currentState) throws Exception { @Override public void onFailure(String source, Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("failed to execute callback in test {}", source), e); + logger.error(() -> new ParameterizedMessage("failed to execute callback in test {}", source), e); onFailure.set(true); latch.countDown(); } @@ -172,7 +171,7 @@ public ClusterState execute(ClusterState currentState) throws Exception { @Override public void onFailure(String source, Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("failed to execute callback 
in test {}", source), e); + logger.error(() -> new ParameterizedMessage("failed to execute callback in test {}", source), e); onFailure.set(true); latch.countDown(); } @@ -243,7 +242,7 @@ public ClusterState execute(ClusterState currentState) throws Exception { @Override public void onFailure(String source, Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("failed to execute callback in test {}", source), e); + logger.error(() -> new ParameterizedMessage("failed to execute callback in test {}", source), e); onFailure.set(true); latch.countDown(); } @@ -314,7 +313,7 @@ public ClusterState execute(ClusterState currentState) throws Exception { @Override public void onFailure(String source, Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("failed to execute callback in test {}", source), e); + logger.error(() -> new ParameterizedMessage("failed to execute callback in test {}", source), e); onFailure.set(true); latch.countDown(); } diff --git a/server/src/test/java/org/elasticsearch/cluster/service/TaskBatcherTests.java b/server/src/test/java/org/elasticsearch/cluster/service/TaskBatcherTests.java index d5af9dd558155..ebb15b42b7a3a 100644 --- a/server/src/test/java/org/elasticsearch/cluster/service/TaskBatcherTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/service/TaskBatcherTests.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.cluster.ClusterStateTaskConfig; import org.elasticsearch.cluster.metadata.ProcessClusterEventTimeoutException; @@ -209,7 +208,7 @@ public void testTasksAreExecutedInOrder() throws BrokenBarrierException, Interru final TestListener listener = new TestListener() { @Override public void onFailure(String source, Exception e) { - logger.error((Supplier) () -> new ParameterizedMessage("unexpected failure: [{}]", source), e); + logger.error(() -> new ParameterizedMessage("unexpected failure: [{}]", source), e); failures.add(new Tuple<>(source, e)); updateLatch.countDown(); } diff --git a/server/src/test/java/org/elasticsearch/common/geo/GeoJsonShapeParserTests.java b/server/src/test/java/org/elasticsearch/common/geo/GeoJsonShapeParserTests.java index 98a7fe514543f..0a0b9d6583bbb 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/GeoJsonShapeParserTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/GeoJsonShapeParserTests.java @@ -28,11 +28,18 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.Strings; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.geo.parsers.ShapeParser; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.index.mapper.ContentPath; +import org.elasticsearch.index.mapper.GeoShapeFieldMapper; +import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.test.hamcrest.ElasticsearchGeoAssertions; import org.locationtech.spatial4j.exception.InvalidShapeException; import org.locationtech.spatial4j.shape.Circle; @@ -135,8 +142,9 @@ public void testParseMultiDimensionShapes() 
throws IOException { .startArray("coordinates").value(100.0).value(0.0).value(15.0).value(18.0).endArray() .endObject(); - Point expectedPt = GEOMETRY_FACTORY.createPoint(new Coordinate(100.0, 0.0)); - assertGeometryEquals(new JtsPoint(expectedPt, SPATIAL_CONTEXT), pointGeoJson); + XContentParser parser = createParser(pointGeoJson); + parser.nextToken(); + ElasticsearchGeoAssertions.assertValidException(parser, ElasticsearchParseException.class); // multi dimension linestring XContentBuilder lineGeoJson = XContentFactory.jsonBuilder() @@ -148,13 +156,9 @@ public void testParseMultiDimensionShapes() throws IOException { .endArray() .endObject(); - List lineCoordinates = new ArrayList<>(); - lineCoordinates.add(new Coordinate(100, 0)); - lineCoordinates.add(new Coordinate(101, 1)); - - LineString expectedLS = GEOMETRY_FACTORY.createLineString( - lineCoordinates.toArray(new Coordinate[lineCoordinates.size()])); - assertGeometryEquals(jtsGeom(expectedLS), lineGeoJson); + parser = createParser(lineGeoJson); + parser.nextToken(); + ElasticsearchGeoAssertions.assertValidException(parser, ElasticsearchParseException.class); } @Override @@ -231,6 +235,61 @@ public void testParsePolygon() throws IOException { assertGeometryEquals(jtsGeom(expected), polygonGeoJson); } + public void testParse3DPolygon() throws IOException { + XContentBuilder polygonGeoJson = XContentFactory.jsonBuilder() + .startObject() + .field("type", "Polygon") + .startArray("coordinates") + .startArray() + .startArray().value(100.0).value(1.0).value(10.0).endArray() + .startArray().value(101.0).value(1.0).value(10.0).endArray() + .startArray().value(101.0).value(0.0).value(10.0).endArray() + .startArray().value(100.0).value(0.0).value(10.0).endArray() + .startArray().value(100.0).value(1.0).value(10.0).endArray() + .endArray() + .endArray() + .endObject(); + + List shellCoordinates = new ArrayList<>(); + shellCoordinates.add(new Coordinate(100, 0, 10)); + shellCoordinates.add(new Coordinate(101, 0, 10)); + shellCoordinates.add(new Coordinate(101, 1, 10)); + shellCoordinates.add(new Coordinate(100, 1, 10)); + shellCoordinates.add(new Coordinate(100, 0, 10)); + + Settings indexSettings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_3_0) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()).build(); + LinearRing shell = GEOMETRY_FACTORY.createLinearRing(shellCoordinates.toArray(new Coordinate[shellCoordinates.size()])); + Polygon expected = GEOMETRY_FACTORY.createPolygon(shell, null); + Mapper.BuilderContext mockBuilderContext = new Mapper.BuilderContext(indexSettings, new ContentPath()); + final GeoShapeFieldMapper mapperBuilder = new GeoShapeFieldMapper.Builder("test").ignoreZValue(true).build(mockBuilderContext); + XContentParser parser = createParser(polygonGeoJson); + parser.nextToken(); + ElasticsearchGeoAssertions.assertEquals(jtsGeom(expected), ShapeParser.parse(parser, mapperBuilder).build()); + } + + public void testInvalidDimensionalPolygon() throws IOException { + XContentBuilder polygonGeoJson = XContentFactory.jsonBuilder() + .startObject() + .field("type", "Polygon") + .startArray("coordinates") + .startArray() + .startArray().value(100.0).value(1.0).value(10.0).endArray() + .startArray().value(101.0).value(1.0).endArray() + .startArray().value(101.0).value(0.0).value(10.0).endArray() + .startArray().value(100.0).value(0.0).value(10.0).endArray() + 
.startArray().value(100.0).value(1.0).value(10.0).endArray() + .endArray() + .endArray() + .endObject(); + XContentParser parser = createParser(polygonGeoJson); + parser.nextToken(); + ElasticsearchGeoAssertions.assertValidException(parser, ElasticsearchParseException.class); + } + public void testParseInvalidPoint() throws IOException { // test case 1: create an invalid point object with multipoint data format XContentBuilder invalidPoint1 = XContentFactory.jsonBuilder() @@ -326,6 +385,46 @@ public void testParseInvalidMultiPolygon() throws IOException { ElasticsearchGeoAssertions.assertValidException(parser, InvalidShapeException.class); } + public void testParseInvalidDimensionalMultiPolygon() throws IOException { + // test invalid multipolygon (an "accidental" polygon with inner rings outside outer ring) + String multiPolygonGeoJson = Strings.toString(XContentFactory.jsonBuilder() + .startObject() + .field("type", "MultiPolygon") + .startArray("coordinates") + .startArray()//first poly (without holes) + .startArray() + .startArray().value(102.0).value(2.0).endArray() + .startArray().value(103.0).value(2.0).endArray() + .startArray().value(103.0).value(3.0).endArray() + .startArray().value(102.0).value(3.0).endArray() + .startArray().value(102.0).value(2.0).endArray() + .endArray() + .endArray() + .startArray()//second poly (with hole) + .startArray() + .startArray().value(100.0).value(0.0).endArray() + .startArray().value(101.0).value(0.0).endArray() + .startArray().value(101.0).value(1.0).endArray() + .startArray().value(100.0).value(1.0).endArray() + .startArray().value(100.0).value(0.0).endArray() + .endArray() + .startArray()//hole + .startArray().value(100.2).value(0.8).endArray() + .startArray().value(100.2).value(0.2).value(10.0).endArray() + .startArray().value(100.8).value(0.2).endArray() + .startArray().value(100.8).value(0.8).endArray() + .startArray().value(100.2).value(0.8).endArray() + .endArray() + .endArray() + .endArray() + .endObject()); + + XContentParser parser = createParser(JsonXContent.jsonXContent, multiPolygonGeoJson); + parser.nextToken(); + ElasticsearchGeoAssertions.assertValidException(parser, ElasticsearchParseException.class); + } + + public void testParseOGCPolygonWithoutHoles() throws IOException { // test 1: ccw poly not crossing dateline String polygonGeoJson = Strings.toString(XContentFactory.jsonBuilder().startObject().field("type", "Polygon") diff --git a/server/src/test/java/org/elasticsearch/common/geo/GeoWKTShapeParserTests.java b/server/src/test/java/org/elasticsearch/common/geo/GeoWKTShapeParserTests.java index 7249277338322..0a113549d1664 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/GeoWKTShapeParserTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/GeoWKTShapeParserTests.java @@ -25,7 +25,11 @@ import com.vividsolutions.jts.geom.Point; import com.vividsolutions.jts.geom.Polygon; import org.apache.lucene.geo.GeoTestUtil; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.geo.builders.CoordinatesBuilder; import org.elasticsearch.common.geo.builders.EnvelopeBuilder; import org.elasticsearch.common.geo.builders.GeometryCollectionBuilder; @@ -37,9 +41,14 @@ import org.elasticsearch.common.geo.builders.PolygonBuilder; import org.elasticsearch.common.geo.builders.ShapeBuilder; import 
org.elasticsearch.common.geo.parsers.GeoWKTParser; +import org.elasticsearch.common.geo.parsers.ShapeParser; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.mapper.ContentPath; +import org.elasticsearch.index.mapper.GeoShapeFieldMapper; +import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.test.geo.RandomShapeGenerator; import org.locationtech.spatial4j.exception.InvalidShapeException; import org.locationtech.spatial4j.shape.Rectangle; @@ -80,7 +89,7 @@ private void assertExpected(Shape expected, ShapeBuilder builder) throws IOExcep assertGeometryEquals(expected, xContentBuilder); } - private void assertMalformed(Shape expected, ShapeBuilder builder) throws IOException { + private void assertMalformed(ShapeBuilder builder) throws IOException { XContentBuilder xContentBuilder = toWKTContent(builder, true); assertValidException(xContentBuilder, ElasticsearchParseException.class); } @@ -91,7 +100,7 @@ public void testParsePoint() throws IOException { Coordinate c = new Coordinate(p.lon(), p.lat()); Point expected = GEOMETRY_FACTORY.createPoint(c); assertExpected(new JtsPoint(expected, SPATIAL_CONTEXT), new PointBuilder().coordinate(c)); - assertMalformed(new JtsPoint(expected, SPATIAL_CONTEXT), new PointBuilder().coordinate(c)); + assertMalformed(new PointBuilder().coordinate(c)); } @Override @@ -107,7 +116,7 @@ public void testParseMultiPoint() throws IOException { } ShapeCollection expected = shapeCollection(shapes); assertExpected(expected, new MultiPointBuilder(coordinates)); - assertMalformed(expected, new MultiPointBuilder(coordinates)); + assertMalformed(new MultiPointBuilder(coordinates)); } private List randomLineStringCoords() { @@ -142,7 +151,7 @@ public void testParseMultiLineString() throws IOException { MultiLineString expected = GEOMETRY_FACTORY.createMultiLineString( lineStrings.toArray(new LineString[lineStrings.size()])); assertExpected(jtsGeom(expected), builder); - assertMalformed(jtsGeom(expected), builder); + assertMalformed(builder); } @Override @@ -153,7 +162,7 @@ public void testParsePolygon() throws IOException { LinearRing shell = GEOMETRY_FACTORY.createLinearRing(coords); Polygon expected = GEOMETRY_FACTORY.createPolygon(shell, null); assertExpected(jtsGeom(expected), builder); - assertMalformed(jtsGeom(expected), builder); + assertMalformed(builder); } @Override @@ -173,16 +182,16 @@ public void testParseMultiPolygon() throws IOException { } Shape expected = shapeCollection(shapes); assertExpected(expected, builder); - assertMalformed(expected, builder); + assertMalformed(builder); } public void testParsePolygonWithHole() throws IOException { // add 3d point to test ISSUE #10501 List shellCoordinates = new ArrayList<>(); - shellCoordinates.add(new Coordinate(100, 0, 15.0)); + shellCoordinates.add(new Coordinate(100, 0)); shellCoordinates.add(new Coordinate(101, 0)); shellCoordinates.add(new Coordinate(101, 1)); - shellCoordinates.add(new Coordinate(100, 1, 10.0)); + shellCoordinates.add(new Coordinate(100, 1)); shellCoordinates.add(new Coordinate(100, 0)); List holeCoordinates = new ArrayList<>(); @@ -203,7 +212,110 @@ public void testParsePolygonWithHole() throws IOException { Polygon expected = GEOMETRY_FACTORY.createPolygon(shell, holes); assertExpected(jtsGeom(expected), polygonWithHole); - assertMalformed(jtsGeom(expected), 
polygonWithHole); + assertMalformed(polygonWithHole); + } + + public void testParseMixedDimensionPolyWithHole() throws IOException { + List shellCoordinates = new ArrayList<>(); + shellCoordinates.add(new Coordinate(100, 0)); + shellCoordinates.add(new Coordinate(101, 0)); + shellCoordinates.add(new Coordinate(101, 1)); + shellCoordinates.add(new Coordinate(100, 1)); + shellCoordinates.add(new Coordinate(100, 0)); + + // add 3d point to test ISSUE #10501 + List holeCoordinates = new ArrayList<>(); + holeCoordinates.add(new Coordinate(100.2, 0.2, 15.0)); + holeCoordinates.add(new Coordinate(100.8, 0.2)); + holeCoordinates.add(new Coordinate(100.8, 0.8)); + holeCoordinates.add(new Coordinate(100.2, 0.8, 10.0)); + holeCoordinates.add(new Coordinate(100.2, 0.2)); + + PolygonBuilder builder = new PolygonBuilder(new CoordinatesBuilder().coordinates(shellCoordinates)); + builder.hole(new LineStringBuilder(holeCoordinates)); + + XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().value(builder.toWKT()); + XContentParser parser = createParser(xContentBuilder); + parser.nextToken(); + + Settings indexSettings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_3_0) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()).build(); + + Mapper.BuilderContext mockBuilderContext = new Mapper.BuilderContext(indexSettings, new ContentPath()); + final GeoShapeFieldMapper mapperBuilder = new GeoShapeFieldMapper.Builder("test").ignoreZValue(false).build(mockBuilderContext); + + // test store z disabled + ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, + () -> ShapeParser.parse(parser, mapperBuilder)); + assertThat(e, hasToString(containsString("but [ignore_z_value] parameter is [false]"))); + } + + public void testParseMixedDimensionPolyWithHoleStoredZ() throws IOException { + List shellCoordinates = new ArrayList<>(); + shellCoordinates.add(new Coordinate(100, 0)); + shellCoordinates.add(new Coordinate(101, 0)); + shellCoordinates.add(new Coordinate(101, 1)); + shellCoordinates.add(new Coordinate(100, 1)); + shellCoordinates.add(new Coordinate(100, 0)); + + // add 3d point to test ISSUE #10501 + List holeCoordinates = new ArrayList<>(); + holeCoordinates.add(new Coordinate(100.2, 0.2, 15.0)); + holeCoordinates.add(new Coordinate(100.8, 0.2)); + holeCoordinates.add(new Coordinate(100.8, 0.8)); + holeCoordinates.add(new Coordinate(100.2, 0.8, 10.0)); + holeCoordinates.add(new Coordinate(100.2, 0.2)); + + PolygonBuilder builder = new PolygonBuilder(new CoordinatesBuilder().coordinates(shellCoordinates)); + builder.hole(new LineStringBuilder(holeCoordinates)); + + XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().value(builder.toWKT()); + XContentParser parser = createParser(xContentBuilder); + parser.nextToken(); + + Settings indexSettings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_3_0) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()).build(); + + Mapper.BuilderContext mockBuilderContext = new Mapper.BuilderContext(indexSettings, new ContentPath()); + final GeoShapeFieldMapper mapperBuilder = new GeoShapeFieldMapper.Builder("test").ignoreZValue(true).build(mockBuilderContext); + + // test store z disabled + ElasticsearchException e = 
expectThrows(ElasticsearchException.class, + () -> ShapeParser.parse(parser, mapperBuilder)); + assertThat(e, hasToString(containsString("unable to add coordinate to CoordinateBuilder: coordinate dimensions do not match"))); + } + + public void testParsePolyWithStoredZ() throws IOException { + List shellCoordinates = new ArrayList<>(); + shellCoordinates.add(new Coordinate(100, 0, 0)); + shellCoordinates.add(new Coordinate(101, 0, 0)); + shellCoordinates.add(new Coordinate(101, 1, 0)); + shellCoordinates.add(new Coordinate(100, 1, 5)); + shellCoordinates.add(new Coordinate(100, 0, 5)); + + PolygonBuilder builder = new PolygonBuilder(new CoordinatesBuilder().coordinates(shellCoordinates)); + + XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().value(builder.toWKT()); + XContentParser parser = createParser(xContentBuilder); + parser.nextToken(); + + Settings indexSettings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_3_0) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()).build(); + + Mapper.BuilderContext mockBuilderContext = new Mapper.BuilderContext(indexSettings, new ContentPath()); + final GeoShapeFieldMapper mapperBuilder = new GeoShapeFieldMapper.Builder("test").ignoreZValue(true).build(mockBuilderContext); + + ShapeBuilder shapeBuilder = ShapeParser.parse(parser, mapperBuilder); + assertEquals(shapeBuilder.numDimensions(), 3); } public void testParseSelfCrossingPolygon() throws IOException { @@ -235,7 +347,7 @@ public void testParseEnvelope() throws IOException { EnvelopeBuilder builder = new EnvelopeBuilder(new Coordinate(r.minLon, r.maxLat), new Coordinate(r.maxLon, r.minLat)); Rectangle expected = SPATIAL_CONTEXT.makeRectangle(r.minLon, r.maxLon, r.minLat, r.maxLat); assertExpected(expected, builder); - assertMalformed(expected, builder); + assertMalformed(builder); } public void testInvalidGeometryType() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java b/server/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java index d1f7d5601a6cc..22877b8ff3b3c 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java @@ -653,4 +653,49 @@ public void testInvalidShapeWithConsecutiveDuplicatePoints() { Exception e = expectThrows(InvalidShapeException.class, () -> builder.close().build()); assertThat(e.getMessage(), containsString("duplicate consecutive coordinates at: (")); } + + public void testPolygon3D() { + String expected = "{\n" + + " \"type\" : \"polygon\",\n" + + " \"orientation\" : \"right\",\n" + + " \"coordinates\" : [\n" + + " [\n" + + " [\n" + + " -45.0,\n" + + " 30.0,\n" + + " 100.0\n" + + " ],\n" + + " [\n" + + " 45.0,\n" + + " 30.0,\n" + + " 75.0\n" + + " ],\n" + + " [\n" + + " 45.0,\n" + + " -30.0,\n" + + " 77.0\n" + + " ],\n" + + " [\n" + + " -45.0,\n" + + " -30.0,\n" + + " 101.0\n" + + " ],\n" + + " [\n" + + " -45.0,\n" + + " 30.0,\n" + + " 110.0\n" + + " ]\n" + + " ]\n" + + " ]\n" + + "}"; + + PolygonBuilder pb = new PolygonBuilder(new CoordinatesBuilder() + .coordinate(new Coordinate(-45, 30, 100)) + .coordinate(new Coordinate(45, 30, 75)) + .coordinate(new Coordinate(45, -30, 77)) + .coordinate(new Coordinate(-45, -30, 101)) + .coordinate(new Coordinate(-45, 30, 110))); + + assertEquals(expected, pb.toString()); + } } diff --git 
a/server/src/test/java/org/elasticsearch/common/logging/LoggersTests.java b/server/src/test/java/org/elasticsearch/common/logging/LoggersTests.java new file mode 100644 index 0000000000000..6c18bd0afab1b --- /dev/null +++ b/server/src/test/java/org/elasticsearch/common/logging/LoggersTests.java @@ -0,0 +1,106 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.logging; + +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.core.LogEvent; +import org.apache.logging.log4j.core.appender.AbstractAppender; +import org.apache.logging.log4j.core.filter.RegexFilter; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.net.UnknownHostException; +import java.util.Arrays; + +import static org.hamcrest.Matchers.arrayContaining; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.nullValue; + +public class LoggersTests extends ESTestCase { + + static class MockAppender extends AbstractAppender { + private LogEvent lastEvent; + + MockAppender(final String name) throws IllegalAccessException { + super(name, RegexFilter.createFilter(".*(\n.*)*", new String[0], false, null, null), null); + } + + @Override + public void append(LogEvent event) { + lastEvent = event; + } + + ParameterizedMessage lastParameterizedMessage() { + return (ParameterizedMessage) lastEvent.getMessage(); + } + } + + public void testParameterizedMessageLambda() throws Exception { + final MockAppender appender = new MockAppender("trace_appender"); + appender.start(); + final Logger testLogger = Loggers.getLogger(LoggersTests.class); + Loggers.addAppender(testLogger, appender); + Loggers.setLevel(testLogger, Level.TRACE); + + Throwable ex = randomException(); + testLogger.error(() -> new ParameterizedMessage("an error message"), ex); + assertThat(appender.lastEvent.getLevel(), equalTo(Level.ERROR)); + assertThat(appender.lastEvent.getThrown(), equalTo(ex)); + assertThat(appender.lastParameterizedMessage().getFormattedMessage(), equalTo("an error message")); + + ex = randomException(); + testLogger.warn(() -> new ParameterizedMessage("a warn message: [{}]", "long gc"), ex); + assertThat(appender.lastEvent.getLevel(), equalTo(Level.WARN)); + assertThat(appender.lastEvent.getThrown(), equalTo(ex)); + assertThat(appender.lastParameterizedMessage().getFormattedMessage(), equalTo("a warn message: [long gc]")); + assertThat(appender.lastParameterizedMessage().getParameters(), arrayContaining("long gc")); + + testLogger.info(() -> new ParameterizedMessage("an info message a=[{}], b=[{}], c=[{}]", 1, 2, 3)); + assertThat(appender.lastEvent.getLevel(), equalTo(Level.INFO)); + 
assertThat(appender.lastEvent.getThrown(), nullValue()); + assertThat(appender.lastParameterizedMessage().getFormattedMessage(), equalTo("an info message a=[1], b=[2], c=[3]")); + assertThat(appender.lastParameterizedMessage().getParameters(), arrayContaining(1, 2, 3)); + + ex = randomException(); + testLogger.debug(() -> new ParameterizedMessage("a debug message options = {}", Arrays.asList("yes", "no")), ex); + assertThat(appender.lastEvent.getLevel(), equalTo(Level.DEBUG)); + assertThat(appender.lastEvent.getThrown(), equalTo(ex)); + assertThat(appender.lastParameterizedMessage().getFormattedMessage(), equalTo("a debug message options = [yes, no]")); + assertThat(appender.lastParameterizedMessage().getParameters(), arrayContaining(Arrays.asList("yes", "no"))); + + ex = randomException(); + testLogger.trace(() -> new ParameterizedMessage("a trace message; element = [{}]", new Object[]{null}), ex); + assertThat(appender.lastEvent.getLevel(), equalTo(Level.TRACE)); + assertThat(appender.lastEvent.getThrown(), equalTo(ex)); + assertThat(appender.lastParameterizedMessage().getFormattedMessage(), equalTo("a trace message; element = [null]")); + assertThat(appender.lastParameterizedMessage().getParameters(), arrayContaining(new Object[]{null})); + } + + private Throwable randomException(){ + return randomFrom( + new IOException("file not found"), + new UnknownHostException("unknown hostname"), + new OutOfMemoryError("out of space"), + new IllegalArgumentException("index must be between 10 and 100") + ); + } +} diff --git a/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java b/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java index e74d3b7acea97..8f7a177fae720 100644 --- a/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java +++ b/server/src/test/java/org/elasticsearch/common/xcontent/BaseXContentTestCase.java @@ -326,14 +326,14 @@ public void testBinaryValueWithOffsetLength() throws Exception { } public void testBinaryUTF8() throws Exception { - assertResult("{'utf8':null}", () -> builder().startObject().utf8Field("utf8", null).endObject()); + assertResult("{'utf8':null}", () -> builder().startObject().nullField("utf8").endObject()); final BytesRef randomBytesRef = new BytesRef(randomBytes()); XContentBuilder builder = builder().startObject(); if (randomBoolean()) { - builder.utf8Field("utf8", randomBytesRef); + builder.utf8Field("utf8", randomBytesRef.bytes, randomBytesRef.offset, randomBytesRef.length); } else { - builder.field("utf8").utf8Value(randomBytesRef); + builder.field("utf8").utf8Value(randomBytesRef.bytes, randomBytesRef.offset, randomBytesRef.length); } builder.endObject(); diff --git a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java index 55f5b70e70299..2998ec8a6ba66 100644 --- a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java @@ -20,7 +20,6 @@ package org.elasticsearch.discovery; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.index.CorruptIndexException; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.DocWriteResponse; @@ -142,9 +141,7 @@ public void testAckedIndexing() throws Exception { } catch (ElasticsearchException e) { exceptedExceptions.add(e); final String 
docId = id; - logger.trace( - (Supplier<?>) - () -> new ParameterizedMessage("[{}] failed id [{}] through node [{}]", name, docId, node), e); + logger.trace(() -> new ParameterizedMessage("[{}] failed id [{}] through node [{}]", name, docId, node), e); } finally { countDownLatchRef.get().countDown(); logger.trace("[{}] decreased counter : {}", name, countDownLatchRef.get().getCount()); @@ -152,9 +149,7 @@ public void testAckedIndexing() throws Exception { } catch (InterruptedException e) { // fine - semaphore interrupt } catch (AssertionError | Exception e) { - logger.info( - (Supplier<?>) () -> new ParameterizedMessage("unexpected exception in background thread of [{}]", node), - e); + logger.info(() -> new ParameterizedMessage("unexpected exception in background thread of [{}]", node), e); } } }); diff --git a/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java index 4225b6802ce96..43e3b2ef01b67 100644 --- a/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java @@ -20,7 +20,6 @@ package org.elasticsearch.discovery; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -223,7 +222,7 @@ public ClusterState execute(ClusterState currentState) throws Exception { @Override public void onFailure(String source, Exception e) { - logger.warn((Supplier<?>) () -> new ParameterizedMessage("failure [{}]", source), e); + logger.warn(() -> new ParameterizedMessage("failure [{}]", source), e); } }); diff --git a/server/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java b/server/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java index 0fdb732be9535..9e57382bb4bc8 100644 --- a/server/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.discovery.zen; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterName; @@ -835,7 +834,7 @@ public void onSuccess() { @Override public void onFailure(Exception e) { - logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected error for {}", future), e); + logger.error(() -> new ParameterizedMessage("unexpected error for {}", future), e); future.markAsFailed(e); } }); diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 61f64eda94a96..4aa4120151eb7 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -722,14 +722,13 @@ public void testTranslogRecoveryDoesNotReplayIntoTranslog() throws IOException { recoveringEngine = new InternalEngine(copy(initialEngine.config(), EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG)) { @Override public CommitId flush(boolean force, boolean waitIfOngoing) throws EngineException { - assertThat(getTranslog().uncommittedOperations(),
equalTo(docs)); + assertThat(getTranslog().stats().getUncommittedOperations(), equalTo(docs)); final CommitId commitId = super.flush(force, waitIfOngoing); flushed.set(true); return commitId; } }; - - assertThat(recoveringEngine.getTranslog().uncommittedOperations(), equalTo(docs)); + assertThat(recoveringEngine.getTranslog().stats().getUncommittedOperations(), equalTo(docs)); recoveringEngine.recoverFromTranslog(); assertTrue(flushed.get()); } finally { @@ -2747,7 +2746,7 @@ public void testCurrentTranslogIDisCommitted() throws IOException { assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); expectThrows(IllegalStateException.class, () -> engine.recoverFromTranslog()); assertEquals(2, engine.getTranslog().currentFileGeneration()); - assertEquals(0L, engine.getTranslog().uncommittedOperations()); + assertEquals(0L, engine.getTranslog().stats().getUncommittedOperations()); } } @@ -3703,7 +3702,7 @@ protected long doGenerateSeqNoForOperation(Operation operation) { System.nanoTime(), reason)); assertThat(noOpEngine.getLocalCheckpointTracker().getCheckpoint(), equalTo((long) (maxSeqNo + 1))); - assertThat(noOpEngine.getTranslog().uncommittedOperations(), equalTo(1 + gapsFilled)); + assertThat(noOpEngine.getTranslog().stats().getUncommittedOperations(), equalTo(1 + gapsFilled)); // skip to the op that we added to the translog Translog.Operation op; Translog.Operation last = null; @@ -3904,7 +3903,7 @@ public void testFillUpSequenceIdGapsOnRecovery() throws IOException { assertEquals(checkpointOnReplica, replicaEngine.getLocalCheckpointTracker().getCheckpoint()); recoveringEngine = new InternalEngine(copy( replicaEngine.config(), EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG, globalCheckpoint::get)); - assertEquals(numDocsOnReplica, recoveringEngine.getTranslog().uncommittedOperations()); + assertEquals(numDocsOnReplica, recoveringEngine.getTranslog().stats().getUncommittedOperations()); recoveringEngine.recoverFromTranslog(); assertEquals(maxSeqIDOnReplica, recoveringEngine.getLocalCheckpointTracker().getMaxSeqNo()); assertEquals(checkpointOnReplica, recoveringEngine.getLocalCheckpointTracker().getCheckpoint()); @@ -3939,7 +3938,7 @@ public void testFillUpSequenceIdGapsOnRecovery() throws IOException { recoveringEngine = new InternalEngine( copy(replicaEngine.config(), EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG, globalCheckpoint::get)); if (flushed) { - assertEquals(0, recoveringEngine.getTranslog().uncommittedOperations()); + assertThat(recoveringEngine.getTranslog().stats().getUncommittedOperations(), equalTo(0)); } recoveringEngine.recoverFromTranslog(); assertEquals(maxSeqIDOnReplica, recoveringEngine.getLocalCheckpointTracker().getMaxSeqNo()); @@ -4369,7 +4368,8 @@ public void testCleanupCommitsWhenReleaseSnapshot() throws Exception { public void testShouldPeriodicallyFlush() throws Exception { assertThat("Empty engine does not need flushing", engine.shouldPeriodicallyFlush(), equalTo(false)); // A new engine may have more than one empty translog file - the test should account for this extra.
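// Aside: every hunk in this file (and in IndexShardIT, TranslogTests and RecoveryTests
// further below) applies the same substitution - reads of the removed Translog accessors
// uncommittedOperations()/uncommittedSizeInBytes() now go through a TranslogStats
// snapshot. A minimal sketch of the new call pattern; the helper name
// uncommittedFigures is illustrative, not part of this change:
//
//     import org.elasticsearch.index.translog.Translog;
//     import org.elasticsearch.index.translog.TranslogStats;
//
//     static long[] uncommittedFigures(Translog translog) {
//         TranslogStats stats = translog.stats();    // one consistent stats snapshot
//         return new long[] {
//             stats.getUncommittedOperations(),      // was translog.uncommittedOperations()
//             stats.getUncommittedSizeInBytes()      // was translog.uncommittedSizeInBytes()
//         };
//     }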
- final long extraTranslogSizeInNewEngine = engine.getTranslog().uncommittedSizeInBytes() - Translog.DEFAULT_HEADER_SIZE_IN_BYTES; + final Translog translog = engine.getTranslog(); + final long extraTranslogSizeInNewEngine = engine.getTranslog().stats().getUncommittedSizeInBytes() - Translog.DEFAULT_HEADER_SIZE_IN_BYTES; int numDocs = between(10, 100); for (int id = 0; id < numDocs; id++) { final ParsedDocument doc = testParsedDocument(Integer.toString(id), null, testDocumentWithTextField(), SOURCE, null); @@ -4377,17 +4377,17 @@ public void testShouldPeriodicallyFlush() throws Exception { } assertThat("Not exceeded translog flush threshold yet", engine.shouldPeriodicallyFlush(), equalTo(false)); long flushThreshold = RandomNumbers.randomLongBetween(random(), 100, - engine.getTranslog().uncommittedSizeInBytes() - extraTranslogSizeInNewEngine); + engine.getTranslog().stats().getUncommittedSizeInBytes()- extraTranslogSizeInNewEngine); final IndexSettings indexSettings = engine.config().getIndexSettings(); final IndexMetaData indexMetaData = IndexMetaData.builder(indexSettings.getIndexMetaData()) .settings(Settings.builder().put(indexSettings.getSettings()) .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), flushThreshold + "b")).build(); indexSettings.updateIndexMetaData(indexMetaData); engine.onSettingsChanged(); - assertThat(engine.getTranslog().uncommittedOperations(), equalTo(numDocs)); + assertThat(engine.getTranslog().stats().getUncommittedOperations(), equalTo(numDocs)); assertThat(engine.shouldPeriodicallyFlush(), equalTo(true)); engine.flush(); - assertThat(engine.getTranslog().uncommittedOperations(), equalTo(0)); + assertThat(engine.getTranslog().stats().getUncommittedOperations(), equalTo(0)); // Stale operations skipped by Lucene but added to translog - still able to flush for (int id = 0; id < numDocs; id++) { final ParsedDocument doc = testParsedDocument(Integer.toString(id), null, testDocumentWithTextField(), SOURCE, null); @@ -4395,13 +4395,53 @@ public void testShouldPeriodicallyFlush() throws Exception { assertThat(result.isCreated(), equalTo(false)); } SegmentInfos lastCommitInfo = engine.getLastCommittedSegmentInfos(); - assertThat(engine.getTranslog().uncommittedOperations(), equalTo(numDocs)); + assertThat(engine.getTranslog().stats().getUncommittedOperations(), equalTo(numDocs)); assertThat(engine.shouldPeriodicallyFlush(), equalTo(true)); engine.flush(false, false); assertThat(engine.getLastCommittedSegmentInfos(), not(sameInstance(lastCommitInfo))); - assertThat(engine.getTranslog().uncommittedOperations(), equalTo(0)); + assertThat(engine.getTranslog().stats().getUncommittedOperations(), equalTo(0)); + // If the new index commit still points to the same translog generation as the current index commit, + // we should not enable the periodically flush condition; otherwise we can get into an infinite loop of flushes. 
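// The guard described in the two comment lines above can be paraphrased as follows
// (a hedged sketch with simplified names, not the verbatim InternalEngine code;
// generationOfNewCommit/generationOfCurrentCommit stand in for the engine's actual
// bookkeeping and are assumptions for illustration):
//
//     boolean shouldPeriodicallyFlush(Translog translog, long flushThresholdBytes) {
//         if (translog.stats().getUncommittedSizeInBytes() < flushThresholdBytes) {
//             return false;                  // not enough uncommitted data yet
//         }
//         // Flush only when the new commit would reference a newer translog
//         // generation than the current commit; otherwise the uncommitted size
//         // cannot shrink and the condition would re-trigger after every flush.
//         return generationOfNewCommit(translog) > generationOfCurrentCommit();
//     }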
+ engine.getLocalCheckpointTracker().generateSeqNo(); // create a gap here + for (int id = 0; id < numDocs; id++) { + if (randomBoolean()) { + translog.rollGeneration(); + } + final ParsedDocument doc = testParsedDocument("new" + id, null, testDocumentWithTextField(), SOURCE, null); + engine.index(replicaIndexForDoc(doc, 2L, engine.getLocalCheckpointTracker().generateSeqNo(), false)); + if (engine.shouldPeriodicallyFlush()) { + engine.flush(); + assertThat(engine.getLastCommittedSegmentInfos(), not(sameInstance(lastCommitInfo))); + assertThat(engine.shouldPeriodicallyFlush(), equalTo(false)); + } + } } + public void testStressShouldPeriodicallyFlush() throws Exception { + final long flushThreshold = randomLongBetween(100, 5000); + final long generationThreshold = randomLongBetween(1000, 5000); + final IndexSettings indexSettings = engine.config().getIndexSettings(); + final IndexMetaData indexMetaData = IndexMetaData.builder(indexSettings.getIndexMetaData()) + .settings(Settings.builder().put(indexSettings.getSettings()) + .put(IndexSettings.INDEX_TRANSLOG_GENERATION_THRESHOLD_SIZE_SETTING.getKey(), generationThreshold + "b") + .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), flushThreshold + "b")).build(); + indexSettings.updateIndexMetaData(indexMetaData); + engine.onSettingsChanged(); + final int numOps = scaledRandomIntBetween(100, 10_000); + for (int i = 0; i < numOps; i++) { + final long localCheckPoint = engine.getLocalCheckpointTracker().getCheckpoint(); + final long seqno = randomLongBetween(Math.max(0, localCheckPoint), localCheckPoint + 5); + final ParsedDocument doc = testParsedDocument(Long.toString(seqno), null, testDocumentWithTextField(), SOURCE, null); + engine.index(replicaIndexForDoc(doc, 1L, seqno, false)); + if (rarely() && engine.getTranslog().shouldRollGeneration()) { + engine.rollTranslogGeneration(); + } + if (rarely() || engine.shouldPeriodicallyFlush()) { + engine.flush(); + assertThat(engine.shouldPeriodicallyFlush(), equalTo(false)); + } + } + } public void testStressUpdateSameDocWhileGettingIt() throws IOException, InterruptedException { final int iters = randomIntBetween(1, 15); diff --git a/server/src/test/java/org/elasticsearch/index/fielddata/BinaryDVFieldDataTests.java b/server/src/test/java/org/elasticsearch/index/fielddata/BinaryDVFieldDataTests.java index 3d811832d2951..7f407dd1c01d1 100644 --- a/server/src/test/java/org/elasticsearch/index/fielddata/BinaryDVFieldDataTests.java +++ b/server/src/test/java/org/elasticsearch/index/fielddata/BinaryDVFieldDataTests.java @@ -68,7 +68,7 @@ public void testDocValue() throws Exception { writer.addDocument(d.rootDoc()); BytesRef bytes1 = randomBytes(); - doc = XContentFactory.jsonBuilder().startObject().field("field", bytes1).endObject(); + doc = XContentFactory.jsonBuilder().startObject().field("field", bytes1.bytes, bytes1.offset, bytes1.length).endObject(); d = mapper.parse(SourceToParse.source("test", "test", "2", BytesReference.bytes(doc), XContentType.JSON)); writer.addDocument(d.rootDoc()); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java index 40fc0e81a920c..03cc183b906d3 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java @@ -34,14 +34,17 @@ import org.elasticsearch.test.geo.RandomGeoGenerator; import 
org.hamcrest.CoreMatchers; +import java.io.IOException; import java.util.Collection; import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.elasticsearch.common.geo.GeoHashUtils.stringEncode; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.index.mapper.GeoPointFieldMapper.Names.IGNORE_Z_VALUE; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.notNullValue; public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { @@ -121,6 +124,43 @@ public void testLatLonInOneValue() throws Exception { assertThat(doc.rootDoc().getField("point"), notNullValue()); } + public void testLatLonStringWithZValue() throws Exception { + XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("point").field("type", "geo_point") + .field(IGNORE_Z_VALUE.getPreferredName(), true); + String mapping = Strings.toString(xContentBuilder.endObject().endObject().endObject().endObject()); + DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", + new CompressedXContent(mapping)); + + ParsedDocument doc = defaultMapper.parse(SourceToParse.source("test", "type", "1", BytesReference + .bytes(XContentFactory.jsonBuilder() + .startObject() + .field("point", "1.2,1.3,10.0") + .endObject()), + XContentType.JSON)); + + assertThat(doc.rootDoc().getField("point"), notNullValue()); + } + + public void testLatLonStringWithZValueException() throws Exception { + XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("point").field("type", "geo_point") + .field(IGNORE_Z_VALUE.getPreferredName(), false); + String mapping = Strings.toString(xContentBuilder.endObject().endObject().endObject().endObject()); + DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse("type", + new CompressedXContent(mapping)); + + SourceToParse source = SourceToParse.source("test", "type", "1", BytesReference + .bytes(XContentFactory.jsonBuilder() + .startObject() + .field("point", "1.2,1.3,10.0") + .endObject()), + XContentType.JSON); + + Exception e = expectThrows(MapperParsingException.class, () -> defaultMapper.parse(source)); + assertThat(e.getCause().getMessage(), containsString("but [ignore_z_value] parameter is [false]")); + } + public void testLatLonInOneValueStored() throws Exception { XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("point").field("type", "geo_point"); @@ -230,6 +270,41 @@ public void testLonLatArrayArrayStored() throws Exception { assertThat(doc.rootDoc().getFields("point").length, CoreMatchers.equalTo(4)); } + /** + * Test that accept_z_value parameter correctly parses + */ + public void testIgnoreZValue() throws IOException { + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type1") + .startObject("properties").startObject("location") + .field("type", "geo_point") + .field(IGNORE_Z_VALUE.getPreferredName(), "true") + .endObject().endObject() + .endObject().endObject()); + + DocumentMapper defaultMapper = 
createIndex("test").mapperService().documentMapperParser() + .parse("type1", new CompressedXContent(mapping)); + FieldMapper fieldMapper = defaultMapper.mappers().getMapper("location"); + assertThat(fieldMapper, instanceOf(GeoPointFieldMapper.class)); + + boolean ignoreZValue = ((GeoPointFieldMapper)fieldMapper).ignoreZValue().value(); + assertThat(ignoreZValue, equalTo(true)); + + // explicit false accept_z_value test + mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type1") + .startObject("properties").startObject("location") + .field("type", "geo_point") + .field(IGNORE_Z_VALUE.getPreferredName(), "false") + .endObject().endObject() + .endObject().endObject()); + + defaultMapper = createIndex("test2").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping)); + fieldMapper = defaultMapper.mappers().getMapper("location"); + assertThat(fieldMapper, instanceOf(GeoPointFieldMapper.class)); + + ignoreZValue = ((GeoPointFieldMapper)fieldMapper).ignoreZValue().value(); + assertThat(ignoreZValue, equalTo(false)); + } + public void testMultiField() throws Exception { int numDocs = randomIntBetween(10, 100); String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("pin").startObject("properties").startObject("location") diff --git a/server/src/test/java/org/elasticsearch/index/mapper/GeoShapeFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/GeoShapeFieldMapperTests.java index 352ef56b2315f..865d218670832 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/GeoShapeFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/GeoShapeFieldMapperTests.java @@ -35,6 +35,7 @@ import java.io.IOException; import java.util.Collection; +import static org.elasticsearch.index.mapper.GeoPointFieldMapper.Names.IGNORE_Z_VALUE; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -138,6 +139,42 @@ public void testCoerceParsing() throws IOException { assertThat(coerce, equalTo(false)); } + + /** + * Test that accept_z_value parameter correctly parses + */ + public void testIgnoreZValue() throws IOException { + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type1") + .startObject("properties").startObject("location") + .field("type", "geo_shape") + .field(IGNORE_Z_VALUE.getPreferredName(), "true") + .endObject().endObject() + .endObject().endObject()); + + DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser() + .parse("type1", new CompressedXContent(mapping)); + FieldMapper fieldMapper = defaultMapper.mappers().getMapper("location"); + assertThat(fieldMapper, instanceOf(GeoShapeFieldMapper.class)); + + boolean ignoreZValue = ((GeoShapeFieldMapper)fieldMapper).ignoreZValue().value(); + assertThat(ignoreZValue, equalTo(true)); + + // explicit false accept_z_value test + mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type1") + .startObject("properties").startObject("location") + .field("type", "geo_shape") + .field(IGNORE_Z_VALUE.getPreferredName(), "false") + .endObject().endObject() + .endObject().endObject()); + + defaultMapper = createIndex("test2").mapperService().documentMapperParser().parse("type1", new CompressedXContent(mapping)); + fieldMapper = defaultMapper.mappers().getMapper("location"); + assertThat(fieldMapper, 
instanceOf(GeoShapeFieldMapper.class)); + + ignoreZValue = ((GeoShapeFieldMapper)fieldMapper).ignoreZValue().value(); + assertThat(ignoreZValue, equalTo(false)); + } + /** * Test that ignore_malformed parameter correctly parses */ diff --git a/server/src/test/java/org/elasticsearch/index/search/geo/GeoUtilsTests.java b/server/src/test/java/org/elasticsearch/index/search/geo/GeoUtilsTests.java index 204b71e82a192..4ddb80c4b0633 100644 --- a/server/src/test/java/org/elasticsearch/index/search/geo/GeoUtilsTests.java +++ b/server/src/test/java/org/elasticsearch/index/search/geo/GeoUtilsTests.java @@ -410,6 +410,19 @@ public void testParseGeoPoint() throws IOException { } } + public void testParseGeoPointStringZValueError() throws IOException { + double lat = randomDouble() * 180 - 90 + randomIntBetween(-1000, 1000) * 180; + double lon = randomDouble() * 360 - 180 + randomIntBetween(-1000, 1000) * 360; + double alt = randomDouble() * 1000; + XContentBuilder json = jsonBuilder().startObject().field("foo", lat + "," + lon + "," + alt).endObject(); + XContentParser parser = createParser(json); + while (parser.currentToken() != Token.VALUE_STRING) { + parser.nextToken(); + } + Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser, new GeoPoint(), false)); + assertThat(e.getMessage(), containsString("but [ignore_z_value] parameter is [false]")); + } + public void testParseGeoPointGeohash() throws IOException { for (int i = 0; i < 100; i++) { int geoHashLength = randomIntBetween(1, GeoHashUtils.PRECISION); @@ -509,7 +522,21 @@ public void testParseGeoPointArrayTooManyValues() throws IOException { parser.nextToken(); } Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); - assertThat(e.getMessage(), is("only two values allowed")); + assertThat(e.getMessage(), is("Exception parsing coordinates: found Z value [0.0] but [ignore_z_value] parameter is [false]")); + } + + public void testParseGeoPointArray3D() throws IOException { + double lat = 90.0; + double lon = -180.0; + double elev = 0.0; + XContentBuilder json = jsonBuilder().startObject().startArray("foo").value(lon).value(lat).value(elev).endArray().endObject(); + XContentParser parser = createParser(json); + while (parser.currentToken() != Token.START_ARRAY) { + parser.nextToken(); + } + GeoPoint point = GeoUtils.parseGeoPoint(parser, new GeoPoint(), true); + assertThat(point.lat(), equalTo(lat)); + assertThat(point.lon(), equalTo(lon)); } public void testParseGeoPointArrayWrongType() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java index 622a9b1acc363..2eccc0d45bbf4 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java @@ -359,29 +359,29 @@ public void testMaybeFlush() throws Exception { IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, update -> {}); assertTrue(shard.shouldPeriodicallyFlush()); final Translog translog = shard.getEngine().getTranslog(); - assertEquals(2, translog.uncommittedOperations()); + assertEquals(2, translog.stats().getUncommittedOperations()); client().prepareIndex("test", "test", "2").setSource("{}", XContentType.JSON) .setRefreshPolicy(randomBoolean() ? 
IMMEDIATE : NONE).get(); assertBusy(() -> { // this is async assertFalse(shard.shouldPeriodicallyFlush()); }); - assertEquals(0, translog.uncommittedOperations()); + assertEquals(0, translog.stats().getUncommittedOperations()); translog.sync(); - long size = Math.max(translog.uncommittedSizeInBytes(), Translog.DEFAULT_HEADER_SIZE_IN_BYTES + 1); - logger.info("--> current translog size: [{}] num_ops [{}] generation [{}]", translog.uncommittedSizeInBytes(), - translog.uncommittedOperations(), translog.getGeneration()); + long size = Math.max(translog.stats().getUncommittedSizeInBytes(), Translog.DEFAULT_HEADER_SIZE_IN_BYTES + 1); + logger.info("--> current translog size: [{}] num_ops [{}] generation [{}]", + translog.stats().getUncommittedSizeInBytes(), translog.stats().getUncommittedOperations(), translog.getGeneration()); client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put( IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(size, ByteSizeUnit.BYTES)) .build()).get(); client().prepareDelete("test", "test", "2").get(); - logger.info("--> translog size after delete: [{}] num_ops [{}] generation [{}]", translog.uncommittedSizeInBytes(), - translog.uncommittedOperations(), translog.getGeneration()); + logger.info("--> translog size after delete: [{}] num_ops [{}] generation [{}]", + translog.stats().getUncommittedSizeInBytes(), translog.stats().getUncommittedOperations(), translog.getGeneration()); assertBusy(() -> { // this is async - logger.info("--> translog size on iter : [{}] num_ops [{}] generation [{}]", translog.uncommittedSizeInBytes(), - translog.uncommittedOperations(), translog.getGeneration()); + logger.info("--> translog size on iter : [{}] num_ops [{}] generation [{}]", + translog.stats().getUncommittedSizeInBytes(), translog.stats().getUncommittedOperations(), translog.getGeneration()); assertFalse(shard.shouldPeriodicallyFlush()); }); - assertEquals(0, translog.uncommittedOperations()); + assertEquals(0, translog.stats().getUncommittedOperations()); } public void testMaybeRollTranslogGeneration() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index d5f97abaccabd..02f8107c545b0 100644 --- a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -21,7 +21,6 @@ import com.carrotsearch.randomizedtesting.generators.RandomPicks; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.document.Field; import org.apache.lucene.document.NumericDocValuesField; @@ -503,10 +502,10 @@ public void testUncommittedOperations() throws Exception { translog.rollGeneration(); operationsInLastGen = 0; } - assertThat(translog.uncommittedOperations(), equalTo(uncommittedOps)); + assertThat(translog.stats().getUncommittedOperations(), equalTo(uncommittedOps)); if (frequently()) { markCurrentGenAsCommitted(translog); - assertThat(translog.uncommittedOperations(), equalTo(operationsInLastGen)); + assertThat(translog.stats().getUncommittedOperations(), equalTo(operationsInLastGen)); uncommittedOps = operationsInLastGen; } } @@ -922,7 +921,7 @@ public void doRun() throws BrokenBarrierException, InterruptedException, IOExcep @Override public void onFailure(Exception e) { - 
logger.error((Supplier<?>) () -> new ParameterizedMessage("--> writer [{}] had an error", threadName), e); + logger.error(() -> new ParameterizedMessage("--> writer [{}] had an error", threadName), e); errors.add(e); } }, threadName); @@ -937,7 +936,7 @@ public void onFailure(Exception e) { @Override public void onFailure(Exception e) { - logger.error((Supplier<?>) () -> new ParameterizedMessage("--> reader [{}] had an error", threadId), e); + logger.error(() -> new ParameterizedMessage("--> reader [{}] had an error", threadId), e); errors.add(e); try { closeRetentionLock(); @@ -2518,7 +2517,7 @@ public void testRollGeneration() throws Exception { long minGenForRecovery = randomLongBetween(generation, generation + rolls); commit(translog, minGenForRecovery, generation + rolls); assertThat(translog.currentFileGeneration(), equalTo(generation + rolls)); - assertThat(translog.uncommittedOperations(), equalTo(0)); + assertThat(translog.stats().getUncommittedOperations(), equalTo(0)); if (longRetention) { for (int i = 0; i <= rolls; i++) { assertFileIsPresent(translog, generation + i); diff --git a/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java b/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java index e4d73ce0f41ea..6079a9104d3db 100644 --- a/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java +++ b/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java @@ -110,8 +110,7 @@ public void testRandomClusterStateUpdates() { state = randomlyUpdateClusterState(state, clusterStateServiceMap, MockIndicesService::new); } catch (AssertionError error) { ClusterState finalState = state; - logger.error((org.apache.logging.log4j.util.Supplier<?>) () -> - new ParameterizedMessage("failed to random change state.
last good state: \n{}", finalState), error); throw error; } } @@ -125,7 +124,7 @@ public void testRandomClusterStateUpdates() { try { indicesClusterStateService.applyClusterState(event); } catch (AssertionError error) { - logger.error((org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage( + logger.error(new ParameterizedMessage( "failed to apply change on [{}].\n *** Previous state ***\n{}\n *** New state ***\n{}", node, event.previousState(), event.state()), error); throw error; diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java index a496664c0260b..49e557c3dde78 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java @@ -306,7 +306,7 @@ public void testShouldFlushAfterPeerRecovery() throws Exception { try (ReplicationGroup shards = createGroup(0)) { shards.startAll(); int numDocs = shards.indexDocs(between(10, 100)); - final long translogSizeOnPrimary = shards.getPrimary().getTranslog().uncommittedSizeInBytes(); + final long translogSizeOnPrimary = shards.getPrimary().translogStats().getUncommittedSizeInBytes(); shards.flush(); final IndexShard replica = shards.addReplica(); diff --git a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksClusterServiceTests.java b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksClusterServiceTests.java index e470c5028aa8f..916fdee213695 100644 --- a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksClusterServiceTests.java +++ b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksClusterServiceTests.java @@ -36,9 +36,16 @@ import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask; import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestParams; import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestPersistentTasksExecutor; +import org.elasticsearch.persistent.decider.EnableAssignmentDecider; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; import java.util.ArrayList; import java.util.Arrays; @@ -52,14 +59,41 @@ import static org.elasticsearch.persistent.PersistentTasksClusterService.needsReassignment; import static org.elasticsearch.persistent.PersistentTasksClusterService.persistentTasksChanged; import static org.elasticsearch.persistent.PersistentTasksExecutor.NO_NODE_FOUND; +import static org.elasticsearch.test.ClusterServiceUtils.createClusterService; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; -import static org.mockito.Mockito.mock; public class PersistentTasksClusterServiceTests extends ESTestCase { + /** Needed by {@link ClusterService} **/ + private static ThreadPool threadPool; + /** Needed by {@link PersistentTasksClusterService} **/ + private ClusterService clusterService; + + @BeforeClass + public static void setUpThreadPool() { + threadPool = new TestThreadPool(PersistentTasksClusterServiceTests.class.getSimpleName()); + } + + @Before + public void setUp() throws 
Exception { + super.setUp(); + clusterService = createClusterService(threadPool); + } + + @AfterClass + public static void tearDownThreadPool() throws Exception { + terminate(threadPool); + } + + @After + public void tearDown() throws Exception { + super.tearDown(); + clusterService.close(); + } + public void testReassignmentRequired() { final PersistentTasksClusterService service = createService((params, clusterState) -> "never_assign".equals(((TestParams) params).getTestParam()) ? NO_NODE_FOUND : randomNodeAssignment(clusterState.nodes()) @@ -81,6 +115,55 @@ public void testReassignmentRequired() { } } + public void testReassignmentRequiredOnMetadataChanges() { + EnableAssignmentDecider.Allocation allocation = randomFrom(EnableAssignmentDecider.Allocation.values()); + + DiscoveryNodes nodes = DiscoveryNodes.builder() + .add(new DiscoveryNode("_node", buildNewFakeTransportAddress(), Version.CURRENT)) + .localNodeId("_node") + .masterNodeId("_node") + .build(); + + boolean unassigned = randomBoolean(); + PersistentTasksCustomMetaData tasks = PersistentTasksCustomMetaData.builder() + .addTask("_task_1", TestPersistentTasksExecutor.NAME, null, new Assignment(unassigned ? null : "_node", "_reason")) + .build(); + + MetaData metaData = MetaData.builder() + .putCustom(PersistentTasksCustomMetaData.TYPE, tasks) + .persistentSettings(Settings.builder() + .put(EnableAssignmentDecider.CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING.getKey(), allocation.toString()) + .build()) + .build(); + + ClusterState previous = ClusterState.builder(new ClusterName("_name")) + .nodes(nodes) + .metaData(metaData) + .build(); + + ClusterState current; + + final boolean changed = randomBoolean(); + if (changed) { + allocation = randomValueOtherThan(allocation, () -> randomFrom(EnableAssignmentDecider.Allocation.values())); + + current = ClusterState.builder(previous) + .metaData(MetaData.builder(previous.metaData()) + .persistentSettings(Settings.builder() + .put(EnableAssignmentDecider.CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING.getKey(), allocation.toString()) + .build()) + .build()) + .build(); + } else { + current = ClusterState.builder(previous).build(); + } + + final ClusterChangedEvent event = new ClusterChangedEvent("test", current, previous); + + final PersistentTasksClusterService service = createService((params, clusterState) -> randomNodeAssignment(clusterState.nodes())); + assertThat(dumpEvent(event), service.shouldReassignPersistentTasks(event), equalTo(changed && unassigned)); + } + public void testReassignTasksWithNoTasks() { ClusterState clusterState = initialState(); assertThat(reassign(clusterState).metaData().custom(PersistentTasksCustomMetaData.TYPE), nullValue()); @@ -527,7 +610,6 @@ private DiscoveryNode newNode(String nodeId) { Version.CURRENT); } - private ClusterState initialState() { MetaData.Builder metaData = MetaData.builder(); RoutingTable.Builder routingTable = RoutingTable.builder(); @@ -558,7 +640,7 @@ private void changeRoutingTable(MetaData.Builder metaData, RoutingTable.Builder } /** Creates a PersistentTasksClusterService with a single PersistentTasksExecutor implemented by a BiFunction **/ - static
 <P extends PersistentTaskParams> PersistentTasksClusterService createService(final BiFunction<P, ClusterState, Assignment> fn) {
+    private <P extends PersistentTaskParams> PersistentTasksClusterService createService(final BiFunction<P, ClusterState, Assignment> fn) {
         PersistentTasksExecutorRegistry registry = new PersistentTasksExecutorRegistry(Settings.EMPTY,
                 singleton(new PersistentTasksExecutor<P>
    (Settings.EMPTY, TestPersistentTasksExecutor.NAME, null) { @Override @@ -571,6 +653,6 @@ protected void nodeOperation(AllocatedPersistentTask task, P params, Task.Status throw new UnsupportedOperationException(); } })); - return new PersistentTasksClusterService(Settings.EMPTY, registry, mock(ClusterService.class)); + return new PersistentTasksClusterService(Settings.EMPTY, registry, clusterService); } } diff --git a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksDecidersTestCase.java b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksDecidersTestCase.java new file mode 100644 index 0000000000000..356e518198c52 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksDecidersTestCase.java @@ -0,0 +1,134 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.persistent; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; + +import java.util.function.Predicate; + +import static java.util.Collections.emptyList; +import static org.elasticsearch.test.ClusterServiceUtils.createClusterService; + +public abstract class PersistentTasksDecidersTestCase extends ESTestCase { + + /** Needed by {@link ClusterService} **/ + private static ThreadPool threadPool; + /** Needed by {@link PersistentTasksClusterService} **/ + private ClusterService clusterService; + + private PersistentTasksClusterService persistentTasksClusterService; + + @BeforeClass + public static void setUpThreadPool() { + threadPool = new TestThreadPool(getTestClass().getSimpleName()); + } + + @Before + public void setUp() throws Exception { + super.setUp(); + clusterService = createClusterService(threadPool); + PersistentTasksExecutorRegistry registry = new PersistentTasksExecutorRegistry(clusterService.getSettings(), emptyList()) { + @Override + public PersistentTasksExecutor getPersistentTaskExecutorSafe(String taskName) { + return new PersistentTasksExecutor(clusterService.getSettings(), taskName, null) { + @Override + protected void nodeOperation(AllocatedPersistentTask task, Params params, Task.Status status) { 
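+                        // The deciders tests only exercise task assignment; nothing is ever
+                        // executed here, so a trace log is all this stub needs to do.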
+                        logger.debug("Executing task {}", task);
+                    }
+                };
+            }
+        };
+        persistentTasksClusterService = new PersistentTasksClusterService(clusterService.getSettings(), registry, clusterService);
+    }
+
+    @AfterClass
+    public static void tearDownThreadPool() throws Exception {
+        terminate(threadPool);
+    }
+
+    @After
+    public void tearDown() throws Exception {
+        super.tearDown();
+        clusterService.close();
+    }
+
+    protected ClusterState reassign(final ClusterState clusterState) {
+        return persistentTasksClusterService.reassignTasks(clusterState);
+    }
+
+    protected void updateSettings(final Settings settings) {
+        ClusterSettings clusterSettings = clusterService.getClusterSettings();
+        Settings.Builder updated = Settings.builder();
+        clusterSettings.updateDynamicSettings(settings, updated, Settings.builder(), getTestClass().getName());
+        clusterSettings.applySettings(updated.build());
+    }
+
+    protected static ClusterState createClusterStateWithTasks(final int nbNodes, final int nbTasks) {
+        DiscoveryNodes.Builder nodes = DiscoveryNodes.builder();
+        for (int i = 0; i < nbNodes; i++) {
+            nodes.add(new DiscoveryNode("_node_" + i, buildNewFakeTransportAddress(), Version.CURRENT));
+        }
+
+        PersistentTasksCustomMetaData.Builder tasks = PersistentTasksCustomMetaData.builder();
+        for (int i = 0; i < nbTasks; i++) {
+            tasks.addTask("_task_" + i, "test", null, new PersistentTasksCustomMetaData.Assignment(null, "initialized"));
+        }
+
+        MetaData metaData = MetaData.builder()
+            .putCustom(PersistentTasksCustomMetaData.TYPE, tasks.build())
+            .build();
+
+        return ClusterState.builder(ClusterName.DEFAULT).nodes(nodes).metaData(metaData).build();
+    }
+
+    /** Asserts that the given cluster state contains nbTasks tasks that are assigned **/
+    protected static void assertNbAssignedTasks(final long nbTasks, final ClusterState clusterState) {
+        assertPersistentTasks(nbTasks, clusterState, PersistentTasksCustomMetaData.PersistentTask::isAssigned);
+    }
+
+    /** Asserts that the given cluster state contains nbTasks tasks that are NOT assigned **/
+    protected static void assertNbUnassignedTasks(final long nbTasks, final ClusterState clusterState) {
+        assertPersistentTasks(nbTasks, clusterState, task -> task.isAssigned() == false);
+    }
+
+    /** Asserts that the cluster state contains nbTasks tasks that match the given predicate **/
+    protected static void assertPersistentTasks(final long nbTasks,
+                                                final ClusterState clusterState,
+                                                final Predicate<PersistentTask<?>> predicate) {
+        PersistentTasksCustomMetaData tasks = clusterState.metaData().custom(PersistentTasksCustomMetaData.TYPE);
+        assertNotNull("Persistent tasks must not be null", tasks);
+        assertEquals(nbTasks, tasks.tasks().stream().filter(predicate).count());
+    }
+}
diff --git a/server/src/test/java/org/elasticsearch/persistent/decider/AssignmentDecisionTests.java b/server/src/test/java/org/elasticsearch/persistent/decider/AssignmentDecisionTests.java
new file mode 100644
index 0000000000000..3fa580e726a83
--- /dev/null
+++ b/server/src/test/java/org/elasticsearch/persistent/decider/AssignmentDecisionTests.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.persistent.decider; + +import org.elasticsearch.test.ESTestCase; + +public class AssignmentDecisionTests extends ESTestCase { + + public void testConstantsTypes() { + assertEquals(AssignmentDecision.Type.YES, AssignmentDecision.YES.getType()); + } + + public void testResolveFromType() { + final AssignmentDecision.Type expected = randomFrom(AssignmentDecision.Type.values()); + assertEquals(expected, AssignmentDecision.Type.resolve(expected.toString())); + } +} diff --git a/server/src/test/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java b/server/src/test/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java new file mode 100644 index 0000000000000..15d12fb1ce932 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java @@ -0,0 +1,173 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+package org.elasticsearch.persistent.decider;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.persistent.PersistentTaskParams;
+import org.elasticsearch.persistent.PersistentTasksCustomMetaData;
+import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask;
+import org.elasticsearch.persistent.PersistentTasksService;
+import org.elasticsearch.persistent.TestPersistentTasksPlugin;
+import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestParams;
+import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestPersistentTasksExecutor;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.test.ESIntegTestCase;
+
+import java.util.Collection;
+import java.util.concurrent.CountDownLatch;
+
+import static java.util.Collections.singletonList;
+import static org.elasticsearch.persistent.decider.EnableAssignmentDecider.Allocation;
+import static org.elasticsearch.persistent.decider.EnableAssignmentDecider.CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.equalTo;
+
+@ESIntegTestCase.ClusterScope(minNumDataNodes = 1)
+public class EnableAssignmentDeciderIT extends ESIntegTestCase {
+
+    @Override
+    protected Collection<Class<? extends Plugin>> nodePlugins() {
+        return singletonList(TestPersistentTasksPlugin.class);
+    }
+
+    @Override
+    protected Collection<Class<? extends Plugin>> transportClientPlugins() {
+        return nodePlugins();
+    }
+
+    @Override
+    protected boolean ignoreExternalCluster() {
+        return true;
+    }
+
+    /**
+     * Test that the {@link EnableAssignmentDecider#CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING} setting correctly
+     * prevents persistent tasks from being assigned after a cluster restart.
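+     *
+     * An illustrative sketch (not part of the original patch) of how a caller might
+     * disable assignment through this setting, mirroring the helper methods below:
+     * <pre>
+     * client().admin().cluster().prepareUpdateSettings()
+     *     .setPersistentSettings(Settings.builder()
+     *         .put(CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING.getKey(), Allocation.NONE))
+     *     .get();
+     * </pre>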
+ */ + public void testEnableAssignmentAfterRestart() throws Exception { + final int numberOfTasks = randomIntBetween(1, 10); + logger.trace("creating {} persistent tasks", numberOfTasks); + + final CountDownLatch latch = new CountDownLatch(numberOfTasks); + for (int i = 0; i < numberOfTasks; i++) { + PersistentTasksService service = internalCluster().getInstance(PersistentTasksService.class); + service.startPersistentTask("task_" + i, TestPersistentTasksExecutor.NAME, randomTaskParams(), + new ActionListener>() { + @Override + public void onResponse(PersistentTask task) { + latch.countDown(); + } + + @Override + public void onFailure(Exception e) { + latch.countDown(); + } + }); + } + latch.await(); + + ClusterService clusterService = internalCluster().clusterService(internalCluster().getMasterName()); + PersistentTasksCustomMetaData tasks = clusterService.state().getMetaData().custom(PersistentTasksCustomMetaData.TYPE); + assertEquals(numberOfTasks, tasks.tasks().stream().filter(t -> TestPersistentTasksExecutor.NAME.equals(t.getTaskName())).count()); + + logger.trace("waiting for the tasks to be running"); + assertBusy(() -> { + ListTasksResponse listTasks = client().admin().cluster().prepareListTasks() + .setActions(TestPersistentTasksExecutor.NAME + "[c]") + .get(); + assertThat(listTasks.getTasks().size(), equalTo(numberOfTasks)); + }); + + try { + logger.trace("disable persistent tasks assignment"); + disablePersistentTasksAssignment(); + + logger.trace("restart the cluster"); + internalCluster().fullRestart(); + ensureYellow(); + + logger.trace("persistent tasks assignment is still disabled"); + assertEnableAssignmentSetting(Allocation.NONE); + + logger.trace("persistent tasks are not assigned"); + tasks = internalCluster().clusterService().state().getMetaData().custom(PersistentTasksCustomMetaData.TYPE); + assertEquals(numberOfTasks, tasks.tasks().stream() + .filter(t -> TestPersistentTasksExecutor.NAME.equals(t.getTaskName())) + .filter(t -> t.isAssigned() == false) + .count()); + + ListTasksResponse runningTasks = client().admin().cluster().prepareListTasks() + .setActions(TestPersistentTasksExecutor.NAME + "[c]") + .get(); + assertThat(runningTasks.getTasks().size(), equalTo(0)); + + logger.trace("enable persistent tasks assignment"); + if (randomBoolean()) { + enablePersistentTasksAssignment(); + } else { + resetPersistentTasksAssignment(); + } + + assertBusy(() -> { + ListTasksResponse listTasks = client().admin().cluster().prepareListTasks() + .setActions(TestPersistentTasksExecutor.NAME + "[c]") + .get(); + assertThat(listTasks.getTasks().size(), equalTo(numberOfTasks)); + }); + + } finally { + resetPersistentTasksAssignment(); + } + } + + private void assertEnableAssignmentSetting(final Allocation expected) { + ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().clear().setMetaData(true).get(); + Settings settings = clusterStateResponse.getState().getMetaData().settings(); + + String value = settings.get(CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING.getKey()); + assertThat(Allocation.fromString(value), equalTo(expected)); + } + + private void disablePersistentTasksAssignment() { + Settings.Builder settings = Settings.builder().put(CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING.getKey(), Allocation.NONE); + assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settings)); + } + + private void enablePersistentTasksAssignment() { + Settings.Builder settings = 
Settings.builder().put(CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING.getKey(), Allocation.ALL); + assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settings)); + } + + private void resetPersistentTasksAssignment() { + Settings.Builder settings = Settings.builder().putNull(CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING.getKey()); + assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settings)); + } + + /** Returns a random task parameter **/ + private static PersistentTaskParams randomTaskParams() { + if (randomBoolean()) { + return null; + } + return new TestParams(randomAlphaOfLength(10)); + } +} diff --git a/server/src/test/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderTests.java b/server/src/test/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderTests.java new file mode 100644 index 0000000000000..7aedde1ab9b60 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderTests.java @@ -0,0 +1,52 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.persistent.decider; + +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.persistent.PersistentTasksDecidersTestCase; + +public class EnableAssignmentDeciderTests extends PersistentTasksDecidersTestCase { + + public void testAllocationValues() { + final String all = randomFrom("all", "All", "ALL"); + assertEquals(EnableAssignmentDecider.Allocation.ALL, EnableAssignmentDecider.Allocation.fromString(all)); + + final String none = randomFrom("none", "None", "NONE"); + assertEquals(EnableAssignmentDecider.Allocation.NONE, EnableAssignmentDecider.Allocation.fromString(none)); + } + + public void testEnableAssignment() { + final int nbTasks = randomIntBetween(1, 10); + final int nbNodes = randomIntBetween(1, 5); + final EnableAssignmentDecider.Allocation allocation = randomFrom(EnableAssignmentDecider.Allocation.values()); + + Settings settings = Settings.builder() + .put(EnableAssignmentDecider.CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING.getKey(), allocation.toString()) + .build(); + updateSettings(settings); + + ClusterState clusterState = reassign(createClusterStateWithTasks(nbNodes, nbTasks)); + if (allocation == EnableAssignmentDecider.Allocation.ALL) { + assertNbAssignedTasks(nbTasks, clusterState); + } else { + assertNbUnassignedTasks(nbTasks, clusterState); + } + } +} diff --git a/server/src/test/java/org/elasticsearch/rest/action/cat/RestRecoveryActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/cat/RestRecoveryActionTests.java index ffebd804c609c..e99fb4cc1f258 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/cat/RestRecoveryActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/cat/RestRecoveryActionTests.java @@ -57,7 +57,6 @@ public void testRestRecoveryAction() { final int totalShards = randomIntBetween(1, 32); final int successfulShards = Math.max(0, totalShards - randomIntBetween(1, 2)); final int failedShards = totalShards - successfulShards; - final boolean detailed = randomBoolean(); final Map> shardRecoveryStates = new HashMap<>(); final List recoveryStates = new ArrayList<>(); @@ -115,7 +114,6 @@ public void testRestRecoveryAction() { totalShards, successfulShards, failedShards, - detailed, shardRecoveryStates, shardFailures); final Table table = action.buildRecoveryTable(null, response); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/adjacency/InternalAdjacencyMatrixTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/adjacency/InternalAdjacencyMatrixTests.java index 742e769ed4082..2ba97251b313c 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/adjacency/InternalAdjacencyMatrixTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/adjacency/InternalAdjacencyMatrixTests.java @@ -82,8 +82,10 @@ protected void assertReduced(InternalAdjacencyMatrix reduced, List expectedCounts = new TreeMap<>(); for (InternalAdjacencyMatrix input : inputs) { for (InternalAdjacencyMatrix.InternalBucket bucket : input.getBuckets()) { - expectedCounts.compute(bucket.getKeyAsString(), + if (bucket.getDocCount() > 0) { + expectedCounts.compute(bucket.getKeyAsString(), (key, oldValue) -> (oldValue == null ? 
0 : oldValue) + bucket.getDocCount()); + } } } final Map actualCounts = new TreeMap<>(); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java index 0fcf794ee1d83..e277902ace24d 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsIT.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.aggregations.metrics; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.common.settings.Settings; @@ -482,7 +481,7 @@ private void assertShardExecutionState(SearchResponse response, int expectedFail ShardSearchFailure[] failures = response.getShardFailures(); if (failures.length != expectedFailures) { for (ShardSearchFailure failure : failures) { - logger.error((Supplier) () -> new ParameterizedMessage("Shard Failure: {}", failure), failure.getCause()); + logger.error(new ParameterizedMessage("Shard Failure: {}", failure), failure.getCause()); } fail("Unexpected shard failures!"); } diff --git a/server/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java b/server/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java index 0038ef368c150..3b1002a6f68c4 100644 --- a/server/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java +++ b/server/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java @@ -20,7 +20,6 @@ package org.elasticsearch.search.geo; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy; import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree; import org.apache.lucene.spatial.query.SpatialArgs; @@ -478,7 +477,7 @@ protected static boolean testRelationSupport(SpatialOperation relation) { final SpatialOperation finalRelation = relation; ESLoggerFactory .getLogger(GeoFilterIT.class.getName()) - .info((Supplier) () -> new ParameterizedMessage("Unsupported spatial operation {}", finalRelation), e); + .info(() -> new ParameterizedMessage("Unsupported spatial operation {}", finalRelation), e); return false; } }
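The new decider tests above all revolve around one decision: when the cluster-wide setting controlled by CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING is NONE, no persistent task may be assigned; otherwise assignment proceeds. The following is a minimal, self-contained sketch of that decision under those assumptions, using hypothetical stand-in types (Allocation, Type, AssignmentDecisionSketch) rather than the actual Elasticsearch classes:

enum Allocation { ALL, NONE }

final class AssignmentDecisionSketch {

    enum Type { YES, NO }

    private final Allocation allocation;

    AssignmentDecisionSketch(final Allocation allocation) {
        this.allocation = allocation;
    }

    // Veto assignment when the operator has disabled it cluster-wide.
    Type canAssign() {
        return allocation == Allocation.NONE ? Type.NO : Type.YES;
    }

    public static void main(String[] args) {
        // With allocation disabled, tasks stay unassigned until the setting is reset.
        System.out.println(new AssignmentDecisionSketch(Allocation.NONE).canAssign()); // NO
        System.out.println(new AssignmentDecisionSketch(Allocation.ALL).canAssign());  // YES
    }
}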
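Several hunks in this patch also drop the explicit (Supplier) casts from logger calls in favor of plain lambdas. A minimal sketch of that idiom, assuming the stock Log4j 2 API (2.4 or later, where the Supplier overloads of Logger.error exist):

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;

public class LazyLoggingSketch {

    private static final Logger logger = LogManager.getLogger(LazyLoggingSketch.class);

    public static void main(String[] args) {
        String threadName = "writer-0";
        Exception cause = new IllegalStateException("boom");
        // The ParameterizedMessage is only constructed if ERROR is enabled;
        // with an unambiguous Supplier overload, no cast is required.
        logger.error(() -> new ParameterizedMessage("--> writer [{}] had an error", threadName), cause);
    }
}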