diff --git a/.ci/jobs.t/elastic+elasticsearch+multijob+packaging-tests-unix.yml b/.ci/jobs.t/elastic+elasticsearch+multijob+packaging-tests-unix.yml index 701e7f32d103e..49a335d23d7e6 100644 --- a/.ci/jobs.t/elastic+elasticsearch+multijob+packaging-tests-unix.yml +++ b/.ci/jobs.t/elastic+elasticsearch+multijob+packaging-tests-unix.yml @@ -25,6 +25,7 @@ - sles-15-packaging - ubuntu-18.04-packaging - ubuntu-20.04-packaging + - rocky-linux-8-packaging builders: - inject: properties-file: '.ci/java-versions.properties' diff --git a/.ci/jobs.t/elastic+elasticsearch+multijob+platform-support-unix.yml b/.ci/jobs.t/elastic+elasticsearch+multijob+platform-support-unix.yml index b0941bce0c15b..0fe2ae5fa1d3c 100644 --- a/.ci/jobs.t/elastic+elasticsearch+multijob+platform-support-unix.yml +++ b/.ci/jobs.t/elastic+elasticsearch+multijob+platform-support-unix.yml @@ -25,6 +25,7 @@ - "sles-15&&immutable" - "ubuntu-18.04&&immutable" - "ubuntu-20.04&&immutable" + - "rocky-linux-8&&immutable" builders: - inject: properties-file: '.ci/java-versions.properties' diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-unix.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-unix.yml index 037f0bca8c3a1..6e1d7d9233424 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-unix.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-unix.yml @@ -44,6 +44,7 @@ - sles-15-packaging - ubuntu-18.04-packaging - ubuntu-20.04-packaging + - rocky-linux-8-packaging - axis: type: user-defined name: PACKAGING_TASK diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-1-fips.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-1-fips.yml index 766b158b59d7a..0eb7525c613e3 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-1-fips.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-1-fips.yml @@ -31,7 +31,7 @@ properties-file: '.ci/java-versions.properties' properties-content: | JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA - RUNTIME_JAVA_HOME=$HOME/.java/$ES_RUNTIME_JAVA + RUNTIME_JAVA_HOME=$HOME/.java/java11 JAVA15_HOME=$HOME/.java/openjdk15 - shell: | #!/usr/local/bin/runbld --redirect-stderr diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-2-fips.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-2-fips.yml index f22125ba5f51d..dfd7ebaf62bd2 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-2-fips.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-2-fips.yml @@ -31,7 +31,7 @@ properties-file: '.ci/java-versions.properties' properties-content: | JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA - RUNTIME_JAVA_HOME=$HOME/.java/$ES_RUNTIME_JAVA + RUNTIME_JAVA_HOME=$HOME/.java/java11 JAVA15_HOME=$HOME/.java/openjdk15 - shell: | #!/usr/local/bin/runbld --redirect-stderr diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/search/fetch/subphase/FetchSourcePhaseBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/search/fetch/subphase/FetchSourcePhaseBenchmark.java new file mode 100644 index 0000000000000..b313b0f57d3c2 --- /dev/null +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/search/fetch/subphase/FetchSourcePhaseBenchmark.java @@ -0,0 +1,138 @@ +package org.elasticsearch.benchmark.search.fetch.subphase; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.Streams; +import org.elasticsearch.common.io.stream.BytesStreamOutput; 
+import org.elasticsearch.common.xcontent.DeprecationHandler;
+import org.elasticsearch.common.xcontent.NamedXContentRegistry;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.common.xcontent.support.filtering.FilterPath;
+import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
+import org.elasticsearch.search.fetch.subphase.FetchSourcePhase;
+import org.elasticsearch.search.lookup.SourceLookup;
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Measurement;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+import org.openjdk.jmh.annotations.Param;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.Warmup;
+
+import java.io.IOException;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+
+@Fork(1)
+@Warmup(iterations = 5)
+@Measurement(iterations = 5)
+@BenchmarkMode(Mode.AverageTime)
+@OutputTimeUnit(TimeUnit.NANOSECONDS)
+@State(Scope.Benchmark)
+public class FetchSourcePhaseBenchmark {
+    private BytesReference sourceBytes;
+    private FetchSourceContext fetchContext;
+    private Set<String> includesSet;
+    private Set<String> excludesSet;
+    private FilterPath[] includesFilters;
+    private FilterPath[] excludesFilters;
+
+    @Param({ "tiny", "short", "one_4k_field", "one_4m_field" })
+    private String source;
+    @Param({ "message" })
+    private String includes;
+    @Param({ "" })
+    private String excludes;
+
+    @Setup
+    public void setup() throws IOException {
+        switch (source) {
+            case "tiny":
+                sourceBytes = new BytesArray("{\"message\": \"short\"}");
+                break;
+            case "short":
+                sourceBytes = read300BytesExample();
+                break;
+            case "one_4k_field":
+                sourceBytes = buildBigExample("huge".repeat(1024));
+                break;
+            case "one_4m_field":
+                sourceBytes = buildBigExample("huge".repeat(1024 * 1024));
+                break;
+            default:
+                throw new IllegalArgumentException("Unknown source [" + source + "]");
+        }
+        fetchContext = new FetchSourceContext(
+            true,
+            Strings.splitStringByCommaToArray(includes),
+            Strings.splitStringByCommaToArray(excludes)
+        );
+        includesSet = Set.of(fetchContext.includes());
+        excludesSet = Set.of(fetchContext.excludes());
+        includesFilters = FilterPath.compile(Set.of(fetchContext.includes()));
+        excludesFilters = FilterPath.compile(Set.of(fetchContext.excludes()));
+    }
+
+    private BytesReference read300BytesExample() throws IOException {
+        return Streams.readFully(FetchSourcePhaseBenchmark.class.getResourceAsStream("300b_example.json"));
+    }
+
+    private BytesReference buildBigExample(String extraText) throws IOException {
+        String bigger = read300BytesExample().utf8ToString();
+        bigger = "{\"huge\": \"" + extraText + "\"," + bigger.substring(1);
+        return new BytesArray(bigger);
+    }
+
+    @Benchmark
+    public BytesReference filterObjects() throws IOException {
+        SourceLookup lookup = new SourceLookup();
+        lookup.setSource(sourceBytes);
+        Object value = lookup.filter(fetchContext);
+        return FetchSourcePhase.objectToBytes(value, XContentType.JSON, Math.min(1024, lookup.internalSourceRef().length()));
+    }
+
+    @Benchmark
+    public BytesReference filterXContentOnParser() throws IOException {
+        BytesStreamOutput streamOutput = new BytesStreamOutput(Math.min(1024,
sourceBytes.length())); + XContentBuilder builder = new XContentBuilder(XContentType.JSON.xContent(), streamOutput); + try ( + XContentParser parser = XContentType.JSON.xContent() + .createParser( + NamedXContentRegistry.EMPTY, + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + sourceBytes.streamInput(), + includesFilters, + excludesFilters + ) + ) { + builder.copyCurrentStructure(parser); + return BytesReference.bytes(builder); + } + } + + @Benchmark + public BytesReference filterXContentOnBuilder() throws IOException { + BytesStreamOutput streamOutput = new BytesStreamOutput(Math.min(1024, sourceBytes.length())); + XContentBuilder builder = new XContentBuilder( + XContentType.JSON.xContent(), + streamOutput, + includesSet, + excludesSet, + XContentType.JSON.toParsedMediaType() + ); + try ( + XContentParser parser = XContentType.JSON.xContent() + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, sourceBytes.streamInput()) + ) { + builder.copyCurrentStructure(parser); + return BytesReference.bytes(builder); + } + } +} diff --git a/benchmarks/src/main/resources/org/elasticsearch/benchmark/search/fetch/subphase/300b_example.json b/benchmarks/src/main/resources/org/elasticsearch/benchmark/search/fetch/subphase/300b_example.json new file mode 100644 index 0000000000000..8112244c213e8 --- /dev/null +++ b/benchmarks/src/main/resources/org/elasticsearch/benchmark/search/fetch/subphase/300b_example.json @@ -0,0 +1,20 @@ +{ + "@timestamp": "2099-11-15T14:12:12", + "http": { + "request": { + "method": "get" + }, + "response": { + "bytes": 1070000, + "status_code": 200 + }, + "version": "1.1" + }, + "message": "GET /search HTTP/1.1 200 1070000", + "source": { + "ip": "192.168.0.1" + }, + "user": { + "id": "user" + } +} \ No newline at end of file diff --git a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/precommit/LicenseHeadersTask.java b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/precommit/LicenseHeadersTask.java index e880d07e79148..4f07187f7fc38 100644 --- a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/precommit/LicenseHeadersTask.java +++ b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/precommit/LicenseHeadersTask.java @@ -110,7 +110,7 @@ public void setExcludes(List excludes) { * Allowed license families for this project. */ @Input - private List approvedLicenses = new ArrayList(Arrays.asList("SSPL+Elastic License", "Generated", "Vendored")); + private List approvedLicenses = new ArrayList(Arrays.asList("SSPL+Elastic License", "Generated", "Vendored", "Apache LZ4-Java")); /** * Files that should be excluded from the license header check. Use with extreme care, only in situations where the license on the * source file is compatible with the codebase but we do not want to add the license to the list of approved headers (to avoid the @@ -154,6 +154,8 @@ public void runRat() { matchers.add(subStringMatcher("BSD4 ", "Original BSD License (with advertising clause)", "All advertising materials")); // Apache matchers.add(subStringMatcher("AL ", "Apache", "Licensed to Elasticsearch B.V. 
under one or more contributor"));
+        // Apache lz4-java
+        matchers.add(subStringMatcher("ALLZ4", "Apache LZ4-Java", "Copyright 2020 Adrien Grand and the lz4-java contributors"));
         // Generated resources
         matchers.add(subStringMatcher("GEN ", "Generated", "ANTLR GENERATED CODE"));
         // Vendored Code
diff --git a/build-tools-internal/build.gradle b/build-tools-internal/build.gradle
index b0363dc795eb0..83684976df81d 100644
--- a/build-tools-internal/build.gradle
+++ b/build-tools-internal/build.gradle
@@ -102,9 +102,9 @@ gradlePlugin {
       id = 'elasticsearch.java'
       implementationClass = 'org.elasticsearch.gradle.internal.ElasticsearchJavaPlugin'
     }
-    javaRestTest {
-      id = 'elasticsearch.java-rest-test'
-      implementationClass = 'org.elasticsearch.gradle.internal.test.rest.JavaRestTestPlugin'
+    internalJavaRestTest {
+      id = 'elasticsearch.internal-java-rest-test'
+      implementationClass = 'org.elasticsearch.gradle.internal.test.rest.InternalJavaRestTestPlugin'
     }
     jdkDownload {
       id = 'elasticsearch.jdk-download'
diff --git a/build-tools-internal/src/main/groovy/elasticsearch.formatting.gradle b/build-tools-internal/src/main/groovy/elasticsearch.formatting.gradle
index 1abf66430b937..205e5e3229394 100644
--- a/build-tools-internal/src/main/groovy/elasticsearch.formatting.gradle
+++ b/build-tools-internal/src/main/groovy/elasticsearch.formatting.gradle
@@ -54,6 +54,7 @@ def projectPathsToExclude = [
   ':libs:elasticsearch-dissect',
   ':libs:elasticsearch-geo',
   ':libs:elasticsearch-grok',
+  ':libs:elasticsearch-lz4',
   ':libs:elasticsearch-nio',
   ':libs:elasticsearch-plugin-classloader',
   ':libs:elasticsearch-secure-sm',
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/docker/ShellRetry.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/docker/ShellRetry.java
index 5286a3af24619..e3193f7aea5d0 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/docker/ShellRetry.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/docker/ShellRetry.java
@@ -8,6 +8,9 @@
 
 package org.elasticsearch.gradle.internal.docker;
 
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
 /**
  * The methods in this class take a shell command and wrap it in retry logic, so that our
  * Docker builds can be more robust in the face of transient errors e.g. network issues.
@@ -20,7 +23,11 @@ static String loop(String name, String command) {
     static String loop(String name, String command, int indentSize, String exitKeyword) {
         String indent = " ".repeat(indentSize);
-        StringBuilder commandWithRetry = new StringBuilder("for iter in {1..10}; do \n");
+        // bash understands the `{1..10}` syntax, but other shells don't e.g. the default in Alpine Linux.
+        // We therefore use an explicit sequence.
+ String retrySequence = IntStream.rangeClosed(1, 10).mapToObj(String::valueOf).collect(Collectors.joining(" ")); + + StringBuilder commandWithRetry = new StringBuilder("for iter in " + retrySequence + "; do \n"); commandWithRetry.append(indent).append(" ").append(command).append(" && \n"); commandWithRetry.append(indent).append(" exit_code=0 && break || \n"); commandWithRetry.append(indent); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/JavaRestTestPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/InternalJavaRestTestPlugin.java similarity index 95% rename from build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/JavaRestTestPlugin.java rename to build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/InternalJavaRestTestPlugin.java index 21c4bee730e62..dd8854ebfec54 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/JavaRestTestPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/InternalJavaRestTestPlugin.java @@ -21,7 +21,7 @@ /** * Apply this plugin to run the Java based REST tests. */ -public class JavaRestTestPlugin implements Plugin { +public class InternalJavaRestTestPlugin implements Plugin { public static final String SOURCE_SET_NAME = "javaRestTest"; diff --git a/build-tools-internal/src/main/resources/checkstyle.xml b/build-tools-internal/src/main/resources/checkstyle.xml index 46f595349ff5a..ae330ff3dbdbe 100644 --- a/build-tools-internal/src/main/resources/checkstyle.xml +++ b/build-tools-internal/src/main/resources/checkstyle.xml @@ -95,6 +95,17 @@ lines up with the directory structure. --> + + + + snapshot start time collided"); + assertAcked(clusterAdmin().prepareDeleteSnapshot(repoName, snapshotName).get()); + } else { + assertTrue(forbiddenStartTimes.add(snapshotInfo.startTime())); + return snapshotInfo; + } + } + } + + private List allAfterStartTimeAscending(long timestamp) throws IOException { + final Request request = baseGetSnapshotsRequest("*"); + request.addParameter("sort", GetSnapshotsRequest.SortBy.START_TIME.toString()); + request.addParameter("from_sort_value", String.valueOf(timestamp)); + final Response response = getRestClient().performRequest(request); + return readSnapshotInfos(response).getSnapshots(); + } + + private List allBeforeStartTimeDescending(long timestamp) throws IOException { + final Request request = baseGetSnapshotsRequest("*"); + request.addParameter("sort", GetSnapshotsRequest.SortBy.START_TIME.toString()); + request.addParameter("from_sort_value", String.valueOf(timestamp)); + request.addParameter("order", SortOrder.DESC.toString()); + final Response response = getRestClient().performRequest(request); + return readSnapshotInfos(response).getSnapshots(); + } + private static List getAllSnapshotsForPolicies(String... policies) throws IOException { final Request requestWithPolicy = new Request(HttpGet.METHOD_NAME, "/_snapshot/*/*"); requestWithPolicy.addParameter("slm_policy_filter", Strings.arrayToCommaDelimitedString(policies)); diff --git a/qa/smoke-test-multinode/build.gradle b/qa/smoke-test-multinode/build.gradle index 1830ef309b4fb..ae2ac38cb21e7 100644 --- a/qa/smoke-test-multinode/build.gradle +++ b/qa/smoke-test-multinode/build.gradle @@ -6,6 +6,8 @@ * Side Public License, v 1. 
*/ +import org.elasticsearch.gradle.internal.info.BuildParams + apply plugin: 'elasticsearch.internal-testclusters' apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' @@ -25,6 +27,9 @@ testClusters.matching { it.name == "integTest" }.configureEach { testClusters.all { setting 'xpack.security.enabled', 'false' + if (BuildParams.isSnapshotBuild() == false) { + systemProperty 'es.index_mode_feature_flag_registered', 'true' + } } tasks.named("integTest").configure { diff --git a/qa/system-indices/build.gradle b/qa/system-indices/build.gradle index 6762b90588c17..1e082cf5e2af3 100644 --- a/qa/system-indices/build.gradle +++ b/qa/system-indices/build.gradle @@ -7,7 +7,7 @@ */ apply plugin: 'elasticsearch.internal-es-plugin' -apply plugin: 'elasticsearch.java-rest-test' +apply plugin: 'elasticsearch.internal-java-rest-test' esplugin { name 'system-indices-qa' @@ -25,5 +25,6 @@ tasks.named("javaRestTest").configure { testClusters.all { testDistribution = 'DEFAULT' setting 'xpack.security.enabled', 'true' + setting 'xpack.security.autoconfiguration.enabled', 'false' user username: 'rest_user', password: 'rest-user-password', role: 'superuser' } diff --git a/rest-api-spec/build.gradle b/rest-api-spec/build.gradle index 7d6059f035ba3..a208a103b0726 100644 --- a/rest-api-spec/build.gradle +++ b/rest-api-spec/build.gradle @@ -1,3 +1,4 @@ +import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.OS apply plugin: 'elasticsearch.build' @@ -35,6 +36,9 @@ artifacts { testClusters.all { module ':modules:mapper-extras' + if (BuildParams.isSnapshotBuild() == false) { + systemProperty 'es.index_mode_feature_flag_registered', 'true' + } } tasks.named("test").configure { enabled = false } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete.json index 252ba75473f68..d066c745b94ac 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.delete.json @@ -51,8 +51,8 @@ "none", "all" ], - "default":"open", - "description":"Whether wildcard expressions should get expanded to open or closed indices (default: open)" + "default":"open,closed", + "description":"Whether wildcard expressions should get expanded to open, closed, or hidden indices" } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ml.put_trained_model_vocabulary.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ml.put_trained_model_vocabulary.json new file mode 100644 index 0000000000000..061e4ced23831 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ml.put_trained_model_vocabulary.json @@ -0,0 +1,34 @@ +{ + "ml.put_trained_model_vocabulary":{ + "documentation":{ + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/put-trained-model-vocabulary.html", + "description":"Creates a trained model vocabulary" + }, + "stability":"experimental", + "visibility":"public", + "headers":{ + "accept": [ "application/json"], + "content_type": ["application/json"] + }, + "url":{ + "paths":[ + { + "path":"/_ml/trained_models/{model_id}/vocabulary", + "methods":[ + "PUT" + ], + "parts":{ + "model_id":{ + "type":"string", + "description":"The ID of the trained model for this vocabulary" + } + } + } + ] + }, + "body":{ + "description":"The trained model vocabulary", + "required":true + } + } +} diff --git 
a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.clear_metering_archive.json b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.clear_repositories_metering_archive.json
similarity index 95%
rename from rest-api-spec/src/main/resources/rest-api-spec/api/nodes.clear_metering_archive.json
rename to rest-api-spec/src/main/resources/rest-api-spec/api/nodes.clear_repositories_metering_archive.json
index 4a7c6e3c0d36c..3edcb98e8b2e3 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.clear_metering_archive.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.clear_repositories_metering_archive.json
@@ -1,5 +1,5 @@
 {
-  "nodes.clear_metering_archive":{
+  "nodes.clear_repositories_metering_archive":{
     "documentation":{
       "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/clear-repositories-metering-archive-api.html",
       "description":"Removes the archived repositories metering information present in the cluster."
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.get_metering_info.json b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.get_repositories_metering_info.json
similarity index 94%
rename from rest-api-spec/src/main/resources/rest-api-spec/api/nodes.get_metering_info.json
rename to rest-api-spec/src/main/resources/rest-api-spec/api/nodes.get_repositories_metering_info.json
index caba879a9a967..312b6b6c82eea 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.get_metering_info.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.get_repositories_metering_info.json
@@ -1,5 +1,5 @@
 {
-  "nodes.get_metering_info":{
+  "nodes.get_repositories_metering_info":{
     "documentation":{
       "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/get-repositories-metering-api.html",
      "description":"Returns cluster repositories metering information."
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/open_point_in_time.json b/rest-api-spec/src/main/resources/rest-api-spec/api/open_point_in_time.json index d33ead1597a9d..55b19368f7cf7 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/open_point_in_time.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/open_point_in_time.json @@ -11,12 +11,6 @@ }, "url":{ "paths":[ - { - "path":"/_pit", - "methods":[ - "POST" - ] - }, { "path":"/{index}/_pit", "methods":[ diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/20_terms.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/20_terms.yml index c9d832b0d6adb..7d4ad735fa96d 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/20_terms.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/20_terms.yml @@ -1328,8 +1328,8 @@ precise size: --- huge size: - skip: - version: " - 7.99.99" - reason: "Fixed in 8.0.0 to be backported to 7.14.1" + version: " - 7.14.0" + reason: "Fixed in 7.14.1" - do: bulk: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/230_composite.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/230_composite.yml index 1753e5b9d8f06..f405c985f9ee4 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/230_composite.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/230_composite.yml @@ -1118,7 +1118,7 @@ setup: "nested": { "path": "courses" }, "aggregations": { "names": { - "composite": { + "composite": { "sources": [ "kw": {"terms": {"field": "courses.name"}} ] @@ -1264,7 +1264,7 @@ setup: "aggs": { "keez": { "composite": { - "sources": [ + "sources": [ "key": {"terms": {"field": "kw"}} ] } @@ -1278,3 +1278,145 @@ setup: - length: {aggregations.not_one.keez.buckets: 2} - match: {aggregations.not_one.keez.buckets.0.key.key: "three"} - match: {aggregations.not_one.keez.buckets.1.key.key: "two"} + +--- +"Simple Composite aggregation with missing_bucket": + - do: + search: + rest_total_hits_as_int: true + index: test + body: + aggregations: + test: + composite: + sources: [ + "kw": { + "terms": { + "field": "keyword", + "missing_bucket": true + } + } + ] + + - length: { aggregations.test.buckets: 3 } + - match: { aggregations.test.buckets.0.key.kw: null } + - match: { aggregations.test.buckets.0.doc_count: 2 } + +--- +"Simple Composite aggregation with missing_order": + - skip: + version: " - 7.15.99" + reason: "`missing_order` has been introduced in 7.16" + - do: + search: + rest_total_hits_as_int: true + index: test + body: + aggregations: + test: + composite: + sources: [ + "kw": { + "terms": { + "field": "keyword", + "missing_bucket": true, + "missing_order": "last" + } + } + ] + + - length: { aggregations.test.buckets: 3 } + - match: { aggregations.test.buckets.2.key.kw: null } + - match: { aggregations.test.buckets.2.doc_count: 2 } + +--- +"missing_order with missing_bucket = false": + - skip: + version: " - 7.15.99" + reason: "`missing_order` has been introduced in 7.16" + - do: + catch: /missingOrder can only be set if missingBucket is true/ + search: + rest_total_hits_as_int: true + index: test + body: + aggregations: + test: + composite: + sources: [ + "kw": { + "terms": { + "field": "keyword", + "missing_bucket": false, + "missing_order": "first" + } + } + ] + +--- +"missing_order without 
missing_bucket": + - skip: + version: " - 7.15.99" + reason: "`missing_order` has been introduced in 7.16" + - do: + catch: /missingOrder can only be set if missingBucket is true/ + search: + rest_total_hits_as_int: true + index: test + body: + aggregations: + test: + composite: + sources: [ + "kw": { + "terms": { + "field": "keyword", + "missing_order": "first" + } + } + ] + +--- +"Nested Composite aggregation with missing_order": + - skip: + version: " - 7.15.99" + reason: "`missing_order` has been introduced in 7.16" + - do: + search: + rest_total_hits_as_int: true + index: test + body: + aggregations: + test: + composite: + sources: [ + { + "long": { + "terms": { + "field": "long", + "missing_bucket": true, + "missing_order": "default" + } + } + }, + { + "kw": { + "terms": { + "field": "keyword", + "missing_bucket": true, + "missing_order": "last" + } + } + } + ] + + - length: { aggregations.test.buckets: 8 } + - match: { aggregations.test.buckets.0.key.long: null} + - match: { aggregations.test.buckets.0.key.kw: "bar" } + - match: { aggregations.test.buckets.0.doc_count: 1 } + - match: { aggregations.test.buckets.1.key.long: null} + - match: { aggregations.test.buckets.1.key.kw: "foo" } + - match: { aggregations.test.buckets.1.doc_count: 1 } + - match: { aggregations.test.buckets.2.key.long: null} + - match: { aggregations.test.buckets.2.key.kw: null } + - match: { aggregations.test.buckets.2.doc_count: 2 } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/350_variable_width_histogram.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/350_variable_width_histogram.yml index 47ff51dd6a1c4..08d76a82aaa3b 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/350_variable_width_histogram.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/350_variable_width_histogram.yml @@ -28,7 +28,7 @@ setup: --- "basic": - skip: - version: " - 7.9.99" + version: " - 7.8.99" reason: added in 7.9.0 - do: search: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/400_sampler.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/400_sampler.yml new file mode 100644 index 0000000000000..ae0941efce9a7 --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/400_sampler.yml @@ -0,0 +1,79 @@ +setup: + - do: + indices.create: + index: test + body: + settings: + number_of_shards: 1 + mappings: + properties: + tags: + type: text + number: + type: integer + + - do: + bulk: + index: test + refresh: true + body: + - '{"index": {}}' + - '{"tags": "kibana", "number": 1}' + - '{"index": {}}' + - '{"tags": "kibana", "number": 2}' + - '{"index": {}}' + - '{"tags": "kibana", "number": 3}' + - '{"index": {}}' + - '{"tags": "javascript", "number": 4}' + +--- +small shard_size: + - do: + search: + body: + query: + query_string: + query: 'tags:kibana OR tags:javascript' + aggs: + sample: + sampler: + shard_size: 1 + aggs: + min_number: + min: + field: number + max_number: + max: + field: number + + + - match: { hits.total.value: 4 } + - match: { aggregations.sample.doc_count: 1 } + # The document with 4 has the highest score so we pick that one. 
+ - match: { aggregations.sample.min_number.value: 4.0 } + - match: { aggregations.sample.max_number.value: 4.0 } + +--- +default shard size: + - do: + search: + body: + query: + query_string: + query: 'tags:kibana OR tags:javascript' + aggs: + sample: + sampler: {} + aggs: + min_number: + min: + field: number + max_number: + max: + field: number + + - match: { hits.total.value: 4 } + # The default shard size is much larger than the four test documents we are working with + - match: { aggregations.sample.doc_count: 4 } + - match: { aggregations.sample.min_number.value: 1.0 } + - match: { aggregations.sample.max_number.value: 4.0 } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/40_range.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/40_range.yml index 57b61b242d8c8..afe66e4340d3b 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/40_range.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/40_range.yml @@ -7,420 +7,205 @@ setup: number_of_replicas: 0 mappings: properties: - ip: - type: ip double: type: double - date: - type: date - format: epoch_second + long: + type: long - do: cluster.health: - wait_for_status: green - ---- -"Double range": - - do: - index: - index: test - id: 1 - body: { "double" : 42 } - - - do: - index: - index: test - id: 2 - body: { "double" : 100 } + wait_for_status: yellow - do: - index: + bulk: index: test - id: 3 - body: { "double" : 50 } - - - do: - indices.refresh: {} + refresh: true + body: + - {"index": {}} + - { "double" : 42.1, "long": 25 } + - {"index": {}} + - { "double" : 100.7, "long": 80 } + - {"index": {}} + - { "double" : 50.5, "long": 75} +# For testing missing values + - {"index": {}} + - {} +--- +"Double range": - do: search: - rest_total_hits_as_int: true - body: { "size" : 0, "aggs" : { "double_range" : { "range" : { "field" : "double", "ranges": [ { "to": 50 }, { "from": 50, "to": 150 }, { "from": 150 } ] } } } } - - - match: { hits.total: 3 } - + body: + size: 0 + aggs: + double_range: + range: + field: "double" + ranges: + - + to: 50 + - + from: 50 + to: 150 + - + from: 150 + + - match: { hits.total.relation: "eq" } + - match: { hits.total.value: 4 } - length: { aggregations.double_range.buckets: 3 } - - match: { aggregations.double_range.buckets.0.key: "*-50.0" } - - is_false: aggregations.double_range.buckets.0.from - - match: { aggregations.double_range.buckets.0.to: 50.0 } - - match: { aggregations.double_range.buckets.0.doc_count: 1 } - - match: { aggregations.double_range.buckets.1.key: "50.0-150.0" } - - match: { aggregations.double_range.buckets.1.from: 50.0 } - - match: { aggregations.double_range.buckets.1.to: 150.0 } - - match: { aggregations.double_range.buckets.1.doc_count: 2 } - - match: { aggregations.double_range.buckets.2.key: "150.0-*" } - - match: { aggregations.double_range.buckets.2.from: 150.0 } - - is_false: aggregations.double_range.buckets.2.to - - match: { aggregations.double_range.buckets.2.doc_count: 0 } +--- +"Double range with missing value": - do: search: - rest_total_hits_as_int: true - body: { "size" : 0, "aggs" : { "double_range" : { "range" : { "field" : "double", "ranges": [ { "from": null, "to": 50 }, { "from": 50, "to": 150 }, { "from": 150, "to": null } ] } } } } - - - match: { hits.total: 3 } - + body: + size: 0 + aggs: + double_range: + range: + field: "double" + missing: 1000 + ranges: + - + to: 50 + - + from: 50 + to: 150 + - + from: 150 + + 
- match: { hits.total.relation: "eq" } + - match: { hits.total.value: 4 } - length: { aggregations.double_range.buckets: 3 } - - match: { aggregations.double_range.buckets.0.key: "*-50.0" } - - is_false: aggregations.double_range.buckets.0.from - - match: { aggregations.double_range.buckets.0.to: 50.0 } - - match: { aggregations.double_range.buckets.0.doc_count: 1 } - - match: { aggregations.double_range.buckets.1.key: "50.0-150.0" } - - match: { aggregations.double_range.buckets.1.from: 50.0 } - - match: { aggregations.double_range.buckets.1.to: 150.0 } - - match: { aggregations.double_range.buckets.1.doc_count: 2 } - - match: { aggregations.double_range.buckets.2.key: "150.0-*" } - - match: { aggregations.double_range.buckets.2.from: 150.0 } - - is_false: aggregations.double_range.buckets.2.to - - - match: { aggregations.double_range.buckets.2.doc_count: 0 } - ---- -"IP range": - - do: - index: - index: test - id: 1 - body: { "ip" : "::1" } - - - do: - index: - index: test - id: 2 - body: { "ip" : "192.168.0.1" } - - - do: - index: - index: test - id: 3 - body: { "ip" : "192.168.0.7" } - - - do: - indices.refresh: {} - - - do: - search: - rest_total_hits_as_int: true - body: { "size" : 0, "aggs" : { "ip_range" : { "ip_range" : { "field" : "ip", "ranges": [ { "to": "192.168.0.0" }, { "from": "192.168.0.0", "to": "192.169.0.0" }, { "from": "192.169.0.0" } ] } } } } - - - match: { hits.total: 3 } - - - length: { aggregations.ip_range.buckets: 3 } - - - is_false: aggregations.ip_range.buckets.0.from - - - match: { aggregations.ip_range.buckets.0.to: "192.168.0.0" } - - - match: { aggregations.ip_range.buckets.0.doc_count: 1 } - - - match: { aggregations.ip_range.buckets.1.from: "192.168.0.0" } - - - match: { aggregations.ip_range.buckets.1.to: "192.169.0.0" } - - - match: { aggregations.ip_range.buckets.1.doc_count: 2 } - - - match: { aggregations.ip_range.buckets.2.from: "192.169.0.0" } - - - is_false: aggregations.ip_range.buckets.2.to - - - match: { aggregations.ip_range.buckets.2.doc_count: 0 } - - - do: - search: - rest_total_hits_as_int: true - body: { "size" : 0, "aggs" : { "ip_range" : { "ip_range" : { "field" : "ip", "ranges": [ { "from": null, "to": "192.168.0.0" }, { "from": "192.168.0.0", "to": "192.169.0.0" }, { "from": "192.169.0.0", "to": null } ] } } } } - - - match: { hits.total: 3 } - - - length: { aggregations.ip_range.buckets: 3 } - - - is_false: aggregations.ip_range.buckets.0.from - - - match: { aggregations.ip_range.buckets.0.to: "192.168.0.0" } - - - match: { aggregations.ip_range.buckets.0.doc_count: 1 } - - - match: { aggregations.ip_range.buckets.1.from: "192.168.0.0" } - - - match: { aggregations.ip_range.buckets.1.to: "192.169.0.0" } - - - match: { aggregations.ip_range.buckets.1.doc_count: 2 } - - - match: { aggregations.ip_range.buckets.2.from: "192.169.0.0" } - - - is_false: aggregations.ip_range.buckets.2.to - - - match: { aggregations.ip_range.buckets.2.doc_count: 0 } - - - do: - search: - rest_total_hits_as_int: true - body: { "size" : 0, "aggs" : { "ip_range" : { "ip_range" : { "field" : "ip", "ranges": [ { "mask": "::/24" }, { "mask": "192.168.0.0/16" } ] } } } } - - - match: { hits.total: 3 } - - - length: { aggregations.ip_range.buckets: 2 } - - - match: { aggregations.ip_range.buckets.0.key: "::/24" } - - - match: { aggregations.ip_range.buckets.0.to: "0:100::" } - - - match: { aggregations.ip_range.buckets.0.doc_count: 3 } - - - match: { aggregations.ip_range.buckets.1.key: "192.168.0.0/16" } - - - match: { aggregations.ip_range.buckets.1.from: 
"192.168.0.0" } - - - match: { aggregations.ip_range.buckets.1.to: "192.169.0.0" } - - - match: { aggregations.ip_range.buckets.1.doc_count: 2 } + - match: { aggregations.double_range.buckets.2.doc_count: 1 } --- -"IP Range Key Generation": - +"Null to and from": - do: search: - rest_total_hits_as_int: true - body: { "size" : 0, "aggs" : { "ip_range" : { "ip_range" : { "field" : "ip", "ranges": [ { "to": "192.168.0.0" }, { "from": "192.168.0.0", "to": "192.169.0.0" }, { "from": "192.169.0.0" } ] } } } } - - - length: { aggregations.ip_range.buckets: 3 } - - match: { aggregations.ip_range.buckets.0.key: "*-192.168.0.0" } - - match: { aggregations.ip_range.buckets.1.key: "192.168.0.0-192.169.0.0" } - - match: { aggregations.ip_range.buckets.2.key: "192.169.0.0-*" } - ---- -"IP Range avg_bucket": - - skip: - version: " - 7.7.99" - reason: Fixed in 7.8.0 - - do: - bulk: - refresh: true - index: test - body: - - '{"index": {}}' - - '{"ip": "::1", "v": 1}' - - '{"index": {}}' - - '{"ip": "192.168.0.1", "v": 2}' - - '{"index": {}}' - - '{"ip": "192.168.0.7", "v": 3}' - - - do: - search: - index: test body: size: 0 aggs: - range: - ip_range: - field: ip + double_range: + range: + field: "double" ranges: - - to: 192.168.0.0 - - from: 192.168.0.0 - to: 192.169.0.0 - - from: 192.169.0.0 - aggs: - v: - sum: - field: v - range_avg_v: - avg_bucket: - buckets_path: range.v - - - match: { hits.total.value: 3 } - - length: { aggregations.range.buckets: 3 } - - match: { aggregations.range.buckets.0.key: "*-192.168.0.0" } - - match: { aggregations.range.buckets.0.doc_count: 1 } - - match: { aggregations.range.buckets.0.v.value: 1 } - - match: { aggregations.range.buckets.1.key: "192.168.0.0-192.169.0.0" } - - match: { aggregations.range.buckets.1.doc_count: 2 } - - match: { aggregations.range.buckets.1.v.value: 5 } - - match: { aggregations.range.buckets.2.key: "192.169.0.0-*" } - - match: { aggregations.range.buckets.2.doc_count: 0 } - - match: { aggregations.range.buckets.2.v.value: 0 } - - match: { aggregations.range_avg_v.value: 3 } - ---- -"Date range": - - do: - index: - index: test - id: 1 - body: { "date" : 1000 } - - - do: - index: - index: test - id: 2 - body: { "date" : 2000 } - - - do: - index: - index: test - id: 3 - body: { "date" : 3000 } - - - do: - indices.refresh: {} - - - do: - search: - rest_total_hits_as_int: true - body: { "size" : 0, "aggs" : { "date_range" : { "date_range" : { "field" : "date", "ranges": [ { "from" : 1000, "to": 3000 }, { "from": 3000, "to": 4000 } ] } } } } - - - match: { hits.total: 3 } - - - length: { aggregations.date_range.buckets: 2 } - - - match: { aggregations.date_range.buckets.0.doc_count: 2 } - - match: { aggregations.date_range.buckets.0.key: "1000-3000" } - - match: { aggregations.date_range.buckets.0.from: 1000000 } - - match: { aggregations.date_range.buckets.0.to: 3000000 } - - - match: { aggregations.date_range.buckets.1.doc_count: 1 } - - match: { aggregations.date_range.buckets.1.key: "3000-4000" } - - match: { aggregations.date_range.buckets.1.from: 3000000 } - - match: { aggregations.date_range.buckets.1.to: 4000000 } + - + from: null + to: 50 + - + from: 50 + to: 150 + - + from: 150 + to: null + + - match: { hits.total.relation: "eq" } + - match: { hits.total.value: 4 } + - length: { aggregations.double_range.buckets: 3 } + - match: { aggregations.double_range.buckets.0.key: "*-50.0" } + - is_false: aggregations.double_range.buckets.0.from + - match: { aggregations.double_range.buckets.0.to: 50.0 } + - match: { 
aggregations.double_range.buckets.0.doc_count: 1 } + - match: { aggregations.double_range.buckets.1.key: "50.0-150.0" } + - match: { aggregations.double_range.buckets.1.from: 50.0 } + - match: { aggregations.double_range.buckets.1.to: 150.0 } + - match: { aggregations.double_range.buckets.1.doc_count: 2 } + - match: { aggregations.double_range.buckets.2.key: "150.0-*" } + - match: { aggregations.double_range.buckets.2.from: 150.0 } + - is_false: aggregations.double_range.buckets.2.to + - match: { aggregations.double_range.buckets.2.doc_count: 0 } --- -"Date Range Missing": - - do: - index: - index: test - id: 1 - body: { "date" : "28800000000" } - - - do: - index: - index: test - id: 2 - body: { "date" : "315561600000" } - - - do: - index: - index: test - id: 3 - body: { "date" : "631180800000" } - - - do: - index: - index: test - id: 4 - body: { "date" : "10000" } - - - do: - index: - index: test - id: 5 - body: { "ip" : "192.168.0.1" } - - - do: - indices.refresh: {} - +"Range agg on long field": - do: search: - rest_total_hits_as_int: true body: + size: 0 aggs: - age_groups: - date_range: - field: date - missing: "0" + long_range: + range: + field: "long" ranges: - - key: Generation Y - from: '315561600000' - to: '946713600000' - - key: Generation X - from: "200000" - to: '315561600000' - - key: Other - to: "200000" - - - match: { hits.total: 5 } - - - length: { aggregations.age_groups.buckets: 3 } - - - match: { aggregations.age_groups.buckets.0.key: "Other" } - - - match: { aggregations.age_groups.buckets.0.doc_count: 2 } - - - match: { aggregations.age_groups.buckets.1.key: "Generation X" } - - - match: { aggregations.age_groups.buckets.1.doc_count: 1 } - - - match: { aggregations.age_groups.buckets.2.key: "Generation Y" } - - - match: { aggregations.age_groups.buckets.2.doc_count: 2 } - + - + to: 50 + - + from: 50 + to: 150 + - + from: 150 + + - match: { hits.total.relation: "eq" } + - match: { hits.total.value: 4 } + - length: { aggregations.long_range.buckets: 3 } + - match: { aggregations.long_range.buckets.0.key: "*-50.0" } + - is_false: aggregations.long_range.buckets.0.from + - match: { aggregations.long_range.buckets.0.to: 50 } + - match: { aggregations.long_range.buckets.0.doc_count: 1 } + - match: { aggregations.long_range.buckets.1.key: "50.0-150.0" } + - match: { aggregations.long_range.buckets.1.from: 50 } + - match: { aggregations.long_range.buckets.1.to: 150 } + - match: { aggregations.long_range.buckets.1.doc_count: 2 } + - match: { aggregations.long_range.buckets.2.key: "150.0-*" } + - match: { aggregations.long_range.buckets.2.from: 150 } + - is_false: aggregations.long_range.buckets.2.to + - match: { aggregations.long_range.buckets.2.doc_count: 0 } --- -"Date range unmapped with children": - - skip: - version: " - 7.9.99" - reason: Fixed in 7.10.0 - - - do: - indices.create: - index: test_a_unmapped - body: - settings: - number_of_shards: 1 - number_of_replicas: 0 +"Double range default keyed response": - do: search: - index: test_a_unmapped body: size: 0 - query: - terms: - animal: [] aggs: - date_range: - date_range: - field: date + double_range: + range: + field: "double" + keyed: true ranges: - - from: 2020-01-01T00:00:00Z - aggs: - sounds: - cardinality: - field: sound.keyword - - - match: { hits.total.value: 0 } - - length: { aggregations.date_range.buckets: 1 } - - match: { aggregations.date_range.buckets.0.doc_count: 0 } - - match: { aggregations.date_range.buckets.0.key: "2020-01-01T00:00:00.000Z-*" } - - is_false: aggregations.date_range.buckets.0.to - - 
match: { aggregations.date_range.buckets.0.sounds.value: 0 } + - + key: "first" + to: 50 + - + key: "another" + from: 50 + to: 150 + - + key: "last" + from: 150 + + - match: { hits.total.relation: "eq" } + - match: { hits.total.value: 4 } + - length: { aggregations.double_range.buckets: 3 } + - is_false: aggregations.double_range.buckets.first.from + - match: { aggregations.double_range.buckets.first.to: 50.0 } + - match: { aggregations.double_range.buckets.first.doc_count: 1 } + - match: { aggregations.double_range.buckets.another.from: 50.0 } + - match: { aggregations.double_range.buckets.another.to: 150.0 } + - match: { aggregations.double_range.buckets.another.doc_count: 2 } + - match: { aggregations.double_range.buckets.last.from: 150.0 } + - is_false: aggregations.double_range.buckets.last.to + - match: { aggregations.double_range.buckets.last.doc_count: 0 } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/41_date_range.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/41_date_range.yml new file mode 100644 index 0000000000000..e1160520ac3e7 --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/41_date_range.yml @@ -0,0 +1,169 @@ +setup: + - do: + indices.create: + index: old_test + body: + settings: + number_of_replicas: 0 + mappings: + properties: + date: + type: date + format: epoch_second + + - do: + indices.create: + index: test + body: + settings: + number_of_replicas: 0 + mappings: + properties: + date: + type: date + format: epoch_second + + - do: + cluster.health: + wait_for_status: yellow + +# March 8th 2020 was the start of daylight savings time that year + - do: + bulk: + index: test + refresh: true + body: + - {"index": {}} + - { "date" : "2020-03-08T06:15:00Z" } + - {"index": {}} + - { "date" : "2020-03-08T07:15:00Z" } + +--- +"Date range": + - do: + bulk: + index: old_test + refresh: true + body: + - {"index": {}} + - { "date" : 1000 } + - {"index": {}} + - { "date" : 2000 } + - {"index": {}} + - { "date" : 3000 } + + - do: + search: + body: + size: 0 + aggs: + date_range: + date_range: + field: "date" + ranges: + - + from: 1000 + to: 3000 + - + from: 3000 + to: 4000 + + + - match: { hits.total.value: 3 } + - match: { hits.total.relation: "eq" } + - length: { aggregations.date_range.buckets: 2 } + - match: { aggregations.date_range.buckets.0.doc_count: 2 } + - match: { aggregations.date_range.buckets.0.key: "1000-3000" } + - match: { aggregations.date_range.buckets.0.from: 1000000 } + - match: { aggregations.date_range.buckets.0.to: 3000000 } + + - match: { aggregations.date_range.buckets.1.doc_count: 1 } + - match: { aggregations.date_range.buckets.1.key: "3000-4000" } + - match: { aggregations.date_range.buckets.1.from: 3000000 } + - match: { aggregations.date_range.buckets.1.to: 4000000 } + +--- +"Date Range Missing": + - do: + bulk: + index: old_test + refresh: true + body: + - {"index": {}} + - { "date" : "28800000000" } + - {"index": {}} + - { "date" : "315561600000" } + - {"index": {}} + - { "date" : "631180800000" } + - {"index": {}} + - { "date" : "10000" } + - {"index": {}} + - { "ip" : "192.168.0.1" } + + - do: + search: + body: + aggs: + age_groups: + date_range: + field: date + missing: "0" + ranges: + - key: Generation Y + from: '315561600000' + to: '946713600000' + - key: Generation X + from: "200000" + to: '315561600000' + - key: Other + to: "200000" + + - match: { hits.total.value: 5 } + - match: { hits.total.relation: "eq" } + - 
length: { aggregations.age_groups.buckets: 3 } + - match: { aggregations.age_groups.buckets.0.key: "Other" } + - match: { aggregations.age_groups.buckets.0.doc_count: 2 } + - match: { aggregations.age_groups.buckets.1.key: "Generation X" } + - match: { aggregations.age_groups.buckets.1.doc_count: 1 } + - match: { aggregations.age_groups.buckets.2.key: "Generation Y" } + - match: { aggregations.age_groups.buckets.2.doc_count: 2 } + + +--- +"Date range unmapped with children": + - skip: + version: " - 7.9.99" + reason: Fixed in 7.10.0 + + - do: + indices.create: + index: test_a_unmapped + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + - do: + search: + index: test_a_unmapped + body: + size: 0 + query: + terms: + animal: [] + aggs: + date_range: + date_range: + field: date + ranges: + - from: 2020-01-01T00:00:00Z + aggs: + sounds: + cardinality: + field: sound.keyword + + - match: { hits.total.value: 0 } + - length: { aggregations.date_range.buckets: 1 } + - match: { aggregations.date_range.buckets.0.doc_count: 0 } + - match: { aggregations.date_range.buckets.0.key: "2020-01-01T00:00:00.000Z-*" } + - is_false: aggregations.date_range.buckets.0.to + - match: { aggregations.date_range.buckets.0.sounds.value: 0 } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/420_percentile_ranks_tdigest_metric.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/420_percentile_ranks_tdigest_metric.yml new file mode 100644 index 0000000000000..966dde25da883 --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/420_percentile_ranks_tdigest_metric.yml @@ -0,0 +1,180 @@ +setup: + - do: + indices.create: + index: test + body: + settings: + number_of_replicas: 0 + mappings: + properties: + int: + type : integer + double: + type : double + keyword: + type: keyword + + - do: + bulk: + refresh: true + index: test + body: + - '{"index": {}}' + - '{"int": 1, "double": 1.0, "keyword": "foo"}' + - '{"index": {}}' + - '{"int": 51, "double": 51.0, "keyword": "foo"}' + - '{"index": {}}' + - '{"int": 101, "double": 101.0, "keyword": "foo"}' + - '{"index": {}}' + - '{"int": 151, "double": 151.0, "keyword": "foo"}' + +--- +basic: + - skip: + features: close_to + + - do: + search: + body: + size: 0 + aggs: + percentile_ranks_int: + percentile_ranks: + field: int + values: [50, 99] + percentile_ranks_double: + percentile_ranks: + field: double + values: [50, 99] + + - match: { hits.total.value: 4 } + - close_to: { aggregations.percentile_ranks_int.values.50\\.0: { value: 37.0, error: 1} } + - close_to: { aggregations.percentile_ranks_int.values.99\\.0: { value: 61.5, error: 1} } + - close_to: { aggregations.percentile_ranks_double.values.50\\.0: { value: 37.0, error: 1} } + - close_to: { aggregations.percentile_ranks_double.values.99\\.0: { value: 61.5, error: 1} } + +--- +filtered: + - skip: + features: close_to + + - do: + search: + body: + size: 0 + query: + range: + int: + gte: 50 + aggs: + percentile_ranks_int: + percentile_ranks: + field: int + values: [50] + percentile_ranks_double: + percentile_ranks: + field: double + values: [50] + + - match: { hits.total.value: 3 } + - close_to: { aggregations.percentile_ranks_int.values.50\\.0: { value: 16.0, error: 1} } + - close_to: { aggregations.percentile_ranks_double.values.50\\.0: { value: 16.0, error: 1} } + +--- +missing field with missing param: + - skip: + features: close_to + + - do: + search: + body: + size: 0 + aggs: + 
percentile_ranks_missing: + percentile_ranks: + field: missing + missing: 1.0 + values: [50, 99] + + - match: { hits.total.value: 4 } + - close_to: { aggregations.percentile_ranks_missing.values.50\\.0: { value: 100.0, error: 1} } + - close_to: { aggregations.percentile_ranks_missing.values.99\\.0: { value: 100.0, error: 1} } + +--- +missing field without missing param: + - do: + search: + body: + size: 0 + aggs: + percentile_ranks_missing: + percentile_ranks: + field: missing + values: [50, 99] + + - match: { hits.total.value: 4 } + - is_false: aggregations.percentile_ranks_missing.value + +--- +invalid params: + - do: + catch: bad_request + search: + body: + aggs: + percentile_ranks_int: + percentile_ranks: + field: int + values: [] + + - do: + catch: bad_request + search: + body: + aggs: + percentile_ranks_int: + percentile_ranks: + field: int + values: null + + - do: + catch: bad_request + search: + body: + aggs: + percentile_ranks_int: + percentile_ranks: + field: int + values: ["foo"] + + - do: + catch: bad_request + search: + body: + aggs: + percentile_ranks_string: + percentile_ranks: + field: string + +--- +non-keyed test: + - skip: + features: close_to + + - do: + search: + body: + size: 0 + aggs: + percentile_ranks_int: + percentile_ranks: + field: int + values: [50, 99] + keyed: false + + - match: { hits.total.value: 4 } + - match: { aggregations.percentile_ranks_int.values.0.key: 50} + - close_to: { aggregations.percentile_ranks_int.values.0.value: { value: 37.0, error: 1} } + - match: { aggregations.percentile_ranks_int.values.1.key: 99} + - close_to: { aggregations.percentile_ranks_int.values.1.value: { value: 61.5, error: 1} } + diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/42_ip_range.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/42_ip_range.yml new file mode 100644 index 0000000000000..461732a0d461d --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/42_ip_range.yml @@ -0,0 +1,193 @@ +setup: + - do: + indices.create: + index: test + body: + settings: + number_of_replicas: 0 + mappings: + properties: + ip: + type: ip + + - do: + cluster.health: + wait_for_status: yellow + +--- +"IP range": + - do: + bulk: + index: test + refresh: true + body: + - {"index": {}} + - { "ip" : "::1" } + - {"index": {}} + - { "ip" : "192.168.0.1" } + - {"index": {}} + - { "ip" : "192.168.0.7" } + + - do: + search: + body: + size: 0 + aggs: + ip_range: + ip_range: + field: "ip" + ranges: + - + to: "192.168.0.0" + - + from: "192.168.0.0" + to: "192.169.0.0" + - + from: "192.169.0.0" + + - match: { hits.total.value: 3 } + - match: { hits.total.relation: "eq" } + - length: { aggregations.ip_range.buckets: 3 } + - is_false: aggregations.ip_range.buckets.0.from + - match: { aggregations.ip_range.buckets.0.to: "192.168.0.0" } + - match: { aggregations.ip_range.buckets.0.doc_count: 1 } + - match: { aggregations.ip_range.buckets.1.from: "192.168.0.0" } + - match: { aggregations.ip_range.buckets.1.to: "192.169.0.0" } + - match: { aggregations.ip_range.buckets.1.doc_count: 2 } + - match: { aggregations.ip_range.buckets.2.from: "192.169.0.0" } + - is_false: aggregations.ip_range.buckets.2.to + - match: { aggregations.ip_range.buckets.2.doc_count: 0 } + + - do: + search: + body: + size: 0 + aggs: + ip_range: + ip_range: + field: "ip" + ranges: + - + from: null + to: "192.168.0.0" + - + from: "192.168.0.0" + to: "192.169.0.0" + - + from: "192.169.0.0" + to: null + + - match: { 
hits.total.value: 3 } + - match: { hits.total.relation: "eq" } + - length: { aggregations.ip_range.buckets: 3 } + - is_false: aggregations.ip_range.buckets.0.from + - match: { aggregations.ip_range.buckets.0.to: "192.168.0.0" } + - match: { aggregations.ip_range.buckets.0.doc_count: 1 } + - match: { aggregations.ip_range.buckets.1.from: "192.168.0.0" } + - match: { aggregations.ip_range.buckets.1.to: "192.169.0.0" } + - match: { aggregations.ip_range.buckets.1.doc_count: 2 } + - match: { aggregations.ip_range.buckets.2.from: "192.169.0.0" } + - is_false: aggregations.ip_range.buckets.2.to + - match: { aggregations.ip_range.buckets.2.doc_count: 0 } + + - do: + search: + body: + size: 0 + aggs: + ip_range: + ip_range: + field: "ip" + ranges: + - + mask: "::/24" + - + mask: "192.168.0.0/16" + + - match: { hits.total.value: 3 } + - match: { hits.total.relation: "eq" } + - length: { aggregations.ip_range.buckets: 2 } + - match: { aggregations.ip_range.buckets.0.key: "::/24" } + - match: { aggregations.ip_range.buckets.0.to: "0:100::" } + - match: { aggregations.ip_range.buckets.0.doc_count: 3 } + - match: { aggregations.ip_range.buckets.1.key: "192.168.0.0/16" } + - match: { aggregations.ip_range.buckets.1.from: "192.168.0.0" } + - match: { aggregations.ip_range.buckets.1.to: "192.169.0.0" } + - match: { aggregations.ip_range.buckets.1.doc_count: 2 } + +--- +"IP Range Key Generation": + + - do: + search: + body: + size: 0 + aggs: + ip_range: + ip_range: + field: "ip" + ranges: + - + to: "192.168.0.0" + - + from: "192.168.0.0" + to: "192.169.0.0" + - + from: "192.169.0.0" + + - length: { aggregations.ip_range.buckets: 3 } + - match: { aggregations.ip_range.buckets.0.key: "*-192.168.0.0" } + - match: { aggregations.ip_range.buckets.1.key: "192.168.0.0-192.169.0.0" } + - match: { aggregations.ip_range.buckets.2.key: "192.169.0.0-*" } + +--- +"IP Range avg_bucket": + - skip: + version: " - 7.7.99" + reason: Fixed in 7.8.0 + - do: + bulk: + refresh: true + index: test + body: + - '{"index": {}}' + - '{"ip": "::1", "v": 1}' + - '{"index": {}}' + - '{"ip": "192.168.0.1", "v": 2}' + - '{"index": {}}' + - '{"ip": "192.168.0.7", "v": 3}' + + - do: + search: + index: test + body: + size: 0 + aggs: + range: + ip_range: + field: ip + ranges: + - to: 192.168.0.0 + - from: 192.168.0.0 + to: 192.169.0.0 + - from: 192.169.0.0 + aggs: + v: + sum: + field: v + range_avg_v: + avg_bucket: + buckets_path: range.v + + - match: { hits.total.value: 3 } + - length: { aggregations.range.buckets: 3 } + - match: { aggregations.range.buckets.0.key: "*-192.168.0.0" } + - match: { aggregations.range.buckets.0.doc_count: 1 } + - match: { aggregations.range.buckets.0.v.value: 1 } + - match: { aggregations.range.buckets.1.key: "192.168.0.0-192.169.0.0" } + - match: { aggregations.range.buckets.1.doc_count: 2 } + - match: { aggregations.range.buckets.1.v.value: 5 } + - match: { aggregations.range.buckets.2.key: "192.169.0.0-*" } + - match: { aggregations.range.buckets.2.doc_count: 0 } + - match: { aggregations.range.buckets.2.v.value: 0 } + - match: { aggregations.range_avg_v.value: 3 } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/430_percentile_ranks_hdr_metric.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/430_percentile_ranks_hdr_metric.yml new file mode 100644 index 0000000000000..5c00bd1cf732a --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/430_percentile_ranks_hdr_metric.yml @@ -0,0 +1,229 
@@ +setup: + - do: + indices.create: + index: test + body: + settings: + number_of_replicas: 0 + mappings: + properties: + int: + type : integer + double: + type : double + keyword: + type: keyword + + - do: + bulk: + refresh: true + index: test + body: + - '{"index": {}}' + - '{"int": 1, "double": 1.0, "keyword": "foo"}' + - '{"index": {}}' + - '{"int": 51, "double": 51.0, "keyword": "foo"}' + - '{"index": {}}' + - '{"int": 101, "double": 101.0, "keyword": "foo"}' + - '{"index": {}}' + - '{"int": 151, "double": 151.0, "keyword": "foo"}' + +--- +basic: + - skip: + features: close_to + + - do: + search: + body: + size: 0 + aggs: + percentile_ranks_int: + percentile_ranks: + field: int + values: [50, 99] + hdr: {} + percentile_ranks_double: + percentile_ranks: + field: double + values: [50, 99] + hdr: {} + + - match: { hits.total.value: 4 } + - close_to: { aggregations.percentile_ranks_int.values.50\\.0: { value: 25.0, error: 1} } + - close_to: { aggregations.percentile_ranks_int.values.99\\.0: { value: 50.0, error: 1} } + - close_to: { aggregations.percentile_ranks_double.values.50\\.0: { value: 25.0, error: 1} } + - close_to: { aggregations.percentile_ranks_double.values.99\\.0: { value: 50.0, error: 1} } + +--- +set significant digits: + - skip: + features: close_to + + - do: + search: + body: + size: 0 + aggs: + percentile_ranks_int: + percentile_ranks: + field: int + values: [50, 99] + hdr: + number_of_significant_value_digits: 3 + percentile_ranks_double: + percentile_ranks: + field: double + values: [50, 99] + hdr: + number_of_significant_value_digits: 3 + + - match: { hits.total.value: 4 } + - close_to: { aggregations.percentile_ranks_int.values.50\\.0: { value: 25.0, error: 1} } + - close_to: { aggregations.percentile_ranks_int.values.99\\.0: { value: 50.0, error: 1} } + - close_to: { aggregations.percentile_ranks_double.values.50\\.0: { value: 25.0, error: 1} } + - close_to: { aggregations.percentile_ranks_double.values.99\\.0: { value: 50.0, error: 1} } + +--- +filtered: + - skip: + features: close_to + + - do: + search: + body: + size: 0 + query: + range: + int: + gte: 50 + aggs: + percentile_ranks_int: + percentile_ranks: + field: int + values: [50] + hdr: + number_of_significant_value_digits: 3 + percentile_ranks_double: + percentile_ranks: + field: double + values: [50] + hdr: + number_of_significant_value_digits: 3 + + - match: { hits.total.value: 3 } + - close_to: { aggregations.percentile_ranks_int.values.50\\.0: { value: 0.0, error: 1} } + - close_to: { aggregations.percentile_ranks_double.values.50\\.0: { value: 0.0, error: 1} } + +--- +missing field with missing param: + - skip: + features: close_to + + - do: + search: + body: + size: 0 + aggs: + percentile_ranks_missing: + percentile_ranks: + field: missing + missing: 1.0 + values: [50, 99] + hdr: + number_of_significant_value_digits: 3 + + - match: { hits.total.value: 4 } + - close_to: { aggregations.percentile_ranks_missing.values.50\\.0: { value: 100.0, error: 1} } + - close_to: { aggregations.percentile_ranks_missing.values.99\\.0: { value: 100.0, error: 1} } + +--- +missing field without missing param: + - do: + search: + body: + size: 0 + aggs: + percentile_ranks_missing: + percentile_ranks: + field: missing + values: [50, 99] + hdr: + number_of_significant_value_digits: 3 + + - match: { hits.total.value: 4 } + - is_false: aggregations.percentile_ranks_missing.value + +--- +invalid params: + - do: + catch: bad_request + search: + body: + aggs: + percentile_ranks_int: + percentile_ranks: + field: int + values: [] + 
hdr: + number_of_significant_value_digits: 3 + + - do: + catch: bad_request + search: + body: + aggs: + percentile_ranks_int: + percentile_ranks: + field: int + values: null + hdr: + number_of_significant_value_digits: 3 + + - do: + catch: bad_request + search: + body: + aggs: + percentile_ranks_int: + percentile_ranks: + field: int + values: ["foo"] + hdr: + number_of_significant_value_digits: 3 + + - do: + catch: bad_request + search: + body: + aggs: + percentile_ranks_string: + percentile_ranks: + field: string + hdr: + number_of_significant_value_digits: 3 + + +--- +non-keyed: + - skip: + features: close_to + + - do: + search: + body: + size: 0 + aggs: + percentile_ranks_int: + percentile_ranks: + field: int + values: [50, 99] + keyed: false + hdr: + number_of_significant_value_digits: 3 + + - match: { hits.total.value: 4 } + - match: { aggregations.percentile_ranks_int.values.0.key: 50} + - close_to: { aggregations.percentile_ranks_int.values.0.value: { value: 25, error: 1} } + - match: { aggregations.percentile_ranks_int.values.1.key: 99} + - close_to: { aggregations.percentile_ranks_int.values.1.value: { value: 50, error: 1} } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/49_range_timezone_bug.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/49_range_timezone_bug.yml new file mode 100644 index 0000000000000..481c32f688be6 --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/49_range_timezone_bug.yml @@ -0,0 +1,95 @@ +setup: + - do: + indices.create: + index: test + body: + settings: + number_of_replicas: 0 + mappings: + properties: + mydate: + type: date + format: "uuuu-MM-dd'T'HH:mm:ss.SSSSSSSSSZZZZZ" + + - do: + cluster.health: + wait_for_status: green + + - do: + index: + index: test + id: 1 + body: { "mydate": "2021-08-12T01:00:00.000000000+02:00" } + + - do: + indices.refresh: {} + +--- +"respect offsets in range bounds": + - skip: + version: " - 7.15.99" + reason: "Fixed in 7.16" + - do: + search: + rest_total_hits_as_int: true + body: { + "query": { + "match_all": {} + }, + "aggregations": { + "myagg": { + "date_range": { + "field": "mydate", + "ranges": [ + { + "from": "2021-08-12T00:00:00.000000000+02:00", + "to": "2021-08-12T02:00:00.000000000+02:00" + } + ] + } + } + } + } + - match: { hits.total: 1 } + - length: { aggregations.myagg.buckets: 1 } + - match: { aggregations.myagg.buckets.0.from_as_string: "2021-08-11T22:00:00.000000000Z" } + - match: { aggregations.myagg.buckets.0.from: 1628719200000 } + - match: { aggregations.myagg.buckets.0.to_as_string: "2021-08-12T00:00:00.000000000Z" } + - match: { aggregations.myagg.buckets.0.to: 1628726400000 } + - match: { aggregations.myagg.buckets.0.doc_count: 1 } + +--- +"offsets and timezones play nicely together": + - skip: + version: " - 7.15.99" + reason: "Fixed in 7.16" + - do: + search: + rest_total_hits_as_int: true + body: { + "query": { + "match_all": {} + }, + "aggregations": { + "myagg": { + "date_range": { + "time_zone": "America/New_York", + "field": "mydate", + "ranges": [ + { + "from": "2021-08-12T00:00:00.000000000+02:00", + "to": "2021-08-12T02:00:00.000000000+02:00" + } + ] + } + } + } + } + - match: { hits.total: 1 } + - length: { aggregations.myagg.buckets: 1 } + - match: { aggregations.myagg.buckets.0.from_as_string: "2021-08-11T18:00:00.000000000-04:00" } + - match: { aggregations.myagg.buckets.0.from: 1628719200000 } + - match: { aggregations.myagg.buckets.0.to_as_string: 
"2021-08-11T20:00:00.000000000-04:00" } + - match: { aggregations.myagg.buckets.0.to: 1628726400000 } + - match: { aggregations.myagg.buckets.0.doc_count: 1 } + diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/370_profile.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/370_profile.yml new file mode 100644 index 0000000000000..0c4b1089b8122 --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/370_profile.yml @@ -0,0 +1,146 @@ +--- +setup: + - do: + indices.create: + index: test + body: + settings: + index.number_of_shards: 1 + mappings: + properties: + keyword: + type: keyword + + - do: + index: + index: test + id: 1 + refresh: true + body: + keyword: [ "a", "b" ] + +--- +fetch fields: + - skip: + version: ' - 7.15.99' + reason: fetch profiling implemented in 7.16.0 + + - do: + search: + index: test + body: + _source: false + profile: true + fields: [keyword] + + - is_true: hits.hits.0._id + - match: { hits.hits.0.fields.keyword.0: a } + - match: { hits.hits.0.fields.keyword.1: b } + - gt: { profile.shards.0.fetch.time_in_nanos: 0 } + - gt: { profile.shards.0.fetch.breakdown.next_reader_count: 0 } + - gt: { profile.shards.0.fetch.breakdown.next_reader: 0 } + - gt: { profile.shards.0.fetch.breakdown.load_stored_fields_count: 0 } + - gt: { profile.shards.0.fetch.breakdown.load_stored_fields: 0 } + - match: { profile.shards.0.fetch.debug.stored_fields: [_id, _routing, _source] } + - length: { profile.shards.0.fetch.children: 1 } + - match: { profile.shards.0.fetch.children.0.type: FetchFieldsPhase } + - gt: { profile.shards.0.fetch.children.0.breakdown.next_reader_count: 0 } + - gt: { profile.shards.0.fetch.children.0.breakdown.next_reader: 0 } + - gt: { profile.shards.0.fetch.children.0.breakdown.next_reader_count: 0 } + - gt: { profile.shards.0.fetch.children.0.breakdown.next_reader: 0 } + +--- +fetch source: + - skip: + version: ' - 7.99.99' + reason: fetch profiling implemented in 8.0.0 to be backported to 7.16.0 + + - do: + search: + index: test + body: + profile: true + + - is_true: hits.hits.0._id + - match: { hits.hits.0._source.keyword.0: a } + - match: { hits.hits.0._source.keyword.1: b } + - gt: { profile.shards.0.fetch.time_in_nanos: 0 } + - gt: { profile.shards.0.fetch.breakdown.next_reader_count: 0 } + - gt: { profile.shards.0.fetch.breakdown.next_reader: 0 } + - gt: { profile.shards.0.fetch.breakdown.load_stored_fields_count: 0 } + - gt: { profile.shards.0.fetch.breakdown.load_stored_fields: 0 } + - match: { profile.shards.0.fetch.debug.stored_fields: [_id, _routing, _source] } + - length: { profile.shards.0.fetch.children: 1 } + - match: { profile.shards.0.fetch.children.0.type: FetchSourcePhase } + - gt: { profile.shards.0.fetch.children.0.breakdown.next_reader_count: 0 } + - gt: { profile.shards.0.fetch.children.0.breakdown.next_reader: 0 } + - gt: { profile.shards.0.fetch.children.0.breakdown.next_reader_count: 0 } + - gt: { profile.shards.0.fetch.children.0.breakdown.next_reader: 0 } + - match: { profile.shards.0.fetch.children.0.debug.fast_path: 1 } + +--- +fetch nested source: + - skip: + version: ' - 7.15.99' + reason: fetch profiling implemented in 7.16.0 + + - do: + indices.create: + index: test_nested + body: + settings: + index.number_of_shards: 1 + mappings: + properties: + keyword: + type: keyword + nested: + type: nested + properties: + text: + type: text + + - do: + index: + index: test_nested + id: 1 + refresh: true + body: + keyword: [ "a", "b" ] + nested: + - text: the 
quick brown fox + - text: jumped over the + - text: lazy dog + + - do: + search: + index: test_nested + body: + profile: true + query: + nested: + path: nested + query: + match_all: {} + inner_hits: {} + + - is_true: hits.hits.0._id + - match: { hits.hits.0._source.keyword.0: a } + - match: { hits.hits.0._source.keyword.1: b } + - gt: { profile.shards.0.fetch.time_in_nanos: 0 } + - gt: { profile.shards.0.fetch.breakdown.next_reader_count: 0 } + - gt: { profile.shards.0.fetch.breakdown.next_reader: 0 } + - gt: { profile.shards.0.fetch.breakdown.load_stored_fields_count: 0 } + - gt: { profile.shards.0.fetch.breakdown.load_stored_fields: 0 } + - match: { profile.shards.0.fetch.debug.stored_fields: [_id, _routing, _source] } + - length: { profile.shards.0.fetch.children: 2 } + - match: { profile.shards.0.fetch.children.0.type: FetchSourcePhase } + - gt: { profile.shards.0.fetch.children.0.breakdown.next_reader_count: 0 } + - gt: { profile.shards.0.fetch.children.0.breakdown.next_reader: 0 } + - gt: { profile.shards.0.fetch.children.0.breakdown.next_reader_count: 0 } + - gt: { profile.shards.0.fetch.children.0.breakdown.next_reader: 0 } + - match: { profile.shards.0.fetch.children.1.type: InnerHitsPhase } + - gt: { profile.shards.0.fetch.children.1.breakdown.next_reader_count: 0 } + - gt: { profile.shards.0.fetch.children.1.breakdown.next_reader: 0 } + - gt: { profile.shards.0.fetch.children.1.breakdown.next_reader_count: 0 } + - gt: { profile.shards.0.fetch.children.1.breakdown.next_reader: 0 } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/10_settings.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/10_settings.yml new file mode 100644 index 0000000000000..a4987bf69d70b --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/10_settings.yml @@ -0,0 +1,119 @@ +enable: + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.16.0 + + - do: + indices.create: + index: test_index + body: + settings: + index: + mode: time_series + number_of_replicas: 0 + number_of_shards: 2 + mappings: + properties: + "@timestamp": + type: date + metricset: + type: keyword + dimension: true + k8s: + properties: + pod: + properties: + uid: + type: keyword + dimension: true + name: + type: keyword + ip: + type: ip + network: + properties: + tx: + type: long + rx: + type: long + +--- +no sort field: + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.16.0 + + - do: + catch: /\[index.mode=time_series\] is incompatible with \[index.sort.field\]/ + indices.create: + index: test_index + body: + settings: + index: + mode: time_series + sort.field: ['a'] + +--- +no sort order: + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.16.0 + + - do: + catch: /\[index.mode=time_series\] is incompatible with \[index.sort.order\]/ + indices.create: + index: test_index + body: + settings: + index: + mode: time_series + sort.order: ['DESC'] + +--- +no sort mode: + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.16.0 + + - do: + catch: /\[index.mode=time_series\] is incompatible with \[index.sort.mode\]/ + indices.create: + index: test_index + body: + settings: + index: + mode: time_series + sort.mode: ['MIN'] + +--- +no sort missing: + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.16.0 + + - do: + catch: /\[index.mode=time_series\] is incompatible with \[index.sort.missing\]/ + 
indices.create: + index: test_index + body: + settings: + index: + mode: time_series + sort.missing: ['_last'] + +--- +no partitioning: + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.16.0 + + - do: + catch: /\[index.mode=time_series\] is incompatible with \[index.routing_partition_size\]/ + indices.create: + index: test_index + body: + settings: + index: + mode: time_series + shards: 5 + routing_partition_size: 2 diff --git a/server/build.gradle b/server/build.gradle index c5951a0447d5a..80ef95163e7fe 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -27,6 +27,7 @@ dependencies { api project(':libs:elasticsearch-secure-sm') api project(':libs:elasticsearch-x-content') api project(":libs:elasticsearch-geo") + api project(":libs:elasticsearch-lz4") implementation project(':libs:elasticsearch-plugin-classloader') @@ -50,9 +51,6 @@ dependencies { api project(":libs:elasticsearch-cli") api 'com.carrotsearch:hppc:0.8.1' - // LZ4 - api 'org.lz4:lz4-java:1.8.0' - // time handling, remove with java 8 time api "joda-time:joda-time:${versions.joda}" @@ -249,11 +247,6 @@ tasks.named("thirdPartyAudit").configure { 'com.google.common.geometry.S2LatLng' ) ignoreMissingClasses 'javax.xml.bind.DatatypeConverter' - - ignoreViolations( - // from java-lz4 - 'net.jpountz.util.UnsafeUtils' - ) } tasks.named("dependencyLicenses").configure { diff --git a/server/licenses/lucene-analyzers-common-8.10.0-snapshot-bf2fcb53079.jar.sha1 b/server/licenses/lucene-analyzers-common-8.10.0-snapshot-bf2fcb53079.jar.sha1 new file mode 100644 index 0000000000000..fdede7b9c4cdc --- /dev/null +++ b/server/licenses/lucene-analyzers-common-8.10.0-snapshot-bf2fcb53079.jar.sha1 @@ -0,0 +1 @@ +eb63f6ecd58a7e27a02b533b9c1e6cdb68f506fc \ No newline at end of file diff --git a/server/licenses/lucene-analyzers-common-8.9.0.jar.sha1 b/server/licenses/lucene-analyzers-common-8.9.0.jar.sha1 deleted file mode 100644 index 8cd5ba872a31d..0000000000000 --- a/server/licenses/lucene-analyzers-common-8.9.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -18f3bbff2b7672ea0b9cc18c8110ef69c763ae6b \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-8.10.0-snapshot-bf2fcb53079.jar.sha1 b/server/licenses/lucene-backward-codecs-8.10.0-snapshot-bf2fcb53079.jar.sha1 new file mode 100644 index 0000000000000..d7b5d232b1d5b --- /dev/null +++ b/server/licenses/lucene-backward-codecs-8.10.0-snapshot-bf2fcb53079.jar.sha1 @@ -0,0 +1 @@ +2482a84e5e26a3eaf0bd7c5a77efc60435c7f688 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-8.9.0.jar.sha1 b/server/licenses/lucene-backward-codecs-8.9.0.jar.sha1 deleted file mode 100644 index 932c95dc8cfcb..0000000000000 --- a/server/licenses/lucene-backward-codecs-8.9.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fec88b5e71c699ceddc3ae0369481697ac9a5c96 \ No newline at end of file diff --git a/server/licenses/lucene-core-8.10.0-snapshot-bf2fcb53079.jar.sha1 b/server/licenses/lucene-core-8.10.0-snapshot-bf2fcb53079.jar.sha1 new file mode 100644 index 0000000000000..2a546f6674e06 --- /dev/null +++ b/server/licenses/lucene-core-8.10.0-snapshot-bf2fcb53079.jar.sha1 @@ -0,0 +1 @@ +f33b45dbbce59e727e5a82dddab48c8c7681e25b \ No newline at end of file diff --git a/server/licenses/lucene-core-8.9.0.jar.sha1 b/server/licenses/lucene-core-8.9.0.jar.sha1 deleted file mode 100644 index 859cdb2013408..0000000000000 --- a/server/licenses/lucene-core-8.9.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5c3f72357089f7f0c1ef44bbe7b4c67b6149a5af \ No newline at 
end of file diff --git a/server/licenses/lucene-grouping-8.10.0-snapshot-bf2fcb53079.jar.sha1 b/server/licenses/lucene-grouping-8.10.0-snapshot-bf2fcb53079.jar.sha1 new file mode 100644 index 0000000000000..c39c757ef5945 --- /dev/null +++ b/server/licenses/lucene-grouping-8.10.0-snapshot-bf2fcb53079.jar.sha1 @@ -0,0 +1 @@ +4020eb4e53d759fa11819b8d6b6a49422f51abe8 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-8.9.0.jar.sha1 b/server/licenses/lucene-grouping-8.9.0.jar.sha1 deleted file mode 100644 index e28e73fb699bf..0000000000000 --- a/server/licenses/lucene-grouping-8.9.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9440fdd430b1c2dadbf3bc72656848d61e6f747f \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-8.10.0-snapshot-bf2fcb53079.jar.sha1 b/server/licenses/lucene-highlighter-8.10.0-snapshot-bf2fcb53079.jar.sha1 new file mode 100644 index 0000000000000..eda61949a36fd --- /dev/null +++ b/server/licenses/lucene-highlighter-8.10.0-snapshot-bf2fcb53079.jar.sha1 @@ -0,0 +1 @@ +b2c9682be68699860aeeb9b048665ab6cf259c7c \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-8.9.0.jar.sha1 b/server/licenses/lucene-highlighter-8.9.0.jar.sha1 deleted file mode 100644 index 111b023f9502c..0000000000000 --- a/server/licenses/lucene-highlighter-8.9.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -94e80bdeab170b0ce1b36a32b6a790d23d7f6d7b \ No newline at end of file diff --git a/server/licenses/lucene-join-8.10.0-snapshot-bf2fcb53079.jar.sha1 b/server/licenses/lucene-join-8.10.0-snapshot-bf2fcb53079.jar.sha1 new file mode 100644 index 0000000000000..60e4cb4bed007 --- /dev/null +++ b/server/licenses/lucene-join-8.10.0-snapshot-bf2fcb53079.jar.sha1 @@ -0,0 +1 @@ +be4f561159763de6a28d47b463438331a96c31c9 \ No newline at end of file diff --git a/server/licenses/lucene-join-8.9.0.jar.sha1 b/server/licenses/lucene-join-8.9.0.jar.sha1 deleted file mode 100644 index 9b8322851dfec..0000000000000 --- a/server/licenses/lucene-join-8.9.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5ae97803efd3344597f6b6bdf823b18d130e8851 \ No newline at end of file diff --git a/server/licenses/lucene-memory-8.10.0-snapshot-bf2fcb53079.jar.sha1 b/server/licenses/lucene-memory-8.10.0-snapshot-bf2fcb53079.jar.sha1 new file mode 100644 index 0000000000000..0cb9423697434 --- /dev/null +++ b/server/licenses/lucene-memory-8.10.0-snapshot-bf2fcb53079.jar.sha1 @@ -0,0 +1 @@ +8fc0745c4a589cdfbd56d156236fd91dbab0dacb \ No newline at end of file diff --git a/server/licenses/lucene-memory-8.9.0.jar.sha1 b/server/licenses/lucene-memory-8.9.0.jar.sha1 deleted file mode 100644 index bd7fbba668fc4..0000000000000 --- a/server/licenses/lucene-memory-8.9.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -09423a6dca2a9ba665e644d86a713d9a6b2b0d3f \ No newline at end of file diff --git a/server/licenses/lucene-misc-8.10.0-snapshot-bf2fcb53079.jar.sha1 b/server/licenses/lucene-misc-8.10.0-snapshot-bf2fcb53079.jar.sha1 new file mode 100644 index 0000000000000..4860f9a1e54ea --- /dev/null +++ b/server/licenses/lucene-misc-8.10.0-snapshot-bf2fcb53079.jar.sha1 @@ -0,0 +1 @@ +7089f903268271acf6eb668918ac51f0cba15213 \ No newline at end of file diff --git a/server/licenses/lucene-misc-8.9.0.jar.sha1 b/server/licenses/lucene-misc-8.9.0.jar.sha1 deleted file mode 100644 index dd4e7baeb3c53..0000000000000 --- a/server/licenses/lucene-misc-8.9.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -067494d621ba2ef1f2e4da3ef167106f00b52051 \ No newline at end of file diff --git 
a/server/licenses/lucene-queries-8.10.0-snapshot-bf2fcb53079.jar.sha1 b/server/licenses/lucene-queries-8.10.0-snapshot-bf2fcb53079.jar.sha1 new file mode 100644 index 0000000000000..0f6f657b0747b --- /dev/null +++ b/server/licenses/lucene-queries-8.10.0-snapshot-bf2fcb53079.jar.sha1 @@ -0,0 +1 @@ +35a4945ac05c2aeb0c9e244098827fd7aeea1858 \ No newline at end of file diff --git a/server/licenses/lucene-queries-8.9.0.jar.sha1 b/server/licenses/lucene-queries-8.9.0.jar.sha1 deleted file mode 100644 index 682553f877600..0000000000000 --- a/server/licenses/lucene-queries-8.9.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c6bda4622abf240da6567a128242f46708fa6c00 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-8.10.0-snapshot-bf2fcb53079.jar.sha1 b/server/licenses/lucene-queryparser-8.10.0-snapshot-bf2fcb53079.jar.sha1 new file mode 100644 index 0000000000000..48ccf9f681ce5 --- /dev/null +++ b/server/licenses/lucene-queryparser-8.10.0-snapshot-bf2fcb53079.jar.sha1 @@ -0,0 +1 @@ +3d0929b7a5a2ba7f83d0357553f240f6d8362446 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-8.9.0.jar.sha1 b/server/licenses/lucene-queryparser-8.9.0.jar.sha1 deleted file mode 100644 index d1978b318fd67..0000000000000 --- a/server/licenses/lucene-queryparser-8.9.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -95a9d8cf8ca8eaf9f241fd323697d26d211721b2 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-8.10.0-snapshot-bf2fcb53079.jar.sha1 b/server/licenses/lucene-sandbox-8.10.0-snapshot-bf2fcb53079.jar.sha1 new file mode 100644 index 0000000000000..062d28281a90a --- /dev/null +++ b/server/licenses/lucene-sandbox-8.10.0-snapshot-bf2fcb53079.jar.sha1 @@ -0,0 +1 @@ +d1d6696a4857bb580f6fc4a93bd3307effddd736 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-8.9.0.jar.sha1 b/server/licenses/lucene-sandbox-8.9.0.jar.sha1 deleted file mode 100644 index 9e0539ea7ae5d..0000000000000 --- a/server/licenses/lucene-sandbox-8.9.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -af9f6c0287465e17a520b93b684474712433b293 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-8.10.0-snapshot-bf2fcb53079.jar.sha1 b/server/licenses/lucene-spatial-extras-8.10.0-snapshot-bf2fcb53079.jar.sha1 new file mode 100644 index 0000000000000..f7c69cd9622e3 --- /dev/null +++ b/server/licenses/lucene-spatial-extras-8.10.0-snapshot-bf2fcb53079.jar.sha1 @@ -0,0 +1 @@ +bc52ac3d5fed41fde8b7ad95c7d5ce703b90377f \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-8.9.0.jar.sha1 b/server/licenses/lucene-spatial-extras-8.9.0.jar.sha1 deleted file mode 100644 index cedc910adb51b..0000000000000 --- a/server/licenses/lucene-spatial-extras-8.9.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c89f4e78712806e8d5bb4adfb21cf0722ad3f175 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-8.10.0-snapshot-bf2fcb53079.jar.sha1 b/server/licenses/lucene-spatial3d-8.10.0-snapshot-bf2fcb53079.jar.sha1 new file mode 100644 index 0000000000000..bacaf78b1aec2 --- /dev/null +++ b/server/licenses/lucene-spatial3d-8.10.0-snapshot-bf2fcb53079.jar.sha1 @@ -0,0 +1 @@ +57d8cc36815cf45eb16d43648c8d2a5b251b4e62 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-8.9.0.jar.sha1 b/server/licenses/lucene-spatial3d-8.9.0.jar.sha1 deleted file mode 100644 index f0c08e6e6bd29..0000000000000 --- a/server/licenses/lucene-spatial3d-8.9.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0d1238c4e8bf4409b3bb3fbddf2e977b0f19b24b \ No newline at end of file diff 
--git a/server/licenses/lucene-suggest-8.10.0-snapshot-bf2fcb53079.jar.sha1 b/server/licenses/lucene-suggest-8.10.0-snapshot-bf2fcb53079.jar.sha1 new file mode 100644 index 0000000000000..90ade912a8369 --- /dev/null +++ b/server/licenses/lucene-suggest-8.10.0-snapshot-bf2fcb53079.jar.sha1 @@ -0,0 +1 @@ +17e5a74d57ecb861e93c3cfbf4feb356a0571bbf \ No newline at end of file diff --git a/server/licenses/lucene-suggest-8.9.0.jar.sha1 b/server/licenses/lucene-suggest-8.9.0.jar.sha1 deleted file mode 100644 index f6f8bb83d66ae..0000000000000 --- a/server/licenses/lucene-suggest-8.9.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -98cb69950b48b829b6605a003c99aa7eb86fa9eb \ No newline at end of file diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/HotThreadsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/HotThreadsIT.java index 891ca7061e285..f7a00f85c6e78 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/HotThreadsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/HotThreadsIT.java @@ -13,6 +13,7 @@ import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRequestBuilder; import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsResponse; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.monitor.jvm.HotThreads; import org.elasticsearch.test.ESIntegTestCase; import org.hamcrest.Matcher; @@ -66,7 +67,7 @@ public void testHotThreadsDontFail() throws ExecutionException, InterruptedExcep break; } assertThat(type, notNullValue()); - nodesHotThreadsRequestBuilder.setType(type); + nodesHotThreadsRequestBuilder.setType(HotThreads.ReportType.of(type)); } else { type = null; } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java index 4713bc9a5fb3a..39c7f1535a97b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java @@ -331,7 +331,9 @@ public void testRemoveBanParentsOnDisconnect() throws Exception { if (bannedParent.getNodeId().equals(node.getId()) && randomBoolean()) { Collection childConns = taskManager.startBanOnChildTasks(bannedParent.getId(), "", () -> {}); for (Transport.Connection connection : randomSubsetOf(childConns)) { - connection.close(); + if (connection.getNode().equals(node) == false) { + connection.close(); + } } } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/search/PointInTimeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/search/PointInTimeIT.java index 0490fe44532fa..64817d7bd3aaa 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/search/PointInTimeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/search/PointInTimeIT.java @@ -35,7 +35,6 @@ import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; -import java.util.stream.StreamSupport; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFailures; @@ -159,8 +158,8 @@ public void testRelocation() throws Exception { assertNoFailures(resp); assertHitCount(resp, numDocs); 
assertThat(resp.pointInTimeId(), equalTo(pitId)); - final Set dataNodes = StreamSupport.stream(clusterService().state().nodes().getDataNodes().spliterator(), false) - .map(e -> e.value.getId()).collect(Collectors.toSet()); + final Set dataNodes = clusterService().state().nodes().getDataNodes().stream() + .map(e -> e.getValue().getId()).collect(Collectors.toSet()); final List excludedNodes = randomSubsetOf(2, dataNodes); assertAcked(client().admin().indices().prepareUpdateSettings("test") .setSettings(Settings.builder().put("index.routing.allocation.exclude._id", String.join(",", excludedNodes)).build())); @@ -313,9 +312,8 @@ public void testCanMatch() throws Exception { public void testPartialResults() throws Exception { internalCluster().ensureAtLeastNumDataNodes(2); - final List dataNodes = - StreamSupport.stream(internalCluster().clusterService().state().nodes().getDataNodes().spliterator(), false) - .map(e -> e.value.getName()) + final List dataNodes = internalCluster().clusterService().state().nodes().getDataNodes().stream() + .map(e -> e.getValue().getName()) .collect(Collectors.toList()); final String assignedNodeForIndex1 = randomFrom(dataNodes); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java index 5a923f4f44d0a..0debeb18a900f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java @@ -97,7 +97,7 @@ private void rerouteWithCommands(Settings commonSettings) throws Exception { .execute().actionGet(); if (randomBoolean()) { - client().admin().indices().prepareClose("test").get(); + client().admin().indices().prepareClose("test").setWaitForActiveShards(ActiveShardCount.NONE).get(); } ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState(); @@ -232,7 +232,7 @@ private void rerouteWithAllocateLocalGateway(Settings commonSettings) throws Exc final boolean closed = randomBoolean(); if (closed) { - client().admin().indices().prepareClose("test").get(); + client().admin().indices().prepareClose("test").setWaitForActiveShards(ActiveShardCount.NONE).get(); } ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/ZenDiscoveryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/ZenDiscoveryIT.java index e80490f5ed565..8a9b699696bfb 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/ZenDiscoveryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/ZenDiscoveryIT.java @@ -9,9 +9,11 @@ package org.elasticsearch.cluster.coordination; import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse; +import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -29,16 +31,14 @@ import java.util.EnumSet; import java.util.Optional; -import 
java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; import static org.elasticsearch.test.NodeRoles.dataNode; import static org.elasticsearch.test.NodeRoles.masterOnlyNode; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; @@ -77,8 +77,7 @@ public void testNoShardRelocationsOccurWhenElectedMasterNodeFails() throws Excep assertThat(numRecoveriesAfterNewMaster, equalTo(numRecoveriesBeforeNewMaster)); } - public void testHandleNodeJoin_incompatibleClusterState() - throws InterruptedException, ExecutionException, TimeoutException { + public void testHandleNodeJoin_incompatibleClusterState() { String masterNode = internalCluster().startMasterOnlyNode(); String node1 = internalCluster().startNode(); ClusterService clusterService = internalCluster().getInstance(ClusterService.class, node1); @@ -88,28 +87,29 @@ public void testHandleNodeJoin_incompatibleClusterState() mdBuilder.putCustom(CustomMetadata.TYPE, new CustomMetadata("data")); ClusterState stateWithCustomMetadata = ClusterState.builder(state).metadata(mdBuilder).build(); - final CompletableFuture future = new CompletableFuture<>(); - DiscoveryNode node = state.nodes().getLocalNode(); - - coordinator.sendValidateJoinRequest(stateWithCustomMetadata, new JoinRequest(node, 0L, Optional.empty()), - new JoinHelper.JoinCallback() { - @Override - public void onSuccess() { - future.completeExceptionally(new AssertionError("onSuccess should not be called")); - } - - @Override - public void onFailure(Exception e) { - future.complete(e); - } - }); - - Throwable t = future.get(10, TimeUnit.SECONDS); - - assertTrue(t instanceof IllegalStateException); - assertTrue(t.getCause() instanceof RemoteTransportException); - assertTrue(t.getCause().getCause() instanceof IllegalArgumentException); - assertThat(t.getCause().getCause().getMessage(), containsString("Unknown NamedWriteable")); + final PlainActionFuture future = new PlainActionFuture<>(); + final DiscoveryNode node = state.nodes().getLocalNode(); + + coordinator.sendValidateJoinRequest( + stateWithCustomMetadata, + new JoinRequest(node, 0L, Optional.empty()), + new ActionListener<>() { + @Override + public void onResponse(Void unused) { + fail("onResponse should not be called"); + } + + @Override + public void onFailure(Exception t) { + assertThat(t, instanceOf(IllegalStateException.class)); + assertThat(t.getCause(), instanceOf(RemoteTransportException.class)); + assertThat(t.getCause().getCause(), instanceOf(IllegalArgumentException.class)); + assertThat(t.getCause().getCause().getMessage(), containsString("Unknown NamedWriteable")); + future.onResponse(null); + } + }); + + future.actionGet(10, TimeUnit.SECONDS); } public static class CustomMetadata extends TestCustomMetadata { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/discovery/ClusterDisruptionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/discovery/ClusterDisruptionIT.java index 042d2e9108911..5b664be2ec3b1 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/discovery/ClusterDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/discovery/ClusterDisruptionIT.java @@ 
-16,19 +16,22 @@ import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.coordination.ClusterBootstrapService; +import org.elasticsearch.cluster.coordination.FollowersChecker; import org.elasticsearch.cluster.coordination.LagDetector; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.routing.Murmur3HashFunction; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.core.TimeValue; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardTestCase; @@ -40,6 +43,8 @@ import org.elasticsearch.test.disruption.NetworkDisruption.TwoPartitions; import org.elasticsearch.test.disruption.ServiceDisruptionScheme; import org.elasticsearch.test.junit.annotations.TestIssueLogging; +import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.transport.TransportService; import java.util.ArrayList; import java.util.Collections; @@ -59,6 +64,11 @@ import static org.elasticsearch.action.DocWriteResponse.Result.CREATED; import static org.elasticsearch.action.DocWriteResponse.Result.UPDATED; +import static org.elasticsearch.cluster.coordination.FollowersChecker.FOLLOWER_CHECK_INTERVAL_SETTING; +import static org.elasticsearch.cluster.coordination.FollowersChecker.FOLLOWER_CHECK_RETRY_COUNT_SETTING; +import static org.elasticsearch.cluster.coordination.LeaderChecker.LEADER_CHECK_INTERVAL_SETTING; +import static org.elasticsearch.cluster.coordination.LeaderChecker.LEADER_CHECK_RETRY_COUNT_SETTING; +import static org.elasticsearch.discovery.PeerFinder.DISCOVERY_FIND_PEERS_INTERVAL_SETTING; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.everyItem; @@ -494,4 +504,63 @@ public void testRestartNodeWhileIndexing() throws Exception { } } + public void testRejoinWhileBeingRemoved() { + final String masterNode = internalCluster().startMasterOnlyNode(Settings.builder() + .put(FOLLOWER_CHECK_INTERVAL_SETTING.getKey(), "100ms") + .put(FOLLOWER_CHECK_RETRY_COUNT_SETTING.getKey(), "1") + .build()); + final String dataNode = internalCluster().startDataOnlyNode(Settings.builder() + .put(DISCOVERY_FIND_PEERS_INTERVAL_SETTING.getKey(), "100ms") + .put(LEADER_CHECK_INTERVAL_SETTING.getKey(), "100ms") + .put(LEADER_CHECK_RETRY_COUNT_SETTING.getKey(), "1") + .build()); + + final ClusterService masterClusterService = internalCluster().getInstance(ClusterService.class, masterNode); + final PlainActionFuture removedNode = new PlainActionFuture<>(); + masterClusterService.addListener(clusterChangedEvent -> { + if (removedNode.isDone() == false && clusterChangedEvent.state().nodes().getDataNodes().isEmpty()) { + removedNode.onResponse(null); + } + }); + + final ClusterService 
dataClusterService = internalCluster().getInstance(ClusterService.class, dataNode); + final PlainActionFuture failedLeader = new PlainActionFuture<>() { + @Override + protected boolean blockingAllowed() { + // we're deliberately blocking the cluster applier on the master until the data node starts to rejoin + return true; + } + }; + final AtomicBoolean dataNodeHasMaster = new AtomicBoolean(true); + dataClusterService.addListener(clusterChangedEvent -> { + dataNodeHasMaster.set(clusterChangedEvent.state().nodes().getMasterNode() != null); + if (failedLeader.isDone() == false && dataNodeHasMaster.get() == false) { + failedLeader.onResponse(null); + } + }); + + masterClusterService.addHighPriorityApplier(event -> { + failedLeader.actionGet(); + if (dataNodeHasMaster.get() == false) { + try { + Thread.sleep(100); + } catch (InterruptedException e) { + throw new AssertionError("unexpected", e); + } + } + }); + + final MockTransportService dataTransportService + = (MockTransportService) internalCluster().getInstance(TransportService.class, dataNode); + dataTransportService.addRequestHandlingBehavior(FollowersChecker.FOLLOWER_CHECK_ACTION_NAME, (handler, request, channel, task) -> { + if (removedNode.isDone() == false) { + channel.sendResponse(new ElasticsearchException("simulated check failure")); + } else { + handler.messageReceived(request, channel, task); + } + }); + + removedNode.actionGet(10, TimeUnit.SECONDS); + ensureStableCluster(2); + } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java index e62a9a9ea0183..a77412461db6c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java @@ -154,6 +154,7 @@ public void testElectMasterWithLatestVersion() throws Exception { * sure that the node is removed form the cluster, that the node start pinging and that * the cluster reforms when healed. 
*/ + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/77751") public void testNodeNotReachableFromMaster() throws Exception { startCluster(3); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java index 2f90c28d509b7..bf4e095c5d436 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/shard/IndexShardIT.java @@ -66,6 +66,7 @@ import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.IndexSettingsModule; import org.elasticsearch.test.InternalSettingsPlugin; +import org.elasticsearch.test.VersionUtils; import org.junit.Assert; import java.io.IOException; @@ -116,6 +117,11 @@ protected Collection> getPlugins() { return pluginList(InternalSettingsPlugin.class); } + @Override + protected boolean forbidPrivateIndexSettings() { + return false; + } + public void testLockTryingToDelete() throws Exception { createIndex("test"); ensureGreen(); @@ -214,8 +220,11 @@ public void testIndexDirIsDeletedWhenShardRemoved() throws Exception { Environment env = getInstanceFromNode(Environment.class); Path idxPath = env.sharedDataFile().resolve(randomAlphaOfLength(10)); logger.info("--> idxPath: [{}]", idxPath); + Version createdVersion = + VersionUtils.randomVersionBetween(random(), VersionUtils.getFirstVersion(), VersionUtils.getPreviousVersion(Version.V_8_0_0)); Settings idxSettings = Settings.builder() .put(IndexMetadata.SETTING_DATA_PATH, idxPath) + .put(IndexMetadata.SETTING_VERSION_CREATED, createdVersion) .build(); createIndex("test", idxSettings); ensureGreen("test"); @@ -253,8 +262,14 @@ public void testIndexCanChangeCustomDataPath() throws Exception { final Path sharedDataPath = getInstanceFromNode(Environment.class).sharedDataFile().resolve(randomAsciiLettersOfLength(10)); final Path indexDataPath = sharedDataPath.resolve("start-" + randomAsciiLettersOfLength(10)); - logger.info("--> creating index [{}] with data_path [{}]", index, indexDataPath); - createIndex(index, Settings.builder().put(IndexMetadata.SETTING_DATA_PATH, indexDataPath.toAbsolutePath().toString()).build()); + logger.info("--> creating legacy index [{}] with data_path [{}]", index, indexDataPath); + Version createdVersion = + VersionUtils.randomVersionBetween(random(), VersionUtils.getFirstVersion(), VersionUtils.getPreviousVersion(Version.V_8_0_0)); + Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_DATA_PATH, indexDataPath.toAbsolutePath().toString()) + .put(IndexMetadata.SETTING_VERSION_CREATED, createdVersion) + .build(); + createIndex(index, settings); client().prepareIndex(index).setId("1").setSource("foo", "bar").setRefreshPolicy(IMMEDIATE).get(); ensureGreen(index); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index 0cb870a101fa9..8011f48ef80bd 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -54,9 +54,9 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.core.TimeValue; import 
org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.gateway.ReplicaShardAllocatorIT; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; @@ -124,7 +124,6 @@ import java.util.function.Consumer; import java.util.stream.Collectors; import java.util.stream.IntStream; -import java.util.stream.StreamSupport; import static java.util.Collections.singletonMap; import static java.util.stream.Collectors.toList; @@ -1283,8 +1282,8 @@ public void testOngoingRecoveryAndMasterFailOver() throws Exception { public void testRecoverLocallyUpToGlobalCheckpoint() throws Exception { internalCluster().ensureAtLeastNumDataNodes(2); - List nodes = randomSubsetOf(2, StreamSupport.stream(clusterService().state().nodes().getDataNodes().spliterator(), false) - .map(node -> node.value.getName()).collect(Collectors.toSet())); + List nodes = randomSubsetOf(2, clusterService().state().nodes().getDataNodes().stream() + .map(node -> node.getValue().getName()).collect(Collectors.toSet())); String indexName = "test-index"; createIndex(indexName, Settings.builder() .put("index.number_of_shards", 1) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java index dc2b7ab0303b7..1fe526e4b8cab 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java @@ -9,6 +9,7 @@ package org.elasticsearch.indices.store; import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.cluster.ClusterState; @@ -23,7 +24,6 @@ import org.elasticsearch.cluster.routing.TestShardRouting; import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; -import org.elasticsearch.cluster.service.ClusterApplier.ClusterApplyListener; import org.elasticsearch.cluster.service.ClusterApplierService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; @@ -430,9 +430,9 @@ public void testShardActiveElseWhere() throws Exception { .routingTable(RoutingTable.builder().add(indexRoutingTableBuilder).build()) .build(); CountDownLatch latch = new CountDownLatch(1); - clusterApplierService.onNewClusterState("test", () -> newState, new ClusterApplyListener() { + clusterApplierService.onNewClusterState("test", () -> newState, new ActionListener<>() { @Override - public void onSuccess() { + public void onResponse(Void ignored) { latch.countDown(); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java index a936e04f2d7bb..825fc0573b5e7 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java @@ -124,6 +124,8 @@ public void 
testGetShardSnapshotReturnsTheLatestSuccessfulSnapshot() throws Exce List indices = List.of(indexName, indexName2); createIndex(indexName, indexName2); SnapshotInfo lastSnapshot = null; + String expectedIndexMetadataId = null; + int numSnapshots = randomIntBetween(5, 25); for (int i = 0; i < numSnapshots; i++) { if (randomBoolean()) { @@ -134,6 +136,9 @@ public void testGetShardSnapshotReturnsTheLatestSuccessfulSnapshot() throws Exce final SnapshotInfo snapshotInfo = createSnapshot(repoName, String.format(Locale.ROOT, "snap-%03d", i), snapshotIndices); if (snapshotInfo.indices().contains(indexName)) { lastSnapshot = snapshotInfo; + ClusterStateResponse clusterStateResponse = admin().cluster().prepareState().execute().actionGet(); + IndexMetadata indexMetadata = clusterStateResponse.getState().metadata().index(indexName); + expectedIndexMetadataId = IndexMetaDataGenerations.buildUniqueIdentifier(indexMetadata); } } @@ -151,10 +156,7 @@ public void testGetShardSnapshotReturnsTheLatestSuccessfulSnapshot() throws Exce final ShardSnapshotInfo shardSnapshotInfo = indexShardSnapshotInfoOpt.get(); - final ClusterStateResponse clusterStateResponse = admin().cluster().prepareState().execute().actionGet(); - final IndexMetadata indexMetadata = clusterStateResponse.getState().metadata().index(indexName); - final String indexMetadataId = IndexMetaDataGenerations.buildUniqueIdentifier(indexMetadata); - assertThat(shardSnapshotInfo.getIndexMetadataIdentifier(), equalTo(indexMetadataId)); + assertThat(shardSnapshotInfo.getIndexMetadataIdentifier(), equalTo(expectedIndexMetadataId)); final Snapshot snapshot = shardSnapshotInfo.getSnapshot(); assertThat(snapshot, equalTo(lastSnapshot.snapshot())); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketIT.java index d7a1d63311c1c..a9e3c00e57e4d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketIT.java @@ -8,393 +8,43 @@ package org.elasticsearch.search.aggregations.pipeline; -import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.search.aggregations.BucketOrder; -import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; -import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; -import org.elasticsearch.search.aggregations.bucket.terms.IncludeExclude; -import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.aggregations.metrics.Sum; -import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import org.elasticsearch.test.ESIntegTestCase; +import java.util.function.Function; +import java.util.function.IntToDoubleFunction; -import java.util.ArrayList; -import java.util.List; - -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; -import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; -import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.elasticsearch.search.aggregations.PipelineAggregatorBuilders.avgBucket; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import 
static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.core.IsNull.notNullValue; - -@ESIntegTestCase.SuiteScopeTestCase -public class AvgBucketIT extends ESIntegTestCase { - - private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; - static int numDocs; - static int interval; - static int minRandomValue; - static int maxRandomValue; - static int numValueBuckets; - static long[] valueCounts; +public class AvgBucketIT extends BucketMetricsPipeLineAggregationTestCase { @Override - public void setupSuiteScopeCluster() throws Exception { - assertAcked(client().admin().indices().prepareCreate("idx").setMapping("tag", "type=keyword").get()); - createIndex("idx_unmapped"); - - numDocs = randomIntBetween(6, 20); - interval = randomIntBetween(2, 5); - - minRandomValue = 0; - maxRandomValue = 20; - - numValueBuckets = ((maxRandomValue - minRandomValue) / interval) + 1; - valueCounts = new long[numValueBuckets]; - - List builders = new ArrayList<>(); - - for (int i = 0; i < numDocs; i++) { - int fieldValue = randomIntBetween(minRandomValue, maxRandomValue); - builders.add( - client().prepareIndex("idx") - .setSource( - jsonBuilder().startObject() - .field(SINGLE_VALUED_FIELD_NAME, fieldValue) - .field("tag", "tag" + (i % interval)) - .endObject() - ) - ); - final int bucket = (fieldValue / interval); // + (fieldValue < 0 ? -1 : 0) - (minRandomValue / interval - 1); - valueCounts[bucket]++; - } - - assertAcked(prepareCreate("empty_bucket_idx").setMapping(SINGLE_VALUED_FIELD_NAME, "type=integer")); - for (int i = 0; i < 2; i++) { - builders.add( - client().prepareIndex("empty_bucket_idx") - .setId("" + i) - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, i * 2).endObject()) - ); - } - indexRandom(true, builders); - ensureSearchable(); + protected AvgBucketPipelineAggregationBuilder BucketMetricsPipelineAgg(String name, String bucketsPath) { + return avgBucket(name, bucketsPath); } - public void testDocCountTopLevel() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).extendedBounds(minRandomValue, maxRandomValue) - ) - .addAggregation(avgBucket("avg_bucket", "histo>_count")) - .get(); - - assertSearchResponse(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(numValueBuckets)); - + @Override + protected void assertResult( + IntToDoubleFunction bucketValues, + Function bucketKeys, + int numBuckets, + InternalSimpleValue pipelineBucket + ) { double sum = 0; int count = 0; - for (int i = 0; i < numValueBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); - assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); + for (int i = 0; i < numBuckets; ++i) { count++; - sum += bucket.getDocCount(); + sum += bucketValues.applyAsDouble(i); } - double avgValue = count == 0 ? 
Double.NaN : (sum / count); - InternalSimpleValue avgBucketValue = response.getAggregations().get("avg_bucket"); - assertThat(avgBucketValue, notNullValue()); - assertThat(avgBucketValue.getName(), equalTo("avg_bucket")); - assertThat(avgBucketValue.value(), equalTo(avgValue)); - } - - public void testDocCountAsSubAgg() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation( - terms("terms").field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).extendedBounds(minRandomValue, maxRandomValue) - ) - .subAggregation(avgBucket("avg_bucket", "histo>_count")) - ) - .get(); - - assertSearchResponse(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - List termsBuckets = terms.getBuckets(); - assertThat(termsBuckets.size(), equalTo(interval)); - - for (int i = 0; i < interval; ++i) { - Terms.Bucket termsBucket = termsBuckets.get(i); - assertThat(termsBucket, notNullValue()); - assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); - - Histogram histo = termsBucket.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - - double sum = 0; - int count = 0; - for (int j = 0; j < numValueBuckets; ++j) { - Histogram.Bucket bucket = buckets.get(j); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval)); - count++; - sum += bucket.getDocCount(); - } - - double avgValue = count == 0 ? Double.NaN : (sum / count); - InternalSimpleValue avgBucketValue = termsBucket.getAggregations().get("avg_bucket"); - assertThat(avgBucketValue, notNullValue()); - assertThat(avgBucketValue.getName(), equalTo("avg_bucket")); - assertThat(avgBucketValue.value(), equalTo(avgValue)); - } - } - - public void testMetricTopLevel() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation(terms("terms").field("tag").subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) - .addAggregation(avgBucket("avg_bucket", "terms>sum")) - .get(); - - assertSearchResponse(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - List buckets = terms.getBuckets(); - assertThat(buckets.size(), equalTo(interval)); - - double bucketSum = 0; - int count = 0; - for (int i = 0; i < interval; ++i) { - Terms.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("tag" + (i % interval))); - assertThat(bucket.getDocCount(), greaterThan(0L)); - Sum sum = bucket.getAggregations().get("sum"); - assertThat(sum, notNullValue()); - count++; - bucketSum += sum.value(); - } - - double avgValue = count == 0 ? 
Double.NaN : (bucketSum / count); - InternalSimpleValue avgBucketValue = response.getAggregations().get("avg_bucket"); - assertThat(avgBucketValue, notNullValue()); - assertThat(avgBucketValue.getName(), equalTo("avg_bucket")); - assertThat(avgBucketValue.value(), equalTo(avgValue)); + assertThat(pipelineBucket.value(), equalTo(avgValue)); } - public void testMetricAsSubAgg() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation( - terms("terms").field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME) - .interval(interval) - .extendedBounds(minRandomValue, maxRandomValue) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) - ) - .subAggregation(avgBucket("avg_bucket", "histo>sum")) - ) - .get(); - - assertSearchResponse(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - List termsBuckets = terms.getBuckets(); - assertThat(termsBuckets.size(), equalTo(interval)); - - for (int i = 0; i < interval; ++i) { - Terms.Bucket termsBucket = termsBuckets.get(i); - assertThat(termsBucket, notNullValue()); - assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); - - Histogram histo = termsBucket.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - - double bucketSum = 0; - int count = 0; - for (int j = 0; j < numValueBuckets; ++j) { - Histogram.Bucket bucket = buckets.get(j); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval)); - if (bucket.getDocCount() != 0) { - Sum sum = bucket.getAggregations().get("sum"); - assertThat(sum, notNullValue()); - count++; - bucketSum += sum.value(); - } - } - - double avgValue = count == 0 ? 
Double.NaN : (bucketSum / count); - InternalSimpleValue avgBucketValue = termsBucket.getAggregations().get("avg_bucket"); - assertThat(avgBucketValue, notNullValue()); - assertThat(avgBucketValue.getName(), equalTo("avg_bucket")); - assertThat(avgBucketValue.value(), equalTo(avgValue)); - } - } - - public void testMetricAsSubAggWithInsertZeros() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation( - terms("terms").field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME) - .interval(interval) - .extendedBounds(minRandomValue, maxRandomValue) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) - ) - .subAggregation(avgBucket("avg_bucket", "histo>sum").gapPolicy(GapPolicy.INSERT_ZEROS)) - ) - .get(); - - assertSearchResponse(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - List termsBuckets = terms.getBuckets(); - assertThat(termsBuckets.size(), equalTo(interval)); - - for (int i = 0; i < interval; ++i) { - Terms.Bucket termsBucket = termsBuckets.get(i); - assertThat(termsBucket, notNullValue()); - assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); - - Histogram histo = termsBucket.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - - double bucketSum = 0; - int count = 0; - for (int j = 0; j < numValueBuckets; ++j) { - Histogram.Bucket bucket = buckets.get(j); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval)); - Sum sum = bucket.getAggregations().get("sum"); - assertThat(sum, notNullValue()); - - count++; - bucketSum += sum.value(); - } - - double avgValue = count == 0 ? 
Double.NaN : (bucketSum / count); - InternalSimpleValue avgBucketValue = termsBucket.getAggregations().get("avg_bucket"); - assertThat(avgBucketValue, notNullValue()); - assertThat(avgBucketValue.getName(), equalTo("avg_bucket")); - assertThat(avgBucketValue.value(), equalTo(avgValue)); - } - } - - public void testNoBuckets() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation( - terms("terms").field("tag") - .includeExclude(new IncludeExclude(null, "tag.*")) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) - ) - .addAggregation(avgBucket("avg_bucket", "terms>sum")) - .get(); - - assertSearchResponse(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - List buckets = terms.getBuckets(); - assertThat(buckets.size(), equalTo(0)); - - InternalSimpleValue avgBucketValue = response.getAggregations().get("avg_bucket"); - assertThat(avgBucketValue, notNullValue()); - assertThat(avgBucketValue.getName(), equalTo("avg_bucket")); - assertThat(avgBucketValue.value(), equalTo(Double.NaN)); + @Override + protected String nestedMetric() { + return "value"; } - public void testNested() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation( - terms("terms").field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).extendedBounds(minRandomValue, maxRandomValue) - ) - .subAggregation(avgBucket("avg_histo_bucket", "histo>_count")) - ) - .addAggregation(avgBucket("avg_terms_bucket", "terms>avg_histo_bucket")) - .get(); - - assertSearchResponse(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - List termsBuckets = terms.getBuckets(); - assertThat(termsBuckets.size(), equalTo(interval)); - - double aggTermsSum = 0; - int aggTermsCount = 0; - for (int i = 0; i < interval; ++i) { - Terms.Bucket termsBucket = termsBuckets.get(i); - assertThat(termsBucket, notNullValue()); - assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); - - Histogram histo = termsBucket.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - - double aggHistoSum = 0; - int aggHistoCount = 0; - for (int j = 0; j < numValueBuckets; ++j) { - Histogram.Bucket bucket = buckets.get(j); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval)); - - aggHistoCount++; - aggHistoSum += bucket.getDocCount(); - } - - double avgHistoValue = aggHistoCount == 0 ? Double.NaN : (aggHistoSum / aggHistoCount); - InternalSimpleValue avgBucketValue = termsBucket.getAggregations().get("avg_histo_bucket"); - assertThat(avgBucketValue, notNullValue()); - assertThat(avgBucketValue.getName(), equalTo("avg_histo_bucket")); - assertThat(avgBucketValue.value(), equalTo(avgHistoValue)); - - aggTermsCount++; - aggTermsSum += avgHistoValue; - } - - double avgTermsValue = aggTermsCount == 0 ? 
Double.NaN : (aggTermsSum / aggTermsCount); - InternalSimpleValue avgBucketValue = response.getAggregations().get("avg_terms_bucket"); - assertThat(avgBucketValue, notNullValue()); - assertThat(avgBucketValue.getName(), equalTo("avg_terms_bucket")); - assertThat(avgBucketValue.value(), equalTo(avgTermsValue)); + @Override + protected double getNestedMetric(InternalSimpleValue bucket) { + return bucket.value(); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketMetricsPipeLineAggregationTestCase.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketMetricsPipeLineAggregationTestCase.java new file mode 100644 index 0000000000000..8c9c1acd19a2b --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketMetricsPipeLineAggregationTestCase.java @@ -0,0 +1,495 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.search.aggregations.pipeline; + +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.search.aggregations.BucketOrder; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; +import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; +import org.elasticsearch.search.aggregations.bucket.terms.IncludeExclude; +import org.elasticsearch.search.aggregations.bucket.terms.Terms; +import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregation; +import org.elasticsearch.search.aggregations.metrics.Sum; +import org.elasticsearch.search.aggregations.metrics.SumAggregationBuilder; +import org.elasticsearch.test.ESIntegTestCase; + +import java.util.ArrayList; +import java.util.List; +import java.util.function.Function; +import java.util.function.IntToDoubleFunction; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.search.aggregations.AggregationBuilders.dateHistogram; +import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; +import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; +import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.core.IsNull.notNullValue; + +@ESIntegTestCase.SuiteScopeTestCase +abstract class BucketMetricsPipeLineAggregationTestCase extends ESIntegTestCase { + + static 
final String SINGLE_VALUED_FIELD_NAME = "l_value"; + + static int numDocs; + static int interval; + static int minRandomValue; + static int maxRandomValue; + static int numValueBuckets; + static long[] valueCounts; + + static String histoName; + static String termsName; + + /** Creates the pipeline aggregation to test */ + protected abstract BucketMetricsPipelineAggregationBuilder BucketMetricsPipelineAgg(String name, String bucketsPath); + + /** Checks that the provided bucket values and keys agree with the result of the pipeline aggregation */ + protected abstract void assertResult( + IntToDoubleFunction bucketValues, + Function bucketKeys, + int numValues, + T pipelineBucket + ); + + /** Nested metric from the pipeline aggregation to test. This metric is added to the end of the bucket path*/ + protected abstract String nestedMetric(); + + /** Extract the value of the nested metric provided in {@link #nestedMetric()} */ + protected abstract double getNestedMetric(T bucket); + + @Override + public void setupSuiteScopeCluster() throws Exception { + assertAcked(client().admin().indices().prepareCreate("idx").setMapping("tag", "type=keyword").get()); + createIndex("idx_unmapped"); + + numDocs = randomIntBetween(6, 20); + interval = randomIntBetween(2, 5); + + minRandomValue = 0; + maxRandomValue = 20; + + numValueBuckets = ((maxRandomValue - minRandomValue) / interval) + 1; + valueCounts = new long[numValueBuckets]; + + List builders = new ArrayList<>(); + + for (int i = 0; i < numDocs; i++) { + int fieldValue = randomIntBetween(minRandomValue, maxRandomValue); + builders.add( + client().prepareIndex("idx") + .setSource( + jsonBuilder().startObject() + .field(SINGLE_VALUED_FIELD_NAME, fieldValue) + .field("tag", "tag" + (i % interval)) + .endObject() + ) + ); + final int bucket = (fieldValue / interval); // + (fieldValue < 0 ? -1 : 0) - (minRandomValue / interval - 1); + valueCounts[bucket]++; + } + + assertAcked(prepareCreate("empty_bucket_idx").setMapping(SINGLE_VALUED_FIELD_NAME, "type=integer")); + for (int i = 0; i < 2; i++) { + builders.add( + client().prepareIndex("empty_bucket_idx") + .setId("" + i) + .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, i * 2).endObject()) + ); + } + indexRandom(true, builders); + ensureSearchable(); + histoName = randomName(); + termsName = randomName(); + } + + private String randomName() { + return randomBoolean() + ? randomAlphaOfLengthBetween(3, 12) + : randomAlphaOfLengthBetween(3, 6) + "." 
+ randomAlphaOfLengthBetween(3, 6); + } + + public void testDocCountTopLevel() { + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + histogram(histoName).field(SINGLE_VALUED_FIELD_NAME).interval(interval).extendedBounds(minRandomValue, maxRandomValue) + ) + .addAggregation(BucketMetricsPipelineAgg("pipeline_agg", histoName + ">_count")) + .get(); + + assertSearchResponse(response); + + Histogram histo = response.getAggregations().get(histoName); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo(histoName)); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(numValueBuckets)); + + for (int i = 0; i < numValueBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); + assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); + } + + T pipelineBucket = response.getAggregations().get("pipeline_agg"); + assertThat(pipelineBucket, notNullValue()); + assertThat(pipelineBucket.getName(), equalTo("pipeline_agg")); + + assertResult((i) -> buckets.get(i).getDocCount(), (i) -> buckets.get(i).getKeyAsString(), numValueBuckets, pipelineBucket); + } + + public void testDocCountAsSubAgg() { + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + terms(termsName).field("tag") + .order(BucketOrder.key(true)) + .subAggregation( + histogram(histoName).field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .extendedBounds(minRandomValue, maxRandomValue) + ) + .subAggregation(BucketMetricsPipelineAgg("pipeline_agg", histoName + ">_count")) + ) + .get(); + + assertSearchResponse(response); + + Terms terms = response.getAggregations().get(termsName); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo(termsName)); + List termsBuckets = terms.getBuckets(); + assertThat(termsBuckets.size(), equalTo(interval)); + + for (int i = 0; i < interval; ++i) { + Terms.Bucket termsBucket = termsBuckets.get(i); + assertThat(termsBucket, notNullValue()); + assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); + + Histogram histo = termsBucket.getAggregations().get(histoName); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo(histoName)); + List buckets = histo.getBuckets(); + + for (int j = 0; j < numValueBuckets; ++j) { + Histogram.Bucket bucket = buckets.get(j); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval)); + } + + T pipelineBucket = termsBucket.getAggregations().get("pipeline_agg"); + assertThat(pipelineBucket, notNullValue()); + assertThat(pipelineBucket.getName(), equalTo("pipeline_agg")); + + assertResult((k) -> buckets.get(k).getDocCount(), (k) -> buckets.get(k).getKeyAsString(), numValueBuckets, pipelineBucket); + } + } + + public void testMetricTopLevel() { + SearchResponse response = client().prepareSearch("idx") + .addAggregation(terms(termsName).field("tag").subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) + .addAggregation(BucketMetricsPipelineAgg("pipeline_agg", termsName + ">sum")) + .get(); + + assertSearchResponse(response); + + Terms terms = response.getAggregations().get(termsName); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo(termsName)); + List buckets = terms.getBuckets(); + assertThat(buckets.size(), equalTo(interval)); + + for (int i = 0; i < interval; ++i) { + Terms.Bucket bucket = buckets.get(i); + 
assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("tag" + (i % interval))); + assertThat(bucket.getDocCount(), greaterThan(0L)); + } + + T pipelineBucket = response.getAggregations().get("pipeline_agg"); + assertThat(pipelineBucket, notNullValue()); + assertThat(pipelineBucket.getName(), equalTo("pipeline_agg")); + + IntToDoubleFunction function = (i) -> { + Sum sum = buckets.get(i).getAggregations().get("sum"); + assertThat(sum, notNullValue()); + return sum.value(); + }; + assertResult(function, (i) -> buckets.get(i).getKeyAsString(), interval, pipelineBucket); + } + + public void testMetricAsSubAgg() { + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + terms(termsName).field("tag") + .order(BucketOrder.key(true)) + .subAggregation( + histogram(histoName).field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .extendedBounds(minRandomValue, maxRandomValue) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) + ) + .subAggregation(BucketMetricsPipelineAgg("pipeline_agg", histoName + ">sum")) + ) + .get(); + + assertSearchResponse(response); + + Terms terms = response.getAggregations().get(termsName); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo(termsName)); + List termsBuckets = terms.getBuckets(); + assertThat(termsBuckets.size(), equalTo(interval)); + + for (int i = 0; i < interval; ++i) { + Terms.Bucket termsBucket = termsBuckets.get(i); + assertThat(termsBucket, notNullValue()); + assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); + + Histogram histo = termsBucket.getAggregations().get(histoName); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo(histoName)); + List buckets = histo.getBuckets(); + + List notNullBuckets = new ArrayList<>(); + for (int j = 0; j < numValueBuckets; ++j) { + Histogram.Bucket bucket = buckets.get(j); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval)); + if (bucket.getDocCount() != 0) { + notNullBuckets.add(bucket); + } + } + + T pipelineBucket = termsBucket.getAggregations().get("pipeline_agg"); + assertThat(pipelineBucket, notNullValue()); + assertThat(pipelineBucket.getName(), equalTo("pipeline_agg")); + + IntToDoubleFunction function = (k) -> { + Sum sum = notNullBuckets.get(k).getAggregations().get("sum"); + assertThat(sum, notNullValue()); + return sum.value(); + }; + assertResult(function, (k) -> notNullBuckets.get(k).getKeyAsString(), notNullBuckets.size(), pipelineBucket); + } + } + + public void testMetricAsSubAggWithInsertZeros() { + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + terms(termsName).field("tag") + .order(BucketOrder.key(true)) + .subAggregation( + histogram(histoName).field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .extendedBounds(minRandomValue, maxRandomValue) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) + ) + .subAggregation( + BucketMetricsPipelineAgg("pipeline_agg", histoName + ">sum").gapPolicy(BucketHelpers.GapPolicy.INSERT_ZEROS) + ) + ) + .get(); + + assertSearchResponse(response); + + Terms terms = response.getAggregations().get(termsName); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo(termsName)); + List termsBuckets = terms.getBuckets(); + assertThat(termsBuckets.size(), equalTo(interval)); + + for (int i = 0; i < interval; ++i) { + Terms.Bucket termsBucket = termsBuckets.get(i); + assertThat(termsBucket, notNullValue()); 
+ assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); + + Histogram histo = termsBucket.getAggregations().get(histoName); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo(histoName)); + List buckets = histo.getBuckets(); + + for (int j = 0; j < numValueBuckets; ++j) { + Histogram.Bucket bucket = buckets.get(j); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval)); + } + + T pipelineBucket = termsBucket.getAggregations().get("pipeline_agg"); + assertThat(pipelineBucket, notNullValue()); + assertThat(pipelineBucket.getName(), equalTo("pipeline_agg")); + + IntToDoubleFunction function = (k) -> { + Sum sum = buckets.get(k).getAggregations().get("sum"); + assertThat(sum, notNullValue()); + return sum.value(); + }; + assertResult(function, (k) -> buckets.get(k).getKeyAsString(), numValueBuckets, pipelineBucket); + } + } + + public void testNoBuckets() { + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + terms(termsName).field("tag") + .includeExclude(new IncludeExclude(null, "tag.*")) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) + ) + .addAggregation(BucketMetricsPipelineAgg("pipeline_agg", termsName + ">sum")) + .get(); + + assertSearchResponse(response); + + Terms terms = response.getAggregations().get(termsName); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo(termsName)); + List buckets = terms.getBuckets(); + assertThat(buckets.size(), equalTo(0)); + + T pipelineBucket = response.getAggregations().get("pipeline_agg"); + assertThat(pipelineBucket, notNullValue()); + assertThat(pipelineBucket.getName(), equalTo("pipeline_agg")); + + assertResult((k) -> 0.0, (k) -> "", 0, pipelineBucket); + } + + public void testNested() { + SearchResponse response = client().prepareSearch("idx") + .addAggregation( + terms(termsName).field("tag") + .order(BucketOrder.key(true)) + .subAggregation( + histogram(histoName).field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .extendedBounds(minRandomValue, maxRandomValue) + ) + .subAggregation(BucketMetricsPipelineAgg("nested_histo_bucket", histoName + ">_count")) + ) + .addAggregation(BucketMetricsPipelineAgg("nested_terms_bucket", termsName + ">nested_histo_bucket." 
+ nestedMetric())) + .get(); + + assertSearchResponse(response); + + Terms terms = response.getAggregations().get(termsName); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo(termsName)); + List termsBuckets = terms.getBuckets(); + assertThat(termsBuckets.size(), equalTo(interval)); + + List allBuckets = new ArrayList<>(); + List nestedTags = new ArrayList<>(); + for (int i = 0; i < interval; ++i) { + Terms.Bucket termsBucket = termsBuckets.get(i); + assertThat(termsBucket, notNullValue()); + assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); + + Histogram histo = termsBucket.getAggregations().get(histoName); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo(histoName)); + List buckets = histo.getBuckets(); + + for (int j = 0; j < numValueBuckets; ++j) { + Histogram.Bucket bucket = buckets.get(j); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval)); + } + + T pipelineBucket = termsBucket.getAggregations().get("nested_histo_bucket"); + assertThat(pipelineBucket, notNullValue()); + assertThat(pipelineBucket.getName(), equalTo("nested_histo_bucket")); + + assertResult((k) -> buckets.get(k).getDocCount(), (k) -> buckets.get(k).getKeyAsString(), numValueBuckets, pipelineBucket); + allBuckets.add(pipelineBucket); + nestedTags.add(termsBucket.getKeyAsString()); + } + + T pipelineBucket = response.getAggregations().get("nested_terms_bucket"); + assertThat(pipelineBucket, notNullValue()); + assertThat(pipelineBucket.getName(), equalTo("nested_terms_bucket")); + + assertResult((k) -> getNestedMetric(allBuckets.get(k)), (k) -> nestedTags.get(k), allBuckets.size(), pipelineBucket); + } + + /** + * https://github.com/elastic/elasticsearch/issues/33514 + + * + * This bug manifests as the max_bucket agg ("peak") being added to the response twice, because + * the pipeline agg is run twice. This makes invalid JSON and breaks conversion to maps. + * The bug was caused by an UnmappedTerms being chosen as the first reduction target. UnmappedTerms + * delegated reduction to the first non-unmapped agg, which would reduce and run pipeline aggs. But then + * execution returns to the UnmappedTerms and _it_ runs pipelines as well, doubling up on the values. + */ + public void testFieldIsntWrittenOutTwice() throws Exception { + // you need to add an additional index with no fields in order to trigger this (or potentially a shard) + // so that there is an UnmappedTerms in the list to reduce.
+ createIndex("foo_1"); + // @formatter:off + XContentBuilder builder = jsonBuilder().startObject() + .startObject("properties") + .startObject("@timestamp") + .field("type", "date") + .endObject() + .startObject("license") + .startObject("properties") + .startObject("count") + .field("type", "long") + .endObject() + .startObject("partnumber") + .field("type", "text") + .startObject("fields") + .startObject("keyword") + .field("type", "keyword") + .field("ignore_above", 256) + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .endObject(); + // @formatter:on + assertAcked(client().admin().indices().prepareCreate("foo_2").setMapping(builder).get()); + // @formatter:off + XContentBuilder docBuilder = jsonBuilder().startObject() + .startObject("license") + .field("partnumber", "foobar") + .field("count", 2) + .endObject() + .field("@timestamp", "2018-07-08T08:07:00.599Z") + .endObject(); + // @formatter:on + client().prepareIndex("foo_2").setSource(docBuilder).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); + + client().admin().indices().prepareRefresh(); + + TermsAggregationBuilder groupByLicenseAgg = terms("group_by_license_partnumber").field("license.partnumber.keyword"); + + SumAggregationBuilder sumAggBuilder = sum("total_licenses").field("license.count"); + DateHistogramAggregationBuilder licensePerDayBuilder = dateHistogram("licenses_per_day").field("@timestamp") + .fixedInterval(DateHistogramInterval.DAY); + licensePerDayBuilder.subAggregation(sumAggBuilder); + groupByLicenseAgg.subAggregation(licensePerDayBuilder); + groupByLicenseAgg.subAggregation(BucketMetricsPipelineAgg("peak", "licenses_per_day>total_licenses")); + + SearchResponse response = client().prepareSearch("foo_*").setSize(0).addAggregation(groupByLicenseAgg).get(); + BytesReference bytes = XContentHelper.toXContent(response, XContentType.JSON, false); + XContentHelper.convertToMap(bytes, false, XContentType.JSON); + } +} diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java index ecef1b8aa0f86..936d2f2f25b06 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java @@ -15,70 +15,72 @@ import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; -import org.elasticsearch.search.aggregations.bucket.terms.IncludeExclude; -import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.metrics.ExtendedStats.Bounds; -import org.elasticsearch.search.aggregations.metrics.Sum; -import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import org.elasticsearch.test.ESIntegTestCase; import java.util.ArrayList; import java.util.List; +import java.util.function.Function; +import java.util.function.IntToDoubleFunction; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; import static 
org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.elasticsearch.search.aggregations.PipelineAggregatorBuilders.extendedStatsBucket; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.core.IsNull.notNullValue; -@ESIntegTestCase.SuiteScopeTestCase -public class ExtendedStatsBucketIT extends ESIntegTestCase { +public class ExtendedStatsBucketIT extends BucketMetricsPipeLineAggregationTestCase { - private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; - - static int numDocs; - static int interval; - static int minRandomValue; - static int maxRandomValue; - static int numValueBuckets; - static long[] valueCounts; + @Override + protected ExtendedStatsBucketPipelineAggregationBuilder BucketMetricsPipelineAgg(String name, String bucketsPath) { + return extendedStatsBucket(name, bucketsPath); + } @Override - public void setupSuiteScopeCluster() throws Exception { - assertAcked(client().admin().indices().prepareCreate("idx").setMapping("tag", "type=keyword").get()); - createIndex("idx_unmapped", "idx_gappy"); + protected void assertResult( + IntToDoubleFunction buckets, + Function bucketKeys, + int numBuckets, + ExtendedStatsBucket pipelineBucket + ) { + double sum = 0; + int count = 0; + double min = Double.POSITIVE_INFINITY; + double max = Double.NEGATIVE_INFINITY; + double sumOfSquares = 0; + for (int i = 0; i < numBuckets; ++i) { + double bucketValue = buckets.applyAsDouble(i); + count++; + sum += bucketValue; + min = Math.min(min, bucketValue); + max = Math.max(max, bucketValue); + sumOfSquares += bucketValue * bucketValue; + } + double avgValue = count == 0 ? Double.NaN : (sum / count); + assertThat(pipelineBucket.getAvg(), equalTo(avgValue)); + assertThat(pipelineBucket.getMin(), equalTo(min)); + assertThat(pipelineBucket.getMax(), equalTo(max)); + assertThat(pipelineBucket.getSumOfSquares(), equalTo(sumOfSquares)); + } - numDocs = randomIntBetween(6, 20); - interval = randomIntBetween(2, 5); + @Override + protected String nestedMetric() { + return "avg"; + } - minRandomValue = 0; - maxRandomValue = 20; + @Override + protected double getNestedMetric(ExtendedStatsBucket bucket) { + return bucket.getAvg(); + } - numValueBuckets = ((maxRandomValue - minRandomValue) / interval) + 1; - valueCounts = new long[numValueBuckets]; + @Override + public void setupSuiteScopeCluster() throws Exception { + super.setupSuiteScopeCluster(); List builders = new ArrayList<>(); - for (int i = 0; i < numDocs; i++) { - int fieldValue = randomIntBetween(minRandomValue, maxRandomValue); - builders.add( - client().prepareIndex("idx") - .setSource( - jsonBuilder().startObject() - .field(SINGLE_VALUED_FIELD_NAME, fieldValue) - .field("tag", "tag" + (i % interval)) - .endObject() - ) - ); - final int bucket = (fieldValue / interval); // + (fieldValue < 0 ? 
-1 : 0) - (minRandomValue / interval - 1); - valueCounts[bucket]++; - } - for (int i = 0; i < 6; i++) { // creates 6 documents where the value of the field is 0, 1, 2, 3, // 3, 5 @@ -89,14 +91,6 @@ public void setupSuiteScopeCluster() throws Exception { ); } - assertAcked(prepareCreate("empty_bucket_idx").setMapping(SINGLE_VALUED_FIELD_NAME, "type=integer")); - for (int i = 0; i < 2; i++) { - builders.add( - client().prepareIndex("empty_bucket_idx") - .setId("" + i) - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, i * 2).endObject()) - ); - } indexRandom(true, builders); ensureSearchable(); } @@ -154,300 +148,6 @@ public void testGappyIndexWithSigma() { assertThat(extendedStatsBucketValue.getStdDeviationBound(Bounds.UPPER), equalTo(avg + (sigma * stdDev))); } - public void testDocCountTopLevel() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).extendedBounds(minRandomValue, maxRandomValue) - ) - .addAggregation(extendedStatsBucket("extended_stats_bucket", "histo>_count")) - .get(); - - assertSearchResponse(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(numValueBuckets)); - - double sum = 0; - int count = 0; - double min = Double.POSITIVE_INFINITY; - double max = Double.NEGATIVE_INFINITY; - double sumOfSquares = 0; - for (int i = 0; i < numValueBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); - assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); - count++; - sum += bucket.getDocCount(); - min = Math.min(min, bucket.getDocCount()); - max = Math.max(max, bucket.getDocCount()); - sumOfSquares += bucket.getDocCount() * bucket.getDocCount(); - } - - double avgValue = count == 0 ? 
Double.NaN : (sum / count); - ExtendedStatsBucket extendedStatsBucketValue = response.getAggregations().get("extended_stats_bucket"); - assertThat(extendedStatsBucketValue, notNullValue()); - assertThat(extendedStatsBucketValue.getName(), equalTo("extended_stats_bucket")); - assertThat(extendedStatsBucketValue.getAvg(), equalTo(avgValue)); - assertThat(extendedStatsBucketValue.getMin(), equalTo(min)); - assertThat(extendedStatsBucketValue.getMax(), equalTo(max)); - assertThat(extendedStatsBucketValue.getSumOfSquares(), equalTo(sumOfSquares)); - } - - public void testDocCountAsSubAgg() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation( - terms("terms").field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).extendedBounds(minRandomValue, maxRandomValue) - ) - .subAggregation(extendedStatsBucket("extended_stats_bucket", "histo>_count")) - ) - .get(); - - assertSearchResponse(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - List termsBuckets = terms.getBuckets(); - assertThat(termsBuckets.size(), equalTo(interval)); - - for (int i = 0; i < interval; ++i) { - Terms.Bucket termsBucket = termsBuckets.get(i); - assertThat(termsBucket, notNullValue()); - assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); - - Histogram histo = termsBucket.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - - double sum = 0; - int count = 0; - double min = Double.POSITIVE_INFINITY; - double max = Double.NEGATIVE_INFINITY; - double sumOfSquares = 0; - for (int j = 0; j < numValueBuckets; ++j) { - Histogram.Bucket bucket = buckets.get(j); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval)); - count++; - sum += bucket.getDocCount(); - min = Math.min(min, bucket.getDocCount()); - max = Math.max(max, bucket.getDocCount()); - sumOfSquares += bucket.getDocCount() * bucket.getDocCount(); - } - - double avgValue = count == 0 ? 
Double.NaN : (sum / count); - ExtendedStatsBucket extendedStatsBucketValue = termsBucket.getAggregations().get("extended_stats_bucket"); - assertThat(extendedStatsBucketValue, notNullValue()); - assertThat(extendedStatsBucketValue.getName(), equalTo("extended_stats_bucket")); - assertThat(extendedStatsBucketValue.getAvg(), equalTo(avgValue)); - assertThat(extendedStatsBucketValue.getMin(), equalTo(min)); - assertThat(extendedStatsBucketValue.getMax(), equalTo(max)); - assertThat(extendedStatsBucketValue.getSumOfSquares(), equalTo(sumOfSquares)); - } - } - - public void testMetricTopLevel() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation(terms("terms").field("tag").subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) - .addAggregation(extendedStatsBucket("extended_stats_bucket", "terms>sum")) - .get(); - - assertSearchResponse(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - List buckets = terms.getBuckets(); - assertThat(buckets.size(), equalTo(interval)); - - double bucketSum = 0; - int count = 0; - double min = Double.POSITIVE_INFINITY; - double max = Double.NEGATIVE_INFINITY; - double sumOfSquares = 0; - for (int i = 0; i < interval; ++i) { - Terms.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("tag" + (i % interval))); - assertThat(bucket.getDocCount(), greaterThan(0L)); - Sum sum = bucket.getAggregations().get("sum"); - assertThat(sum, notNullValue()); - count++; - bucketSum += sum.value(); - min = Math.min(min, sum.value()); - max = Math.max(max, sum.value()); - sumOfSquares += sum.value() * sum.value(); - } - - double avgValue = count == 0 ? 
Double.NaN : (bucketSum / count); - ExtendedStatsBucket extendedStatsBucketValue = response.getAggregations().get("extended_stats_bucket"); - assertThat(extendedStatsBucketValue, notNullValue()); - assertThat(extendedStatsBucketValue.getName(), equalTo("extended_stats_bucket")); - assertThat(extendedStatsBucketValue.getAvg(), equalTo(avgValue)); - assertThat(extendedStatsBucketValue.getMin(), equalTo(min)); - assertThat(extendedStatsBucketValue.getMax(), equalTo(max)); - assertThat(extendedStatsBucketValue.getSumOfSquares(), equalTo(sumOfSquares)); - } - - public void testMetricAsSubAgg() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation( - terms("terms").field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME) - .interval(interval) - .extendedBounds(minRandomValue, maxRandomValue) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) - ) - .subAggregation(extendedStatsBucket("extended_stats_bucket", "histo>sum")) - ) - .get(); - - assertSearchResponse(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - List termsBuckets = terms.getBuckets(); - assertThat(termsBuckets.size(), equalTo(interval)); - - for (int i = 0; i < interval; ++i) { - Terms.Bucket termsBucket = termsBuckets.get(i); - assertThat(termsBucket, notNullValue()); - assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); - - Histogram histo = termsBucket.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - - double bucketSum = 0; - int count = 0; - double min = Double.POSITIVE_INFINITY; - double max = Double.NEGATIVE_INFINITY; - double sumOfSquares = 0; - for (int j = 0; j < numValueBuckets; ++j) { - Histogram.Bucket bucket = buckets.get(j); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval)); - if (bucket.getDocCount() != 0) { - Sum sum = bucket.getAggregations().get("sum"); - assertThat(sum, notNullValue()); - count++; - bucketSum += sum.value(); - min = Math.min(min, sum.value()); - max = Math.max(max, sum.value()); - sumOfSquares += sum.value() * sum.value(); - } - } - - double avgValue = count == 0 ? 
Double.NaN : (bucketSum / count); - ExtendedStatsBucket extendedStatsBucketValue = termsBucket.getAggregations().get("extended_stats_bucket"); - assertThat(extendedStatsBucketValue, notNullValue()); - assertThat(extendedStatsBucketValue.getName(), equalTo("extended_stats_bucket")); - assertThat(extendedStatsBucketValue.getAvg(), equalTo(avgValue)); - assertThat(extendedStatsBucketValue.getMin(), equalTo(min)); - assertThat(extendedStatsBucketValue.getMax(), equalTo(max)); - assertThat(extendedStatsBucketValue.getSumOfSquares(), equalTo(sumOfSquares)); - } - } - - public void testMetricAsSubAggWithInsertZeros() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation( - terms("terms").field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME) - .interval(interval) - .extendedBounds(minRandomValue, maxRandomValue) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) - ) - .subAggregation(extendedStatsBucket("extended_stats_bucket", "histo>sum").gapPolicy(GapPolicy.INSERT_ZEROS)) - ) - .get(); - - assertSearchResponse(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - List termsBuckets = terms.getBuckets(); - assertThat(termsBuckets.size(), equalTo(interval)); - - for (int i = 0; i < interval; ++i) { - Terms.Bucket termsBucket = termsBuckets.get(i); - assertThat(termsBucket, notNullValue()); - assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); - - Histogram histo = termsBucket.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - - double bucketSum = 0; - int count = 0; - double min = Double.POSITIVE_INFINITY; - double max = Double.NEGATIVE_INFINITY; - double sumOfSquares = 0; - for (int j = 0; j < numValueBuckets; ++j) { - Histogram.Bucket bucket = buckets.get(j); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval)); - Sum sum = bucket.getAggregations().get("sum"); - assertThat(sum, notNullValue()); - - count++; - bucketSum += sum.value(); - min = Math.min(min, sum.value()); - max = Math.max(max, sum.value()); - sumOfSquares += sum.value() * sum.value(); - } - - double avgValue = count == 0 ? 
Double.NaN : (bucketSum / count); - ExtendedStatsBucket extendedStatsBucketValue = termsBucket.getAggregations().get("extended_stats_bucket"); - assertThat(extendedStatsBucketValue, notNullValue()); - assertThat(extendedStatsBucketValue.getName(), equalTo("extended_stats_bucket")); - assertThat(extendedStatsBucketValue.getAvg(), equalTo(avgValue)); - assertThat(extendedStatsBucketValue.getMin(), equalTo(min)); - assertThat(extendedStatsBucketValue.getMax(), equalTo(max)); - assertThat(extendedStatsBucketValue.getSumOfSquares(), equalTo(sumOfSquares)); - } - } - - public void testNoBuckets() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation( - terms("terms").field("tag") - .includeExclude(new IncludeExclude(null, "tag.*")) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) - ) - .addAggregation(extendedStatsBucket("extended_stats_bucket", "terms>sum")) - .get(); - - assertSearchResponse(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - List buckets = terms.getBuckets(); - assertThat(buckets.size(), equalTo(0)); - - ExtendedStatsBucket extendedStatsBucketValue = response.getAggregations().get("extended_stats_bucket"); - assertThat(extendedStatsBucketValue, notNullValue()); - assertThat(extendedStatsBucketValue.getName(), equalTo("extended_stats_bucket")); - assertThat(extendedStatsBucketValue.getAvg(), equalTo(Double.NaN)); - } - public void testBadSigmaAsSubAgg() throws Exception { Exception ex = expectThrows( Exception.class, @@ -478,74 +178,4 @@ public void testBadSigmaAsSubAgg() throws Exception { throw ex; } } - - public void testNested() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation( - terms("terms").field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).extendedBounds(minRandomValue, maxRandomValue) - ) - .subAggregation(extendedStatsBucket("avg_histo_bucket", "histo>_count")) - ) - .addAggregation(extendedStatsBucket("avg_terms_bucket", "terms>avg_histo_bucket.avg")) - .get(); - - assertSearchResponse(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - List termsBuckets = terms.getBuckets(); - assertThat(termsBuckets.size(), equalTo(interval)); - - double aggTermsSum = 0; - int aggTermsCount = 0; - double min = Double.POSITIVE_INFINITY; - double max = Double.NEGATIVE_INFINITY; - double sumOfSquares = 0; - for (int i = 0; i < interval; ++i) { - Terms.Bucket termsBucket = termsBuckets.get(i); - assertThat(termsBucket, notNullValue()); - assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); - - Histogram histo = termsBucket.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - - double aggHistoSum = 0; - int aggHistoCount = 0; - for (int j = 0; j < numValueBuckets; ++j) { - Histogram.Bucket bucket = buckets.get(j); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval)); - - aggHistoCount++; - aggHistoSum += bucket.getDocCount(); - } - - double avgHistoValue = aggHistoCount == 0 ? 
Double.NaN : (aggHistoSum / aggHistoCount); - ExtendedStatsBucket extendedStatsBucketValue = termsBucket.getAggregations().get("avg_histo_bucket"); - assertThat(extendedStatsBucketValue, notNullValue()); - assertThat(extendedStatsBucketValue.getName(), equalTo("avg_histo_bucket")); - assertThat(extendedStatsBucketValue.getAvg(), equalTo(avgHistoValue)); - - aggTermsCount++; - aggTermsSum += avgHistoValue; - min = Math.min(min, avgHistoValue); - max = Math.max(max, avgHistoValue); - sumOfSquares += avgHistoValue * avgHistoValue; - } - - double avgTermsValue = aggTermsCount == 0 ? Double.NaN : (aggTermsSum / aggTermsCount); - ExtendedStatsBucket extendedStatsBucketValue = response.getAggregations().get("avg_terms_bucket"); - assertThat(extendedStatsBucketValue, notNullValue()); - assertThat(extendedStatsBucketValue.getName(), equalTo("avg_terms_bucket")); - assertThat(extendedStatsBucketValue.getAvg(), equalTo(avgTermsValue)); - assertThat(extendedStatsBucketValue.getMin(), equalTo(min)); - assertThat(extendedStatsBucketValue.getMax(), equalTo(max)); - assertThat(extendedStatsBucketValue.getSumOfSquares(), equalTo(sumOfSquares)); - } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/MaxBucketIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/MaxBucketIT.java index 6a183f15d1c04..4e7e0fba0f0eb 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/MaxBucketIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/MaxBucketIT.java @@ -8,560 +8,51 @@ package org.elasticsearch.search.aggregations.pipeline; -import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.support.WriteRequest; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.search.aggregations.AggregationBuilders; -import org.elasticsearch.search.aggregations.BucketOrder; -import org.elasticsearch.search.aggregations.PipelineAggregatorBuilders; -import org.elasticsearch.search.aggregations.bucket.filter.Filter; -import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; -import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; -import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; -import org.elasticsearch.search.aggregations.bucket.terms.IncludeExclude; -import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.Sum; -import org.elasticsearch.search.aggregations.metrics.SumAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import org.elasticsearch.test.ESIntegTestCase; - import java.util.ArrayList; import java.util.List; +import java.util.function.Function; +import java.util.function.IntToDoubleFunction; -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.index.query.QueryBuilders.termQuery; -import static 
org.elasticsearch.search.aggregations.AggregationBuilders.filter; -import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; -import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; -import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.elasticsearch.search.aggregations.PipelineAggregatorBuilders.maxBucket; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.core.IsNull.notNullValue; - -@ESIntegTestCase.SuiteScopeTestCase -public class MaxBucketIT extends ESIntegTestCase { - - private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; - static int numDocs; - static int interval; - static int minRandomValue; - static int maxRandomValue; - static int numValueBuckets; - static long[] valueCounts; +public class MaxBucketIT extends BucketMetricsPipeLineAggregationTestCase { @Override - public void setupSuiteScopeCluster() throws Exception { - assertAcked(client().admin().indices().prepareCreate("idx").setMapping("tag", "type=keyword").get()); - createIndex("idx_unmapped"); - - numDocs = randomIntBetween(6, 20); - interval = randomIntBetween(2, 5); - - minRandomValue = 0; - maxRandomValue = 20; - - numValueBuckets = ((maxRandomValue - minRandomValue) / interval) + 1; - valueCounts = new long[numValueBuckets]; - - List builders = new ArrayList<>(); - - for (int i = 0; i < numDocs; i++) { - int fieldValue = randomIntBetween(minRandomValue, maxRandomValue); - builders.add( - client().prepareIndex("idx") - .setSource( - jsonBuilder().startObject() - .field(SINGLE_VALUED_FIELD_NAME, fieldValue) - .field("tag", "tag" + (i % interval)) - .endObject() - ) - ); - final int bucket = (fieldValue / interval); // + (fieldValue < 0 ? 
-1 : 0) - (minRandomValue / interval - 1); - valueCounts[bucket]++; - } - - assertAcked(prepareCreate("empty_bucket_idx").setMapping(SINGLE_VALUED_FIELD_NAME, "type=integer")); - for (int i = 0; i < 2; i++) { - builders.add( - client().prepareIndex("empty_bucket_idx") - .setId("" + i) - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, i * 2).endObject()) - ); - } - indexRandom(true, builders); - ensureSearchable(); + protected MaxBucketPipelineAggregationBuilder BucketMetricsPipelineAgg(String name, String bucketsPath) { + return maxBucket(name, bucketsPath); } - public void testDocCountTopLevel() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).extendedBounds(minRandomValue, maxRandomValue) - ) - .addAggregation(maxBucket("max_bucket", "histo>_count")) - .get(); - - assertSearchResponse(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(numValueBuckets)); - - List maxKeys = new ArrayList<>(); - double maxValue = Double.NEGATIVE_INFINITY; - for (int i = 0; i < numValueBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); - assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); - if (bucket.getDocCount() > maxValue) { - maxValue = bucket.getDocCount(); - maxKeys = new ArrayList<>(); - maxKeys.add(bucket.getKeyAsString()); - } else if (bucket.getDocCount() == maxValue) { - maxKeys.add(bucket.getKeyAsString()); - } - } - - InternalBucketMetricValue maxBucketValue = response.getAggregations().get("max_bucket"); - assertThat(maxBucketValue, notNullValue()); - assertThat(maxBucketValue.getName(), equalTo("max_bucket")); - assertThat(maxBucketValue.value(), equalTo(maxValue)); - assertThat(maxBucketValue.keys(), equalTo(maxKeys.toArray(new String[maxKeys.size()]))); - } - - public void testDocCountAsSubAgg() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation( - terms("terms").field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).extendedBounds(minRandomValue, maxRandomValue) - ) - .subAggregation(maxBucket("max_bucket", "histo>_count")) - ) - .get(); - - assertSearchResponse(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - List termsBuckets = terms.getBuckets(); - assertThat(termsBuckets.size(), equalTo(interval)); - - for (int i = 0; i < interval; ++i) { - Terms.Bucket termsBucket = termsBuckets.get(i); - assertThat(termsBucket, notNullValue()); - assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); - - Histogram histo = termsBucket.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - - List maxKeys = new ArrayList<>(); - double maxValue = Double.NEGATIVE_INFINITY; - for (int j = 0; j < numValueBuckets; ++j) { - Histogram.Bucket bucket = buckets.get(j); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval)); - if 
(bucket.getDocCount() > maxValue) { - maxValue = bucket.getDocCount(); - maxKeys = new ArrayList<>(); - maxKeys.add(bucket.getKeyAsString()); - } else if (bucket.getDocCount() == maxValue) { - maxKeys.add(bucket.getKeyAsString()); - } - } - - InternalBucketMetricValue maxBucketValue = termsBucket.getAggregations().get("max_bucket"); - assertThat(maxBucketValue, notNullValue()); - assertThat(maxBucketValue.getName(), equalTo("max_bucket")); - assertThat(maxBucketValue.value(), equalTo(maxValue)); - assertThat(maxBucketValue.keys(), equalTo(maxKeys.toArray(new String[maxKeys.size()]))); - } - } - - public void testMetricTopLevel() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation(terms("terms").field("tag").subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) - .addAggregation(maxBucket("max_bucket", "terms>sum")) - .get(); - - assertSearchResponse(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - List buckets = terms.getBuckets(); - assertThat(buckets.size(), equalTo(interval)); - + @Override + protected void assertResult( + IntToDoubleFunction bucketValues, + Function bucketKeys, + int numBuckets, + InternalBucketMetricValue pipelineBucket + ) { List maxKeys = new ArrayList<>(); double maxValue = Double.NEGATIVE_INFINITY; - for (int i = 0; i < interval; ++i) { - Terms.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("tag" + (i % interval))); - assertThat(bucket.getDocCount(), greaterThan(0L)); - Sum sum = bucket.getAggregations().get("sum"); - assertThat(sum, notNullValue()); - if (sum.value() > maxValue) { - maxValue = sum.value(); + for (int i = 0; i < numBuckets; ++i) { + double bucketValue = bucketValues.applyAsDouble(i); + if (bucketValue > maxValue) { + maxValue = bucketValue; maxKeys = new ArrayList<>(); - maxKeys.add(bucket.getKeyAsString()); - } else if (sum.value() == maxValue) { - maxKeys.add(bucket.getKeyAsString()); - } - } - - InternalBucketMetricValue maxBucketValue = response.getAggregations().get("max_bucket"); - assertThat(maxBucketValue, notNullValue()); - assertThat(maxBucketValue.getName(), equalTo("max_bucket")); - assertThat(maxBucketValue.value(), equalTo(maxValue)); - assertThat(maxBucketValue.keys(), equalTo(maxKeys.toArray(new String[maxKeys.size()]))); - } - - public void testMetricAsSubAgg() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation( - terms("terms").field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME) - .interval(interval) - .extendedBounds(minRandomValue, maxRandomValue) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) - ) - .subAggregation(maxBucket("max_bucket", "histo>sum")) - ) - .get(); - - assertSearchResponse(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - List termsBuckets = terms.getBuckets(); - assertThat(termsBuckets.size(), equalTo(interval)); - - for (int i = 0; i < interval; ++i) { - Terms.Bucket termsBucket = termsBuckets.get(i); - assertThat(termsBucket, notNullValue()); - assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); - - Histogram histo = termsBucket.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), 
equalTo("histo")); - List buckets = histo.getBuckets(); - - List maxKeys = new ArrayList<>(); - double maxValue = Double.NEGATIVE_INFINITY; - for (int j = 0; j < numValueBuckets; ++j) { - Histogram.Bucket bucket = buckets.get(j); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval)); - if (bucket.getDocCount() != 0) { - Sum sum = bucket.getAggregations().get("sum"); - assertThat(sum, notNullValue()); - if (sum.value() > maxValue) { - maxValue = sum.value(); - maxKeys = new ArrayList<>(); - maxKeys.add(bucket.getKeyAsString()); - } else if (sum.value() == maxValue) { - maxKeys.add(bucket.getKeyAsString()); - } - } - } - - InternalBucketMetricValue maxBucketValue = termsBucket.getAggregations().get("max_bucket"); - assertThat(maxBucketValue, notNullValue()); - assertThat(maxBucketValue.getName(), equalTo("max_bucket")); - assertThat(maxBucketValue.value(), equalTo(maxValue)); - assertThat(maxBucketValue.keys(), equalTo(maxKeys.toArray(new String[maxKeys.size()]))); - } - } - - public void testMetricAsSubAggOfSingleBucketAgg() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation( - filter("filter", termQuery("tag", "tag0")).subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME) - .interval(interval) - .extendedBounds(minRandomValue, maxRandomValue) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) - ).subAggregation(maxBucket("max_bucket", "histo>sum")) - ) - .get(); - - assertSearchResponse(response); - - Filter filter = response.getAggregations().get("filter"); - assertThat(filter, notNullValue()); - assertThat(filter.getName(), equalTo("filter")); - Histogram histo = filter.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - - List maxKeys = new ArrayList<>(); - double maxValue = Double.NEGATIVE_INFINITY; - for (int j = 0; j < numValueBuckets; ++j) { - Histogram.Bucket bucket = buckets.get(j); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval)); - if (bucket.getDocCount() != 0) { - Sum sum = bucket.getAggregations().get("sum"); - assertThat(sum, notNullValue()); - if (sum.value() > maxValue) { - maxValue = sum.value(); - maxKeys = new ArrayList<>(); - maxKeys.add(bucket.getKeyAsString()); - } else if (sum.value() == maxValue) { - maxKeys.add(bucket.getKeyAsString()); - } + maxKeys.add(bucketKeys.apply(i)); + } else if (bucketValue == maxValue) { + maxKeys.add(bucketKeys.apply(i)); } } - - InternalBucketMetricValue maxBucketValue = filter.getAggregations().get("max_bucket"); - assertThat(maxBucketValue, notNullValue()); - assertThat(maxBucketValue.getName(), equalTo("max_bucket")); - assertThat(maxBucketValue.value(), equalTo(maxValue)); - assertThat(maxBucketValue.keys(), equalTo(maxKeys.toArray(new String[maxKeys.size()]))); + assertThat(pipelineBucket.value(), equalTo(maxValue)); + assertThat(pipelineBucket.keys(), equalTo(maxKeys.toArray(new String[0]))); } - public void testMetricAsSubAggWithInsertZeros() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation( - terms("terms").field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME) - .interval(interval) - .extendedBounds(minRandomValue, maxRandomValue) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) - ) - 
.subAggregation(maxBucket("max_bucket", "histo>sum").gapPolicy(GapPolicy.INSERT_ZEROS)) - ) - .get(); - - assertSearchResponse(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - List termsBuckets = terms.getBuckets(); - assertThat(termsBuckets.size(), equalTo(interval)); - - for (int i = 0; i < interval; ++i) { - Terms.Bucket termsBucket = termsBuckets.get(i); - assertThat(termsBucket, notNullValue()); - assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); - - Histogram histo = termsBucket.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - - List maxKeys = new ArrayList<>(); - double maxValue = Double.NEGATIVE_INFINITY; - for (int j = 0; j < numValueBuckets; ++j) { - Histogram.Bucket bucket = buckets.get(j); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval)); - Sum sum = bucket.getAggregations().get("sum"); - assertThat(sum, notNullValue()); - if (sum.value() > maxValue) { - maxValue = sum.value(); - maxKeys = new ArrayList<>(); - maxKeys.add(bucket.getKeyAsString()); - } else if (sum.value() == maxValue) { - maxKeys.add(bucket.getKeyAsString()); - } - } - - InternalBucketMetricValue maxBucketValue = termsBucket.getAggregations().get("max_bucket"); - assertThat(maxBucketValue, notNullValue()); - assertThat(maxBucketValue.getName(), equalTo("max_bucket")); - assertThat(maxBucketValue.value(), equalTo(maxValue)); - assertThat(maxBucketValue.keys(), equalTo(maxKeys.toArray(new String[maxKeys.size()]))); - } - } - - public void testNoBuckets() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation( - terms("terms").field("tag") - .includeExclude(new IncludeExclude(null, "tag.*")) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) - ) - .addAggregation(maxBucket("max_bucket", "terms>sum")) - .get(); - - assertSearchResponse(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - List buckets = terms.getBuckets(); - assertThat(buckets.size(), equalTo(0)); - - InternalBucketMetricValue maxBucketValue = response.getAggregations().get("max_bucket"); - assertThat(maxBucketValue, notNullValue()); - assertThat(maxBucketValue.getName(), equalTo("max_bucket")); - assertThat(maxBucketValue.value(), equalTo(Double.NEGATIVE_INFINITY)); - assertThat(maxBucketValue.keys(), equalTo(new String[0])); - } - - public void testNested() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation( - terms("terms").field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).extendedBounds(minRandomValue, maxRandomValue) - ) - .subAggregation(maxBucket("max_histo_bucket", "histo>_count")) - ) - .addAggregation(maxBucket("max_terms_bucket", "terms>max_histo_bucket")) - .get(); - - assertSearchResponse(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - List termsBuckets = terms.getBuckets(); - assertThat(termsBuckets.size(), equalTo(interval)); - - List maxTermsKeys = new ArrayList<>(); - double maxTermsValue = Double.NEGATIVE_INFINITY; - for (int i = 0; i 
< interval; ++i) { - Terms.Bucket termsBucket = termsBuckets.get(i); - assertThat(termsBucket, notNullValue()); - assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); - - Histogram histo = termsBucket.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - - List maxHistoKeys = new ArrayList<>(); - double maxHistoValue = Double.NEGATIVE_INFINITY; - for (int j = 0; j < numValueBuckets; ++j) { - Histogram.Bucket bucket = buckets.get(j); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval)); - if (bucket.getDocCount() > maxHistoValue) { - maxHistoValue = bucket.getDocCount(); - maxHistoKeys = new ArrayList<>(); - maxHistoKeys.add(bucket.getKeyAsString()); - } else if (bucket.getDocCount() == maxHistoValue) { - maxHistoKeys.add(bucket.getKeyAsString()); - } - } - - InternalBucketMetricValue maxBucketValue = termsBucket.getAggregations().get("max_histo_bucket"); - assertThat(maxBucketValue, notNullValue()); - assertThat(maxBucketValue.getName(), equalTo("max_histo_bucket")); - assertThat(maxBucketValue.value(), equalTo(maxHistoValue)); - assertThat(maxBucketValue.keys(), equalTo(maxHistoKeys.toArray(new String[maxHistoKeys.size()]))); - if (maxHistoValue > maxTermsValue) { - maxTermsValue = maxHistoValue; - maxTermsKeys = new ArrayList<>(); - maxTermsKeys.add(termsBucket.getKeyAsString()); - } else if (maxHistoValue == maxTermsValue) { - maxTermsKeys.add(termsBucket.getKeyAsString()); - } - } - - InternalBucketMetricValue maxBucketValue = response.getAggregations().get("max_terms_bucket"); - assertThat(maxBucketValue, notNullValue()); - assertThat(maxBucketValue.getName(), equalTo("max_terms_bucket")); - assertThat(maxBucketValue.value(), equalTo(maxTermsValue)); - assertThat(maxBucketValue.keys(), equalTo(maxTermsKeys.toArray(new String[maxTermsKeys.size()]))); + @Override + protected String nestedMetric() { + return "value"; } - /** - * https://github.com/elastic/elasticsearch/issues/33514 - * - * This bug manifests as the max_bucket agg ("peak") being added to the response twice, because - * the pipeline agg is run twice. This makes invalid JSON and breaks conversion to maps. - * The bug was caused by an UnmappedTerms being the chosen as the first reduction target. UnmappedTerms - * delegated reduction to the first non-unmapped agg, which would reduce and run pipeline aggs. But then - * execution returns to the UnmappedTerms and _it_ runs pipelines as well, doubling up on the values. - * - * Applies to any pipeline agg, not just max. - */ - public void testFieldIsntWrittenOutTwice() throws Exception { - // you need to add an additional index with no fields in order to trigger this (or potentially a shard) - // so that there is an UnmappedTerms in the list to reduce. 
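/*
 * Not part of the patch itself: the MaxBucketIT, MinBucketIT, PercentilesBucketIT and StatsBucketIT
 * rewrites in this diff all replace their copy-pasted setup and assertion code with overrides of the
 * same four hooks on a new shared BucketMetricsPipeLineAggregationTestCase. That base class is not
 * shown in this excerpt, so the outline below is only an inferred sketch of its contract,
 * reconstructed from the subclass overrides visible here; the unbounded type parameter, the
 * @SuiteScopeTestCase placement, and anything beyond the four signatures are assumptions rather than
 * the actual implementation.
 */
import java.util.function.Function;
import java.util.function.IntToDoubleFunction;

import org.elasticsearch.search.aggregations.pipeline.BucketMetricsPipelineAggregationBuilder;
import org.elasticsearch.test.ESIntegTestCase;

@ESIntegTestCase.SuiteScopeTestCase
public abstract class BucketMetricsPipeLineAggregationTestCase<T> extends ESIntegTestCase {

    /** Builds the pipeline aggregation under test (maxBucket, minBucket, percentilesBucket, statsBucket, ...). */
    protected abstract BucketMetricsPipelineAggregationBuilder<?> BucketMetricsPipelineAgg(String name, String bucketsPath);

    /** Checks the reduced pipeline result against the per-bucket values and keys collected by the shared tests. */
    protected abstract void assertResult(
        IntToDoubleFunction bucketValues,
        Function<Integer, String> bucketKeys,
        int numBuckets,
        T pipelineBucket
    );

    /** Metric name appended to the buckets_path when this pipeline aggregation is consumed by another one. */
    protected abstract String nestedMetric();

    /** Reads that nested metric back out of a reduced pipeline bucket. */
    protected abstract double getNestedMetric(T bucket);
}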
- createIndex("foo_1"); - - XContentBuilder builder = jsonBuilder().startObject() - .startObject("properties") - .startObject("@timestamp") - .field("type", "date") - .endObject() - .startObject("license") - .startObject("properties") - .startObject("count") - .field("type", "long") - .endObject() - .startObject("partnumber") - .field("type", "text") - .startObject("fields") - .startObject("keyword") - .field("type", "keyword") - .field("ignore_above", 256) - .endObject() - .endObject() - .endObject() - .endObject() - .endObject() - .endObject() - .endObject(); - assertAcked(client().admin().indices().prepareCreate("foo_2").setMapping(builder).get()); - - XContentBuilder docBuilder = jsonBuilder().startObject() - .startObject("license") - .field("partnumber", "foobar") - .field("count", 2) - .endObject() - .field("@timestamp", "2018-07-08T08:07:00.599Z") - .endObject(); - - client().prepareIndex("foo_2").setSource(docBuilder).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); - - client().admin().indices().prepareRefresh(); - - TermsAggregationBuilder groupByLicenseAgg = AggregationBuilders.terms("group_by_license_partnumber") - .field("license.partnumber.keyword"); - MaxBucketPipelineAggregationBuilder peakPipelineAggBuilder = PipelineAggregatorBuilders.maxBucket( - "peak", - "licenses_per_day>total_licenses" - ); - SumAggregationBuilder sumAggBuilder = AggregationBuilders.sum("total_licenses").field("license.count"); - DateHistogramAggregationBuilder licensePerDayBuilder = AggregationBuilders.dateHistogram("licenses_per_day") - .field("@timestamp") - .fixedInterval(DateHistogramInterval.DAY); - licensePerDayBuilder.subAggregation(sumAggBuilder); - groupByLicenseAgg.subAggregation(licensePerDayBuilder); - groupByLicenseAgg.subAggregation(peakPipelineAggBuilder); - - SearchResponse response = client().prepareSearch("foo_*").setSize(0).addAggregation(groupByLicenseAgg).get(); - BytesReference bytes = XContentHelper.toXContent(response, XContentType.JSON, false); - XContentHelper.convertToMap(bytes, false, XContentType.JSON); + @Override + protected double getNestedMetric(InternalBucketMetricValue bucket) { + return bucket.value(); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/MinBucketIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/MinBucketIT.java index b327164ff5868..0dc3e8c87e819 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/MinBucketIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/MinBucketIT.java @@ -8,426 +8,51 @@ package org.elasticsearch.search.aggregations.pipeline; -import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.search.aggregations.BucketOrder; -import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; -import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; -import org.elasticsearch.search.aggregations.bucket.terms.IncludeExclude; -import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.aggregations.metrics.Sum; -import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import org.elasticsearch.test.ESIntegTestCase; - import java.util.ArrayList; import java.util.List; +import java.util.function.Function; +import java.util.function.IntToDoubleFunction; -import static 
org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; -import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; -import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.elasticsearch.search.aggregations.PipelineAggregatorBuilders.minBucket; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.core.IsNull.notNullValue; - -@ESIntegTestCase.SuiteScopeTestCase -public class MinBucketIT extends ESIntegTestCase { - - private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; - static int numDocs; - static int interval; - static int minRandomValue; - static int maxRandomValue; - static int numValueBuckets; - static long[] valueCounts; +public class MinBucketIT extends BucketMetricsPipeLineAggregationTestCase { @Override - public void setupSuiteScopeCluster() throws Exception { - assertAcked(client().admin().indices().prepareCreate("idx").setMapping("tag", "type=keyword").get()); - createIndex("idx_unmapped"); - - numDocs = randomIntBetween(6, 20); - interval = randomIntBetween(2, 5); - - minRandomValue = 0; - maxRandomValue = 20; - - numValueBuckets = ((maxRandomValue - minRandomValue) / interval) + 1; - valueCounts = new long[numValueBuckets]; - - List builders = new ArrayList<>(); - - for (int i = 0; i < numDocs; i++) { - int fieldValue = randomIntBetween(minRandomValue, maxRandomValue); - builders.add( - client().prepareIndex("idx") - .setSource( - jsonBuilder().startObject() - .field(SINGLE_VALUED_FIELD_NAME, fieldValue) - .field("tag", "tag" + (i % interval)) - .endObject() - ) - ); - final int bucket = (fieldValue / interval); // + (fieldValue < 0 ? 
-1 : 0) - (minRandomValue / interval - 1); - valueCounts[bucket]++; - } - - assertAcked(prepareCreate("empty_bucket_idx").setMapping(SINGLE_VALUED_FIELD_NAME, "type=integer")); - for (int i = 0; i < 2; i++) { - builders.add( - client().prepareIndex("empty_bucket_idx") - .setId("" + i) - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, i * 2).endObject()) - ); - } - indexRandom(true, builders); - ensureSearchable(); - } - - public void testDocCountTopLevel() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).extendedBounds(minRandomValue, maxRandomValue) - ) - .addAggregation(minBucket("min_bucket", "histo>_count")) - .get(); - - assertSearchResponse(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(numValueBuckets)); - - List minKeys = new ArrayList<>(); - double minValue = Double.POSITIVE_INFINITY; - for (int i = 0; i < numValueBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); - assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); - if (bucket.getDocCount() < minValue) { - minValue = bucket.getDocCount(); - minKeys = new ArrayList<>(); - minKeys.add(bucket.getKeyAsString()); - } else if (bucket.getDocCount() == minValue) { - minKeys.add(bucket.getKeyAsString()); - } - } - - InternalBucketMetricValue minBucketValue = response.getAggregations().get("min_bucket"); - assertThat(minBucketValue, notNullValue()); - assertThat(minBucketValue.getName(), equalTo("min_bucket")); - assertThat(minBucketValue.value(), equalTo(minValue)); - assertThat(minBucketValue.keys(), equalTo(minKeys.toArray(new String[minKeys.size()]))); - } - - public void testDocCountAsSubAgg() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation( - terms("terms").field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).extendedBounds(minRandomValue, maxRandomValue) - ) - .subAggregation(minBucket("min_bucket", "histo>_count")) - ) - .get(); - - assertSearchResponse(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - List termsBuckets = terms.getBuckets(); - assertThat(termsBuckets.size(), equalTo(interval)); - - for (int i = 0; i < interval; ++i) { - Terms.Bucket termsBucket = termsBuckets.get(i); - assertThat(termsBucket, notNullValue()); - assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); - - Histogram histo = termsBucket.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - - List minKeys = new ArrayList<>(); - double minValue = Double.POSITIVE_INFINITY; - for (int j = 0; j < numValueBuckets; ++j) { - Histogram.Bucket bucket = buckets.get(j); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval)); - if (bucket.getDocCount() < minValue) { - minValue = bucket.getDocCount(); - minKeys = new ArrayList<>(); - minKeys.add(bucket.getKeyAsString()); - } else if 
(bucket.getDocCount() == minValue) { - minKeys.add(bucket.getKeyAsString()); - } - } - - InternalBucketMetricValue minBucketValue = termsBucket.getAggregations().get("min_bucket"); - assertThat(minBucketValue, notNullValue()); - assertThat(minBucketValue.getName(), equalTo("min_bucket")); - assertThat(minBucketValue.value(), equalTo(minValue)); - assertThat(minBucketValue.keys(), equalTo(minKeys.toArray(new String[minKeys.size()]))); - } + protected MinBucketPipelineAggregationBuilder BucketMetricsPipelineAgg(String name, String bucketsPath) { + return minBucket(name, bucketsPath); } - public void testMetricTopLevel() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation(terms("terms").field("tag").subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) - .addAggregation(minBucket("min_bucket", "terms>sum")) - .get(); - - assertSearchResponse(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - List buckets = terms.getBuckets(); - assertThat(buckets.size(), equalTo(interval)); - + @Override + protected void assertResult( + IntToDoubleFunction bucketValues, + Function bucketKeys, + int numBuckets, + InternalBucketMetricValue pipelineBucket + ) { List minKeys = new ArrayList<>(); double minValue = Double.POSITIVE_INFINITY; - for (int i = 0; i < interval; ++i) { - Terms.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("tag" + (i % interval))); - assertThat(bucket.getDocCount(), greaterThan(0L)); - Sum sum = bucket.getAggregations().get("sum"); - assertThat(sum, notNullValue()); - if (sum.value() < minValue) { - minValue = sum.value(); + for (int i = 0; i < numBuckets; ++i) { + double bucketValue = bucketValues.applyAsDouble(i); + if (bucketValue < minValue) { + minValue = bucketValue; minKeys = new ArrayList<>(); - minKeys.add(bucket.getKeyAsString()); - } else if (sum.value() == minValue) { - minKeys.add(bucket.getKeyAsString()); + minKeys.add(bucketKeys.apply(i)); + } else if (bucketValue == minValue) { + minKeys.add(bucketKeys.apply(i)); } } - - InternalBucketMetricValue minBucketValue = response.getAggregations().get("min_bucket"); - assertThat(minBucketValue, notNullValue()); - assertThat(minBucketValue.getName(), equalTo("min_bucket")); - assertThat(minBucketValue.value(), equalTo(minValue)); - assertThat(minBucketValue.keys(), equalTo(minKeys.toArray(new String[minKeys.size()]))); - } - - public void testMetricAsSubAgg() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation( - terms("terms").field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME) - .interval(interval) - .extendedBounds(minRandomValue, maxRandomValue) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) - ) - .subAggregation(minBucket("min_bucket", "histo>sum")) - ) - .get(); - - assertSearchResponse(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - List termsBuckets = terms.getBuckets(); - assertThat(termsBuckets.size(), equalTo(interval)); - - for (int i = 0; i < interval; ++i) { - Terms.Bucket termsBucket = termsBuckets.get(i); - assertThat(termsBucket, notNullValue()); - assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); - - Histogram histo = 
termsBucket.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - - List minKeys = new ArrayList<>(); - double minValue = Double.POSITIVE_INFINITY; - for (int j = 0; j < numValueBuckets; ++j) { - Histogram.Bucket bucket = buckets.get(j); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval)); - if (bucket.getDocCount() != 0) { - Sum sum = bucket.getAggregations().get("sum"); - assertThat(sum, notNullValue()); - if (sum.value() < minValue) { - minValue = sum.value(); - minKeys = new ArrayList<>(); - minKeys.add(bucket.getKeyAsString()); - } else if (sum.value() == minValue) { - minKeys.add(bucket.getKeyAsString()); - } - } - } - - InternalBucketMetricValue minBucketValue = termsBucket.getAggregations().get("min_bucket"); - assertThat(minBucketValue, notNullValue()); - assertThat(minBucketValue.getName(), equalTo("min_bucket")); - assertThat(minBucketValue.value(), equalTo(minValue)); - assertThat(minBucketValue.keys(), equalTo(minKeys.toArray(new String[minKeys.size()]))); - } - } - - public void testMetricAsSubAggWithInsertZeros() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation( - terms("terms").field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME) - .interval(interval) - .extendedBounds(minRandomValue, maxRandomValue) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) - ) - .subAggregation(minBucket("min_bucket", "histo>sum").gapPolicy(GapPolicy.INSERT_ZEROS)) - ) - .get(); - - assertSearchResponse(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - List termsBuckets = terms.getBuckets(); - assertThat(termsBuckets.size(), equalTo(interval)); - - for (int i = 0; i < interval; ++i) { - Terms.Bucket termsBucket = termsBuckets.get(i); - assertThat(termsBucket, notNullValue()); - assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); - - Histogram histo = termsBucket.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - - List minKeys = new ArrayList<>(); - double minValue = Double.POSITIVE_INFINITY; - for (int j = 0; j < numValueBuckets; ++j) { - Histogram.Bucket bucket = buckets.get(j); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval)); - Sum sum = bucket.getAggregations().get("sum"); - assertThat(sum, notNullValue()); - if (sum.value() < minValue) { - minValue = sum.value(); - minKeys = new ArrayList<>(); - minKeys.add(bucket.getKeyAsString()); - } else if (sum.value() == minValue) { - minKeys.add(bucket.getKeyAsString()); - } - } - - InternalBucketMetricValue minBucketValue = termsBucket.getAggregations().get("min_bucket"); - assertThat(minBucketValue, notNullValue()); - assertThat(minBucketValue.getName(), equalTo("min_bucket")); - assertThat(minBucketValue.value(), equalTo(minValue)); - assertThat(minBucketValue.keys(), equalTo(minKeys.toArray(new String[minKeys.size()]))); - } + assertThat(pipelineBucket.value(), equalTo(minValue)); + assertThat(pipelineBucket.keys(), equalTo(minKeys.toArray(new String[0]))); } - public void testNoBuckets() throws Exception { - SearchResponse response = 
client().prepareSearch("idx") - .addAggregation( - terms("terms").field("tag") - .includeExclude(new IncludeExclude(null, "tag.*")) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) - ) - .addAggregation(minBucket("min_bucket", "terms>sum")) - .get(); - - assertSearchResponse(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - List buckets = terms.getBuckets(); - assertThat(buckets.size(), equalTo(0)); - - InternalBucketMetricValue minBucketValue = response.getAggregations().get("min_bucket"); - assertThat(minBucketValue, notNullValue()); - assertThat(minBucketValue.getName(), equalTo("min_bucket")); - assertThat(minBucketValue.value(), equalTo(Double.POSITIVE_INFINITY)); - assertThat(minBucketValue.keys(), equalTo(new String[0])); + @Override + protected String nestedMetric() { + return "value"; } - public void testNested() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation( - terms("terms").field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).extendedBounds(minRandomValue, maxRandomValue) - ) - .subAggregation(minBucket("min_histo_bucket", "histo>_count")) - ) - .addAggregation(minBucket("min_terms_bucket", "terms>min_histo_bucket")) - .get(); - - assertSearchResponse(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - List termsBuckets = terms.getBuckets(); - assertThat(termsBuckets.size(), equalTo(interval)); - - List minTermsKeys = new ArrayList<>(); - double minTermsValue = Double.POSITIVE_INFINITY; - for (int i = 0; i < interval; ++i) { - Terms.Bucket termsBucket = termsBuckets.get(i); - assertThat(termsBucket, notNullValue()); - assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); - - Histogram histo = termsBucket.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - - List minHistoKeys = new ArrayList<>(); - double minHistoValue = Double.POSITIVE_INFINITY; - for (int j = 0; j < numValueBuckets; ++j) { - Histogram.Bucket bucket = buckets.get(j); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval)); - if (bucket.getDocCount() < minHistoValue) { - minHistoValue = bucket.getDocCount(); - minHistoKeys = new ArrayList<>(); - minHistoKeys.add(bucket.getKeyAsString()); - } else if (bucket.getDocCount() == minHistoValue) { - minHistoKeys.add(bucket.getKeyAsString()); - } - } - - InternalBucketMetricValue minBucketValue = termsBucket.getAggregations().get("min_histo_bucket"); - assertThat(minBucketValue, notNullValue()); - assertThat(minBucketValue.getName(), equalTo("min_histo_bucket")); - assertThat(minBucketValue.value(), equalTo(minHistoValue)); - assertThat(minBucketValue.keys(), equalTo(minHistoKeys.toArray(new String[minHistoKeys.size()]))); - if (minHistoValue < minTermsValue) { - minTermsValue = minHistoValue; - minTermsKeys = new ArrayList<>(); - minTermsKeys.add(termsBucket.getKeyAsString()); - } else if (minHistoValue == minTermsValue) { - minTermsKeys.add(termsBucket.getKeyAsString()); - } - } - - InternalBucketMetricValue minBucketValue = response.getAggregations().get("min_terms_bucket"); - assertThat(minBucketValue, notNullValue()); - 
assertThat(minBucketValue.getName(), equalTo("min_terms_bucket")); - assertThat(minBucketValue.value(), equalTo(minTermsValue)); - assertThat(minBucketValue.keys(), equalTo(minTermsKeys.toArray(new String[minTermsKeys.size()]))); + @Override + protected double getNestedMetric(InternalBucketMetricValue bucket) { + return bucket.value(); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketIT.java index 69e0fa84f9086..6c432a13815e4 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketIT.java @@ -9,7 +9,6 @@ package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.search.aggregations.BucketOrder; @@ -18,204 +17,67 @@ import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.metrics.Percentile; import org.elasticsearch.search.aggregations.metrics.Sum; -import org.elasticsearch.test.ESIntegTestCase; -import java.util.ArrayList; import java.util.Arrays; -import java.util.Collections; import java.util.Iterator; import java.util.List; +import java.util.function.Function; +import java.util.function.IntToDoubleFunction; -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.elasticsearch.search.aggregations.PipelineAggregatorBuilders.percentilesBucket; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.core.IsNull.notNullValue; -@ESIntegTestCase.SuiteScopeTestCase -public class PercentilesBucketIT extends ESIntegTestCase { +public class PercentilesBucketIT extends BucketMetricsPipeLineAggregationTestCase { - private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; private static final double[] PERCENTS = { 0.0, 1.0, 25.0, 50.0, 75.0, 99.0, 100.0 }; - static int numDocs; - static int interval; - static int minRandomValue; - static int maxRandomValue; - static int numValueBuckets; - static long[] valueCounts; @Override - public void setupSuiteScopeCluster() throws Exception { - assertAcked(client().admin().indices().prepareCreate("idx").setMapping("tag", "type=keyword").get()); - createIndex("idx_unmapped"); - - numDocs = randomIntBetween(6, 20); - interval = randomIntBetween(2, 5); - - minRandomValue = 0; - maxRandomValue = 20; - - numValueBuckets = ((maxRandomValue - minRandomValue) / interval) + 1; - valueCounts = new long[numValueBuckets]; - - List builders = new ArrayList<>(); - - for (int i = 0; i < numDocs; i++) { - int fieldValue = randomIntBetween(minRandomValue, maxRandomValue); - builders.add( - client().prepareIndex("idx") - 
.setSource( - jsonBuilder().startObject() - .field(SINGLE_VALUED_FIELD_NAME, fieldValue) - .field("tag", "tag" + (i % interval)) - .endObject() - ) - ); - final int bucket = (fieldValue / interval); // + (fieldValue < 0 ? -1 : 0) - (minRandomValue / interval - 1); - valueCounts[bucket]++; - } - - assertAcked(prepareCreate("empty_bucket_idx").setMapping(SINGLE_VALUED_FIELD_NAME, "type=integer")); - for (int i = 0; i < 2; i++) { - builders.add( - client().prepareIndex("empty_bucket_idx") - .setId("" + i) - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, i * 2).endObject()) - ); - } - indexRandom(true, builders); - ensureSearchable(); + protected PercentilesBucketPipelineAggregationBuilder BucketMetricsPipelineAgg(String name, String bucketsPath) { + return percentilesBucket(name, bucketsPath).setPercents(PERCENTS); } - public void testDocCountopLevel() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).extendedBounds(minRandomValue, maxRandomValue) - ) - .addAggregation(percentilesBucket("percentiles_bucket", "histo>_count").setPercents(PERCENTS)) - .get(); - - assertSearchResponse(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(numValueBuckets)); - - double[] values = new double[numValueBuckets]; - for (int i = 0; i < numValueBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); - assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); - values[i] = bucket.getDocCount(); + @Override + protected void assertResult( + IntToDoubleFunction bucketValues, + Function bucketKeys, + int numBuckets, + PercentilesBucket pipelineBucket + ) { + double[] values = new double[numBuckets]; + for (int i = 0; i < numBuckets; ++i) { + values[i] = bucketValues.applyAsDouble(i); } - Arrays.sort(values); - - PercentilesBucket percentilesBucketValue = response.getAggregations().get("percentiles_bucket"); - assertThat(percentilesBucketValue, notNullValue()); - assertThat(percentilesBucketValue.getName(), equalTo("percentiles_bucket")); - assertPercentileBucket(PERCENTS, values, percentilesBucketValue); + assertPercentileBucket(PERCENTS, values, pipelineBucket); } - public void testDocCountAsSubAgg() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation( - terms("terms").field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).extendedBounds(minRandomValue, maxRandomValue) - ) - .subAggregation(percentilesBucket("percentiles_bucket", "histo>_count").setPercents(PERCENTS)) - ) - .get(); - - assertSearchResponse(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - List termsBuckets = terms.getBuckets(); - assertThat(termsBuckets.size(), equalTo(interval)); - - for (int i = 0; i < interval; ++i) { - Terms.Bucket termsBucket = termsBuckets.get(i); - assertThat(termsBucket, notNullValue()); - assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); - - Histogram histo = termsBucket.getAggregations().get("histo"); - assertThat(histo, 
notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - - double[] values = new double[numValueBuckets]; - for (int j = 0; j < numValueBuckets; ++j) { - Histogram.Bucket bucket = buckets.get(j); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval)); - values[j] = bucket.getDocCount(); - } - - Arrays.sort(values); - - PercentilesBucket percentilesBucketValue = termsBucket.getAggregations().get("percentiles_bucket"); - assertThat(percentilesBucketValue, notNullValue()); - assertThat(percentilesBucketValue.getName(), equalTo("percentiles_bucket")); - assertPercentileBucket(PERCENTS, values, percentilesBucketValue); - } + @Override + protected String nestedMetric() { + return "50"; } - public void testMetricTopLevel() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation(terms("terms").field("tag").subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) - .addAggregation(percentilesBucket("percentiles_bucket", "terms>sum").setPercents(PERCENTS)) - .get(); - - assertSearchResponse(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - List buckets = terms.getBuckets(); - assertThat(buckets.size(), equalTo(interval)); - - double[] values = new double[interval]; - for (int i = 0; i < interval; ++i) { - Terms.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("tag" + (i % interval))); - assertThat(bucket.getDocCount(), greaterThan(0L)); - Sum sum = bucket.getAggregations().get("sum"); - assertThat(sum, notNullValue()); - values[i] = sum.value(); - } - - Arrays.sort(values); - - PercentilesBucket percentilesBucketValue = response.getAggregations().get("percentiles_bucket"); - assertThat(percentilesBucketValue, notNullValue()); - assertThat(percentilesBucketValue.getName(), equalTo("percentiles_bucket")); - assertPercentileBucket(PERCENTS, values, percentilesBucketValue); + @Override + protected double getNestedMetric(PercentilesBucket bucket) { + return bucket.percentile(50); } public void testMetricTopLevelDefaultPercents() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(terms("terms").field("tag").subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) - .addAggregation(percentilesBucket("percentiles_bucket", "terms>sum")) + .addAggregation(terms(termsName).field("tag").subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) + .addAggregation(percentilesBucket("percentiles_bucket", termsName + ">sum")) .get(); assertSearchResponse(response); - Terms terms = response.getAggregations().get("terms"); + Terms terms = response.getAggregations().get(termsName); assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getName(), equalTo(termsName)); List buckets = terms.getBuckets(); assertThat(buckets.size(), equalTo(interval)); @@ -238,157 +100,21 @@ public void testMetricTopLevelDefaultPercents() throws Exception { assertPercentileBucket(values, percentilesBucketValue); } - public void testMetricAsSubAgg() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation( - terms("terms").field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME) - .interval(interval) - 
.extendedBounds(minRandomValue, maxRandomValue) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) - ) - .subAggregation(percentilesBucket("percentiles_bucket", "histo>sum").setPercents(PERCENTS)) - ) - .get(); - - assertSearchResponse(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - List termsBuckets = terms.getBuckets(); - assertThat(termsBuckets.size(), equalTo(interval)); - - for (int i = 0; i < interval; ++i) { - Terms.Bucket termsBucket = termsBuckets.get(i); - assertThat(termsBucket, notNullValue()); - assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); - - Histogram histo = termsBucket.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - - List values = new ArrayList<>(numValueBuckets); - for (int j = 0; j < numValueBuckets; ++j) { - Histogram.Bucket bucket = buckets.get(j); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval)); - if (bucket.getDocCount() != 0) { - Sum sum = bucket.getAggregations().get("sum"); - assertThat(sum, notNullValue()); - values.add(sum.value()); - } - } - - Collections.sort(values); - - PercentilesBucket percentilesBucketValue = termsBucket.getAggregations().get("percentiles_bucket"); - assertThat(percentilesBucketValue, notNullValue()); - assertThat(percentilesBucketValue.getName(), equalTo("percentiles_bucket")); - assertPercentileBucket(PERCENTS, values.stream().mapToDouble(Double::doubleValue).toArray(), percentilesBucketValue); - } - } - - public void testMetricAsSubAggWithInsertZeros() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation( - terms("terms").field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME) - .interval(interval) - .extendedBounds(minRandomValue, maxRandomValue) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) - ) - .subAggregation( - percentilesBucket("percentiles_bucket", "histo>sum").gapPolicy(BucketHelpers.GapPolicy.INSERT_ZEROS) - .setPercents(PERCENTS) - ) - ) - .get(); - - assertSearchResponse(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - List termsBuckets = terms.getBuckets(); - assertThat(termsBuckets.size(), equalTo(interval)); - - for (int i = 0; i < interval; ++i) { - Terms.Bucket termsBucket = termsBuckets.get(i); - assertThat(termsBucket, notNullValue()); - assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); - - Histogram histo = termsBucket.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - - double[] values = new double[numValueBuckets]; - for (int j = 0; j < numValueBuckets; ++j) { - Histogram.Bucket bucket = buckets.get(j); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval)); - Sum sum = bucket.getAggregations().get("sum"); - assertThat(sum, notNullValue()); - - values[j] = sum.value(); - } - - Arrays.sort(values); - - PercentilesBucket percentilesBucketValue = termsBucket.getAggregations().get("percentiles_bucket"); - assertThat(percentilesBucketValue, notNullValue()); - 
assertThat(percentilesBucketValue.getName(), equalTo("percentiles_bucket")); - assertPercentileBucket(PERCENTS, values, percentilesBucketValue); - } - } - - public void testNoBuckets() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation( - terms("terms").field("tag") - .includeExclude(new IncludeExclude(null, "tag.*")) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) - ) - .addAggregation(percentilesBucket("percentiles_bucket", "terms>sum").setPercents(PERCENTS)) - .get(); - - assertSearchResponse(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - List buckets = terms.getBuckets(); - assertThat(buckets.size(), equalTo(0)); - - PercentilesBucket percentilesBucketValue = response.getAggregations().get("percentiles_bucket"); - assertThat(percentilesBucketValue, notNullValue()); - assertThat(percentilesBucketValue.getName(), equalTo("percentiles_bucket")); - for (Double p : PERCENTS) { - assertThat(percentilesBucketValue.percentile(p), equalTo(Double.NaN)); - } - } - public void testWrongPercents() throws Exception { SearchResponse response = client().prepareSearch("idx") .addAggregation( - terms("terms").field("tag") + terms(termsName).field("tag") .includeExclude(new IncludeExclude(null, "tag.*")) .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) ) - .addAggregation(percentilesBucket("percentiles_bucket", "terms>sum").setPercents(PERCENTS)) + .addAggregation(percentilesBucket("percentiles_bucket", termsName + ">sum").setPercents(PERCENTS)) .get(); assertSearchResponse(response); - Terms terms = response.getAggregations().get("terms"); + Terms terms = response.getAggregations().get(termsName); assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getName(), equalTo(termsName)); List buckets = terms.getBuckets(); assertThat(buckets.size(), equalTo(0)); @@ -409,8 +135,8 @@ public void testBadPercents() throws Exception { try { client().prepareSearch("idx") - .addAggregation(terms("terms").field("tag").subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) - .addAggregation(percentilesBucket("percentiles_bucket", "terms>sum").setPercents(badPercents)) + .addAggregation(terms(termsName).field("tag").subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) + .addAggregation(percentilesBucket("percentiles_bucket", termsName + ">sum").setPercents(badPercents)) .get(); fail("Illegal percent's were provided but no exception was thrown."); @@ -437,14 +163,14 @@ public void testBadPercents_asSubAgg() throws Exception { try { client().prepareSearch("idx") .addAggregation( - terms("terms").field("tag") + terms(termsName).field("tag") .order(BucketOrder.key(true)) .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME) + histogram(histoName).field(SINGLE_VALUED_FIELD_NAME) .interval(interval) .extendedBounds(minRandomValue, maxRandomValue) ) - .subAggregation(percentilesBucket("percentiles_bucket", "histo>_count").setPercents(badPercents)) + .subAggregation(percentilesBucket("percentiles_bucket", histoName + ">_count").setPercents(badPercents)) ) .get(); @@ -466,82 +192,27 @@ public void testBadPercents_asSubAgg() throws Exception { } - public void testNested() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation( - terms("terms").field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - 
histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).extendedBounds(minRandomValue, maxRandomValue) - ) - .subAggregation(percentilesBucket("percentile_histo_bucket", "histo>_count").setPercents(PERCENTS)) - ) - .addAggregation(percentilesBucket("percentile_terms_bucket", "terms>percentile_histo_bucket.50").setPercents(PERCENTS)) - .get(); - - assertSearchResponse(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - List termsBuckets = terms.getBuckets(); - assertThat(termsBuckets.size(), equalTo(interval)); - - double[] values = new double[termsBuckets.size()]; - for (int i = 0; i < interval; ++i) { - Terms.Bucket termsBucket = termsBuckets.get(i); - assertThat(termsBucket, notNullValue()); - assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); - - Histogram histo = termsBucket.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - - double[] innerValues = new double[numValueBuckets]; - for (int j = 0; j < numValueBuckets; ++j) { - Histogram.Bucket bucket = buckets.get(j); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval)); - - innerValues[j] = bucket.getDocCount(); - } - Arrays.sort(innerValues); - - PercentilesBucket percentilesBucketValue = termsBucket.getAggregations().get("percentile_histo_bucket"); - assertThat(percentilesBucketValue, notNullValue()); - assertThat(percentilesBucketValue.getName(), equalTo("percentile_histo_bucket")); - assertPercentileBucket(PERCENTS, innerValues, percentilesBucketValue); - values[i] = percentilesBucketValue.percentile(50.0); - } - - Arrays.sort(values); - - PercentilesBucket percentilesBucketValue = response.getAggregations().get("percentile_terms_bucket"); - assertThat(percentilesBucketValue, notNullValue()); - assertThat(percentilesBucketValue.getName(), equalTo("percentile_terms_bucket")); - assertPercentileBucket(PERCENTS, values, percentilesBucketValue); - } - public void testNestedWithDecimal() throws Exception { double[] percent = { 99.9 }; SearchResponse response = client().prepareSearch("idx") .addAggregation( - terms("terms").field("tag") + terms(termsName).field("tag") .order(BucketOrder.key(true)) .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).extendedBounds(minRandomValue, maxRandomValue) + histogram(histoName).field(SINGLE_VALUED_FIELD_NAME) + .interval(interval) + .extendedBounds(minRandomValue, maxRandomValue) ) - .subAggregation(percentilesBucket("percentile_histo_bucket", "histo>_count").setPercents(percent)) + .subAggregation(percentilesBucket("percentile_histo_bucket", histoName + ">_count").setPercents(percent)) ) - .addAggregation(percentilesBucket("percentile_terms_bucket", "terms>percentile_histo_bucket[99.9]").setPercents(percent)) + .addAggregation(percentilesBucket("percentile_terms_bucket", termsName + ">percentile_histo_bucket[99.9]").setPercents(percent)) .get(); assertSearchResponse(response); - Terms terms = response.getAggregations().get("terms"); + Terms terms = response.getAggregations().get(termsName); assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); + assertThat(terms.getName(), equalTo(termsName)); List termsBuckets = terms.getBuckets(); assertThat(termsBuckets.size(), equalTo(interval)); @@ -551,9 +222,9 @@ public void 
testNestedWithDecimal() throws Exception { assertThat(termsBucket, notNullValue()); assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); - Histogram histo = termsBucket.getAggregations().get("histo"); + Histogram histo = termsBucket.getAggregations().get(histoName); assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); + assertThat(histo.getName(), equalTo(histoName)); List buckets = histo.getBuckets(); double[] innerValues = new double[numValueBuckets]; @@ -587,8 +258,12 @@ public void testNestedWithDecimal() throws Exception { private void assertPercentileBucket(double[] values, PercentilesBucket percentiles) { for (Percentile percentile : percentiles) { assertEquals(percentiles.percentile(percentile.getPercent()), percentile.getValue(), 0d); - int index = (int) Math.round((percentile.getPercent() / 100.0) * (values.length - 1)); - assertThat(percentile.getValue(), equalTo(values[index])); + if (values.length == 0) { + assertThat(percentile.getValue(), equalTo(Double.NaN)); + } else { + int index = (int) Math.round((percentile.getPercent() / 100.0) * (values.length - 1)); + assertThat(percentile.getValue(), equalTo(values[index])); + } } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/StatsBucketIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/StatsBucketIT.java index ab2700005e785..7040f3bf115f3 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/StatsBucketIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/StatsBucketIT.java @@ -8,429 +8,51 @@ package org.elasticsearch.search.aggregations.pipeline; -import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.search.aggregations.BucketOrder; -import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; -import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; -import org.elasticsearch.search.aggregations.bucket.terms.IncludeExclude; -import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.aggregations.metrics.Sum; -import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import org.elasticsearch.test.ESIntegTestCase; +import java.util.function.Function; +import java.util.function.IntToDoubleFunction; -import java.util.ArrayList; -import java.util.List; - -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; -import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; -import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.elasticsearch.search.aggregations.PipelineAggregatorBuilders.statsBucket; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.core.IsNull.notNullValue; - -@ESIntegTestCase.SuiteScopeTestCase -public class StatsBucketIT extends ESIntegTestCase { - - private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; - static int numDocs; - static int interval; - static int minRandomValue; - static int 
maxRandomValue; - static int numValueBuckets; - static long[] valueCounts; +public class StatsBucketIT extends BucketMetricsPipeLineAggregationTestCase { @Override - public void setupSuiteScopeCluster() throws Exception { - assertAcked(client().admin().indices().prepareCreate("idx").setMapping("tag", "type=keyword").get()); - createIndex("idx_unmapped"); - - numDocs = randomIntBetween(6, 20); - interval = randomIntBetween(2, 5); - - minRandomValue = 0; - maxRandomValue = 20; - - numValueBuckets = ((maxRandomValue - minRandomValue) / interval) + 1; - valueCounts = new long[numValueBuckets]; - - List builders = new ArrayList<>(); - - for (int i = 0; i < numDocs; i++) { - int fieldValue = randomIntBetween(minRandomValue, maxRandomValue); - builders.add( - client().prepareIndex("idx") - .setSource( - jsonBuilder().startObject() - .field(SINGLE_VALUED_FIELD_NAME, fieldValue) - .field("tag", "tag" + (i % interval)) - .endObject() - ) - ); - final int bucket = (fieldValue / interval); // + (fieldValue < 0 ? -1 : 0) - (minRandomValue / interval - 1); - valueCounts[bucket]++; - } - - assertAcked(prepareCreate("empty_bucket_idx").setMapping(SINGLE_VALUED_FIELD_NAME, "type=integer")); - for (int i = 0; i < 2; i++) { - builders.add( - client().prepareIndex("empty_bucket_idx") - .setId("" + i) - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, i * 2).endObject()) - ); - } - indexRandom(true, builders); - ensureSearchable(); + protected StatsBucketPipelineAggregationBuilder BucketMetricsPipelineAgg(String name, String bucketsPath) { + return statsBucket(name, bucketsPath); } - public void testDocCountTopLevel() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).extendedBounds(minRandomValue, maxRandomValue) - ) - .addAggregation(statsBucket("stats_bucket", "histo>_count")) - .get(); - - assertSearchResponse(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(numValueBuckets)); - + @Override + protected void assertResult( + IntToDoubleFunction bucketValues, + Function bucketKeys, + int numBuckets, + StatsBucket pipelineBucket + ) { double sum = 0; int count = 0; double min = Double.POSITIVE_INFINITY; double max = Double.NEGATIVE_INFINITY; - for (int i = 0; i < numValueBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); - assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); + for (int i = 0; i < numBuckets; ++i) { + double bucketValue = bucketValues.applyAsDouble(i); count++; - sum += bucket.getDocCount(); - min = Math.min(min, bucket.getDocCount()); - max = Math.max(max, bucket.getDocCount()); + sum += bucketValue; + min = Math.min(min, bucketValue); + max = Math.max(max, bucketValue); } double avgValue = count == 0 ? 
Double.NaN : (sum / count); - StatsBucket statsBucketValue = response.getAggregations().get("stats_bucket"); - assertThat(statsBucketValue, notNullValue()); - assertThat(statsBucketValue.getName(), equalTo("stats_bucket")); - assertThat(statsBucketValue.getAvg(), equalTo(avgValue)); - assertThat(statsBucketValue.getMin(), equalTo(min)); - assertThat(statsBucketValue.getMax(), equalTo(max)); - } - - public void testDocCountAsSubAgg() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation( - terms("terms").field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).extendedBounds(minRandomValue, maxRandomValue) - ) - .subAggregation(statsBucket("stats_bucket", "histo>_count")) - ) - .get(); - - assertSearchResponse(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - List termsBuckets = terms.getBuckets(); - assertThat(termsBuckets.size(), equalTo(interval)); - - for (int i = 0; i < interval; ++i) { - Terms.Bucket termsBucket = termsBuckets.get(i); - assertThat(termsBucket, notNullValue()); - assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); - - Histogram histo = termsBucket.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - - double sum = 0; - int count = 0; - double min = Double.POSITIVE_INFINITY; - double max = Double.NEGATIVE_INFINITY; - for (int j = 0; j < numValueBuckets; ++j) { - Histogram.Bucket bucket = buckets.get(j); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval)); - count++; - sum += bucket.getDocCount(); - min = Math.min(min, bucket.getDocCount()); - max = Math.max(max, bucket.getDocCount()); - } - - double avgValue = count == 0 ? 
Double.NaN : (sum / count); - StatsBucket statsBucketValue = termsBucket.getAggregations().get("stats_bucket"); - assertThat(statsBucketValue, notNullValue()); - assertThat(statsBucketValue.getName(), equalTo("stats_bucket")); - assertThat(statsBucketValue.getAvg(), equalTo(avgValue)); - assertThat(statsBucketValue.getMin(), equalTo(min)); - assertThat(statsBucketValue.getMax(), equalTo(max)); - } - } - - public void testMetricTopLevel() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation(terms("terms").field("tag").subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) - .addAggregation(statsBucket("stats_bucket", "terms>sum")) - .get(); - - assertSearchResponse(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - List buckets = terms.getBuckets(); - assertThat(buckets.size(), equalTo(interval)); - - double bucketSum = 0; - int count = 0; - double min = Double.POSITIVE_INFINITY; - double max = Double.NEGATIVE_INFINITY; - for (int i = 0; i < interval; ++i) { - Terms.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("tag" + (i % interval))); - assertThat(bucket.getDocCount(), greaterThan(0L)); - Sum sum = bucket.getAggregations().get("sum"); - assertThat(sum, notNullValue()); - count++; - bucketSum += sum.value(); - min = Math.min(min, sum.value()); - max = Math.max(max, sum.value()); - } - - double avgValue = count == 0 ? Double.NaN : (bucketSum / count); - StatsBucket statsBucketValue = response.getAggregations().get("stats_bucket"); - assertThat(statsBucketValue, notNullValue()); - assertThat(statsBucketValue.getName(), equalTo("stats_bucket")); - assertThat(statsBucketValue.getAvg(), equalTo(avgValue)); - assertThat(statsBucketValue.getMin(), equalTo(min)); - assertThat(statsBucketValue.getMax(), equalTo(max)); + assertThat(pipelineBucket.getAvg(), equalTo(avgValue)); + assertThat(pipelineBucket.getMin(), equalTo(min)); + assertThat(pipelineBucket.getMax(), equalTo(max)); } - public void testMetricAsSubAgg() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation( - terms("terms").field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME) - .interval(interval) - .extendedBounds(minRandomValue, maxRandomValue) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) - ) - .subAggregation(statsBucket("stats_bucket", "histo>sum")) - ) - .get(); - - assertSearchResponse(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - List termsBuckets = terms.getBuckets(); - assertThat(termsBuckets.size(), equalTo(interval)); - - for (int i = 0; i < interval; ++i) { - Terms.Bucket termsBucket = termsBuckets.get(i); - assertThat(termsBucket, notNullValue()); - assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); - - Histogram histo = termsBucket.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - - double bucketSum = 0; - int count = 0; - double min = Double.POSITIVE_INFINITY; - double max = Double.NEGATIVE_INFINITY; - for (int j = 0; j < numValueBuckets; ++j) { - Histogram.Bucket bucket = buckets.get(j); - assertThat(bucket, notNullValue()); - 
assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval)); - if (bucket.getDocCount() != 0) { - Sum sum = bucket.getAggregations().get("sum"); - assertThat(sum, notNullValue()); - count++; - bucketSum += sum.value(); - min = Math.min(min, sum.value()); - max = Math.max(max, sum.value()); - } - } - - double avgValue = count == 0 ? Double.NaN : (bucketSum / count); - StatsBucket statsBucketValue = termsBucket.getAggregations().get("stats_bucket"); - assertThat(statsBucketValue, notNullValue()); - assertThat(statsBucketValue.getName(), equalTo("stats_bucket")); - assertThat(statsBucketValue.getAvg(), equalTo(avgValue)); - assertThat(statsBucketValue.getMin(), equalTo(min)); - assertThat(statsBucketValue.getMax(), equalTo(max)); - } - } - - public void testMetricAsSubAggWithInsertZeros() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation( - terms("terms").field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME) - .interval(interval) - .extendedBounds(minRandomValue, maxRandomValue) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) - ) - .subAggregation(statsBucket("stats_bucket", "histo>sum").gapPolicy(GapPolicy.INSERT_ZEROS)) - ) - .get(); - - assertSearchResponse(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - List termsBuckets = terms.getBuckets(); - assertThat(termsBuckets.size(), equalTo(interval)); - - for (int i = 0; i < interval; ++i) { - Terms.Bucket termsBucket = termsBuckets.get(i); - assertThat(termsBucket, notNullValue()); - assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); - - Histogram histo = termsBucket.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - - double bucketSum = 0; - int count = 0; - double min = Double.POSITIVE_INFINITY; - double max = Double.NEGATIVE_INFINITY; - for (int j = 0; j < numValueBuckets; ++j) { - Histogram.Bucket bucket = buckets.get(j); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval)); - Sum sum = bucket.getAggregations().get("sum"); - assertThat(sum, notNullValue()); - - count++; - bucketSum += sum.value(); - min = Math.min(min, sum.value()); - max = Math.max(max, sum.value()); - } - - double avgValue = count == 0 ? 
Double.NaN : (bucketSum / count); - StatsBucket statsBucketValue = termsBucket.getAggregations().get("stats_bucket"); - assertThat(statsBucketValue, notNullValue()); - assertThat(statsBucketValue.getName(), equalTo("stats_bucket")); - assertThat(statsBucketValue.getAvg(), equalTo(avgValue)); - assertThat(statsBucketValue.getMin(), equalTo(min)); - assertThat(statsBucketValue.getMax(), equalTo(max)); - } - } - - public void testNoBuckets() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation( - terms("terms").field("tag") - .includeExclude(new IncludeExclude(null, "tag.*")) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) - ) - .addAggregation(statsBucket("stats_bucket", "terms>sum")) - .get(); - - assertSearchResponse(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - List buckets = terms.getBuckets(); - assertThat(buckets.size(), equalTo(0)); - - StatsBucket statsBucketValue = response.getAggregations().get("stats_bucket"); - assertThat(statsBucketValue, notNullValue()); - assertThat(statsBucketValue.getName(), equalTo("stats_bucket")); - assertThat(statsBucketValue.getAvg(), equalTo(Double.NaN)); + @Override + protected String nestedMetric() { + return "avg"; } - public void testNested() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation( - terms("terms").field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).extendedBounds(minRandomValue, maxRandomValue) - ) - .subAggregation(statsBucket("avg_histo_bucket", "histo>_count")) - ) - .addAggregation(statsBucket("avg_terms_bucket", "terms>avg_histo_bucket.avg")) - .get(); - - assertSearchResponse(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - List termsBuckets = terms.getBuckets(); - assertThat(termsBuckets.size(), equalTo(interval)); - - double aggTermsSum = 0; - int aggTermsCount = 0; - double min = Double.POSITIVE_INFINITY; - double max = Double.NEGATIVE_INFINITY; - for (int i = 0; i < interval; ++i) { - Terms.Bucket termsBucket = termsBuckets.get(i); - assertThat(termsBucket, notNullValue()); - assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); - - Histogram histo = termsBucket.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - - double aggHistoSum = 0; - int aggHistoCount = 0; - for (int j = 0; j < numValueBuckets; ++j) { - Histogram.Bucket bucket = buckets.get(j); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval)); - - aggHistoCount++; - aggHistoSum += bucket.getDocCount(); - } - - double avgHistoValue = aggHistoCount == 0 ? Double.NaN : (aggHistoSum / aggHistoCount); - StatsBucket statsBucketValue = termsBucket.getAggregations().get("avg_histo_bucket"); - assertThat(statsBucketValue, notNullValue()); - assertThat(statsBucketValue.getName(), equalTo("avg_histo_bucket")); - assertThat(statsBucketValue.getAvg(), equalTo(avgHistoValue)); - - aggTermsCount++; - aggTermsSum += avgHistoValue; - min = Math.min(min, avgHistoValue); - max = Math.max(max, avgHistoValue); - } - - double avgTermsValue = aggTermsCount == 0 ? 
Double.NaN : (aggTermsSum / aggTermsCount); - StatsBucket statsBucketValue = response.getAggregations().get("avg_terms_bucket"); - assertThat(statsBucketValue, notNullValue()); - assertThat(statsBucketValue.getName(), equalTo("avg_terms_bucket")); - assertThat(statsBucketValue.getAvg(), equalTo(avgTermsValue)); - assertThat(statsBucketValue.getMin(), equalTo(min)); - assertThat(statsBucketValue.getMax(), equalTo(max)); + @Override + protected double getNestedMetric(StatsBucket bucket) { + return bucket.getAvg(); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/SumBucketIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/SumBucketIT.java index 6cf7799ca2508..285322ef12355 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/SumBucketIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/SumBucketIT.java @@ -8,372 +8,40 @@ package org.elasticsearch.search.aggregations.pipeline; -import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.search.aggregations.BucketOrder; -import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; -import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; -import org.elasticsearch.search.aggregations.bucket.terms.IncludeExclude; -import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.aggregations.metrics.Sum; -import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import org.elasticsearch.test.ESIntegTestCase; +import java.util.function.Function; +import java.util.function.IntToDoubleFunction; -import java.util.ArrayList; -import java.util.List; - -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; -import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; -import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.elasticsearch.search.aggregations.PipelineAggregatorBuilders.sumBucket; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.core.IsNull.notNullValue; - -@ESIntegTestCase.SuiteScopeTestCase -public class SumBucketIT extends ESIntegTestCase { - - private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; - static int numDocs; - static int interval; - static int minRandomValue; - static int maxRandomValue; - static int numValueBuckets; - static long[] valueCounts; +public class SumBucketIT extends BucketMetricsPipeLineAggregationTestCase { @Override - public void setupSuiteScopeCluster() throws Exception { - assertAcked(client().admin().indices().prepareCreate("idx").setMapping("tag", "type=keyword").get()); - createIndex("idx_unmapped"); - - numDocs = randomIntBetween(6, 20); - interval = randomIntBetween(2, 5); - - minRandomValue = 0; - maxRandomValue = 20; - - numValueBuckets = ((maxRandomValue - minRandomValue) / interval) + 1; - valueCounts = new long[numValueBuckets]; - - List builders = new ArrayList<>(); - - for (int i = 0; i < numDocs; i++) { - int fieldValue = 
randomIntBetween(minRandomValue, maxRandomValue); - builders.add( - client().prepareIndex("idx") - .setSource( - jsonBuilder().startObject() - .field(SINGLE_VALUED_FIELD_NAME, fieldValue) - .field("tag", "tag" + (i % interval)) - .endObject() - ) - ); - final int bucket = (fieldValue / interval); // + (fieldValue < 0 ? -1 : 0) - (minRandomValue / interval - 1); - valueCounts[bucket]++; - } - - assertAcked(prepareCreate("empty_bucket_idx").setMapping(SINGLE_VALUED_FIELD_NAME, "type=integer")); - for (int i = 0; i < 2; i++) { - builders.add( - client().prepareIndex("empty_bucket_idx") - .setId("" + i) - .setSource(jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, i * 2).endObject()) - ); - } - indexRandom(true, builders); - ensureSearchable(); + protected SumBucketPipelineAggregationBuilder BucketMetricsPipelineAgg(String name, String bucketsPath) { + return sumBucket(name, bucketsPath); } - public void testDocCountTopLevel() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).extendedBounds(minRandomValue, maxRandomValue) - ) - .addAggregation(sumBucket("sum_bucket", "histo>_count")) - .get(); - - assertSearchResponse(response); - - Histogram histo = response.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - assertThat(buckets.size(), equalTo(numValueBuckets)); - + @Override + protected void assertResult( + IntToDoubleFunction bucketValues, + Function bucketKeys, + int numBuckets, + InternalSimpleValue pipelineBucket + ) { double sum = 0; - for (int i = 0; i < numValueBuckets; ++i) { - Histogram.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); - assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); - sum += bucket.getDocCount(); - } - - InternalSimpleValue sumBucketValue = response.getAggregations().get("sum_bucket"); - assertThat(sumBucketValue, notNullValue()); - assertThat(sumBucketValue.getName(), equalTo("sum_bucket")); - assertThat(sumBucketValue.value(), equalTo(sum)); - } - - public void testDocCountAsSubAgg() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation( - terms("terms").field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).extendedBounds(minRandomValue, maxRandomValue) - ) - .subAggregation(sumBucket("sum_bucket", "histo>_count")) - ) - .get(); - - assertSearchResponse(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - List termsBuckets = terms.getBuckets(); - assertThat(termsBuckets.size(), equalTo(interval)); - - for (int i = 0; i < interval; ++i) { - Terms.Bucket termsBucket = termsBuckets.get(i); - assertThat(termsBucket, notNullValue()); - assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); - - Histogram histo = termsBucket.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - - double sum = 0; - for (int j = 0; j < numValueBuckets; ++j) { - Histogram.Bucket bucket = buckets.get(j); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), 
equalTo((long) j * interval)); - sum += bucket.getDocCount(); - } - - InternalSimpleValue sumBucketValue = termsBucket.getAggregations().get("sum_bucket"); - assertThat(sumBucketValue, notNullValue()); - assertThat(sumBucketValue.getName(), equalTo("sum_bucket")); - assertThat(sumBucketValue.value(), equalTo(sum)); - } - } - - public void testMetricTopLevel() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation(terms("terms").field("tag").subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) - .addAggregation(sumBucket("sum_bucket", "terms>sum")) - .get(); - - assertSearchResponse(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - List buckets = terms.getBuckets(); - assertThat(buckets.size(), equalTo(interval)); - - double bucketSum = 0; - for (int i = 0; i < interval; ++i) { - Terms.Bucket bucket = buckets.get(i); - assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("tag" + (i % interval))); - assertThat(bucket.getDocCount(), greaterThan(0L)); - Sum sum = bucket.getAggregations().get("sum"); - assertThat(sum, notNullValue()); - bucketSum += sum.value(); - } - - InternalSimpleValue sumBucketValue = response.getAggregations().get("sum_bucket"); - assertThat(sumBucketValue, notNullValue()); - assertThat(sumBucketValue.getName(), equalTo("sum_bucket")); - assertThat(sumBucketValue.value(), equalTo(bucketSum)); - } - - public void testMetricAsSubAgg() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation( - terms("terms").field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME) - .interval(interval) - .extendedBounds(minRandomValue, maxRandomValue) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) - ) - .subAggregation(sumBucket("sum_bucket", "histo>sum")) - ) - .get(); - - assertSearchResponse(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - List termsBuckets = terms.getBuckets(); - assertThat(termsBuckets.size(), equalTo(interval)); - - for (int i = 0; i < interval; ++i) { - Terms.Bucket termsBucket = termsBuckets.get(i); - assertThat(termsBucket, notNullValue()); - assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); - - Histogram histo = termsBucket.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - - double bucketSum = 0; - for (int j = 0; j < numValueBuckets; ++j) { - Histogram.Bucket bucket = buckets.get(j); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval)); - if (bucket.getDocCount() != 0) { - Sum sum = bucket.getAggregations().get("sum"); - assertThat(sum, notNullValue()); - bucketSum += sum.value(); - } - } - - InternalSimpleValue sumBucketValue = termsBucket.getAggregations().get("sum_bucket"); - assertThat(sumBucketValue, notNullValue()); - assertThat(sumBucketValue.getName(), equalTo("sum_bucket")); - assertThat(sumBucketValue.value(), equalTo(bucketSum)); + for (int i = 0; i < numBuckets; ++i) { + sum += bucketValues.applyAsDouble(i); } + assertThat(pipelineBucket.value(), equalTo(sum)); } - public void testMetricAsSubAggWithInsertZeros() throws Exception { - SearchResponse 
response = client().prepareSearch("idx") - .addAggregation( - terms("terms").field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME) - .interval(interval) - .extendedBounds(minRandomValue, maxRandomValue) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) - ) - .subAggregation(sumBucket("sum_bucket", "histo>sum").gapPolicy(GapPolicy.INSERT_ZEROS)) - ) - .get(); - - assertSearchResponse(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - List termsBuckets = terms.getBuckets(); - assertThat(termsBuckets.size(), equalTo(interval)); - - for (int i = 0; i < interval; ++i) { - Terms.Bucket termsBucket = termsBuckets.get(i); - assertThat(termsBucket, notNullValue()); - assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); - - Histogram histo = termsBucket.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - - double bucketSum = 0; - for (int j = 0; j < numValueBuckets; ++j) { - Histogram.Bucket bucket = buckets.get(j); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval)); - Sum sum = bucket.getAggregations().get("sum"); - assertThat(sum, notNullValue()); - - bucketSum += sum.value(); - } - - InternalSimpleValue sumBucketValue = termsBucket.getAggregations().get("sum_bucket"); - assertThat(sumBucketValue, notNullValue()); - assertThat(sumBucketValue.getName(), equalTo("sum_bucket")); - assertThat(sumBucketValue.value(), equalTo(bucketSum)); - } - } - - public void testNoBuckets() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation( - terms("terms").field("tag") - .includeExclude(new IncludeExclude(null, "tag.*")) - .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) - ) - .addAggregation(sumBucket("sum_bucket", "terms>sum")) - .get(); - - assertSearchResponse(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - List buckets = terms.getBuckets(); - assertThat(buckets.size(), equalTo(0)); - - InternalSimpleValue sumBucketValue = response.getAggregations().get("sum_bucket"); - assertThat(sumBucketValue, notNullValue()); - assertThat(sumBucketValue.getName(), equalTo("sum_bucket")); - assertThat(sumBucketValue.value(), equalTo(0.0)); + @Override + protected String nestedMetric() { + return "value"; } - public void testNested() throws Exception { - SearchResponse response = client().prepareSearch("idx") - .addAggregation( - terms("terms").field("tag") - .order(BucketOrder.key(true)) - .subAggregation( - histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval).extendedBounds(minRandomValue, maxRandomValue) - ) - .subAggregation(sumBucket("sum_histo_bucket", "histo>_count")) - ) - .addAggregation(sumBucket("sum_terms_bucket", "terms>sum_histo_bucket")) - .get(); - - assertSearchResponse(response); - - Terms terms = response.getAggregations().get("terms"); - assertThat(terms, notNullValue()); - assertThat(terms.getName(), equalTo("terms")); - List termsBuckets = terms.getBuckets(); - assertThat(termsBuckets.size(), equalTo(interval)); - - double aggTermsSum = 0; - for (int i = 0; i < interval; ++i) { - Terms.Bucket termsBucket = termsBuckets.get(i); - assertThat(termsBucket, 
notNullValue()); - assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); - - Histogram histo = termsBucket.getAggregations().get("histo"); - assertThat(histo, notNullValue()); - assertThat(histo.getName(), equalTo("histo")); - List buckets = histo.getBuckets(); - - double aggHistoSum = 0; - for (int j = 0; j < numValueBuckets; ++j) { - Histogram.Bucket bucket = buckets.get(j); - assertThat(bucket, notNullValue()); - assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval)); - - aggHistoSum += bucket.getDocCount(); - } - - InternalSimpleValue sumBucketValue = termsBucket.getAggregations().get("sum_histo_bucket"); - assertThat(sumBucketValue, notNullValue()); - assertThat(sumBucketValue.getName(), equalTo("sum_histo_bucket")); - assertThat(sumBucketValue.value(), equalTo(aggHistoSum)); - - aggTermsSum += aggHistoSum; - } - - InternalSimpleValue sumBucketValue = response.getAggregations().get("sum_terms_bucket"); - assertThat(sumBucketValue, notNullValue()); - assertThat(sumBucketValue.getName(), equalTo("sum_terms_bucket")); - assertThat(sumBucketValue.value(), equalTo(aggTermsSum)); + @Override + protected double getNestedMetric(InternalSimpleValue bucket) { + return bucket.value(); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoBoundingBoxQueryLegacyGeoShapeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoBoundingBoxQueryLegacyGeoShapeIT.java index c12897ac2d63d..b8589bb3407a4 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoBoundingBoxQueryLegacyGeoShapeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoBoundingBoxQueryLegacyGeoShapeIT.java @@ -11,12 +11,26 @@ import org.elasticsearch.Version; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.TestLegacyGeoShapeFieldMapperPlugin; import org.elasticsearch.test.VersionUtils; import java.io.IOException; +import java.util.Collection; +import java.util.Collections; public class GeoBoundingBoxQueryLegacyGeoShapeIT extends GeoBoundingBoxQueryIntegTestCase { + @Override + protected boolean addMockGeoShapeFieldMapper() { + return false; + } + + @Override + protected Collection> nodePlugins() { + return Collections.singleton(TestLegacyGeoShapeFieldMapperPlugin.class); + } + @Override public XContentBuilder getMapping() throws IOException { return XContentFactory.jsonBuilder().startObject().startObject("_doc") @@ -25,8 +39,8 @@ public XContentBuilder getMapping() throws IOException { } @Override - public Version randomSupportedVersion() throws IOException { - return VersionUtils.randomPreviousCompatibleVersion(random(), Version.V_8_0_0); + public Version randomSupportedVersion() { + return VersionUtils.randomIndexCompatibleVersion(random()); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoShapeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoShapeIT.java index 70ff028360165..41e20e4748de9 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoShapeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoShapeIT.java @@ -22,7 +22,7 @@ protected void getGeoShapeMapping(XContentBuilder b) throws IOException { } @Override - protected Version getVersion() { + protected Version randomSupportedVersion() { return 
VersionUtils.randomIndexCompatibleVersion(random()); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/geo/LegacyGeoShapeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/geo/LegacyGeoShapeIT.java index 3aff066a5bff9..375ab6e41f061 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/geo/LegacyGeoShapeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/geo/LegacyGeoShapeIT.java @@ -12,19 +12,31 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.geometry.Circle; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.TestLegacyGeoShapeFieldMapperPlugin; import org.elasticsearch.test.VersionUtils; import java.io.IOException; +import java.util.Collection; +import java.util.Collections; import static org.elasticsearch.index.query.QueryBuilders.geoShapeQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; public class LegacyGeoShapeIT extends GeoShapeIntegTestCase { + @Override + protected boolean addMockGeoShapeFieldMapper() { + return false; + } + + @Override + protected Collection> nodePlugins() { + return Collections.singleton(TestLegacyGeoShapeFieldMapperPlugin.class); + } + @Override protected void getGeoShapeMapping(XContentBuilder b) throws IOException { b.field("type", "geo_shape"); @@ -32,9 +44,8 @@ protected void getGeoShapeMapping(XContentBuilder b) throws IOException { } @Override - protected Version getVersion() { - // legacy shapes can only be created in version lower than 8.x - return VersionUtils.randomPreviousCompatibleVersion(random(), Version.V_8_0_0); + protected Version randomSupportedVersion() { + return VersionUtils.randomIndexCompatibleVersion(random()); } @Override @@ -42,32 +53,12 @@ protected boolean allowExpensiveQueries() { return false; } - public void testMappingUpdate() { - // create index - assertAcked(client().admin().indices().prepareCreate("test").setSettings(settings(getVersion()).build()) - .setMapping("shape", "type=geo_shape,strategy=recursive").get()); - ensureGreen(); - - String update ="{\n" + - " \"properties\": {\n" + - " \"shape\": {\n" + - " \"type\": \"geo_shape\"" + - " }\n" + - " }\n" + - "}"; - - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> client().admin().indices() - .preparePutMapping("test") - .setSource(update, XContentType.JSON).get()); - assertThat(e.getMessage(), containsString("mapper [shape] of type [geo_shape] cannot change strategy from [recursive] to [BKD]")); - } - /** * Test that the circle is still supported for the legacy shapes */ public void testLegacyCircle() throws Exception { // create index - assertAcked(prepareCreate("test").setSettings(settings(getVersion()).build()) + assertAcked(prepareCreate("test").setSettings(settings(randomSupportedVersion()).build()) .setMapping("shape", "type=geo_shape,strategy=recursive,tree=geohash").get()); ensureGreen(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java index ddd66e0dc4ef1..0937832f0bea5 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java @@ -23,7 +23,7 @@ import org.elasticsearch.search.aggregations.bucket.terms.GlobalOrdinalsStringTermsAggregator; import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder; import org.elasticsearch.search.profile.ProfileResult; -import org.elasticsearch.search.profile.ProfileShardResult; +import org.elasticsearch.search.profile.SearchProfileShardResult; import org.elasticsearch.test.ESIntegTestCase; import org.joda.time.Instant; @@ -119,10 +119,10 @@ public void testSimpleProfile() { SearchResponse response = client().prepareSearch("idx").setProfile(true) .addAggregation(histogram("histo").field(NUMBER_FIELD).interval(1L)).get(); assertSearchResponse(response); - Map profileResults = response.getProfileResults(); + Map profileResults = response.getProfileResults(); assertThat(profileResults, notNullValue()); assertThat(profileResults.size(), equalTo(getNumShards("idx").numPrimaries)); - for (ProfileShardResult profileShardResult : profileResults.values()) { + for (SearchProfileShardResult profileShardResult : profileResults.values()) { assertThat(profileShardResult, notNullValue()); AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults(); assertThat(aggProfileResults, notNullValue()); @@ -164,10 +164,10 @@ public void testMultiLevelProfile() { ) ).get(); assertSearchResponse(response); - Map profileResults = response.getProfileResults(); + Map profileResults = response.getProfileResults(); assertThat(profileResults, notNullValue()); assertThat(profileResults.size(), equalTo(getNumShards("idx").numPrimaries)); - for (ProfileShardResult profileShardResult : profileResults.values()) { + for (SearchProfileShardResult profileShardResult : profileResults.values()) { assertThat(profileShardResult, notNullValue()); AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults(); assertThat(aggProfileResults, notNullValue()); @@ -247,10 +247,10 @@ public void testMultiLevelProfileBreadthFirst() { .collectMode(SubAggCollectionMode.BREADTH_FIRST).field(TAG_FIELD).subAggregation(avg("avg").field(NUMBER_FIELD)))) .get(); assertSearchResponse(response); - Map profileResults = response.getProfileResults(); + Map profileResults = response.getProfileResults(); assertThat(profileResults, notNullValue()); assertThat(profileResults.size(), equalTo(getNumShards("idx").numPrimaries)); - for (ProfileShardResult profileShardResult : profileResults.values()) { + for (SearchProfileShardResult profileShardResult : profileResults.values()) { assertThat(profileShardResult, notNullValue()); AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults(); assertThat(aggProfileResults, notNullValue()); @@ -317,10 +317,10 @@ public void testDiversifiedAggProfile() { .subAggregation(max("max").field(NUMBER_FIELD))) .get(); assertSearchResponse(response); - Map profileResults = response.getProfileResults(); + Map profileResults = response.getProfileResults(); assertThat(profileResults, notNullValue()); assertThat(profileResults.size(), equalTo(getNumShards("idx").numPrimaries)); - for (ProfileShardResult profileShardResult : profileResults.values()) { + for (SearchProfileShardResult profileShardResult : profileResults.values()) { assertThat(profileShardResult, 
notNullValue()); AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults(); assertThat(aggProfileResults, notNullValue()); @@ -377,10 +377,10 @@ public void testComplexProfile() { .subAggregation(max("max").field(NUMBER_FIELD))))) .get(); assertSearchResponse(response); - Map profileResults = response.getProfileResults(); + Map profileResults = response.getProfileResults(); assertThat(profileResults, notNullValue()); assertThat(profileResults.size(), equalTo(getNumShards("idx").numPrimaries)); - for (ProfileShardResult profileShardResult : profileResults.values()) { + for (SearchProfileShardResult profileShardResult : profileResults.values()) { assertThat(profileShardResult, notNullValue()); AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults(); assertThat(aggProfileResults, notNullValue()); @@ -581,7 +581,7 @@ public void testNoProfile() { .subAggregation(max("max").field(NUMBER_FIELD))))) .get(); assertSearchResponse(response); - Map profileResults = response.getProfileResults(); + Map profileResults = response.getProfileResults(); assertThat(profileResults, notNullValue()); assertThat(profileResults.size(), equalTo(0)); } @@ -611,10 +611,10 @@ public void testFilterByFilter() throws InterruptedException, IOException { .subAggregation(new MaxAggregationBuilder("m").field("date"))) .get(); assertSearchResponse(response); - Map profileResults = response.getProfileResults(); + Map profileResults = response.getProfileResults(); assertThat(profileResults, notNullValue()); assertThat(profileResults.size(), equalTo(getNumShards("dateidx").numPrimaries)); - for (ProfileShardResult profileShardResult : profileResults.values()) { + for (SearchProfileShardResult profileShardResult : profileResults.values()) { assertThat(profileShardResult, notNullValue()); AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults(); assertThat(aggProfileResults, notNullValue()); @@ -698,10 +698,10 @@ public void testDateHistogramFilterByFilterDisabled() throws InterruptedExceptio .addAggregation(new DateHistogramAggregationBuilder("histo").field("date").calendarInterval(DateHistogramInterval.MONTH)) .get(); assertSearchResponse(response); - Map profileResults = response.getProfileResults(); + Map profileResults = response.getProfileResults(); assertThat(profileResults, notNullValue()); assertThat(profileResults.size(), equalTo(getNumShards("date_filter_by_filter_disabled").numPrimaries)); - for (ProfileShardResult profileShardResult : profileResults.values()) { + for (SearchProfileShardResult profileShardResult : profileResults.values()) { assertThat(profileShardResult, notNullValue()); AggregationProfileShardResult aggProfileResults = profileShardResult.getAggregationProfileResults(); assertThat(aggProfileResults, notNullValue()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java index 3c760b340b5ce..5392aa8f43f35 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java @@ -20,7 +20,7 @@ import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.profile.ProfileResult; -import 
org.elasticsearch.search.profile.ProfileShardResult; +import org.elasticsearch.search.profile.SearchProfileShardResult; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.ESIntegTestCase; @@ -77,7 +77,7 @@ public void testProfileQuery() throws Exception { assertNotNull("Profile response element should not be null", resp.getProfileResults()); assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0)); - for (Map.Entry shard : resp.getProfileResults().entrySet()) { + for (Map.Entry shard : resp.getProfileResults().entrySet()) { for (QueryProfileShardResult searchProfiles : shard.getValue().getQueryProfileResults()) { for (ProfileResult result : searchProfiles.getQueryResults()) { assertNotNull(result.getQueryName()); @@ -210,11 +210,11 @@ public void testSimpleMatch() throws Exception { .setSearchType(SearchType.QUERY_THEN_FETCH) .get(); - Map p = resp.getProfileResults(); + Map p = resp.getProfileResults(); assertNotNull(p); assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0)); - for (Map.Entry shardResult : resp.getProfileResults().entrySet()) { + for (Map.Entry shardResult : resp.getProfileResults().entrySet()) { for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { for (ProfileResult result : searchProfiles.getQueryResults()) { assertEquals(result.getQueryName(), "TermQuery"); @@ -257,11 +257,11 @@ public void testBool() throws Exception { .setSearchType(SearchType.QUERY_THEN_FETCH) .get(); - Map p = resp.getProfileResults(); + Map p = resp.getProfileResults(); assertNotNull(p); assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0)); - for (Map.Entry shardResult : resp.getProfileResults().entrySet()) { + for (Map.Entry shardResult : resp.getProfileResults().entrySet()) { for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { for (ProfileResult result : searchProfiles.getQueryResults()) { assertEquals(result.getQueryName(), "BooleanQuery"); @@ -329,7 +329,7 @@ public void testEmptyBool() throws Exception { assertNotNull("Profile response element should not be null", resp.getProfileResults()); assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0)); - for (Map.Entry shardResult : resp.getProfileResults().entrySet()) { + for (Map.Entry shardResult : resp.getProfileResults().entrySet()) { for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { for (ProfileResult result : searchProfiles.getQueryResults()) { assertNotNull(result.getQueryName()); @@ -381,7 +381,7 @@ public void testCollapsingBool() throws Exception { assertNotNull("Profile response element should not be null", resp.getProfileResults()); assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0)); - for (Map.Entry shardResult : resp.getProfileResults().entrySet()) { + for (Map.Entry shardResult : resp.getProfileResults().entrySet()) { for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { for (ProfileResult result : searchProfiles.getQueryResults()) { assertNotNull(result.getQueryName()); @@ -428,7 +428,7 @@ public void testBoosting() throws Exception { assertNotNull("Profile response element should not be null", resp.getProfileResults()); assertThat("Profile response should not be an empty array", 
resp.getProfileResults().size(), not(0)); - for (Map.Entry shardResult : resp.getProfileResults().entrySet()) { + for (Map.Entry shardResult : resp.getProfileResults().entrySet()) { for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { for (ProfileResult result : searchProfiles.getQueryResults()) { assertNotNull(result.getQueryName()); @@ -475,7 +475,7 @@ public void testDisMaxRange() throws Exception { assertNotNull("Profile response element should not be null", resp.getProfileResults()); assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0)); - for (Map.Entry shardResult : resp.getProfileResults().entrySet()) { + for (Map.Entry shardResult : resp.getProfileResults().entrySet()) { for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { for (ProfileResult result : searchProfiles.getQueryResults()) { assertNotNull(result.getQueryName()); @@ -521,7 +521,7 @@ public void testRange() throws Exception { assertNotNull("Profile response element should not be null", resp.getProfileResults()); assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0)); - for (Map.Entry shardResult : resp.getProfileResults().entrySet()) { + for (Map.Entry shardResult : resp.getProfileResults().entrySet()) { for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { for (ProfileResult result : searchProfiles.getQueryResults()) { assertNotNull(result.getQueryName()); @@ -575,7 +575,7 @@ public void testPhrase() throws Exception { assertNotNull("Profile response element should not be null", resp.getProfileResults()); assertThat("Profile response should not be an empty array", resp.getProfileResults().size(), not(0)); - for (Map.Entry shardResult : resp.getProfileResults().entrySet()) { + for (Map.Entry shardResult : resp.getProfileResults().entrySet()) { for (QueryProfileShardResult searchProfiles : shardResult.getValue().getQueryProfileResults()) { for (ProfileResult result : searchProfiles.getQueryResults()) { assertNotNull(result.getQueryName()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java index 365434910b5b0..88aff5d411027 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java @@ -24,8 +24,11 @@ import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.in; import static org.hamcrest.Matchers.is; @@ -75,38 +78,39 @@ public void testSortBy() throws Exception { private void doTestSortOrder(String repoName, Collection allSnapshotNames, SortOrder order) { final List defaultSorting = clusterAdmin().prepareGetSnapshots(repoName).setOrder(order).get().getSnapshots(); assertSnapshotListSorted(defaultSorting, null, order); + final String[] repos = { repoName }; assertSnapshotListSorted( - allSnapshotsSorted(allSnapshotNames, repoName, GetSnapshotsRequest.SortBy.NAME, order), + allSnapshotsSorted(allSnapshotNames, repos, GetSnapshotsRequest.SortBy.NAME, order), GetSnapshotsRequest.SortBy.NAME, order ); 
assertSnapshotListSorted( - allSnapshotsSorted(allSnapshotNames, repoName, GetSnapshotsRequest.SortBy.DURATION, order), + allSnapshotsSorted(allSnapshotNames, repos, GetSnapshotsRequest.SortBy.DURATION, order), GetSnapshotsRequest.SortBy.DURATION, order ); assertSnapshotListSorted( - allSnapshotsSorted(allSnapshotNames, repoName, GetSnapshotsRequest.SortBy.INDICES, order), + allSnapshotsSorted(allSnapshotNames, repos, GetSnapshotsRequest.SortBy.INDICES, order), GetSnapshotsRequest.SortBy.INDICES, order ); assertSnapshotListSorted( - allSnapshotsSorted(allSnapshotNames, repoName, GetSnapshotsRequest.SortBy.START_TIME, order), + allSnapshotsSorted(allSnapshotNames, repos, GetSnapshotsRequest.SortBy.START_TIME, order), GetSnapshotsRequest.SortBy.START_TIME, order ); assertSnapshotListSorted( - allSnapshotsSorted(allSnapshotNames, repoName, GetSnapshotsRequest.SortBy.SHARDS, order), + allSnapshotsSorted(allSnapshotNames, repos, GetSnapshotsRequest.SortBy.SHARDS, order), GetSnapshotsRequest.SortBy.SHARDS, order ); assertSnapshotListSorted( - allSnapshotsSorted(allSnapshotNames, repoName, GetSnapshotsRequest.SortBy.FAILED_SHARDS, order), + allSnapshotsSorted(allSnapshotNames, repos, GetSnapshotsRequest.SortBy.FAILED_SHARDS, order), GetSnapshotsRequest.SortBy.FAILED_SHARDS, order ); assertSnapshotListSorted( - allSnapshotsSorted(allSnapshotNames, repoName, GetSnapshotsRequest.SortBy.REPOSITORY, order), + allSnapshotsSorted(allSnapshotNames, repos, GetSnapshotsRequest.SortBy.REPOSITORY, order), GetSnapshotsRequest.SortBy.REPOSITORY, order ); @@ -127,22 +131,23 @@ public void testResponseSizeLimit() throws Exception { } private void doTestPagination(String repoName, List names, GetSnapshotsRequest.SortBy sort, SortOrder order) { - final List allSnapshotsSorted = allSnapshotsSorted(names, repoName, sort, order); - final GetSnapshotsResponse batch1 = sortedWithLimit(repoName, sort, null, 2, order); + final String[] repos = { repoName }; + final List allSnapshotsSorted = allSnapshotsSorted(names, repos, sort, order); + final GetSnapshotsResponse batch1 = sortedWithLimit(repos, sort, null, 2, order); assertEquals(allSnapshotsSorted.subList(0, 2), batch1.getSnapshots()); - final GetSnapshotsResponse batch2 = sortedWithLimit(repoName, sort, batch1.next(), 2, order); + final GetSnapshotsResponse batch2 = sortedWithLimit(repos, sort, batch1.next(), 2, order); assertEquals(allSnapshotsSorted.subList(2, 4), batch2.getSnapshots()); final int lastBatch = names.size() - batch1.getSnapshots().size() - batch2.getSnapshots().size(); - final GetSnapshotsResponse batch3 = sortedWithLimit(repoName, sort, batch2.next(), lastBatch, order); + final GetSnapshotsResponse batch3 = sortedWithLimit(repos, sort, batch2.next(), lastBatch, order); assertEquals( batch3.getSnapshots(), allSnapshotsSorted.subList(batch1.getSnapshots().size() + batch2.getSnapshots().size(), names.size()) ); - final GetSnapshotsResponse batch3NoLimit = sortedWithLimit(repoName, sort, batch2.next(), GetSnapshotsRequest.NO_LIMIT, order); + final GetSnapshotsResponse batch3NoLimit = sortedWithLimit(repos, sort, batch2.next(), GetSnapshotsRequest.NO_LIMIT, order); assertNull(batch3NoLimit.next()); assertEquals(batch3.getSnapshots(), batch3NoLimit.getSnapshots()); final GetSnapshotsResponse batch3LargeLimit = sortedWithLimit( - repoName, + repos, sort, batch2.next(), lastBatch + randomIntBetween(1, 100), @@ -172,18 +177,27 @@ public void testSortAndPaginateWithInProgress() throws Exception { } awaitNumberOfSnapshotsInProgress(inProgressCount); - 
assertStablePagination(repoName, allSnapshotNames, GetSnapshotsRequest.SortBy.START_TIME); - assertStablePagination(repoName, allSnapshotNames, GetSnapshotsRequest.SortBy.NAME); - assertStablePagination(repoName, allSnapshotNames, GetSnapshotsRequest.SortBy.INDICES); + final String[] repos = { repoName }; + assertStablePagination(repos, allSnapshotNames, GetSnapshotsRequest.SortBy.START_TIME); + assertStablePagination(repos, allSnapshotNames, GetSnapshotsRequest.SortBy.NAME); + assertStablePagination(repos, allSnapshotNames, GetSnapshotsRequest.SortBy.INDICES); + + assertThat( + clusterAdmin().prepareGetSnapshots(matchAllPattern()) + .setSnapshots(GetSnapshotsRequest.CURRENT_SNAPSHOT, "-snap*") + .get() + .getSnapshots(), + empty() + ); unblockAllDataNodes(repoName); for (ActionFuture inProgressSnapshot : inProgressSnapshots) { assertSuccessful(inProgressSnapshot); } - assertStablePagination(repoName, allSnapshotNames, GetSnapshotsRequest.SortBy.START_TIME); - assertStablePagination(repoName, allSnapshotNames, GetSnapshotsRequest.SortBy.NAME); - assertStablePagination(repoName, allSnapshotNames, GetSnapshotsRequest.SortBy.INDICES); + assertStablePagination(repos, allSnapshotNames, GetSnapshotsRequest.SortBy.START_TIME); + assertStablePagination(repos, allSnapshotNames, GetSnapshotsRequest.SortBy.NAME); + assertStablePagination(repos, allSnapshotNames, GetSnapshotsRequest.SortBy.INDICES); } public void testPaginationRequiresVerboseListing() throws Exception { @@ -210,12 +224,186 @@ public void testPaginationRequiresVerboseListing() throws Exception { ); } + public void testExcludePatterns() throws Exception { + final String repoName1 = "test-repo-1"; + final String repoName2 = "test-repo-2"; + final String otherRepo = "other-repo"; + createRepository(repoName1, "fs"); + createRepository(repoName2, "fs"); + createRepository(otherRepo, "fs"); + + final List namesRepo1 = createNSnapshots(repoName1, randomIntBetween(1, 5)); + final List namesRepo2 = createNSnapshots(repoName2, randomIntBetween(1, 5)); + final List namesOtherRepo = createNSnapshots(otherRepo, randomIntBetween(1, 5)); + + final Collection allSnapshotNames = new HashSet<>(namesRepo1); + allSnapshotNames.addAll(namesRepo2); + final Collection allSnapshotNamesWithoutOther = Set.copyOf(allSnapshotNames); + allSnapshotNames.addAll(namesOtherRepo); + + final SortOrder order = SortOrder.DESC; + final List allSorted = allSnapshotsSorted( + allSnapshotNames, + new String[] { "*" }, + GetSnapshotsRequest.SortBy.REPOSITORY, + order + ); + final List allSortedWithoutOther = allSnapshotsSorted( + allSnapshotNamesWithoutOther, + new String[] { "*", "-" + otherRepo }, + GetSnapshotsRequest.SortBy.REPOSITORY, + order + ); + assertThat(allSortedWithoutOther, is(allSorted.subList(0, allSnapshotNamesWithoutOther.size()))); + + final List allInOther = allSnapshotsSorted( + namesOtherRepo, + new String[] { "*", "-test-repo-*" }, + GetSnapshotsRequest.SortBy.REPOSITORY, + order + ); + assertThat(allInOther, is(allSorted.subList(allSnapshotNamesWithoutOther.size(), allSorted.size()))); + + final String otherPrefixSnapshot1 = "other-prefix-snapshot-1"; + createFullSnapshot(otherRepo, otherPrefixSnapshot1); + final String otherPrefixSnapshot2 = "other-prefix-snapshot-2"; + createFullSnapshot(otherRepo, otherPrefixSnapshot2); + + final String[] patternOtherRepo = randomBoolean() ? 
new String[] { otherRepo } : new String[] { "*", "-test-repo-*" }; + final List allInOtherWithoutOtherPrefix = allSnapshotsSorted( + namesOtherRepo, + patternOtherRepo, + GetSnapshotsRequest.SortBy.REPOSITORY, + order, + "-other*" + ); + assertThat(allInOtherWithoutOtherPrefix, is(allInOther)); + + final List allInOtherWithoutOtherExplicit = allSnapshotsSorted( + namesOtherRepo, + patternOtherRepo, + GetSnapshotsRequest.SortBy.REPOSITORY, + order, + "-" + otherPrefixSnapshot1, + "-" + otherPrefixSnapshot2 + ); + assertThat(allInOtherWithoutOtherExplicit, is(allInOther)); + + assertThat(clusterAdmin().prepareGetSnapshots(matchAllPattern()).setSnapshots("other*", "-o*").get().getSnapshots(), empty()); + assertThat(clusterAdmin().prepareGetSnapshots("other*", "-o*").setSnapshots(matchAllPattern()).get().getSnapshots(), empty()); + assertThat( + clusterAdmin().prepareGetSnapshots("other*", otherRepo, "-o*").setSnapshots(matchAllPattern()).get().getSnapshots(), + empty() + ); + assertThat( + clusterAdmin().prepareGetSnapshots(matchAllPattern()) + .setSnapshots("non-existing*", otherPrefixSnapshot1, "-o*") + .get() + .getSnapshots(), + empty() + ); + } + + public void testNamesStartingInDash() { + final String repoName1 = "test-repo"; + final String weirdRepo1 = "-weird-repo-1"; + final String weirdRepo2 = "-weird-repo-2"; + createRepository(repoName1, "fs"); + createRepository(weirdRepo1, "fs"); + createRepository(weirdRepo2, "fs"); + + final String snapshotName = "test-snapshot"; + final String weirdSnapshot1 = "-weird-snapshot-1"; + final String weirdSnapshot2 = "-weird-snapshot-2"; + + final SnapshotInfo snapshotInRepo1 = createFullSnapshot(repoName1, snapshotName); + final SnapshotInfo weirdSnapshot1InRepo1 = createFullSnapshot(repoName1, weirdSnapshot1); + final SnapshotInfo weirdSnapshot2InRepo1 = createFullSnapshot(repoName1, weirdSnapshot2); + + final SnapshotInfo snapshotInWeird1 = createFullSnapshot(weirdRepo1, snapshotName); + final SnapshotInfo weirdSnapshot1InWeird1 = createFullSnapshot(weirdRepo1, weirdSnapshot1); + final SnapshotInfo weirdSnapshot2InWeird1 = createFullSnapshot(weirdRepo1, weirdSnapshot2); + + final SnapshotInfo snapshotInWeird2 = createFullSnapshot(weirdRepo2, snapshotName); + final SnapshotInfo weirdSnapshot1InWeird2 = createFullSnapshot(weirdRepo2, weirdSnapshot1); + final SnapshotInfo weirdSnapshot2InWeird2 = createFullSnapshot(weirdRepo2, weirdSnapshot2); + + final List allSnapshots = clusterAdmin().prepareGetSnapshots(matchAllPattern()) + .setSort(GetSnapshotsRequest.SortBy.REPOSITORY) + .get() + .getSnapshots(); + assertThat(allSnapshots, hasSize(9)); + + final List allSnapshotsByAll = getAllByPatterns(matchAllPattern(), matchAllPattern()); + assertThat(allSnapshotsByAll, is(allSnapshots)); + assertThat(getAllByPatterns(matchAllPattern(), new String[] { snapshotName, weirdSnapshot1, weirdSnapshot2 }), is(allSnapshots)); + assertThat(getAllByPatterns(new String[] { repoName1, weirdRepo1, weirdRepo2 }, matchAllPattern()), is(allSnapshots)); + + assertThat( + getAllByPatterns(matchAllPattern(), new String[] { snapshotName }), + is(List.of(snapshotInWeird1, snapshotInWeird2, snapshotInRepo1)) + ); + assertThat( + getAllByPatterns(matchAllPattern(), new String[] { weirdSnapshot1 }), + is(List.of(weirdSnapshot1InWeird1, weirdSnapshot1InWeird2, weirdSnapshot1InRepo1)) + ); + assertThat( + getAllByPatterns(matchAllPattern(), new String[] { snapshotName, weirdSnapshot1 }), + is( + List.of( + weirdSnapshot1InWeird1, + snapshotInWeird1, + weirdSnapshot1InWeird2, + 
snapshotInWeird2, + weirdSnapshot1InRepo1, + snapshotInRepo1 + ) + ) + ); + assertThat(getAllByPatterns(matchAllPattern(), new String[] { "non-existing*", weirdSnapshot1 }), empty()); + assertThat( + getAllByPatterns(matchAllPattern(), new String[] { "*", "--weird-snapshot-1" }), + is( + List.of( + weirdSnapshot2InWeird1, + snapshotInWeird1, + weirdSnapshot2InWeird2, + snapshotInWeird2, + weirdSnapshot2InRepo1, + snapshotInRepo1 + ) + ) + ); + assertThat( + getAllByPatterns(matchAllPattern(), new String[] { "-*" }), + is( + List.of( + weirdSnapshot1InWeird1, + weirdSnapshot2InWeird1, + weirdSnapshot1InWeird2, + weirdSnapshot2InWeird2, + weirdSnapshot1InRepo1, + weirdSnapshot2InRepo1 + + ) + ) + ); + } + + private List getAllByPatterns(String[] repos, String[] snapshots) { + return clusterAdmin().prepareGetSnapshots(repos) + .setSnapshots(snapshots) + .setSort(GetSnapshotsRequest.SortBy.REPOSITORY) + .get() + .getSnapshots(); + } + public void testFilterBySLMPolicy() throws Exception { final String repoName = "test-repo"; createRepository(repoName, "fs"); createNSnapshots(repoName, randomIntBetween(1, 5)); - final List snapshotsWithoutPolicy = clusterAdmin().prepareGetSnapshots("*") - .setSnapshots("*") + final List snapshotsWithoutPolicy = clusterAdmin().prepareGetSnapshots(matchAllPattern()) + .setSnapshots(matchAllPattern()) .setSort(GetSnapshotsRequest.SortBy.NAME) .get() .getSnapshots(); @@ -250,8 +438,8 @@ public void testFilterBySLMPolicy() throws Exception { assertThat(getAllSnapshotsForPolicies(policyName, otherPolicyName), is(List.of(withOtherPolicy, withPolicy))); assertThat(getAllSnapshotsForPolicies(policyName, otherPolicyName, "no-such-policy*"), is(List.of(withOtherPolicy, withPolicy))); - final List allSnapshots = clusterAdmin().prepareGetSnapshots("*") - .setSnapshots("*") + final List allSnapshots = clusterAdmin().prepareGetSnapshots(matchAllPattern()) + .setSnapshots(matchAllPattern()) .setSort(GetSnapshotsRequest.SortBy.NAME) .get() .getSnapshots(); @@ -259,21 +447,197 @@ public void testFilterBySLMPolicy() throws Exception { assertThat(getAllSnapshotsForPolicies(GetSnapshotsRequest.NO_POLICY_PATTERN, "*"), is(allSnapshots)); } + public void testSortAfter() throws Exception { + final String repoName = "test-repo"; + createRepository(repoName, "fs"); + final Set startTimes = new HashSet<>(); + final Set durations = new HashSet<>(); + final SnapshotInfo snapshot1 = createFullSnapshotWithUniqueTimestamps(repoName, "snapshot-1", startTimes, durations); + createIndexWithContent("index-1"); + final SnapshotInfo snapshot2 = createFullSnapshotWithUniqueTimestamps(repoName, "snapshot-2", startTimes, durations); + createIndexWithContent("index-2"); + final SnapshotInfo snapshot3 = createFullSnapshotWithUniqueTimestamps(repoName, "snapshot-3", startTimes, durations); + createIndexWithContent("index-3"); + + final List allSnapshotInfo = clusterAdmin().prepareGetSnapshots(matchAllPattern()) + .setSnapshots(matchAllPattern()) + .setSort(GetSnapshotsRequest.SortBy.START_TIME) + .get() + .getSnapshots(); + assertThat(allSnapshotInfo, is(List.of(snapshot1, snapshot2, snapshot3))); + + final long startTime1 = snapshot1.startTime(); + final long startTime2 = snapshot2.startTime(); + final long startTime3 = snapshot3.startTime(); + + assertThat(allAfterStartTimeAscending(startTime1 - 1), is(allSnapshotInfo)); + assertThat(allAfterStartTimeAscending(startTime1), is(allSnapshotInfo)); + assertThat(allAfterStartTimeAscending(startTime2), is(List.of(snapshot2, snapshot3))); + 
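// Note: allAfterStartTimeAscending (see the helpers further down) runs a get-snapshots request with
// setFromSortValue(String.valueOf(timestamp)) and an ascending START_TIME sort. The from_sort_value
// cutoff is inclusive for ascending order (filterByLongOffset uses <= when no after-snapshot is given),
// which is why the startTime1 case above still returns all three snapshots rather than two.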
assertThat(allAfterStartTimeAscending(startTime3), is(List.of(snapshot3))); + assertThat(allAfterStartTimeAscending(startTime3 + 1), empty()); + + final String name1 = snapshot1.snapshotId().getName(); + final String name2 = snapshot2.snapshotId().getName(); + final String name3 = snapshot3.snapshotId().getName(); + + assertThat(allAfterNameAscending("a"), is(allSnapshotInfo)); + assertThat(allAfterNameAscending(name1), is(allSnapshotInfo)); + assertThat(allAfterNameAscending(name2), is(List.of(snapshot2, snapshot3))); + assertThat(allAfterNameAscending(name3), is(List.of(snapshot3))); + assertThat(allAfterNameAscending("z"), empty()); + + final List allSnapshotInfoDesc = clusterAdmin().prepareGetSnapshots(matchAllPattern()) + .setSnapshots(matchAllPattern()) + .setSort(GetSnapshotsRequest.SortBy.START_TIME) + .setOrder(SortOrder.DESC) + .get() + .getSnapshots(); + assertThat(allSnapshotInfoDesc, is(List.of(snapshot3, snapshot2, snapshot1))); + + assertThat(allBeforeStartTimeDescending(startTime3 + 1), is(allSnapshotInfoDesc)); + assertThat(allBeforeStartTimeDescending(startTime3), is(allSnapshotInfoDesc)); + assertThat(allBeforeStartTimeDescending(startTime2), is(List.of(snapshot2, snapshot1))); + assertThat(allBeforeStartTimeDescending(startTime1), is(List.of(snapshot1))); + assertThat(allBeforeStartTimeDescending(startTime1 - 1), empty()); + + assertThat(allSnapshotInfoDesc, is(List.of(snapshot3, snapshot2, snapshot1))); + assertThat(allBeforeNameDescending("z"), is(allSnapshotInfoDesc)); + assertThat(allBeforeNameDescending(name3), is(allSnapshotInfoDesc)); + assertThat(allBeforeNameDescending(name2), is(List.of(snapshot2, snapshot1))); + assertThat(allBeforeNameDescending(name1), is(List.of(snapshot1))); + assertThat(allBeforeNameDescending("a"), empty()); + + final List allSnapshotInfoByDuration = clusterAdmin().prepareGetSnapshots(matchAllPattern()) + .setSnapshots(matchAllPattern()) + .setSort(GetSnapshotsRequest.SortBy.DURATION) + .get() + .getSnapshots(); + + final long duration1 = allSnapshotInfoByDuration.get(0).endTime() - allSnapshotInfoByDuration.get(0).startTime(); + final long duration2 = allSnapshotInfoByDuration.get(1).endTime() - allSnapshotInfoByDuration.get(1).startTime(); + final long duration3 = allSnapshotInfoByDuration.get(2).endTime() - allSnapshotInfoByDuration.get(2).startTime(); + + assertThat(allAfterDurationAscending(duration1 - 1), is(allSnapshotInfoByDuration)); + assertThat(allAfterDurationAscending(duration1), is(allSnapshotInfoByDuration)); + assertThat(allAfterDurationAscending(duration2), is(allSnapshotInfoByDuration.subList(1, 3))); + assertThat(allAfterDurationAscending(duration3), is(List.of(allSnapshotInfoByDuration.get(2)))); + assertThat(allAfterDurationAscending(duration3 + 1), empty()); + + final List allSnapshotInfoByDurationDesc = clusterAdmin().prepareGetSnapshots(matchAllPattern()) + .setSnapshots(matchAllPattern()) + .setSort(GetSnapshotsRequest.SortBy.DURATION) + .setOrder(SortOrder.DESC) + .get() + .getSnapshots(); + + assertThat(allBeforeDurationDescending(duration3 + 1), is(allSnapshotInfoByDurationDesc)); + assertThat(allBeforeDurationDescending(duration3), is(allSnapshotInfoByDurationDesc)); + assertThat(allBeforeDurationDescending(duration2), is(allSnapshotInfoByDurationDesc.subList(1, 3))); + assertThat(allBeforeDurationDescending(duration1), is(List.of(allSnapshotInfoByDurationDesc.get(2)))); + assertThat(allBeforeDurationDescending(duration1 - 1), empty()); + + final SnapshotInfo otherSnapshot = createFullSnapshot(repoName, 
"other-snapshot"); + + assertThat(allSnapshots(new String[] { "snap*" }, GetSnapshotsRequest.SortBy.NAME, SortOrder.ASC, "a"), is(allSnapshotInfo)); + assertThat(allSnapshots(new String[] { "o*" }, GetSnapshotsRequest.SortBy.NAME, SortOrder.ASC, "a"), is(List.of(otherSnapshot))); + + final GetSnapshotsResponse paginatedResponse = clusterAdmin().prepareGetSnapshots(matchAllPattern()) + .setSnapshots("snap*") + .setSort(GetSnapshotsRequest.SortBy.NAME) + .setFromSortValue("a") + .setOffset(1) + .setSize(1) + .get(); + assertThat(paginatedResponse.getSnapshots(), is(List.of(snapshot2))); + assertThat(paginatedResponse.totalCount(), is(3)); + final GetSnapshotsResponse paginatedResponse2 = clusterAdmin().prepareGetSnapshots(matchAllPattern()) + .setSnapshots("snap*") + .setSort(GetSnapshotsRequest.SortBy.NAME) + .setFromSortValue("a") + .setOffset(0) + .setSize(2) + .get(); + assertThat(paginatedResponse2.getSnapshots(), is(List.of(snapshot1, snapshot2))); + assertThat(paginatedResponse2.totalCount(), is(3)); + } + + // Create a snapshot that is guaranteed to have a unique start time and duration for tests around ordering by either. + // Don't use this with more than 3 snapshots on platforms with low-resolution clocks as the durations could always collide there + // causing an infinite loop + private SnapshotInfo createFullSnapshotWithUniqueTimestamps( + String repoName, + String snapshotName, + Set forbiddenStartTimes, + Set forbiddenDurations + ) throws Exception { + while (true) { + final SnapshotInfo snapshotInfo = createFullSnapshot(repoName, snapshotName); + final long duration = snapshotInfo.endTime() - snapshotInfo.startTime(); + if (forbiddenStartTimes.contains(snapshotInfo.startTime()) || forbiddenDurations.contains(duration)) { + logger.info("--> snapshot start time or duration collided"); + assertAcked(startDeleteSnapshot(repoName, snapshotName).get()); + } else { + assertTrue(forbiddenStartTimes.add(snapshotInfo.startTime())); + assertTrue(forbiddenDurations.add(duration)); + return snapshotInfo; + } + } + } + + private List allAfterStartTimeAscending(long timestamp) { + return allSnapshots(matchAllPattern(), GetSnapshotsRequest.SortBy.START_TIME, SortOrder.ASC, timestamp); + } + + private List allBeforeStartTimeDescending(long timestamp) { + return allSnapshots(matchAllPattern(), GetSnapshotsRequest.SortBy.START_TIME, SortOrder.DESC, timestamp); + } + + private List allAfterNameAscending(String name) { + return allSnapshots(matchAllPattern(), GetSnapshotsRequest.SortBy.NAME, SortOrder.ASC, name); + } + + private List allBeforeNameDescending(String name) { + return allSnapshots(matchAllPattern(), GetSnapshotsRequest.SortBy.NAME, SortOrder.DESC, name); + } + + private List allAfterDurationAscending(long duration) { + return allSnapshots(matchAllPattern(), GetSnapshotsRequest.SortBy.DURATION, SortOrder.ASC, duration); + } + + private List allBeforeDurationDescending(long duration) { + return allSnapshots(matchAllPattern(), GetSnapshotsRequest.SortBy.DURATION, SortOrder.DESC, duration); + } + + private static List allSnapshots( + String[] snapshotNames, + GetSnapshotsRequest.SortBy sortBy, + SortOrder order, + Object fromSortValue + ) { + return clusterAdmin().prepareGetSnapshots(matchAllPattern()) + .setSnapshots(snapshotNames) + .setSort(sortBy) + .setFromSortValue(fromSortValue.toString()) + .setOrder(order) + .get() + .getSnapshots(); + } + private static List getAllSnapshotsForPolicies(String... 
policies) { - return clusterAdmin().prepareGetSnapshots("*") - .setSnapshots("*") + return clusterAdmin().prepareGetSnapshots(matchAllPattern()) + .setSnapshots(matchAllPattern()) .setPolicies(policies) .setSort(GetSnapshotsRequest.SortBy.NAME) .get() .getSnapshots(); } - private static void assertStablePagination(String repoName, Collection allSnapshotNames, GetSnapshotsRequest.SortBy sort) { + private static void assertStablePagination(String[] repoNames, Collection allSnapshotNames, GetSnapshotsRequest.SortBy sort) { final SortOrder order = randomFrom(SortOrder.values()); - final List allSorted = allSnapshotsSorted(allSnapshotNames, repoName, sort, order); + final List allSorted = allSnapshotsSorted(allSnapshotNames, repoNames, sort, order); for (int i = 1; i <= allSnapshotNames.size(); i++) { - final GetSnapshotsResponse subsetSorted = sortedWithLimit(repoName, sort, null, i, order); + final GetSnapshotsResponse subsetSorted = sortedWithLimit(repoNames, sort, null, i, order); assertEquals(allSorted.subList(0, i), subsetSorted.getSnapshots()); } @@ -281,13 +645,13 @@ private static void assertStablePagination(String repoName, Collection a final SnapshotInfo after = allSorted.get(j); for (int i = 1; i < allSnapshotNames.size() - j; i++) { final GetSnapshotsResponse getSnapshotsResponse = sortedWithLimit( - repoName, + repoNames, sort, GetSnapshotsRequest.After.from(after, sort).asQueryParam(), i, order ); - final GetSnapshotsResponse getSnapshotsResponseNumeric = sortedWithLimit(repoName, sort, j + 1, i, order); + final GetSnapshotsResponse getSnapshotsResponseNumeric = sortedWithLimit(repoNames, sort, j + 1, i, order); final List subsetSorted = getSnapshotsResponse.getSnapshots(); assertEquals(subsetSorted, getSnapshotsResponseNumeric.getSnapshots()); assertEquals(subsetSorted, allSorted.subList(j + 1, j + i + 1)); @@ -302,11 +666,19 @@ private static void assertStablePagination(String repoName, Collection a private static List allSnapshotsSorted( Collection allSnapshotNames, - String repoName, + String[] repoNames, GetSnapshotsRequest.SortBy sortBy, - SortOrder order + SortOrder order, + String... namePatterns ) { - final GetSnapshotsResponse getSnapshotsResponse = sortedWithLimit(repoName, sortBy, null, GetSnapshotsRequest.NO_LIMIT, order); + final GetSnapshotsResponse getSnapshotsResponse = sortedWithLimit( + repoNames, + sortBy, + null, + GetSnapshotsRequest.NO_LIMIT, + order, + namePatterns + ); final List snapshotInfos = getSnapshotsResponse.getSnapshots(); assertEquals(snapshotInfos.size(), allSnapshotNames.size()); assertEquals(getSnapshotsResponse.totalCount(), allSnapshotNames.size()); @@ -318,37 +690,33 @@ private static List allSnapshotsSorted( } private static GetSnapshotsResponse sortedWithLimit( - String repoName, + String[] repoNames, GetSnapshotsRequest.SortBy sortBy, String after, int size, - SortOrder order + SortOrder order, + String... 
namePatterns ) { - return baseGetSnapshotsRequest(repoName).setAfter(after).setSort(sortBy).setSize(size).setOrder(order).get(); + return baseGetSnapshotsRequest(repoNames).setAfter(after) + .setSort(sortBy) + .setSize(size) + .setOrder(order) + .addSnapshots(namePatterns) + .get(); } private static GetSnapshotsResponse sortedWithLimit( - String repoName, + String[] repoNames, GetSnapshotsRequest.SortBy sortBy, int offset, int size, SortOrder order ) { - return baseGetSnapshotsRequest(repoName).setOffset(offset).setSort(sortBy).setSize(size).setOrder(order).get(); + return baseGetSnapshotsRequest(repoNames).setOffset(offset).setSort(sortBy).setSize(size).setOrder(order).get(); } - private static GetSnapshotsRequestBuilder baseGetSnapshotsRequest(String repoName) { - final GetSnapshotsRequestBuilder builder = clusterAdmin().prepareGetSnapshots(repoName); - // exclude old version snapshot from test assertions every time and do a prefixed query in either case half the time - if (randomBoolean() - || clusterAdmin().prepareGetSnapshots(repoName) - .setSnapshots(AbstractSnapshotIntegTestCase.OLD_VERSION_SNAPSHOT_PREFIX + "*") - .setIgnoreUnavailable(true) - .get() - .getSnapshots() - .isEmpty() == false) { - builder.setSnapshots(RANDOM_SNAPSHOT_NAME_PREFIX + "*"); - } - return builder; + private static GetSnapshotsRequestBuilder baseGetSnapshotsRequest(String[] repoNames) { + return clusterAdmin().prepareGetSnapshots(repoNames) + .setSnapshots("*", "-" + AbstractSnapshotIntegTestCase.OLD_VERSION_SNAPSHOT_PREFIX + "*"); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SystemIndicesSnapshotIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SystemIndicesSnapshotIT.java index fa73d9ef54b33..b01cc4f463d6b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SystemIndicesSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SystemIndicesSnapshotIT.java @@ -364,7 +364,7 @@ public void testRestoringSystemIndexByNameIsDeprecated() throws IllegalAccessExc new MockLogAppender.SeenEventExpectation( "restore-system-index-from-snapshot", "org.elasticsearch.deprecation.snapshots.RestoreService", - DeprecationLogger.DEPRECATION, + DeprecationLogger.CRITICAL, "Restoring system indices by name is deprecated. Use feature states instead. 
System indices: [.test-system-idx]" ) ); diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 1f071c9d8f4ed..7579025b99dd7 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -89,8 +89,8 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_7_14_1 = new Version(7140199, org.apache.lucene.util.Version.LUCENE_8_9_0); public static final Version V_7_14_2 = new Version(7140299, org.apache.lucene.util.Version.LUCENE_8_9_0); public static final Version V_7_15_0 = new Version(7150099, org.apache.lucene.util.Version.LUCENE_8_9_0); - public static final Version V_7_16_0 = new Version(7160099, org.apache.lucene.util.Version.LUCENE_8_9_0); - public static final Version V_8_0_0 = new Version(8000099, org.apache.lucene.util.Version.LUCENE_8_9_0); + public static final Version V_7_16_0 = new Version(7160099, org.apache.lucene.util.Version.LUCENE_8_10_0); + public static final Version V_8_0_0 = new Version(8000099, org.apache.lucene.util.Version.LUCENE_8_10_0); public static final Version CURRENT = V_8_0_0; private static final ImmutableOpenIntMap idToVersion; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java index d199d28ca4ca1..1801be97c48b7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java @@ -12,6 +12,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.monitor.jvm.HotThreads; import java.io.IOException; import java.util.concurrent.TimeUnit; @@ -19,7 +20,7 @@ public class NodesHotThreadsRequest extends BaseNodesRequest { int threads = 3; - String type = "cpu"; + HotThreads.ReportType type = HotThreads.ReportType.CPU; TimeValue interval = new TimeValue(500, TimeUnit.MILLISECONDS); int snapshots = 10; boolean ignoreIdleThreads = true; @@ -29,7 +30,7 @@ public NodesHotThreadsRequest(StreamInput in) throws IOException { super(in); threads = in.readInt(); ignoreIdleThreads = in.readBoolean(); - type = in.readString(); + type = HotThreads.ReportType.of(in.readString()); interval = in.readTimeValue(); snapshots = in.readInt(); } @@ -60,12 +61,12 @@ public NodesHotThreadsRequest ignoreIdleThreads(boolean ignoreIdleThreads) { return this; } - public NodesHotThreadsRequest type(String type) { + public NodesHotThreadsRequest type(HotThreads.ReportType type) { this.type = type; return this; } - public String type() { + public HotThreads.ReportType type() { return this.type; } @@ -92,7 +93,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeInt(threads); out.writeBoolean(ignoreIdleThreads); - out.writeString(type); + out.writeString(type.getTypeValue()); out.writeTimeValue(interval); out.writeInt(snapshots); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequestBuilder.java index d25559b0236cd..50751c7e1f9c3 100644 --- 
a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequestBuilder.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.support.nodes.NodesOperationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.monitor.jvm.HotThreads; public class NodesHotThreadsRequestBuilder extends NodesOperationRequestBuilder { @@ -29,7 +30,7 @@ public NodesHotThreadsRequestBuilder setIgnoreIdleThreads(boolean ignoreIdleThre return this; } - public NodesHotThreadsRequestBuilder setType(String type) { + public NodesHotThreadsRequestBuilder setType(HotThreads.ReportType type) { request.type(type); return this; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java index a6c9f9c717b96..9b787a26134bf 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java @@ -18,6 +18,7 @@ import org.elasticsearch.cluster.metadata.RepositoriesMetadata; import org.elasticsearch.cluster.metadata.RepositoryMetadata; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.repositories.RepositoryMissingException; @@ -35,6 +36,8 @@ */ public class TransportGetRepositoriesAction extends TransportMasterNodeReadAction { + public static final String ALL_PATTERN = "_all"; + @Inject public TransportGetRepositoriesAction( TransportService transportService, @@ -56,6 +59,11 @@ public TransportGetRepositoriesAction( ); } + public static boolean isMatchAll(String[] patterns) { + return (patterns.length == 0) + || (patterns.length == 1 && (ALL_PATTERN.equalsIgnoreCase(patterns[0]) || Regex.isMatchAllPattern(patterns[0]))); + } + @Override protected ClusterBlockException checkBlock(GetRepositoriesRequest request, ClusterState state) { return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); @@ -80,30 +88,37 @@ protected void masterOperation( */ public static List getRepositories(ClusterState state, String[] repoNames) { RepositoriesMetadata repositories = state.metadata().custom(RepositoriesMetadata.TYPE, RepositoriesMetadata.EMPTY); - if (repoNames.length == 0 || (repoNames.length == 1 && ("_all".equals(repoNames[0]) || "*".equals(repoNames[0])))) { + if (isMatchAll(repoNames)) { return repositories.repositories(); - } else { - Set repositoriesToGet = new LinkedHashSet<>(); // to keep insertion order - for (String repositoryOrPattern : repoNames) { - if (Regex.isSimpleMatchPattern(repositoryOrPattern) == false) { - repositoriesToGet.add(repositoryOrPattern); + } + final List includePatterns = new ArrayList<>(); + final List excludePatterns = new ArrayList<>(); + boolean seenWildcard = false; + for (String repositoryOrPattern : repoNames) { + if (seenWildcard && repositoryOrPattern.length() > 1 && repositoryOrPattern.startsWith("-")) { + excludePatterns.add(repositoryOrPattern.substring(1)); + } else { + if (Regex.isSimpleMatchPattern(repositoryOrPattern)) { + 
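// A name is only treated as an exclude pattern if it starts with '-' AND a wildcard include has
// already been seen; before any wildcard, a leading '-' still refers to a literal repository name
// (see testNamesStartingInDash above). For example, the repository patterns ["*", "-test-repo-*"]
// resolve to every registered repository whose name does not match "test-repo-*".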
seenWildcard = true; } else { - for (RepositoryMetadata repository : repositories.repositories()) { - if (Regex.simpleMatch(repositoryOrPattern, repository.name())) { - repositoriesToGet.add(repository.name()); - } + if (repositories.repository(repositoryOrPattern) == null) { + throw new RepositoryMissingException(repositoryOrPattern); } } + includePatterns.add(repositoryOrPattern); } - List repositoryListBuilder = new ArrayList<>(); - for (String repository : repositoriesToGet) { - RepositoryMetadata repositoryMetadata = repositories.repository(repository); - if (repositoryMetadata == null) { - throw new RepositoryMissingException(repository); + } + final String[] excludes = excludePatterns.toArray(Strings.EMPTY_ARRAY); + final Set repositoryListBuilder = new LinkedHashSet<>(); // to keep insertion order + for (String repositoryOrPattern : includePatterns) { + for (RepositoryMetadata repository : repositories.repositories()) { + if (repositoryListBuilder.contains(repository) == false + && Regex.simpleMatch(repositoryOrPattern, repository.name()) + && Regex.simpleMatch(excludes, repository.name()) == false) { + repositoryListBuilder.add(repository); } - repositoryListBuilder.add(repositoryMetadata); } - return repositoryListBuilder; } + return List.copyOf(repositoryListBuilder); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java index 54314bc872bd2..e60facbf5a089 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java @@ -36,13 +36,14 @@ */ public class GetSnapshotsRequest extends MasterNodeRequest { - public static final String ALL_SNAPSHOTS = "_all"; public static final String CURRENT_SNAPSHOT = "_current"; public static final String NO_POLICY_PATTERN = "_none"; public static final boolean DEFAULT_VERBOSE_MODE = true; public static final Version SLM_POLICY_FILTERING_VERSION = Version.V_8_0_0; + public static final Version FROM_SORT_VALUE_VERSION = Version.V_8_0_0; + public static final Version MULTIPLE_REPOSITORIES_SUPPORT_ADDED = Version.V_7_14_0; public static final Version PAGINATED_GET_SNAPSHOTS_VERSION = Version.V_7_14_0; @@ -66,6 +67,9 @@ public class GetSnapshotsRequest extends MasterNodeRequest @Nullable private After after; + @Nullable + private String fromSortValue; + private SortBy sort = SortBy.START_TIME; private SortOrder order = SortOrder.ASC; @@ -123,6 +127,9 @@ public GetSnapshotsRequest(StreamInput in) throws IOException { if (in.getVersion().onOrAfter(SLM_POLICY_FILTERING_VERSION)) { policies = in.readStringArray(); } + if (in.getVersion().onOrAfter(FROM_SORT_VALUE_VERSION)) { + fromSortValue = in.readOptionalString(); + } } } @@ -172,6 +179,11 @@ public void writeTo(StreamOutput out) throws IOException { "can't use slm policy filter in snapshots request with node version [" + out.getVersion() + "]" ); } + if (out.getVersion().onOrAfter(FROM_SORT_VALUE_VERSION)) { + out.writeOptionalString(fromSortValue); + } else if (fromSortValue != null) { + throw new IllegalArgumentException("can't use after-value in snapshot request with node version [" + out.getVersion() + "]"); + } } @Override @@ -202,8 +214,15 @@ public ActionRequestValidationException validate() { if (policies.length != 0) { validationException = addValidationError("can't use slm policy 
filter with verbose=false", validationException); } - } else if (after != null && offset > 0) { - validationException = addValidationError("can't use after and offset simultaneously", validationException); + if (fromSortValue != null) { + validationException = addValidationError("can't use from_sort_value with verbose=false", validationException); + } + } else if (offset > 0) { + if (after != null) { + validationException = addValidationError("can't use after and offset simultaneously", validationException); + } + } else if (after != null && fromSortValue != null) { + validationException = addValidationError("can't use after and from_sort_value simultaneously", validationException); } return validationException; } @@ -318,6 +337,16 @@ public GetSnapshotsRequest after(@Nullable After after) { return this; } + public GetSnapshotsRequest fromSortValue(@Nullable String fromSortValue) { + this.fromSortValue = fromSortValue; + return this; + } + + @Nullable + public String fromSortValue() { + return fromSortValue; + } + public GetSnapshotsRequest sort(SortBy sort) { this.sort = sort; return this; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java index ff202a070a52b..7eda87afbc34e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java @@ -117,6 +117,11 @@ public GetSnapshotsRequestBuilder setAfter(@Nullable GetSnapshotsRequest.After a return this; } + public GetSnapshotsRequestBuilder setFromSortValue(@Nullable String fromSortValue) { + request.fromSortValue(fromSortValue); + return this; + } + public GetSnapshotsRequestBuilder setSort(GetSnapshotsRequest.SortBy sort) { request.sort(sort); return this; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java index ea416b476ac72..d07caa0ea8a11 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java @@ -121,7 +121,7 @@ protected void masterOperation( request.offset(), request.size(), request.order(), - request.policies(), + buildSnapshotPredicate(request.sort(), request.order(), request.policies(), request.fromSortValue()), listener ); } @@ -139,7 +139,7 @@ private void getMultipleReposSnapshotInfo( int offset, int size, SortOrder order, - String[] slmPolicies, + @Nullable Predicate predicate, ActionListener listener ) { // short-circuit if there are no repos, because we can not create GroupedActionListener of size 0 @@ -159,7 +159,7 @@ private void getMultipleReposSnapshotInfo( .map(Tuple::v1) .filter(Objects::nonNull) .collect(Collectors.toMap(Tuple::v1, Tuple::v2)); - final SnapshotsInRepo snInfos = sortAndFilterSnapshots(allSnapshots, sortBy, after, offset, size, order, slmPolicies); + final SnapshotsInRepo snInfos = sortAndFilterSnapshots(allSnapshots, sortBy, after, offset, size, order, predicate); final List snapshotInfos = snInfos.snapshotInfos; final int remaining = snInfos.remaining + responses.stream() .map(Tuple::v2) @@ -183,7 +183,7 @@ private 
void getMultipleReposSnapshotInfo( snapshotsInProgress, repoName, snapshots, - slmPolicies, + predicate, ignoreUnavailable, verbose, cancellableTask, @@ -205,7 +205,7 @@ private void getSingleRepoSnapshotInfo( SnapshotsInProgress snapshotsInProgress, String repo, String[] snapshots, - String[] slmPolicies, + Predicate predicate, boolean ignoreUnavailable, boolean verbose, CancellableTask task, @@ -243,7 +243,7 @@ private void getSingleRepoSnapshotInfo( sortBy, after, order, - slmPolicies, + predicate, listener ), listener::onFailure @@ -283,7 +283,7 @@ private void loadSnapshotInfos( GetSnapshotsRequest.SortBy sortBy, @Nullable final GetSnapshotsRequest.After after, SortOrder order, - String[] slmPolicies, + @Nullable Predicate predicate, ActionListener listener ) { if (task.notifyIfCancelled(listener)) { @@ -297,27 +297,49 @@ private void loadSnapshotInfos( } final Set toResolve = new HashSet<>(); - if (isAllSnapshots(snapshots)) { + if (TransportGetRepositoriesAction.isMatchAll(snapshots)) { toResolve.addAll(allSnapshotIds.values()); } else { + final List includePatterns = new ArrayList<>(); + final List excludePatterns = new ArrayList<>(); + boolean hasCurrent = false; + boolean seenWildcard = false; for (String snapshotOrPattern : snapshots) { - if (GetSnapshotsRequest.CURRENT_SNAPSHOT.equalsIgnoreCase(snapshotOrPattern)) { - toResolve.addAll(currentSnapshots.stream().map(SnapshotInfo::snapshot).collect(Collectors.toList())); - } else if (Regex.isSimpleMatchPattern(snapshotOrPattern) == false) { - if (allSnapshotIds.containsKey(snapshotOrPattern)) { - toResolve.add(allSnapshotIds.get(snapshotOrPattern)); - } else if (ignoreUnavailable == false) { - throw new SnapshotMissingException(repo, snapshotOrPattern); - } + if (seenWildcard && snapshotOrPattern.length() > 1 && snapshotOrPattern.startsWith("-")) { + excludePatterns.add(snapshotOrPattern.substring(1)); } else { - for (Map.Entry entry : allSnapshotIds.entrySet()) { - if (Regex.simpleMatch(snapshotOrPattern, entry.getKey())) { - toResolve.add(entry.getValue()); + if (Regex.isSimpleMatchPattern(snapshotOrPattern)) { + seenWildcard = true; + includePatterns.add(snapshotOrPattern); + } else if (GetSnapshotsRequest.CURRENT_SNAPSHOT.equalsIgnoreCase(snapshotOrPattern)) { + hasCurrent = true; + seenWildcard = true; + } else { + if (ignoreUnavailable == false && allSnapshotIds.containsKey(snapshotOrPattern) == false) { + throw new SnapshotMissingException(repo, snapshotOrPattern); } + includePatterns.add(snapshotOrPattern); + } + } + } + final String[] includes = includePatterns.toArray(Strings.EMPTY_ARRAY); + final String[] excludes = excludePatterns.toArray(Strings.EMPTY_ARRAY); + for (Map.Entry entry : allSnapshotIds.entrySet()) { + final Snapshot snapshot = entry.getValue(); + if (toResolve.contains(snapshot) == false + && Regex.simpleMatch(includes, entry.getKey()) + && Regex.simpleMatch(excludes, entry.getKey()) == false) { + toResolve.add(snapshot); + } + } + if (hasCurrent) { + for (SnapshotInfo snapshotInfo : currentSnapshots) { + final Snapshot snapshot = snapshotInfo.snapshot(); + if (Regex.simpleMatch(excludes, snapshot.getSnapshotId().getName()) == false) { + toResolve.add(snapshot); } } } - if (toResolve.isEmpty() && ignoreUnavailable == false && isCurrentSnapshotsOnly(snapshots) == false) { throw new SnapshotMissingException(repo, snapshots[0]); } @@ -333,14 +355,11 @@ private void loadSnapshotInfos( sortBy, after, order, - slmPolicies, + predicate, listener ); } else { - assert slmPolicies.length == 0 - : "slm policy 
filtering not support for non-verbose request but saw [" - + Strings.arrayToCommaDelimitedString(slmPolicies) - + "]"; + assert predicate == null : "filtering is not supported in non-verbose mode"; final SnapshotsInRepo snapshotInfos; if (repositoryData != null) { // want non-current snapshots as well, which are found in the repository data @@ -376,7 +395,7 @@ private void snapshots( GetSnapshotsRequest.SortBy sortBy, @Nullable GetSnapshotsRequest.After after, SortOrder order, - String[] slmPolicies, + @Nullable Predicate predicate, ActionListener listener ) { if (task.notifyIfCancelled(listener)) { @@ -405,7 +424,7 @@ private void snapshots( final ActionListener allDoneListener = listener.delegateFailure((l, v) -> { final ArrayList snapshotList = new ArrayList<>(snapshotInfos); snapshotList.addAll(snapshotSet); - listener.onResponse(sortAndFilterSnapshots(snapshotList, sortBy, after, 0, GetSnapshotsRequest.NO_LIMIT, order, slmPolicies)); + listener.onResponse(sortAndFilterSnapshots(snapshotList, sortBy, after, 0, GetSnapshotsRequest.NO_LIMIT, order, predicate)); }); if (snapshotIdsToIterate.isEmpty()) { allDoneListener.onResponse(null); @@ -440,10 +459,6 @@ public void onFailure(Exception e) { ); } - private boolean isAllSnapshots(String[] snapshots) { - return (snapshots.length == 0) || (snapshots.length == 1 && GetSnapshotsRequest.ALL_SNAPSHOTS.equalsIgnoreCase(snapshots[0])); - } - private boolean isCurrentSnapshotsOnly(String[] snapshots) { return (snapshots.length == 1 && GetSnapshotsRequest.CURRENT_SNAPSHOT.equalsIgnoreCase(snapshots[0])); } @@ -515,17 +530,38 @@ private static SnapshotsInRepo sortAndFilterSnapshots( final int offset, final int size, final SortOrder order, - final String[] slmPolicies + final @Nullable Predicate predicate ) { final List filteredSnapshotInfos; - if (slmPolicies.length == 0) { + if (predicate == null) { filteredSnapshotInfos = snapshotInfos; } else { - filteredSnapshotInfos = filterBySLMPolicies(snapshotInfos, slmPolicies); + filteredSnapshotInfos = snapshotInfos.stream().filter(predicate).collect(Collectors.toUnmodifiableList()); } return sortSnapshots(filteredSnapshotInfos, sortBy, after, offset, size, order); } + private static Predicate buildSnapshotPredicate( + GetSnapshotsRequest.SortBy sortBy, + SortOrder order, + String[] slmPolicies, + String fromSortValue + ) { + Predicate predicate = null; + if (slmPolicies.length > 0) { + predicate = filterBySLMPolicies(slmPolicies); + } + if (fromSortValue != null) { + final Predicate fromSortValuePredicate = buildFromSortValuePredicate(sortBy, fromSortValue, order, null, null); + if (predicate == null) { + predicate = fromSortValuePredicate; + } else { + predicate = fromSortValuePredicate.and(predicate); + } + } + return predicate; + } + private static SnapshotsInRepo sortSnapshots( List snapshotInfos, GetSnapshotsRequest.SortBy sortBy, @@ -565,57 +601,7 @@ private static SnapshotsInRepo sortSnapshots( if (after != null) { assert offset == 0 : "can't combine after and offset but saw [" + after + "] and offset [" + offset + "]"; - final Predicate isAfter; - final String snapshotName = after.snapshotName(); - final String repoName = after.repoName(); - switch (sortBy) { - case START_TIME: - isAfter = filterByLongOffset(SnapshotInfo::startTime, Long.parseLong(after.value()), snapshotName, repoName, order); - break; - case NAME: - isAfter = order == SortOrder.ASC - ? 
(info -> compareName(snapshotName, repoName, info) < 0) - : (info -> compareName(snapshotName, repoName, info) > 0); - break; - case DURATION: - isAfter = filterByLongOffset( - info -> info.endTime() - info.startTime(), - Long.parseLong(after.value()), - snapshotName, - repoName, - order - ); - break; - case INDICES: - isAfter = filterByLongOffset( - info -> info.indices().size(), - Integer.parseInt(after.value()), - snapshotName, - repoName, - order - ); - break; - case SHARDS: - isAfter = filterByLongOffset(SnapshotInfo::totalShards, Integer.parseInt(after.value()), snapshotName, repoName, order); - break; - case FAILED_SHARDS: - isAfter = filterByLongOffset( - SnapshotInfo::failedShards, - Integer.parseInt(after.value()), - snapshotName, - repoName, - order - ); - break; - case REPOSITORY: - isAfter = order == SortOrder.ASC - ? (info -> compareRepositoryName(snapshotName, repoName, info) < 0) - : (info -> compareRepositoryName(snapshotName, repoName, info) > 0); - break; - default: - throw new AssertionError("unexpected sort column [" + sortBy + "]"); - } - infos = infos.filter(isAfter); + infos = infos.filter(buildFromSortValuePredicate(sortBy, after.value(), order, after.snapshotName(), after.repoName())); } infos = infos.sorted(order == SortOrder.DESC ? comparator.reversed() : comparator).skip(offset); final List allSnapshots = infos.collect(Collectors.toUnmodifiableList()); @@ -631,7 +617,67 @@ private static SnapshotsInRepo sortSnapshots( return new SnapshotsInRepo(resultSet, snapshotInfos.size(), allSnapshots.size() - resultSet.size()); } - private static List filterBySLMPolicies(List snapshotInfos, String[] slmPolicies) { + private static Predicate buildFromSortValuePredicate( + GetSnapshotsRequest.SortBy sortBy, + String after, + SortOrder order, + @Nullable String snapshotName, + @Nullable String repoName + ) { + final Predicate isAfter; + switch (sortBy) { + case START_TIME: + isAfter = filterByLongOffset(SnapshotInfo::startTime, Long.parseLong(after), snapshotName, repoName, order); + break; + case NAME: + if (snapshotName == null) { + assert repoName == null : "no snapshot name given but saw repo name [" + repoName + "]"; + isAfter = order == SortOrder.ASC + ? snapshotInfo -> after.compareTo(snapshotInfo.snapshotId().getName()) <= 0 + : snapshotInfo -> after.compareTo(snapshotInfo.snapshotId().getName()) >= 0; + } else { + isAfter = order == SortOrder.ASC + ? (info -> compareName(snapshotName, repoName, info) < 0) + : (info -> compareName(snapshotName, repoName, info) > 0); + } + break; + case DURATION: + isAfter = filterByLongOffset( + info -> info.endTime() - info.startTime(), + Long.parseLong(after), + snapshotName, + repoName, + order + ); + break; + case INDICES: + isAfter = filterByLongOffset(info -> info.indices().size(), Integer.parseInt(after), snapshotName, repoName, order); + break; + case SHARDS: + isAfter = filterByLongOffset(SnapshotInfo::totalShards, Integer.parseInt(after), snapshotName, repoName, order); + break; + case FAILED_SHARDS: + isAfter = filterByLongOffset(SnapshotInfo::failedShards, Integer.parseInt(after), snapshotName, repoName, order); + break; + case REPOSITORY: + if (snapshotName == null) { + assert repoName == null : "no snapshot name given but saw repo name [" + repoName + "]"; + isAfter = order == SortOrder.ASC + ? snapshotInfo -> after.compareTo(snapshotInfo.repository()) <= 0 + : snapshotInfo -> after.compareTo(snapshotInfo.repository()) >= 0; + } else { + isAfter = order == SortOrder.ASC + ? 
(info -> compareRepositoryName(snapshotName, repoName, info) < 0) + : (info -> compareRepositoryName(snapshotName, repoName, info) > 0); + } + break; + default: + throw new AssertionError("unexpected sort column [" + sortBy + "]"); + } + return isAfter; + } + + private static Predicate filterBySLMPolicies(String[] slmPolicies) { final List includePatterns = new ArrayList<>(); final List excludePatterns = new ArrayList<>(); boolean seenWildcard = false; @@ -651,7 +697,7 @@ private static List filterBySLMPolicies(List snapsho final String[] includes = includePatterns.toArray(Strings.EMPTY_ARRAY); final String[] excludes = excludePatterns.toArray(Strings.EMPTY_ARRAY); final boolean matchWithoutPolicy = matchNoPolicy; - return snapshotInfos.stream().filter(snapshotInfo -> { + return snapshotInfo -> { final Map metadata = snapshotInfo.userMetadata(); final String policy; if (metadata == null) { @@ -667,16 +713,20 @@ private static List filterBySLMPolicies(List snapsho return false; } return excludes.length == 0 || Regex.simpleMatch(excludes, policy) == false; - }).collect(Collectors.toUnmodifiableList()); + }; } private static Predicate filterByLongOffset( ToLongFunction extractor, long after, - String snapshotName, - String repoName, + @Nullable String snapshotName, + @Nullable String repoName, SortOrder order ) { + if (snapshotName == null) { + assert repoName == null : "no snapshot name given but saw repo name [" + repoName + "]"; + return order == SortOrder.ASC ? info -> after <= extractor.applyAsLong(info) : info -> after >= extractor.applyAsLong(info); + } return order == SortOrder.ASC ? info -> { final long val = extractor.applyAsLong(info); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesRequest.java index 9175af743aee0..dde5fd637d6a0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesRequest.java @@ -19,9 +19,11 @@ public class GetAliasesRequest extends MasterNodeReadRequest implements AliasesRequest { + public static final IndicesOptions DEFAULT_INDICES_OPTIONS = IndicesOptions.strictExpandHidden(); + private String[] indices = Strings.EMPTY_ARRAY; private String[] aliases = Strings.EMPTY_ARRAY; - private IndicesOptions indicesOptions = IndicesOptions.strictExpandHidden(); + private IndicesOptions indicesOptions = DEFAULT_INDICES_OPTIONS; private String[] originalAliases = Strings.EMPTY_ARRAY; public GetAliasesRequest(String... 
aliases) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java index c5fe0cea2d9ac..30fcc16c8ac08 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java @@ -150,7 +150,7 @@ private static void checkSystemIndexAccess(GetAliasesRequest request, SystemIndi } } if (systemIndicesNames.isEmpty() == false) { - deprecationLogger.deprecate(DeprecationCategory.API, "open_system_index_access", + deprecationLogger.critical(DeprecationCategory.API, "open_system_index_access", "this request accesses system indices: {}, but in a future major version, direct access to system " + "indices will be prevented by default", systemIndicesNames); } @@ -186,7 +186,7 @@ private static void checkSystemAliasAccess(GetAliasesRequest request, SystemIndi } if (systemAliases.isEmpty() == false) { - deprecationLogger.deprecate(DeprecationCategory.API, "open_system_alias_access", + deprecationLogger.critical(DeprecationCategory.API, "open_system_alias_access", "this request accesses aliases with names reserved for system indices: {}, but in a future major version, direct " + "access to system indices and their aliases will not be allowed", systemAliases); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequest.java index 11d8d61d7096c..141108df95580 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequest.java @@ -25,9 +25,12 @@ */ public class DeleteIndexRequest extends AcknowledgedRequest implements IndicesRequest.Replaceable { + public static final IndicesOptions DEFAULT_INDICES_OPTIONS = + IndicesOptions.fromOptions(false, true, true, true, false, false, true, false); + private String[] indices; // Delete index should work by default on both open and closed indices. - private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, true, true, true, false, false, true, false); + private IndicesOptions indicesOptions = DEFAULT_INDICES_OPTIONS; public DeleteIndexRequest(StreamInput in) throws IOException { super(in); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/get/TransportGetIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/get/TransportGetIndexAction.java index 591293afb485b..3d01fd0ef4ac3 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/get/TransportGetIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/get/TransportGetIndexAction.java @@ -30,8 +30,8 @@ import org.elasticsearch.transport.TransportService; import java.util.List; +import java.util.Map; import java.util.stream.Collectors; -import java.util.stream.StreamSupport; /** * Get index action. 
@@ -62,8 +62,9 @@ protected void doMasterOperation(Task task, final GetIndexRequest request, Strin ImmutableOpenMap settings = ImmutableOpenMap.of(); ImmutableOpenMap defaultSettings = ImmutableOpenMap.of(); ImmutableOpenMap dataStreams = ImmutableOpenMap.builder() - .putAll(StreamSupport.stream(state.metadata().findDataStreams(concreteIndices).spliterator(), false) - .collect(Collectors.toMap(k -> k.key, v -> v.value.getName()))).build(); + .putAll(state.metadata().findDataStreams(concreteIndices).stream() + .collect(Collectors.toMap(Map.Entry::getKey, v -> v.getValue().getName()))) + .build(); Feature[] features = request.features(); boolean doneAliases = false; boolean doneMappings = false; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingClusterStateUpdateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingClusterStateUpdateRequest.java index b5c15d9432b30..e59653f4d8d29 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingClusterStateUpdateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingClusterStateUpdateRequest.java @@ -9,19 +9,22 @@ package org.elasticsearch.action.admin.indices.mapping.put; import org.elasticsearch.cluster.ack.IndicesClusterStateUpdateRequest; +import org.elasticsearch.common.compress.CompressedXContent; + +import java.io.IOException; /** * Cluster state update request that allows to put a mapping */ public class PutMappingClusterStateUpdateRequest extends IndicesClusterStateUpdateRequest { - private final String source; + private final CompressedXContent source; - public PutMappingClusterStateUpdateRequest(String source) { - this.source = source; + public PutMappingClusterStateUpdateRequest(String source) throws IOException { + this.source = new CompressedXContent(source); } - public String source() { + public CompressedXContent source() { return source; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java index d43e78ffe7bf6..cc93cc7169ce2 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java @@ -32,6 +32,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.List; @@ -125,15 +126,22 @@ static void performMappingUpdate(Index[] concreteIndices, PutMappingRequest request, ActionListener listener, MetadataMappingService metadataMappingService) { - PutMappingClusterStateUpdateRequest updateRequest = new PutMappingClusterStateUpdateRequest(request.source()) - .indices(concreteIndices) - .ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout()); - - metadataMappingService.putMapping(updateRequest, listener.delegateResponse((l, e) -> { + final ActionListener wrappedListener = listener.delegateResponse((l, e) -> { logger.debug(() -> new ParameterizedMessage("failed to put mappings on indices [{}]", Arrays.asList(concreteIndices)), e); l.onFailure(e); - })); + }); + final PutMappingClusterStateUpdateRequest updateRequest; + try { + updateRequest = new 
PutMappingClusterStateUpdateRequest(request.source()) + .indices(concreteIndices) + .ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout()); + } catch (IOException e) { + wrappedListener.onFailure(e); + return; + } + + metadataMappingService.putMapping(updateRequest, wrappedListener); } static String checkForSystemIndexViolations(SystemIndices systemIndices, Index[] concreteIndices, PutMappingRequest request) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexRequest.java index 6474aa85a50e5..2f15b0f8b190a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexRequest.java @@ -26,8 +26,10 @@ */ public class OpenIndexRequest extends AcknowledgedRequest implements IndicesRequest.Replaceable { + public static final IndicesOptions DEFAULT_INDICES_OPTIONS = IndicesOptions.fromOptions(false, true, false, true); + private String[] indices; - private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, true, false, true); + private IndicesOptions indicesOptions = DEFAULT_INDICES_OPTIONS; private ActiveShardCount waitForActiveShards = ActiveShardCount.DEFAULT; public OpenIndexRequest(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsRequest.java index b209f08feefac..beaacd377bf7e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsRequest.java @@ -23,8 +23,10 @@ public class GetSettingsRequest extends MasterNodeReadRequest implements IndicesRequest.Replaceable { + public static final IndicesOptions DEFAULT_INDICES_OPTIONS = IndicesOptions.fromOptions(false, true, true, true); + private String[] indices = Strings.EMPTY_ARRAY; - private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, true, true, true); + private IndicesOptions indicesOptions = DEFAULT_INDICES_OPTIONS; private String[] names = Strings.EMPTY_ARRAY; private boolean humanReadable = false; private boolean includeDefaults = false; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java index ec41573a86759..78cf32952a66c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java @@ -39,8 +39,10 @@ public class UpdateSettingsRequest extends AcknowledgedRequest implements IndicesRequest.Replaceable, ToXContentObject { + public static final IndicesOptions DEFAULT_INDICES_OPTIONS = IndicesOptions.fromOptions(false, false, true, true); + private String[] indices; - private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, true, true); + private IndicesOptions indicesOptions = DEFAULT_INDICES_OPTIONS; private Settings settings = EMPTY_SETTINGS; private boolean preserveExisting = false; private String origin = ""; diff --git 
a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java index ffcff1d4d642d..4ca09393977eb 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java @@ -31,6 +31,8 @@ */ public class ValidateQueryRequest extends BroadcastRequest implements ToXContentObject { + public static final IndicesOptions DEFAULT_INDICES_OPTIONS = IndicesOptions.fromOptions(false, false, true, false); + private QueryBuilder query = new MatchAllQueryBuilder(); private boolean explain; @@ -65,7 +67,7 @@ public ValidateQueryRequest(StreamInput in) throws IOException { */ public ValidateQueryRequest(String... indices) { super(indices); - indicesOptions(IndicesOptions.fromOptions(false, false, true, false)); + indicesOptions(DEFAULT_INDICES_OPTIONS); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestParser.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestParser.java index a595655212404..f0bab54d1e2d3 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestParser.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestParser.java @@ -187,7 +187,7 @@ public void parse( if (parser.getRestApiVersion().matches(RestApiVersion.equalTo(RestApiVersion.V_7))) { // for bigger bulks, deprecation throttling might not be enough if (deprecateOrErrorOnType && typesDeprecationLogged == false) { - deprecationLogger.compatibleApiWarning("bulk_with_types", + deprecationLogger.compatibleCritical("bulk_with_types", RestBulkAction.TYPES_DEPRECATION_MESSAGE); typesDeprecationLogged = true; } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index 692c0c59882e9..3e284f068c917 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -42,12 +42,13 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MappingMetadata; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.routing.IndexRouting; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexingPressure; @@ -429,6 +430,7 @@ protected void doRun() { Metadata metadata = clusterState.metadata(); // Group the requests by ShardId -> Operations mapping Map> requestsByShard = new HashMap<>(); + Map indexRoutings = new HashMap<>(); for (int i = 0; i < bulkRequest.requests.size(); i++) { DocWriteRequest docWriteRequest = bulkRequest.requests.get(i); //the request can only be null because we set it to null in the previous step, so it gets ignored @@ -480,8 +482,13 @@ protected void doRun() { break; default: throw new AssertionError("request type not 
supported: [" + docWriteRequest.opType() + "]"); } - ShardId shardId = clusterService.operationRouting().indexShards(clusterState, concreteIndex.getName(), - docWriteRequest.id(), docWriteRequest.routing()).shardId(); + IndexRouting indexRouting = indexRoutings.computeIfAbsent( + concreteIndex, + idx -> IndexRouting.fromIndexMetadata(clusterState.metadata().getIndexSafe(idx)) + ); + ShardId shardId = clusterService.operationRouting() + .indexShards(clusterState, concreteIndex.getName(), indexRouting, docWriteRequest.id(), docWriteRequest.routing()) + .shardId(); List shardRequests = requestsByShard.computeIfAbsent(shardId, shard -> new ArrayList<>()); shardRequests.add(new BulkItemRequest(i, docWriteRequest)); } catch (ElasticsearchParseException | IllegalArgumentException | RoutingMissingException e) { diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java index 63d4d611a1b75..d69366793e87a 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java @@ -30,9 +30,10 @@ public final class FieldCapabilitiesRequest extends ActionRequest implements IndicesRequest.Replaceable, ToXContentObject { public static final String NAME = "field_caps_request"; + public static final IndicesOptions DEFAULT_INDICES_OPTIONS = IndicesOptions.strictExpandOpen(); private String[] indices = Strings.EMPTY_ARRAY; - private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpen(); + private IndicesOptions indicesOptions = DEFAULT_INDICES_OPTIONS; private String[] fields = Strings.EMPTY_ARRAY; private boolean includeUnmapped = false; // pkg private API mainly for cross cluster search to signal that we do multiple reductions ie. 
the results should not be merged diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/IndexFieldCapabilities.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/IndexFieldCapabilities.java index 3057339e7538a..a59a75a4a5e09 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/IndexFieldCapabilities.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/IndexFieldCapabilities.java @@ -11,6 +11,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.util.StringLiteralDeduplicator; import java.io.IOException; import java.util.Map; @@ -21,6 +22,8 @@ */ public class IndexFieldCapabilities implements Writeable { + private static final StringLiteralDeduplicator typeStringDeduplicator = new StringLiteralDeduplicator(); + private final String name; private final String type; private final boolean isMetadatafield; @@ -50,7 +53,7 @@ public class IndexFieldCapabilities implements Writeable { IndexFieldCapabilities(StreamInput in) throws IOException { this.name = in.readString(); - this.type = in.readString(); + this.type = typeStringDeduplicator.deduplicate(in.readString()); this.isMetadatafield = in.readBoolean(); this.isSearchable = in.readBoolean(); this.isAggregatable = in.readBoolean(); diff --git a/server/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java b/server/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java index f721519823a86..f0736230ee133 100644 --- a/server/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java +++ b/server/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java @@ -392,7 +392,7 @@ private static void parseDocuments(XContentParser parser, List items, @Nul id = parser.text(); } else if(parser.getRestApiVersion() == RestApiVersion.V_7 && TYPE.match(currentFieldName,parser.getDeprecationHandler())) { - deprecationLogger.compatibleApiWarning("mget_with_types", RestMultiGetAction.TYPES_DEPRECATION_MESSAGE); + deprecationLogger.compatibleCritical("mget_with_types", RestMultiGetAction.TYPES_DEPRECATION_MESSAGE); } else if (ROUTING.match(currentFieldName, parser.getDeprecationHandler())) { routing = parser.text(); } else if (FIELDS.match(currentFieldName, parser.getDeprecationHandler())) { diff --git a/server/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java b/server/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java index 91e8aa81aa2bc..5c949598d6979 100644 --- a/server/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java +++ b/server/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java @@ -197,7 +197,7 @@ private static MultiGetItemResponse parseItem(XContentParser parser) throws IOEx if (INDEX.match(currentFieldName, parser.getDeprecationHandler())) { index = parser.text(); } else if (TYPE.match(currentFieldName, parser.getDeprecationHandler())) { - deprecationLogger.compatibleApiWarning("mget_with_types", RestMultiGetAction.TYPES_DEPRECATION_MESSAGE); + deprecationLogger.compatibleCritical("mget_with_types", RestMultiGetAction.TYPES_DEPRECATION_MESSAGE); } else if (ID.match(currentFieldName, parser.getDeprecationHandler())) { id = parser.text(); } diff --git a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java index b9dc90dca0b0c..586671cad3de6 100644 --- 
a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineRequest.java @@ -187,7 +187,7 @@ private static List parseDocs(Map config, RestAp String routing = ConfigurationUtils.readOptionalStringOrIntProperty(null, null, dataMap, Metadata.ROUTING.getFieldName()); if (restApiVersion == RestApiVersion.V_7 && dataMap.containsKey(Metadata.TYPE.getFieldName())) { - deprecationLogger.compatibleApiWarning("simulate_pipeline_with_types", + deprecationLogger.compatibleCritical("simulate_pipeline_with_types", "[types removal] specifying _type in pipeline simulation requests is deprecated"); } Long version = null; diff --git a/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java b/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java index 448384d7bfee7..489291ec56d3a 100644 --- a/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java @@ -189,7 +189,7 @@ public static void readMultiLineFormat(BytesReference data, } // support first line with \n if (restApiVersion == RestApiVersion.V_7 && nextMarker == 0) { - deprecationLogger.compatibleApiWarning("msearch_first_line_empty", FIRST_LINE_EMPTY_DEPRECATION_MESSAGE); + deprecationLogger.compatibleCritical("msearch_first_line_empty", FIRST_LINE_EMPTY_DEPRECATION_MESSAGE); from = nextMarker + 1; continue; } @@ -250,7 +250,7 @@ public static void readMultiLineFormat(BytesReference data, ignoreThrottled = value; } else if(restApiVersion == RestApiVersion.V_7 && ("type".equals(entry.getKey()) || "types".equals(entry.getKey()))) { - deprecationLogger.compatibleApiWarning("msearch_with_types", RestMultiSearchAction.TYPES_DEPRECATION_MESSAGE); + deprecationLogger.compatibleCritical("msearch_with_types", RestMultiSearchAction.TYPES_DEPRECATION_MESSAGE); } else { throw new IllegalArgumentException("key [" + entry.getKey() + "] is not supported in the metadata section"); } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java index 31bbff1b22634..909549dc1de17 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java @@ -40,8 +40,9 @@ import org.elasticsearch.search.fetch.FetchSearchResult; import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.SearchContext; -import org.elasticsearch.search.profile.ProfileShardResult; -import org.elasticsearch.search.profile.SearchProfileShardResults; +import org.elasticsearch.search.profile.SearchProfileQueryPhaseResult; +import org.elasticsearch.search.profile.SearchProfileResults; +import org.elasticsearch.search.profile.SearchProfileResultsBuilder; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.search.suggest.Suggest; import org.elasticsearch.search.suggest.Suggest.Suggestion; @@ -290,7 +291,7 @@ public InternalSearchResponse merge(boolean ignoreFrom, ReducedQueryPhase reduce assert currentOffset == sortedDocs.length : "expected no more score doc slices"; } } - return reducedQueryPhase.buildResponse(hits); + return reducedQueryPhase.buildResponse(hits, fetchResults); } private SearchHits getHits(ReducedQueryPhase reducedQueryPhase, boolean ignoreFrom, @@ 
-401,8 +402,22 @@ ReducedQueryPhase reducedQueryPhase(Collection quer numReducePhases++; // increment for this phase if (queryResults.isEmpty()) { // early terminate we have nothing to reduce final TotalHits totalHits = topDocsStats.getTotalHits(); - return new ReducedQueryPhase(totalHits, topDocsStats.fetchHits, topDocsStats.getMaxScore(), - false, null, null, null, null, SortedTopDocs.EMPTY, null, numReducePhases, 0, 0, true); + return new ReducedQueryPhase( + totalHits, + topDocsStats.fetchHits, + topDocsStats.getMaxScore(), + false, + null, + null, + null, + null, + SortedTopDocs.EMPTY, + null, + numReducePhases, + 0, + 0, + true + ); } int total = queryResults.size(); queryResults = queryResults.stream() @@ -419,7 +434,8 @@ ReducedQueryPhase reducedQueryPhase(Collection quer // count the total (we use the query result provider here, since we might not get any hits (we scrolled past them)) final Map>> groupedSuggestions = hasSuggest ? new HashMap<>() : Collections.emptyMap(); - final Map profileResults = hasProfileResults ? new HashMap<>(queryResults.size()) + final Map profileShardResults = hasProfileResults + ? new HashMap<>(queryResults.size()) : Collections.emptyMap(); int from = 0; int size = 0; @@ -449,7 +465,7 @@ ReducedQueryPhase reducedQueryPhase(Collection quer } if (hasProfileResults) { String key = result.getSearchShardTarget().toString(); - profileResults.put(key, result.consumeProfileResult()); + profileShardResults.put(key, result.consumeProfileResult()); } } final Suggest reducedSuggest; @@ -462,11 +478,13 @@ ReducedQueryPhase reducedQueryPhase(Collection quer reducedCompletionSuggestions = reducedSuggest.filter(CompletionSuggestion.class); } final InternalAggregations aggregations = reduceAggs(aggReduceContextBuilder, performFinalReduce, bufferedAggs); - final SearchProfileShardResults shardResults = profileResults.isEmpty() ? null : new SearchProfileShardResults(profileResults); + final SearchProfileResultsBuilder profileBuilder = profileShardResults.isEmpty() + ? null + : new SearchProfileResultsBuilder(profileShardResults); final SortedTopDocs sortedTopDocs = sortDocs(isScrollRequest, bufferedTopDocs, from, size, reducedCompletionSuggestions); final TotalHits totalHits = topDocsStats.getTotalHits(); return new ReducedQueryPhase(totalHits, topDocsStats.fetchHits, topDocsStats.getMaxScore(), - topDocsStats.timedOut, topDocsStats.terminatedEarly, reducedSuggest, aggregations, shardResults, sortedTopDocs, + topDocsStats.timedOut, topDocsStats.terminatedEarly, reducedSuggest, aggregations, profileBuilder, sortedTopDocs, sortValueFormats, numReducePhases, size, from, false); } @@ -535,7 +553,7 @@ public static final class ReducedQueryPhase { // the reduced internal aggregations final InternalAggregations aggregations; // the reduced profile results - final SearchProfileShardResults shardResults; + final SearchProfileResultsBuilder profileBuilder; // the number of reduces phases final int numReducePhases; //encloses info about the merged top docs, the sort fields used to sort the score docs etc. 
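[Editor's note on the SearchPhaseController change above: the reduce phase used to hold a finished SearchProfileShardResults object, but fetch-phase profile data does not exist until the fetch phase has run, so the hunks in this file swap it for a SearchProfileResultsBuilder and pass the fetch results into buildResponse(hits, fetchResults) where the final profile is assembled. The snippet below is a minimal illustrative sketch of that deferred-merge pattern only; QueryPhaseProfile, FetchPhaseProfile, ShardProfile, and ResultsBuilder are made-up stand-in types, not the real Elasticsearch classes or their signatures.]

    // Sketch: query-phase profiles are captured at reduce time, fetch-phase profiles arrive later,
    // and only build() combines the two into the per-shard view a caller would see.
    import java.util.HashMap;
    import java.util.Map;

    final class ProfileMergeSketch {

        // Stand-in for a per-shard query-phase profile, known when the reduce phase runs.
        record QueryPhaseProfile(String shard, long queryNanos) {}

        // Stand-in for a per-shard fetch-phase profile, only available after the fetch phase.
        record FetchPhaseProfile(String shard, long fetchNanos) {}

        // Stand-in for the combined per-shard result exposed in the response.
        record ShardProfile(long queryNanos, long fetchNanos) {}

        // Stand-in for the builder: holds query-phase data and merges fetch data on build().
        static final class ResultsBuilder {
            private final Map<String, QueryPhaseProfile> queryProfiles;

            ResultsBuilder(Map<String, QueryPhaseProfile> queryProfiles) {
                this.queryProfiles = queryProfiles;
            }

            Map<String, ShardProfile> build(Iterable<FetchPhaseProfile> fetchProfiles) {
                Map<String, Long> fetchByShard = new HashMap<>();
                for (FetchPhaseProfile fetch : fetchProfiles) {
                    fetchByShard.put(fetch.shard(), fetch.fetchNanos());
                }
                Map<String, ShardProfile> merged = new HashMap<>();
                queryProfiles.forEach((shard, query) -> merged.put(
                    shard,
                    new ShardProfile(query.queryNanos(), fetchByShard.getOrDefault(shard, 0L))
                ));
                return merged;
            }
        }
    }

[This also explains the assertion added to buildSearchProfileResults later in the diff: when no builder was created (profiling disabled), the code asserts that no fetch result carries a profile either, since there would be nothing to merge it into.]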
@@ -549,9 +567,22 @@ public static final class ReducedQueryPhase { // sort value formats used to sort / format the result final DocValueFormat[] sortValueFormats; - ReducedQueryPhase(TotalHits totalHits, long fetchHits, float maxScore, boolean timedOut, Boolean terminatedEarly, Suggest suggest, - InternalAggregations aggregations, SearchProfileShardResults shardResults, SortedTopDocs sortedTopDocs, - DocValueFormat[] sortValueFormats, int numReducePhases, int size, int from, boolean isEmptyResult) { + ReducedQueryPhase( + TotalHits totalHits, + long fetchHits, + float maxScore, + boolean timedOut, + Boolean terminatedEarly, + Suggest suggest, + InternalAggregations aggregations, + SearchProfileResultsBuilder profileBuilder, + SortedTopDocs sortedTopDocs, + DocValueFormat[] sortValueFormats, + int numReducePhases, + int size, + int from, + boolean isEmptyResult + ) { if (numReducePhases <= 0) { throw new IllegalArgumentException("at least one reduce phase must have been applied but was: " + numReducePhases); } @@ -562,7 +593,7 @@ public static final class ReducedQueryPhase { this.terminatedEarly = terminatedEarly; this.suggest = suggest; this.aggregations = aggregations; - this.shardResults = shardResults; + this.profileBuilder = profileBuilder; this.numReducePhases = numReducePhases; this.sortedTopDocs = sortedTopDocs; this.size = size; @@ -575,8 +606,28 @@ public static final class ReducedQueryPhase { * Creates a new search response from the given merged hits. * @see #merge(boolean, ReducedQueryPhase, Collection, IntFunction) */ - public InternalSearchResponse buildResponse(SearchHits hits) { - return new InternalSearchResponse(hits, aggregations, suggest, shardResults, timedOut, terminatedEarly, numReducePhases); + public InternalSearchResponse buildResponse(SearchHits hits, Collection fetchResults) { + return new InternalSearchResponse( + hits, + aggregations, + suggest, + buildSearchProfileResults(fetchResults), + timedOut, + terminatedEarly, + numReducePhases + ); + } + + private SearchProfileResults buildSearchProfileResults(Collection fetchResults) { + if (profileBuilder == null) { + assert fetchResults.stream() + .map(SearchPhaseResult::fetchResult) + .filter(r -> r != null) + .allMatch(r -> r.profileResult() == null) : "found fetch profile without search profile"; + return null; + + } + return profileBuilder.build(fetchResults); } } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java b/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java index 660869e344364..88bb8ccea050c 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java @@ -11,18 +11,18 @@ import org.apache.lucene.search.TotalHits; import org.elasticsearch.Version; import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.core.Nullable; -import org.elasticsearch.common.xcontent.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.core.TimeValue; +import org.elasticsearch.common.xcontent.ParseField; import org.elasticsearch.common.xcontent.StatusToXContentObject; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import 
org.elasticsearch.common.xcontent.XContentParser.Token; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.action.RestActions; import org.elasticsearch.search.SearchHit; @@ -30,8 +30,8 @@ import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.internal.InternalSearchResponse; -import org.elasticsearch.search.profile.ProfileShardResult; -import org.elasticsearch.search.profile.SearchProfileShardResults; +import org.elasticsearch.search.profile.SearchProfileResults; +import org.elasticsearch.search.profile.SearchProfileShardResult; import org.elasticsearch.search.suggest.Suggest; import java.io.IOException; @@ -225,7 +225,7 @@ public String pointInTimeId() { * @return The profile results or an empty map */ @Nullable - public Map getProfileResults() { + public Map getProfileResults() { return internalResponse.profile(); } @@ -280,7 +280,7 @@ public static SearchResponse innerFromXContent(XContentParser parser) throws IOE SearchHits hits = null; Aggregations aggs = null; Suggest suggest = null; - SearchProfileShardResults profile = null; + SearchProfileResults profile = null; boolean timedOut = false; Boolean terminatedEarly = null; int numReducePhases = 1; @@ -318,8 +318,8 @@ public static SearchResponse innerFromXContent(XContentParser parser) throws IOE aggs = Aggregations.fromXContent(parser); } else if (Suggest.NAME.equals(currentFieldName)) { suggest = Suggest.fromXContent(parser); - } else if (SearchProfileShardResults.PROFILE_FIELD.equals(currentFieldName)) { - profile = SearchProfileShardResults.fromXContent(parser); + } else if (SearchProfileResults.PROFILE_FIELD.equals(currentFieldName)) { + profile = SearchProfileResults.fromXContent(parser); } else if (RestActions._SHARDS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { while ((token = parser.nextToken()) != Token.END_OBJECT) { if (token == Token.FIELD_NAME) { diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java b/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java index 0eee2f6ebfcd9..3de3c95324571 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java @@ -27,8 +27,8 @@ import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.internal.InternalSearchResponse; -import org.elasticsearch.search.profile.ProfileShardResult; -import org.elasticsearch.search.profile.SearchProfileShardResults; +import org.elasticsearch.search.profile.SearchProfileResults; +import org.elasticsearch.search.profile.SearchProfileShardResult; import org.elasticsearch.search.suggest.Suggest; import org.elasticsearch.search.suggest.completion.CompletionSuggestion; @@ -111,7 +111,7 @@ SearchResponse getMergedResponse(Clusters clusters) { //the current reduce phase counts as one int numReducePhases = 1; List failures = new ArrayList<>(); - Map profileResults = new HashMap<>(); + Map profileResults = new HashMap<>(); List aggs = new ArrayList<>(); Map shards = new TreeMap<>(); List topDocsList = new ArrayList<>(searchResponses.size()); @@ -187,7 +187,7 @@ SearchResponse getMergedResponse(Clusters clusters) { Suggest suggest = groupedSuggestions.isEmpty() ? 
null : new Suggest(Suggest.reduce(groupedSuggestions)); InternalAggregations reducedAggs = InternalAggregations.topLevelReduce(aggs, aggReduceContextBuilder.forFinalReduction()); ShardSearchFailure[] shardFailures = failures.toArray(ShardSearchFailure.EMPTY_ARRAY); - SearchProfileShardResults profileShardResults = profileResults.isEmpty() ? null : new SearchProfileShardResults(profileResults); + SearchProfileResults profileShardResults = profileResults.isEmpty() ? null : new SearchProfileResults(profileResults); //make failures ordering consistent between ordinary search and CCS by looking at the shard they come from Arrays.sort(shardFailures, FAILURES_COMPARATOR); InternalSearchResponse response = new InternalSearchResponse(mergedSearchHits, reducedAggs, suggest, profileShardResults, diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchResponseSections.java b/server/src/main/java/org/elasticsearch/action/search/SearchResponseSections.java index 72774df68639c..dc955aa377921 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchResponseSections.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchResponseSections.java @@ -13,8 +13,8 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.aggregations.Aggregations; -import org.elasticsearch.search.profile.ProfileShardResult; -import org.elasticsearch.search.profile.SearchProfileShardResults; +import org.elasticsearch.search.profile.SearchProfileResults; +import org.elasticsearch.search.profile.SearchProfileShardResult; import org.elasticsearch.search.suggest.Suggest; import java.io.IOException; @@ -34,13 +34,20 @@ public class SearchResponseSections implements ToXContentFragment { protected final SearchHits hits; protected final Aggregations aggregations; protected final Suggest suggest; - protected final SearchProfileShardResults profileResults; + protected final SearchProfileResults profileResults; protected final boolean timedOut; protected final Boolean terminatedEarly; protected final int numReducePhases; - public SearchResponseSections(SearchHits hits, Aggregations aggregations, Suggest suggest, boolean timedOut, Boolean terminatedEarly, - SearchProfileShardResults profileResults, int numReducePhases) { + public SearchResponseSections( + SearchHits hits, + Aggregations aggregations, + Suggest suggest, + boolean timedOut, + Boolean terminatedEarly, + SearchProfileResults profileResults, + int numReducePhases + ) { this.hits = hits; this.aggregations = aggregations; this.suggest = suggest; @@ -83,7 +90,7 @@ public final int getNumReducePhases() { * * @return Profile results */ - public final Map profile() { + public final Map profile() { if (profileResults == null) { return Collections.emptyMap(); } diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index 861f48564d13c..77da80ad0f7e0 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -28,7 +28,6 @@ import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.core.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.breaker.CircuitBreaker; import 
org.elasticsearch.common.inject.Inject; @@ -36,9 +35,10 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; -import org.elasticsearch.core.TimeValue; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.common.util.concurrent.CountDown; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; import org.elasticsearch.index.query.Rewriteable; import org.elasticsearch.index.shard.ShardId; @@ -53,8 +53,8 @@ import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.SearchContext; -import org.elasticsearch.search.profile.ProfileShardResult; -import org.elasticsearch.search.profile.SearchProfileShardResults; +import org.elasticsearch.search.profile.SearchProfileResults; +import org.elasticsearch.search.profile.SearchProfileShardResult; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; @@ -366,9 +366,10 @@ static void ccsRemoteReduce(TaskId parentTaskId, SearchRequest searchRequest, Or remoteClusterClient.search(ccsSearchRequest, new ActionListener() { @Override public void onResponse(SearchResponse searchResponse) { - Map profileResults = searchResponse.getProfileResults(); - SearchProfileShardResults profile = profileResults == null || profileResults.isEmpty() - ? null : new SearchProfileShardResults(profileResults); + Map profileResults = searchResponse.getProfileResults(); + SearchProfileResults profile = profileResults == null || profileResults.isEmpty() + ? null + : new SearchProfileResults(profileResults); InternalSearchResponse internalSearchResponse = new InternalSearchResponse(searchResponse.getHits(), (InternalAggregations) searchResponse.getAggregations(), searchResponse.getSuggest(), profile, searchResponse.isTimedOut(), searchResponse.isTerminatedEarly(), searchResponse.getNumReducePhases()); diff --git a/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java b/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java index 3f3f790c15018..cd1299ae8754a 100644 --- a/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java +++ b/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java @@ -8,6 +8,8 @@ package org.elasticsearch.action.support; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.logging.DeprecationCategory; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.core.Nullable; import org.elasticsearch.common.xcontent.ParseField; import org.elasticsearch.common.io.stream.StreamInput; @@ -103,6 +105,10 @@ public enum Option { public static final EnumSet